-rw-r--r--  CHANGES | 49
-rw-r--r--  Hledger/Data.hs | 2
-rw-r--r--  Hledger/Data/Account.hs | 10
-rw-r--r--  Hledger/Data/AccountName.hs | 6
-rw-r--r--  Hledger/Data/Amount.hs | 39
-rw-r--r--  Hledger/Data/AutoTransaction.hs | 104
-rw-r--r--  Hledger/Data/Commodity.hs | 1
-rw-r--r--  Hledger/Data/Dates.hs | 261
-rw-r--r--  Hledger/Data/Journal.hs | 56
-rw-r--r--  Hledger/Data/Posting.hs | 3
-rw-r--r--  Hledger/Data/Transaction.hs | 5
-rw-r--r--  Hledger/Data/Types.hs | 8
-rw-r--r--  Hledger/Read/Common.hs | 181
-rw-r--r--  Hledger/Read/CsvReader.hs | 27
-rw-r--r--  Hledger/Read/JournalReader.hs | 25
-rw-r--r--  Hledger/Read/TimedotReader.hs | 2
-rw-r--r--  Hledger/Reports/BalanceReport.hs | 55
-rw-r--r--  Hledger/Reports/MultiBalanceReports.hs | 13
-rw-r--r--  Hledger/Reports/ReportOptions.hs | 50
-rw-r--r--  Hledger/Utils/Parse.hs | 3
-rw-r--r--  hledger-lib.cabal | 123
-rw-r--r--  hledger_csv.5 (renamed from doc/hledger_csv.5) | 125
-rw-r--r--  hledger_csv.info (renamed from doc/hledger_csv.5.info) | 157
-rw-r--r--  hledger_csv.txt (renamed from doc/hledger_csv.5.txt) | 69
-rw-r--r--  hledger_journal.5 (renamed from doc/hledger_journal.5) | 284
-rw-r--r--  hledger_journal.info (renamed from doc/hledger_journal.5.info) | 374
-rw-r--r--  hledger_journal.txt (renamed from doc/hledger_journal.5.txt) | 210
-rw-r--r--  hledger_timeclock.5 (renamed from doc/hledger_timeclock.5) | 18
-rw-r--r--  hledger_timeclock.info (renamed from doc/hledger_timeclock.5.info) | 16
-rw-r--r--  hledger_timeclock.txt (renamed from doc/hledger_timeclock.5.txt) | 8
-rw-r--r--  hledger_timedot.5 (renamed from doc/hledger_timedot.5) | 14
-rw-r--r--  hledger_timedot.info (renamed from doc/hledger_timedot.5.info) | 34
-rw-r--r--  hledger_timedot.txt (renamed from doc/hledger_timedot.5.txt) | 2
33 files changed, 1557 insertions, 777 deletions
diff --git a/CHANGES b/CHANGES
index c97bcc2..44ec147 100644
--- a/CHANGES
+++ b/CHANGES
@@ -1,5 +1,50 @@
-API-ish changes in the hledger-lib package.
-See also the hledger and project change logs (for user-visible changes).
+API-ish changes in the hledger-lib package. See also hledger.
+
+
+# 1.5 (2017/12/31)
+
+* -V/--value uses today's market prices by default, not those of the last transaction date (#683, #648)
+
+* csv: allow balance assignment (balance assertion only, no amount) in csv records (Nadrieril)
+
+* journal: allow space as digit group separator character, #330 (Mykola Orliuk)
+
+* journal: balance assertion errors now show line of failed assertion posting, #481 (Sam Jeeves)
+
+* journal: better errors for directives, #402 (Mykola Orliuk)
+
+* journal: better errors for included files, #660 (Mykola Orliuk)
+
+* journal: commodity directives in parent files are inherited by included files, #487 (Mykola Orliuk)
+
+* journal: commodity directives limit precision even after -B, #509 (Mykola Orliuk)
+
+* journal: decimal point/digit group separator chars are now inferred from an applicable commodity directive or default commodity directive. #399, #487 (Mykola Orliuk)
+
+* journal: numbers are parsed more strictly (Mykola Orliuk)
+
+* journal: support Ledger-style automated postings, enabled with --auto flag (Dmitry Astapov)
+
+* journal: support Ledger-style periodic transactions, enabled with --forecast flag (Dmitry Astapov)
+
+* period expressions: fix "nth day of {week,month}", which could generate wrong intervals (Dmitry Astapov)
+
+* period expressions: month names are now case-insensitive (Dmitry Astapov)
+
+* period expressions: stricter checking for invalid expressions (Mykola Orliuk)
+
+* period expressions: support "every 11th Nov" (Dmitry Astapov)
+
+* period expressions: support "every 2nd Thursday of month" (Dmitry Astapov)
+
+* period expressions: support "every Tuesday", short for "every <n>th day of week" (Dmitry Astapov)
+
+* remove upper bounds on all but hledger* and base (experimental)
+ It's rare that my deps break their api or that newer versions must
+ be avoided, and very common that they release new versions which I
+ must tediously and promptly test and release hackage revisions for
+ or risk falling out of stackage. Trying it this way for a bit.
+
# 1.4 (2017/9/30)
diff --git a/Hledger/Data.hs b/Hledger/Data.hs
index 4f3e836..cfd4bef 100644
--- a/Hledger/Data.hs
+++ b/Hledger/Data.hs
@@ -22,6 +22,7 @@ module Hledger.Data (
module Hledger.Data.StringFormat,
module Hledger.Data.Timeclock,
module Hledger.Data.Transaction,
+ module Hledger.Data.AutoTransaction,
module Hledger.Data.Types,
tests_Hledger_Data
)
@@ -42,6 +43,7 @@ import Hledger.Data.RawOptions
import Hledger.Data.StringFormat
import Hledger.Data.Timeclock
import Hledger.Data.Transaction
+import Hledger.Data.AutoTransaction
import Hledger.Data.Types
tests_Hledger_Data :: Test
diff --git a/Hledger/Data/Account.hs b/Hledger/Data/Account.hs
index 64cfbaf..e1dc824 100644
--- a/Hledger/Data/Account.hs
+++ b/Hledger/Data/Account.hs
@@ -10,6 +10,7 @@ account, and subaccounting-excluding and -including balances.
module Hledger.Data.Account
where
import Data.List
+import Data.List.Extra (groupSort, groupOn)
import Data.Maybe
import Data.Ord
import qualified Data.Map as M
@@ -63,10 +64,9 @@ nullacct = Account
accountsFromPostings :: [Posting] -> [Account]
accountsFromPostings ps =
let
- acctamts = [(paccount p,pamount p) | p <- ps]
- grouped = groupBy (\a b -> fst a == fst b) $ sort $ acctamts
- counted = [(a, length acctamts) | acctamts@((a,_):_) <- grouped]
- summed = map (\as@((aname,_):_) -> (aname, sumStrict $ map snd as)) grouped -- always non-empty
+ grouped = groupSort [(paccount p,pamount p) | p <- ps]
+ counted = [(aname, length amts) | (aname, amts) <- grouped]
+ summed = [(aname, sumStrict amts) | (aname, amts) <- grouped] -- always non-empty
nametree = treeFromPaths $ map (expandAccountName . fst) summed
acctswithnames = nameTreeToAccount "root" nametree
acctswithnumps = mapAccounts setnumps acctswithnames where setnumps a = a{anumpostings=fromMaybe 0 $ lookup (aname a) counted}
@@ -132,7 +132,7 @@ clipAccountsAndAggregate d as = combined
where
clipped = [a{aname=clipOrEllipsifyAccountName d $ aname a} | a <- as]
combined = [a{aebalance=sum (map aebalance same)}
- | same@(a:_) <- groupBy (\a1 a2 -> aname a1 == aname a2) clipped]
+ | same@(a:_) <- groupOn aname clipped]
{-
test cases, assuming d=1:
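
The Account.hs change above replaces a hand-rolled sort/groupBy pipeline with groupSort and groupOn from Data.List.Extra (the "extra" package). A minimal standalone sketch of how groupSort collapses (account name, amount) pairs into per-account totals; the String names and Double amounts here are illustrative stand-ins for hledger's types:

import Data.List.Extra (groupSort)

-- groupSort :: Ord k => [(k, v)] -> [(k, [v])]
-- Sum the amounts posted to each account, one group per account name.
totalsByAccount :: [(String, Double)] -> [(String, Double)]
totalsByAccount ps = [(acct, sum amts) | (acct, amts) <- groupSort ps]

main :: IO ()
main = print $ totalsByAccount
  [("assets:cash", 5), ("expenses:food", 3), ("assets:cash", 2)]
-- [("assets:cash",7.0),("expenses:food",3.0)]
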
diff --git a/Hledger/Data/AccountName.hs b/Hledger/Data/AccountName.hs
index c11fa0c..d953ad9 100644
--- a/Hledger/Data/AccountName.hs
+++ b/Hledger/Data/AccountName.hs
@@ -56,9 +56,11 @@ accountNameLevel a = T.length (T.filter (==acctsepchar) a) + 1
accountNameDrop :: Int -> AccountName -> AccountName
accountNameDrop n = accountNameFromComponents . drop n . accountNameComponents
--- | ["a:b:c","d:e"] -> ["a","a:b","a:b:c","d","d:e"]
+-- | Sorted unique account names implied by these account names,
+-- ie these plus all their parent accounts up to the root.
+-- Eg: ["a:b:c","d:e"] -> ["a","a:b","a:b:c","d","d:e"]
expandAccountNames :: [AccountName] -> [AccountName]
-expandAccountNames as = nub $ concatMap expandAccountName as
+expandAccountNames as = nub $ sort $ concatMap expandAccountName as
-- | "a:b:c" -> ["a","a:b","a:b:c"]
expandAccountName :: AccountName -> [AccountName]
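
expandAccountNames above now sorts its result as well as deduplicating it. A standalone sketch of the expansion idea, assuming plain String account names and splitOn from the "split" package rather than hledger's Text-based helpers:

import Data.List (inits, intercalate, nub, sort)
import Data.List.Split (splitOn)

-- "a:b:c" -> ["a","a:b","a:b:c"]
expandName :: String -> [String]
expandName = map (intercalate ":") . tail . inits . splitOn ":"

-- Sorted unique names implied by these names, i.e. these plus all their parents.
expandNames :: [String] -> [String]
expandNames = nub . sort . concatMap expandName

main :: IO ()
main = print $ expandNames ["a:b:c", "d:e"]
-- ["a","a:b","a:b:c","d","d:e"]
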
diff --git a/Hledger/Data/Amount.hs b/Hledger/Data/Amount.hs
index 247ac86..19e6a0c 100644
--- a/Hledger/Data/Amount.hs
+++ b/Hledger/Data/Amount.hs
@@ -58,6 +58,7 @@ module Hledger.Data.Amount (
-- ** arithmetic
costOfAmount,
divideAmount,
+ amountValue,
-- ** rendering
amountstyle,
showAmount,
@@ -90,6 +91,7 @@ module Hledger.Data.Amount (
isZeroMixedAmount,
isReallyZeroMixedAmount,
isReallyZeroMixedAmountCost,
+ mixedAmountValue,
-- ** rendering
showMixedAmount,
showMixedAmountOneLine,
@@ -113,6 +115,8 @@ import Data.Function (on)
import Data.List
import Data.Map (findWithDefault)
import Data.Maybe
+import Data.Time.Calendar (Day)
+import Data.Ord (comparing)
-- import Data.Text (Text)
import qualified Data.Text as T
import Test.HUnit
@@ -347,6 +351,38 @@ canonicaliseAmount styles a@Amount{acommodity=c, astyle=s} = a{astyle=s'}
where
s' = findWithDefault s c styles
+-- | Find the market value of this amount on the given date, in its
+-- default valuation commodity, based on recorded market prices.
+-- If no default valuation commodity can be found, the amount is left
+-- unchanged.
+amountValue :: Journal -> Day -> Amount -> Amount
+amountValue j d a =
+ case commodityValue j d (acommodity a) of
+ Just v -> v{aquantity=aquantity v * aquantity a
+ ,aprice=aprice a
+ }
+ Nothing -> a
+
+-- This lives here rather than in Commodity.hs so it can use the Amount Show instance above for debugging.
+-- | Find the market value, if known, of one unit of this commodity (A) on
+-- the given valuation date, in the commodity (B) mentioned in the latest
+-- applicable market price. The latest applicable market price is the market
+-- price directive for commodity A with the latest date that is on or before
+-- the valuation date; or if there are multiple such prices with the same date,
+-- the last parsed.
+commodityValue :: Journal -> Day -> CommoditySymbol -> Maybe Amount
+commodityValue j valuationdate c
+ | null applicableprices = dbg Nothing
+ | otherwise = dbg $ Just $ mpamount $ last applicableprices
+ where
+ dbg = dbg8 ("using market price for "++T.unpack c)
+ applicableprices =
+ [p | p <- sortBy (comparing mpdate) $ jmarketprices j
+ , mpcommodity p == c
+ , mpdate p <= valuationdate
+ ]
+
+
-------------------------------------------------------------------------------
-- MixedAmount
@@ -603,6 +639,9 @@ cshowMixedAmountOneLineWithoutPrice m = concat $ intersperse ", " $ map cshowAmo
canonicaliseMixedAmount :: M.Map CommoditySymbol AmountStyle -> MixedAmount -> MixedAmount
canonicaliseMixedAmount styles (Mixed as) = Mixed $ map (canonicaliseAmount styles) as
+mixedAmountValue :: Journal -> Day -> MixedAmount -> MixedAmount
+mixedAmountValue j d (Mixed as) = Mixed $ map (amountValue j d) as
+
-------------------------------------------------------------------------------
-- misc
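
commodityValue above selects the latest recorded market price dated on or before the valuation date, with the last-parsed price winning a tie. A self-contained sketch of that selection rule, using plain (Day, Double) pairs instead of hledger's MarketPrice records:

import Data.List (sortBy)
import Data.Ord (comparing)
import Data.Time.Calendar (Day, fromGregorian)

-- Latest price dated on or before the valuation date; since sortBy is stable,
-- entries sharing a date keep their input order and `last` picks the later one.
latestApplicablePrice :: Day -> [(Day, Double)] -> Maybe Double
latestApplicablePrice valuationdate prices
  | null applicable = Nothing
  | otherwise       = Just (snd (last applicable))
  where
    applicable = [p | p <- sortBy (comparing fst) prices, fst p <= valuationdate]

main :: IO ()
main = print $ latestApplicablePrice (fromGregorian 2017 12 31)
  [ (fromGregorian 2017 1 1, 1.10)
  , (fromGregorian 2017 6 1, 1.20)
  , (fromGregorian 2018 1 1, 1.30) ]
-- Just 1.2
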
diff --git a/Hledger/Data/AutoTransaction.hs b/Hledger/Data/AutoTransaction.hs
index d9ed912..28361f3 100644
--- a/Hledger/Data/AutoTransaction.hs
+++ b/Hledger/Data/AutoTransaction.hs
@@ -136,7 +136,8 @@ renderPostingCommentDates p = p { pcomment = comment' }
--
-- Note that new transactions require 'txnTieKnot' post-processing.
--
--- >>> mapM_ (putStr . show) $ runPeriodicTransaction (PeriodicTransaction "monthly from 2017/1 to 2017/4" ["hi" `post` usd 1]) nulldatespan
+-- >>> let gen str = mapM_ (putStr . show) $ runPeriodicTransaction (PeriodicTransaction str ["hi" `post` usd 1]) nulldatespan
+-- >>> gen "monthly from 2017/1 to 2017/4"
-- 2017/01/01
-- hi $1.00
-- <BLANKLINE>
@@ -146,6 +147,92 @@ renderPostingCommentDates p = p { pcomment = comment' }
-- 2017/03/01
-- hi $1.00
-- <BLANKLINE>
+-- >>> gen "monthly from 2017/1 to 2017/5"
+-- 2017/01/01
+-- hi $1.00
+-- <BLANKLINE>
+-- 2017/02/01
+-- hi $1.00
+-- <BLANKLINE>
+-- 2017/03/01
+-- hi $1.00
+-- <BLANKLINE>
+-- 2017/04/01
+-- hi $1.00
+-- <BLANKLINE>
+-- >>> gen "every 2nd day of month from 2017/02 to 2017/04"
+-- 2017/01/02
+-- hi $1.00
+-- <BLANKLINE>
+-- 2017/02/02
+-- hi $1.00
+-- <BLANKLINE>
+-- 2017/03/02
+-- hi $1.00
+-- <BLANKLINE>
+-- >>> gen "monthly from 2017/1 to 2017/4"
+-- 2017/01/01
+-- hi $1.00
+-- <BLANKLINE>
+-- 2017/02/01
+-- hi $1.00
+-- <BLANKLINE>
+-- 2017/03/01
+-- hi $1.00
+-- <BLANKLINE>
+-- >>> gen "every 30th day of month from 2017/1 to 2017/5"
+-- 2016/12/30
+-- hi $1.00
+-- <BLANKLINE>
+-- 2017/01/30
+-- hi $1.00
+-- <BLANKLINE>
+-- 2017/02/28
+-- hi $1.00
+-- <BLANKLINE>
+-- 2017/03/30
+-- hi $1.00
+-- <BLANKLINE>
+-- 2017/04/30
+-- hi $1.00
+-- <BLANKLINE>
+-- >>> gen "every 2nd Thursday of month from 2017/1 to 2017/4"
+-- 2016/12/08
+-- hi $1.00
+-- <BLANKLINE>
+-- 2017/01/12
+-- hi $1.00
+-- <BLANKLINE>
+-- 2017/02/09
+-- hi $1.00
+-- <BLANKLINE>
+-- 2017/03/09
+-- hi $1.00
+-- <BLANKLINE>
+-- >>> gen "every nov 29th from 2017 to 2019"
+-- 2016/11/29
+-- hi $1.00
+-- <BLANKLINE>
+-- 2017/11/29
+-- hi $1.00
+-- <BLANKLINE>
+-- 2018/11/29
+-- hi $1.00
+-- <BLANKLINE>
+-- >>> gen "2017/1"
+-- 2017/01/01
+-- hi $1.00
+-- <BLANKLINE>
+-- >>> gen ""
+-- ... Failed to parse ...
+-- >>> gen "weekly from 2017"
+-- *** Exception: Unable to generate transactions according to "weekly from 2017" as 2017-01-01 is not a first day of the week
+-- >>> gen "monthly from 2017/5/4"
+-- *** Exception: Unable to generate transactions according to "monthly from 2017/5/4" as 2017-05-04 is not a first day of the month
+-- >>> gen "every quarter from 2017/1/2"
+-- *** Exception: Unable to generate transactions according to "every quarter from 2017/1/2" as 2017-01-02 is not a first day of the quarter
+-- >>> gen "yearly from 2017/1/14"
+-- *** Exception: Unable to generate transactions according to "yearly from 2017/1/14" as 2017-01-14 is not a first day of the year
runPeriodicTransaction :: PeriodicTransaction -> (DateSpan -> [Transaction])
runPeriodicTransaction pt = generate where
base = nulltransaction { tpostings = ptpostings pt }
@@ -154,5 +241,18 @@ runPeriodicTransaction pt = generate where
(interval, effectspan) =
case parsePeriodExpr errCurrent periodExpr of
Left e -> error' $ "Failed to parse " ++ show (T.unpack periodExpr) ++ ": " ++ showDateParseError e
- Right x -> x
+ Right x -> checkProperStartDate x
generate jspan = [base {tdate=date} | span <- interval `splitSpan` spanIntersect effectspan jspan, let Just date = spanStart span]
+ checkProperStartDate (i,s) =
+ case (i,spanStart s) of
+ (Weeks _, Just d) -> checkStart d "week"
+ (Months _, Just d) -> checkStart d "month"
+ (Quarters _, Just d) -> checkStart d "quarter"
+ (Years _, Just d) -> checkStart d "year"
+ _ -> (i,s)
+ where
+ checkStart d x =
+ let firstDate = fixSmartDate d ("","this",x)
+ in
+ if d == firstDate then (i,s)
+ else error' $ "Unable to generate transactions according to "++(show periodExpr)++" as "++(show d)++" is not a first day of the "++x
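
checkProperStartDate above rejects a periodic expression whose start date is not the first day of its weekly/monthly/quarterly/yearly interval, which is what the "... is not a first day of the ..." doctests exercise. A standalone sketch of the same check for the monthly case, using only the time package:

import Data.Time.Calendar (Day, fromGregorian, toGregorian)

-- The first day of the month containing the given day.
startOfMonth :: Day -> Day
startOfMonth day = fromGregorian y m 1 where (y, m, _) = toGregorian day

-- A "monthly from DATE" rule only makes sense if DATE is a month boundary.
checkMonthlyStart :: Day -> Either String Day
checkMonthlyStart d
  | d == startOfMonth d = Right d
  | otherwise           = Left (show d ++ " is not a first day of the month")

main :: IO ()
main = do
  print $ checkMonthlyStart (fromGregorian 2017 5 1)   -- Right 2017-05-01
  print $ checkMonthlyStart (fromGregorian 2017 5 4)   -- Left "2017-05-04 is not a first day of the month"
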
diff --git a/Hledger/Data/Commodity.hs b/Hledger/Data/Commodity.hs
index d6c1a73..62837f1 100644
--- a/Hledger/Data/Commodity.hs
+++ b/Hledger/Data/Commodity.hs
@@ -14,7 +14,6 @@ where
import Data.List
import Data.Maybe (fromMaybe)
import Data.Monoid
--- import Data.Text (Text)
import qualified Data.Text as T
import Test.HUnit
-- import qualified Data.Map as M
diff --git a/Hledger/Data/Dates.hs b/Hledger/Data/Dates.hs
index 6ce5846..c45a857 100644
--- a/Hledger/Data/Dates.hs
+++ b/Hledger/Data/Dates.hs
@@ -73,6 +73,7 @@ import Prelude ()
import Prelude.Compat
import Control.Monad
import Data.List.Compat
+import Data.Default
import Data.Maybe
import Data.Text (Text)
import qualified Data.Text as T
@@ -88,6 +89,7 @@ import Data.Time.Clock
import Data.Time.LocalTime
import Safe (headMay, lastMay, readMay)
import Text.Megaparsec.Compat
+import Text.Megaparsec.Perm
import Text.Printf
import Hledger.Data.Types
@@ -165,9 +167,15 @@ spansSpan spans = DateSpan (maybe Nothing spanStart $ headMay spans) (maybe Noth
-- >>> t (Weeks 2) "2008/01/01" "2008/01/15"
-- [DateSpan 2007/12/31-2008/01/13,DateSpan 2008/01/14-2008/01/27]
-- >>> t (DayOfMonth 2) "2008/01/01" "2008/04/01"
--- [DateSpan 2008/01/02-2008/02/01,DateSpan 2008/02/02-2008/03/01,DateSpan 2008/03/02-2008/04/01]
+-- [DateSpan 2007/12/02-2008/01/01,DateSpan 2008/01/02-2008/02/01,DateSpan 2008/02/02-2008/03/01,DateSpan 2008/03/02-2008/04/01]
+-- >>> t (WeekdayOfMonth 2 4) "2011/01/01" "2011/02/15"
+-- [DateSpan 2010/12/09-2011/01/12,DateSpan 2011/01/13-2011/02/09,DateSpan 2011/02/10-2011/03/09]
-- >>> t (DayOfWeek 2) "2011/01/01" "2011/01/15"
--- [DateSpan 2011/01/04-2011/01/10,DateSpan 2011/01/11-2011/01/17]
+-- [DateSpan 2010/12/28-2011/01/03,DateSpan 2011/01/04-2011/01/10,DateSpan 2011/01/11-2011/01/17]
+-- >>> t (DayOfYear 11 29) "2011/10/01" "2011/10/15"
+-- [DateSpan 2010/11/29-2011/11/28]
+-- >>> t (DayOfYear 11 29) "2011/12/01" "2012/12/15"
+-- [DateSpan 2011/11/29-2012/11/28,DateSpan 2012/11/29-2013/11/28]
--
splitSpan :: Interval -> DateSpan -> [DateSpan]
splitSpan _ (DateSpan Nothing Nothing) = [DateSpan Nothing Nothing]
@@ -177,8 +185,10 @@ splitSpan (Weeks n) s = splitspan startofweek (applyN n nextweek) s
splitSpan (Months n) s = splitspan startofmonth (applyN n nextmonth) s
splitSpan (Quarters n) s = splitspan startofquarter (applyN n nextquarter) s
splitSpan (Years n) s = splitspan startofyear (applyN n nextyear) s
-splitSpan (DayOfMonth n) s = splitspan (nthdayofmonthcontaining n) (applyN (n-1) nextday . nextmonth) s
+splitSpan (DayOfMonth n) s = splitspan (nthdayofmonthcontaining n) (nthdayofmonth n . nextmonth) s
+splitSpan (WeekdayOfMonth n wd) s = splitspan (nthweekdayofmonthcontaining n wd) (advancetonthweekday n wd . nextmonth) s
splitSpan (DayOfWeek n) s = splitspan (nthdayofweekcontaining n) (applyN (n-1) nextday . nextweek) s
+splitSpan (DayOfYear m n) s= splitspan (nthdayofyearcontaining m n) (applyN (n-1) nextday . applyN (m-1) nextmonth . nextyear) s
-- splitSpan (WeekOfYear n) s = splitspan startofweek (applyN n nextweek) s
-- splitSpan (MonthOfYear n) s = splitspan startofmonth (applyN n nextmonth) s
-- splitSpan (QuarterOfYear n) s = splitspan startofquarter (applyN n nextquarter) s
@@ -257,7 +267,7 @@ earliest (Just d1) (Just d2) = Just $ min d1 d2
-- | Parse a period expression to an Interval and overall DateSpan using
-- the provided reference date, or return a parse error.
parsePeriodExpr :: Day -> Text -> Either (ParseError Char MPErr) (Interval, DateSpan)
-parsePeriodExpr refdate = parsewith (periodexpr refdate <* eof)
+parsePeriodExpr refdate s = parsewith (periodexpr refdate <* eof) (T.toLower s)
maybePeriod :: Day -> Text -> Maybe (Interval,DateSpan)
maybePeriod refdate = either (const Nothing) Just . parsePeriodExpr refdate
@@ -447,6 +457,7 @@ thismonth = startofmonth
prevmonth = startofmonth . addGregorianMonthsClip (-1)
nextmonth = startofmonth . addGregorianMonthsClip 1
startofmonth day = fromGregorian y m 1 where (y,m,_) = toGregorian day
+nthdayofmonth d day = fromGregorian y m d where (y,m,_) = toGregorian day
thisquarter = startofquarter
prevquarter = startofquarter . addGregorianMonthsClip (-3)
@@ -461,18 +472,106 @@ prevyear = startofyear . addGregorianYearsClip (-1)
nextyear = startofyear . addGregorianYearsClip 1
startofyear day = fromGregorian y 1 1 where (y,_,_) = toGregorian day
-nthdayofmonthcontaining n d | d1 >= d = d1
- | otherwise = d2
- where d1 = addDays (fromIntegral n-1) s
- d2 = addDays (fromIntegral n-1) $ nextmonth s
+-- | For a given date d, find the year-long interval that starts on the
+-- given MM/DD of the year and covers it.
+--
+-- Examples: let's take 2017-11-22. Year-long intervals covering it that
+-- start on or before Nov 22 will start in 2017. However,
+-- intervals that start on Nov 23 or later should start in 2016:
+-- >>> let wed22nd = parsedate "2017-11-22"
+-- >>> nthdayofyearcontaining 11 21 wed22nd
+-- 2017-11-21
+-- >>> nthdayofyearcontaining 11 22 wed22nd
+-- 2017-11-22
+-- >>> nthdayofyearcontaining 11 23 wed22nd
+-- 2016-11-23
+-- >>> nthdayofyearcontaining 12 02 wed22nd
+-- 2016-12-02
+-- >>> nthdayofyearcontaining 12 31 wed22nd
+-- 2016-12-31
+-- >>> nthdayofyearcontaining 1 1 wed22nd
+-- 2017-01-01
+nthdayofyearcontaining m n d | mmddOfSameYear <= d = mmddOfSameYear
+ | otherwise = mmddOfPrevYear
+ where mmddOfSameYear = addDays (fromIntegral n-1) $ applyN (m-1) nextmonth s
+ mmddOfPrevYear = addDays (fromIntegral n-1) $ applyN (m-1) nextmonth $ prevyear s
+ s = startofyear d
+
+-- | For a given date d, find the month-long interval that starts on the
+-- nth day of the month and covers it.
+--
+-- Examples: let's take 2017-11-22. Month-long intervals covering it that
+-- start on the 1st-22nd of the month will start in Nov. However,
+-- intervals that start on the 23rd-30th of the month should start in Oct:
+-- >>> let wed22nd = parsedate "2017-11-22"
+-- >>> nthdayofmonthcontaining 1 wed22nd
+-- 2017-11-01
+-- >>> nthdayofmonthcontaining 12 wed22nd
+-- 2017-11-12
+-- >>> nthdayofmonthcontaining 22 wed22nd
+-- 2017-11-22
+-- >>> nthdayofmonthcontaining 23 wed22nd
+-- 2017-10-23
+-- >>> nthdayofmonthcontaining 30 wed22nd
+-- 2017-10-30
+nthdayofmonthcontaining n d | nthOfSameMonth <= d = nthOfSameMonth
+ | otherwise = nthOfPrevMonth
+ where nthOfSameMonth = nthdayofmonth n s
+ nthOfPrevMonth = nthdayofmonth n $ prevmonth s
s = startofmonth d
-nthdayofweekcontaining n d | d1 >= d = d1
- | otherwise = d2
- where d1 = addDays (fromIntegral n-1) s
- d2 = addDays (fromIntegral n-1) $ nextweek s
+-- | For a given date d, find the week-long interval that starts on the
+-- nth day of the week and covers it.
+--
+-- Examples: 2017-11-22 is a Wed. Week-long intervals that cover it and
+-- start on Mon, Tue or Wed will start in the same week. However,
+-- intervals that start on Thu or Fri should start in the previous week:
+-- >>> let wed22nd = parsedate "2017-11-22"
+-- >>> nthdayofweekcontaining 1 wed22nd
+-- 2017-11-20
+-- >>> nthdayofweekcontaining 2 wed22nd
+-- 2017-11-21
+-- >>> nthdayofweekcontaining 3 wed22nd
+-- 2017-11-22
+-- >>> nthdayofweekcontaining 4 wed22nd
+-- 2017-11-16
+-- >>> nthdayofweekcontaining 5 wed22nd
+-- 2017-11-17
+nthdayofweekcontaining n d | nthOfSameWeek <= d = nthOfSameWeek
+ | otherwise = nthOfPrevWeek
+ where nthOfSameWeek = addDays (fromIntegral n-1) s
+ nthOfPrevWeek = addDays (fromIntegral n-1) $ prevweek s
s = startofweek d
+-- | For a given date d, find the month-long interval that starts on the
+-- nth weekday of the month and covers it.
+--
+-- Examples: 2017-11-22 is the 3rd Wed of Nov. Month-long intervals that cover
+-- it and start on the 1st-4th Wed will start in Nov. However,
+-- intervals that start on the 4th Thu or Fri or later should start in Oct:
+-- >>> let wed22nd = parsedate "2017-11-22"
+-- >>> nthweekdayofmonthcontaining 1 3 wed22nd
+-- 2017-11-01
+-- >>> nthweekdayofmonthcontaining 3 2 wed22nd
+-- 2017-11-21
+-- >>> nthweekdayofmonthcontaining 4 3 wed22nd
+-- 2017-11-22
+-- >>> nthweekdayofmonthcontaining 4 4 wed22nd
+-- 2017-10-26
+-- >>> nthweekdayofmonthcontaining 4 5 wed22nd
+-- 2017-10-27
+nthweekdayofmonthcontaining n wd d | nthWeekdaySameMonth <= d = nthWeekdaySameMonth
+ | otherwise = nthWeekdayPrevMonth
+ where nthWeekdaySameMonth = advancetonthweekday n wd $ startofmonth d
+ nthWeekdayPrevMonth = advancetonthweekday n wd $ prevmonth d
+
+-- | Advance to nth weekday wd after given start day s
+advancetonthweekday n wd s = addWeeks (n-1) . firstMatch (>=s) . iterate (addWeeks 1) $ firstweekday s
+ where
+ addWeeks k = addDays (7 * fromIntegral k)
+ firstMatch p = head . dropWhile (not . p)
+ firstweekday = addDays (fromIntegral wd-1) . startofweek
+
----------------------------------------------------------------------
-- parsing
@@ -529,11 +628,6 @@ parsedate s = fromMaybe (error' $ "could not parse date \"" ++ s ++ "\"")
-- -- 2008-02-29
-- #endif
--- | Parse a time string to a time type using the provided pattern, or
--- return the default.
-_parsetimewith :: ParseTime t => String -> String -> t -> t
-_parsetimewith pat s def = fromMaybe def $ parsetime defaultTimeLocale pat s
-
{-|
Parse a date in any of the formats allowed in ledger's period expressions,
and maybe some others:
@@ -633,17 +727,11 @@ md = do
months = ["january","february","march","april","may","june",
"july","august","september","october","november","december"]
monthabbrevs = ["jan","feb","mar","apr","may","jun","jul","aug","sep","oct","nov","dec"]
--- weekdays = ["monday","tuesday","wednesday","thursday","friday","saturday","sunday"]
--- weekdayabbrevs = ["mon","tue","wed","thu","fri","sat","sun"]
-
-#if MIN_VERSION_megaparsec(6,0,0)
-lc = T.toLower
-#else
-lc = lowercase
-#endif
+weekdays = ["monday","tuesday","wednesday","thursday","friday","saturday","sunday"]
+weekdayabbrevs = ["mon","tue","wed","thu","fri","sat","sun"]
-monthIndex t = maybe 0 (+1) $ lc t `elemIndex` months
-monIndex t = maybe 0 (+1) $ lc t `elemIndex` monthabbrevs
+monthIndex t = maybe 0 (+1) $ t `elemIndex` months
+monIndex t = maybe 0 (+1) $ t `elemIndex` monthabbrevs
month :: SimpleTextParser SmartDate
month = do
@@ -657,6 +745,12 @@ mon = do
let i = monIndex m
return ("",show i,"")
+weekday :: SimpleTextParser Int
+weekday = do
+ wday <- choice . map string' $ weekdays ++ weekdayabbrevs
+ let i = head . catMaybes $ [wday `elemIndex` weekdays, wday `elemIndex` weekdayabbrevs]
+ return (i+1)
+
today,yesterday,tomorrow :: SimpleTextParser SmartDate
today = string "today" >> return ("","","today")
yesterday = string "yesterday" >> return ("","","yesterday")
@@ -683,45 +777,63 @@ lastthisnextthing = do
return ("", T.unpack r, T.unpack p)
-- |
--- >>> let p = parsewith (periodexpr (parsedate "2008/11/26")) :: T.Text -> Either (ParseError Char MPErr) (Interval, DateSpan)
--- >>> p "from aug to oct"
+-- >>> let p = parsePeriodExpr (parsedate "2008/11/26")
+-- >>> p "from Aug to Oct"
-- Right (NoInterval,DateSpan 2008/08/01-2008/09/30)
-- >>> p "aug to oct"
-- Right (NoInterval,DateSpan 2008/08/01-2008/09/30)
--- >>> p "every 3 days in aug"
+-- >>> p "every 3 days in Aug"
-- Right (Days 3,DateSpan 2008/08)
-- >>> p "daily from aug"
-- Right (Days 1,DateSpan 2008/08/01-)
-- >>> p "every week to 2009"
-- Right (Weeks 1,DateSpan -2008/12/31)
+-- >>> p "every 2nd day of month"
+-- Right (DayOfMonth 2,DateSpan -)
+-- >>> p "every 2nd day"
+-- Right (DayOfMonth 2,DateSpan -)
+-- >>> p "every 2nd day 2009-"
+-- Right (DayOfMonth 2,DateSpan 2009/01/01-)
+-- >>> p "every 29th Nov"
+-- Right (DayOfYear 11 29,DateSpan -)
+-- >>> p "every 29th nov -2009"
+-- Right (DayOfYear 11 29,DateSpan -2008/12/31)
+-- >>> p "every nov 29th"
+-- Right (DayOfYear 11 29,DateSpan -)
+-- >>> p "every Nov 29th 2009-"
+-- Right (DayOfYear 11 29,DateSpan 2009/01/01-)
+-- >>> p "every 11/29 from 2009"
+-- Right (DayOfYear 11 29,DateSpan 2009/01/01-)
+-- >>> p "every 2nd Thursday of month to 2009"
+-- Right (WeekdayOfMonth 2 4,DateSpan -2008/12/31)
+-- >>> p "every 1st monday of month to 2009"
+-- Right (WeekdayOfMonth 1 1,DateSpan -2008/12/31)
+-- >>> p "every tue"
+-- Right (DayOfWeek 2,DateSpan -)
+-- >>> p "every 2nd day of week"
+-- Right (DayOfWeek 2,DateSpan -)
+-- >>> p "every 2nd day of month"
+-- Right (DayOfMonth 2,DateSpan -)
+-- >>> p "every 2nd day"
+-- Right (DayOfMonth 2,DateSpan -)
+-- >>> p "every 2nd day 2009-"
+-- Right (DayOfMonth 2,DateSpan 2009/01/01-)
+-- >>> p "every 2nd day of month 2009-"
+-- Right (DayOfMonth 2,DateSpan 2009/01/01-)
periodexpr :: Day -> SimpleTextParser (Interval, DateSpan)
-periodexpr rdate = choice $ map try [
+periodexpr rdate = surroundedBy (many spacenonewline) . choice $ map try [
intervalanddateperiodexpr rdate,
- intervalperiodexpr,
- dateperiodexpr rdate,
- (return (NoInterval,DateSpan Nothing Nothing))
+ (,) NoInterval <$> periodexprdatespan rdate
]
intervalanddateperiodexpr :: Day -> SimpleTextParser (Interval, DateSpan)
intervalanddateperiodexpr rdate = do
- many spacenonewline
i <- reportinginterval
- many spacenonewline
- s <- periodexprdatespan rdate
+ s <- option def . try $ do
+ many spacenonewline
+ periodexprdatespan rdate
return (i,s)
-intervalperiodexpr :: SimpleTextParser (Interval, DateSpan)
-intervalperiodexpr = do
- many spacenonewline
- i <- reportinginterval
- return (i, DateSpan Nothing Nothing)
-
-dateperiodexpr :: Day -> SimpleTextParser (Interval, DateSpan)
-dateperiodexpr rdate = do
- many spacenonewline
- s <- periodexprdatespan rdate
- return (NoInterval, s)
-
-- Parse a reporting interval.
reportinginterval :: SimpleTextParser Interval
reportinginterval = choice' [
@@ -736,31 +848,52 @@ reportinginterval = choice' [
return $ Months 2,
do string "every"
many spacenonewline
- n <- fmap read $ some digitChar
- thsuffix
+ n <- nth
many spacenonewline
string "day"
+ of_ "week"
+ return $ DayOfWeek n,
+ do string "every"
many spacenonewline
- string "of"
- many spacenonewline
- string "week"
+ n <- weekday
return $ DayOfWeek n,
do string "every"
many spacenonewline
- n <- fmap read $ some digitChar
- thsuffix
+ n <- nth
many spacenonewline
string "day"
- optional $ do
- many spacenonewline
- string "of"
- many spacenonewline
- string "month"
- return $ DayOfMonth n
+ optOf_ "month"
+ return $ DayOfMonth n,
+ do string "every"
+ let mnth = choice' [month, mon] >>= \(_,m,_) -> return (read m)
+ d_o_y <- makePermParser $ DayOfYear <$$> try (many spacenonewline *> mnth) <||> try (many spacenonewline *> nth)
+ optOf_ "year"
+ return d_o_y,
+ do string "every"
+ many spacenonewline
+ ("",m,d) <- md
+ optOf_ "year"
+ return $ DayOfYear (read m) (read d),
+ do string "every"
+ many spacenonewline
+ n <- nth
+ many spacenonewline
+ wd <- weekday
+ optOf_ "month"
+ return $ WeekdayOfMonth n wd
]
where
-
- thsuffix = choice' $ map string ["st","nd","rd","th"]
+ of_ period = do
+ many spacenonewline
+ string "of"
+ many spacenonewline
+ string period
+
+ optOf_ period = optional $ try $ of_ period
+
+ nth = do n <- some digitChar
+ choice' $ map string ["st","nd","rd","th"]
+ return $ read n
-- Parse any of several variants of a basic interval, eg "daily", "every day", "every N days".
tryinterval :: String -> String -> (Int -> Interval) -> SimpleTextParser Interval
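
The new WeekdayOfMonth handling above (nthweekdayofmonthcontaining, advancetonthweekday) boils down to "find the nth occurrence of a given weekday within a month". A standalone sketch of that calculation with the time package; the Monday-based 1..7 weekday numbering matches the convention used by the parsers above (Tuesday = 2, Thursday = 4):

import Data.Time.Calendar (Day, addDays, fromGregorian, toGregorian)
import Data.Time.Calendar.WeekDate (toWeekDate)

-- Monday-based day of week, 1..7 (the third component of toWeekDate).
dayOfWeek1 :: Day -> Int
dayOfWeek1 d = let (_, _, wd) = toWeekDate d in wd

-- The nth occurrence (1-based) of weekday wd within d's month.
nthWeekdayOfMonth :: Int -> Int -> Day -> Day
nthWeekdayOfMonth n wd d = addDays (fromIntegral (7 * (n - 1))) firstHit
  where
    (y, m, _)  = toGregorian d
    monthStart = fromGregorian y m 1
    offset     = (wd - dayOfWeek1 monthStart) `mod` 7
    firstHit   = addDays (fromIntegral offset) monthStart

main :: IO ()
main = print $ nthWeekdayOfMonth 2 4 (fromGregorian 2011 1 1)
-- 2011-01-13, the 2nd Thursday of January 2011, matching the
-- WeekdayOfMonth 2 4 doctest above
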
diff --git a/Hledger/Data/Journal.hs b/Hledger/Data/Journal.hs
index dcd7bbb..987d2fd 100644
--- a/Hledger/Data/Journal.hs
+++ b/Hledger/Data/Journal.hs
@@ -29,8 +29,12 @@ module Hledger.Data.Journal (
filterTransactionPostings,
filterPostingAmount,
-- * Querying
- journalAccountNames,
journalAccountNamesUsed,
+ journalAccountNamesImplied,
+ journalAccountNamesDeclared,
+ journalAccountNamesDeclaredOrUsed,
+ journalAccountNamesDeclaredOrImplied,
+ journalAccountNames,
-- journalAmountAndPriceCommodities,
journalAmounts,
overJournalAmounts,
@@ -75,6 +79,7 @@ import Data.Array.ST
import Data.Functor.Identity (Identity(..))
import qualified Data.HashTable.ST.Cuckoo as HT
import Data.List
+import Data.List.Extra (groupSort)
-- import Data.Map (findWithDefault)
import Data.Maybe
import Data.Monoid
@@ -238,13 +243,32 @@ journalDescriptions = nub . sort . map tdescription . jtxns
journalPostings :: Journal -> [Posting]
journalPostings = concatMap tpostings . jtxns
--- | Unique account names posted to in this journal.
+-- | Sorted unique account names posted to by this journal's transactions.
journalAccountNamesUsed :: Journal -> [AccountName]
-journalAccountNamesUsed = sort . accountNamesFromPostings . journalPostings
+journalAccountNamesUsed = accountNamesFromPostings . journalPostings
+
+-- | Sorted unique account names implied by this journal's transactions -
+-- accounts posted to and all their implied parent accounts.
+journalAccountNamesImplied :: Journal -> [AccountName]
+journalAccountNamesImplied = expandAccountNames . journalAccountNamesUsed
+
+-- | Sorted unique account names declared by account directives in this journal.
+journalAccountNamesDeclared :: Journal -> [AccountName]
+journalAccountNamesDeclared = nub . sort . jaccounts
--- | Unique account names in this journal, including parent accounts containing no postings.
+-- | Sorted unique account names declared by account directives or posted to
+-- by transactions in this journal.
+journalAccountNamesDeclaredOrUsed :: Journal -> [AccountName]
+journalAccountNamesDeclaredOrUsed j = nub $ sort $ journalAccountNamesDeclared j ++ journalAccountNamesUsed j
+
+-- | Sorted unique account names declared by account directives, or posted to
+-- or implied as parents by transactions in this journal.
+journalAccountNamesDeclaredOrImplied :: Journal -> [AccountName]
+journalAccountNamesDeclaredOrImplied j = nub $ sort $ journalAccountNamesDeclared j ++ journalAccountNamesImplied j
+
+-- | Convenience/compatibility alias for journalAccountNamesDeclaredOrImplied.
journalAccountNames :: Journal -> [AccountName]
-journalAccountNames = sort . expandAccountNames . journalAccountNamesUsed
+journalAccountNames = journalAccountNamesDeclaredOrImplied
journalAccountNameTree :: Journal -> Tree AccountName
journalAccountNameTree = accountNameTreeFrom . journalAccountNames
@@ -513,7 +537,7 @@ journalCheckBalanceAssertions j =
-- | Check a posting's balance assertion and return an error if it
-- fails.
checkBalanceAssertion :: Posting -> MixedAmount -> Either String ()
-checkBalanceAssertion p@Posting{ pbalanceassertion = Just ass} amt
+checkBalanceAssertion p@Posting{ pbalanceassertion = Just (ass,_)} amt
| isReallyZeroAmount diff = Right ()
| True = Left err
where assertedcomm = acommodity ass
@@ -535,7 +559,8 @@ checkBalanceAssertion p@Posting{ pbalanceassertion = Just ass} amt
(case ptransaction p of
Nothing -> ":" -- shouldn't happen
Just t -> printf " in %s:\nin transaction:\n%s"
- (showGenericSourcePos $ tsourcepos t) (chomp $ show t) :: String)
+ (showGenericSourcePos pos) (chomp $ show t) :: String
+ where pos = snd $ fromJust $ pbalanceassertion p)
(showPostingLine p)
(showDate $ postingDate p)
(T.unpack $ paccount p) -- XXX pack
@@ -663,7 +688,7 @@ checkInferAndRegisterAmounts (Right oldTx) = do
where
inferFromAssignment :: Posting -> CurrentBalancesModifier s Posting
inferFromAssignment p = maybe (return p)
- (fmap (\a -> p { pamount = a, porigin = Just $ originalPosting p }) . setBalance (paccount p))
+ (fmap (\a -> p { pamount = a, porigin = Just $ originalPosting p }) . setBalance (paccount p) . fst)
$ pbalanceassertion p
-- | Adds a posting's amount to the posting's account balance and
@@ -730,8 +755,11 @@ journalApplyCommodityStyles j@Journal{jtxns=ts, jmarketprices=mps} = j''
-- from the posting amounts (or in some cases, price amounts) in this
-- commodity if any, otherwise the default style.
journalCommodityStyle :: Journal -> CommoditySymbol -> AmountStyle
-journalCommodityStyle j c =
- headDef amountstyle{asprecision=2} $
+journalCommodityStyle j = fromMaybe amountstyle{asprecision=2} . journalCommodityStyleLookup j
+
+journalCommodityStyleLookup :: Journal -> CommoditySymbol -> Maybe AmountStyle
+journalCommodityStyleLookup j c =
+ listToMaybe $
catMaybes [
M.lookup c (jcommodities j) >>= cformat
,M.lookup c $ jinferredcommodities j
@@ -751,8 +779,7 @@ journalInferCommodityStyles j =
commodityStylesFromAmounts :: [Amount] -> M.Map CommoditySymbol AmountStyle
commodityStylesFromAmounts amts = M.fromList commstyles
where
- samecomm = \a1 a2 -> acommodity a1 == acommodity a2
- commamts = [(acommodity $ head as, as) | as <- groupBy samecomm $ sortBy (comparing acommodity) amts]
+ commamts = groupSort [(acommodity as, as) | as <- amts]
commstyles = [(c, canonicalStyleFrom $ map astyle as) | (c,as) <- commamts]
-- | Given an ordered list of amount styles, choose a canonical style.
@@ -801,7 +828,10 @@ journalConvertAmountsToCost j@Journal{jtxns=ts} = j{jtxns=map fixtransaction ts}
fixtransaction t@Transaction{tpostings=ps} = t{tpostings=map fixposting ps}
fixposting p@Posting{pamount=a} = p{pamount=fixmixedamount a}
fixmixedamount (Mixed as) = Mixed $ map fixamount as
- fixamount = canonicaliseAmount (jinferredcommodities j) . costOfAmount
+ fixamount = applyJournalStyle . costOfAmount
+ applyJournalStyle a
+ | Just s <- journalCommodityStyleLookup j (acommodity a) = a{astyle=s}
+ | otherwise = a
-- -- | Get this journal's unique, display-preference-canonicalised commodities, by symbol.
-- journalCanonicalCommodities :: Journal -> M.Map String CommoditySymbol
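
journalCommodityStyleLookup above expresses "first applicable style wins" with listToMaybe . catMaybes over its candidate sources (declared commodity directives first, then inferred styles). A minimal sketch of that lookup pattern, with String stand-ins for AmountStyle:

import qualified Data.Map as M
import Data.Maybe (catMaybes, fromMaybe, listToMaybe)

-- First available style for a commodity: explicitly declared, then inferred.
lookupStyle :: M.Map String String -> M.Map String String -> String -> Maybe String
lookupStyle declared inferred c =
  listToMaybe $ catMaybes [M.lookup c declared, M.lookup c inferred]

-- Fall back to a default when neither source knows the commodity.
styleOrDefault :: M.Map String String -> M.Map String String -> String -> String
styleOrDefault declared inferred =
  fromMaybe "default: 2 decimal places" . lookupStyle declared inferred

main :: IO ()
main = do
  let declared = M.fromList [("USD", "$1,000.00")]
      inferred = M.fromList [("EUR", "1.000,00 EUR")]
  putStrLn $ styleOrDefault declared inferred "USD"   -- $1,000.00
  putStrLn $ styleOrDefault declared inferred "GBP"   -- default: 2 decimal places
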
diff --git a/Hledger/Data/Posting.hs b/Hledger/Data/Posting.hs
index f645634..cee6dee 100644
--- a/Hledger/Data/Posting.hs
+++ b/Hledger/Data/Posting.hs
@@ -132,8 +132,9 @@ hasAmount = (/= missingmixedamt) . pamount
isAssignment :: Posting -> Bool
isAssignment p = not (hasAmount p) && isJust (pbalanceassertion p)
+-- | Sorted unique account names referenced by these postings.
accountNamesFromPostings :: [Posting] -> [AccountName]
-accountNamesFromPostings = nub . map paccount
+accountNamesFromPostings = nub . sort . map paccount
sumPostings :: [Posting] -> MixedAmount
sumPostings = sumStrict . map pamount
diff --git a/Hledger/Data/Transaction.hs b/Hledger/Data/Transaction.hs
index 7c75a1d..ba74ae1 100644
--- a/Hledger/Data/Transaction.hs
+++ b/Hledger/Data/Transaction.hs
@@ -213,7 +213,7 @@ postingAsLines elideamount onelineamounts ps p = concat [
| postingblock <- postingblocks]
where
postingblocks = [map rstrip $ lines $ concatTopPadded [statusandaccount, " ", amount, assertion, samelinecomment] | amount <- shownAmounts]
- assertion = maybe "" ((" = " ++) . showAmountWithZeroCommodity) $ pbalanceassertion p
+ assertion = maybe "" ((" = " ++) . showAmountWithZeroCommodity . fst) $ pbalanceassertion p
statusandaccount = indent $ fitString (Just $ minwidth) Nothing False True $ pstatusandacct p
where
-- pad to the maximum account name width, plus 2 to leave room for status flags, to keep amounts aligned
@@ -228,6 +228,7 @@ postingAsLines elideamount onelineamounts ps p = concat [
shownAmounts
| elideamount = [""]
| onelineamounts = [fitString (Just amtwidth) Nothing False False $ showMixedAmountOneLine $ pamount p]
+ | null (amounts $ pamount p) = [""]
| otherwise = map (fitStringMulti (Just amtwidth) Nothing False False . showAmount ) . amounts $ pamount p
where
amtwidth = maximum $ 12 : map (strWidth . showMixedAmount . pamount) ps -- min. 12 for backwards compatibility
@@ -254,7 +255,7 @@ showPostingLines p = postingAsLines False False ps p where
tests_postingAsLines = [
"postingAsLines" ~: do
let p `gives` ls = assertEqual (show p) ls (postingAsLines False False [p] p)
- posting `gives` []
+ posting `gives` [""]
posting{
pstatus=Cleared,
paccount="a",
diff --git a/Hledger/Data/Types.hs b/Hledger/Data/Types.hs
index 0700e3f..e0ed5e7 100644
--- a/Hledger/Data/Types.hs
+++ b/Hledger/Data/Types.hs
@@ -43,6 +43,8 @@ data WhichDate = PrimaryDate | SecondaryDate deriving (Eq,Show)
data DateSpan = DateSpan (Maybe Day) (Maybe Day) deriving (Eq,Ord,Data,Generic,Typeable)
+instance Default DateSpan where def = DateSpan Nothing Nothing
+
instance NFData DateSpan
-- synonyms for various date-related scalars
@@ -89,7 +91,9 @@ data Interval =
| Quarters Int
| Years Int
| DayOfMonth Int
+ | WeekdayOfMonth Int Int
| DayOfWeek Int
+ | DayOfYear Int Int -- Month, Day
-- WeekOfYear Int
-- MonthOfYear Int
-- QuarterOfYear Int
@@ -192,6 +196,8 @@ instance Show Status where -- custom show.. bad idea.. don't do it..
show Pending = "!"
show Cleared = "*"
+type BalanceAssertion = Maybe (Amount, GenericSourcePos)
+
data Posting = Posting {
pdate :: Maybe Day, -- ^ this posting's date, if different from the transaction's
pdate2 :: Maybe Day, -- ^ this posting's secondary date, if different from the transaction's
@@ -201,7 +207,7 @@ data Posting = Posting {
pcomment :: Text, -- ^ this posting's comment lines, as a single non-indented multi-line string
ptype :: PostingType,
ptags :: [Tag], -- ^ tag names and values, extracted from the comment
- pbalanceassertion :: Maybe Amount, -- ^ optional: the expected balance in this commodity in the account after this posting
+ pbalanceassertion :: BalanceAssertion, -- ^ optional: the expected balance in this commodity in the account after this posting
ptransaction :: Maybe Transaction, -- ^ this posting's parent transaction (co-recursive types).
-- Tying this knot gets tedious, Maybe makes it easier/optional.
porigin :: Maybe Posting -- ^ original posting if this one is result of any transformations (one level only)
diff --git a/Hledger/Read/Common.hs b/Hledger/Read/Common.hs
index 0af5948..85dac54 100644
--- a/Hledger/Read/Common.hs
+++ b/Hledger/Read/Common.hs
@@ -14,6 +14,7 @@ Some of these might belong in Hledger.Read.JournalReader or Hledger.Read.
--- * module
{-# LANGUAGE CPP, DeriveDataTypeable, RecordWildCards, NamedFieldPuns, NoMonoLocalBinds, ScopedTypeVariables, FlexibleContexts, TupleSections, OverloadedStrings #-}
+{-# LANGUAGE LambdaCase #-}
module Hledger.Read.Common
where
@@ -23,7 +24,7 @@ import Prelude.Compat hiding (readFile)
import Control.Monad.Compat
import Control.Monad.Except (ExceptT(..), runExceptT, throwError) --, catchError)
import Control.Monad.State.Strict
-import Data.Char (isNumber)
+import Data.Char
import Data.Data
import Data.Default
import Data.Functor.Identity
@@ -31,6 +32,7 @@ import Data.List.Compat
import Data.List.NonEmpty (NonEmpty(..))
import Data.List.Split (wordsBy)
import Data.Maybe
+import qualified Data.Map as M
import Data.Monoid
import Data.Text (Text)
import qualified Data.Text as T
@@ -146,6 +148,24 @@ setDefaultCommodityAndStyle cs = modify' (\j -> j{jparsedefaultcommodity=Just cs
getDefaultCommodityAndStyle :: JournalParser m (Maybe (CommoditySymbol,AmountStyle))
getDefaultCommodityAndStyle = jparsedefaultcommodity `fmap` get
+-- | Get the amount style associated with the default commodity.
+--
+-- Returns the 'AmountStyle' defined by the latest default commodity directive
+-- prior to the current position within this file or its parents.
+getDefaultAmountStyle :: JournalParser m (Maybe AmountStyle)
+getDefaultAmountStyle = fmap snd <$> getDefaultCommodityAndStyle
+
+-- | Look up the amount style for a specific commodity.
+--
+-- Returns the 'AmountStyle' from a commodity directive in the current journal
+-- prior to the current position, or in its parent files.
+getAmountStyle :: CommoditySymbol -> JournalParser m (Maybe AmountStyle)
+getAmountStyle commodity = do
+ specificStyle <- maybe Nothing cformat . M.lookup commodity . jcommodities <$> get
+ defaultStyle <- fmap snd <$> getDefaultCommodityAndStyle
+ let effectiveStyle = listToMaybe $ catMaybes [specificStyle, defaultStyle]
+ return effectiveStyle
+
pushAccount :: AccountName -> JournalParser m ()
pushAccount acct = modify' (\j -> j{jaccounts = acct : jaccounts j})
@@ -416,8 +436,9 @@ leftsymbolamountp = do
sign <- lift signp
m <- lift multiplierp
c <- lift commoditysymbolp
+ suggestedStyle <- getAmountStyle c
sp <- lift $ many spacenonewline
- (q,prec,mdec,mgrps) <- lift numberp
+ (q,prec,mdec,mgrps) <- lift $ numberp suggestedStyle
let s = amountstyle{ascommodityside=L, ascommodityspaced=not $ null sp, asprecision=prec, asdecimalpoint=mdec, asdigitgroups=mgrps}
p <- priceamountp
let applysign = if sign=="-" then negate else id
@@ -427,9 +448,12 @@ leftsymbolamountp = do
rightsymbolamountp :: Monad m => JournalParser m Amount
rightsymbolamountp = do
m <- lift multiplierp
- (q,prec,mdec,mgrps) <- lift numberp
+ sign <- lift signp
+ rawnum <- lift $ rawnumberp
sp <- lift $ many spacenonewline
c <- lift commoditysymbolp
+ suggestedStyle <- getAmountStyle c
+ let (q,prec,mdec,mgrps) = fromRawNumber suggestedStyle (sign == "-") rawnum
p <- priceamountp
let s = amountstyle{ascommodityside=R, ascommodityspaced=not $ null sp, asprecision=prec, asdecimalpoint=mdec, asdigitgroups=mgrps}
return $ Amount c q p s m
@@ -438,7 +462,8 @@ rightsymbolamountp = do
nosymbolamountp :: Monad m => JournalParser m Amount
nosymbolamountp = do
m <- lift multiplierp
- (q,prec,mdec,mgrps) <- lift numberp
+ suggestedStyle <- getDefaultAmountStyle
+ (q,prec,mdec,mgrps) <- lift $ numberp suggestedStyle
p <- priceamountp
-- apply the most recently seen default commodity and style to this commodityless amount
defcs <- getDefaultCommodityAndStyle
@@ -477,14 +502,15 @@ priceamountp =
return $ UnitPrice a))
<|> return NoPrice
-partialbalanceassertionp :: Monad m => JournalParser m (Maybe Amount)
+partialbalanceassertionp :: Monad m => JournalParser m BalanceAssertion
partialbalanceassertionp =
try (do
lift (many spacenonewline)
+ sourcepos <- genericSourcePos <$> lift getPosition
char '='
lift (many spacenonewline)
a <- amountp -- XXX should restrict to a simple amount
- return $ Just $ a)
+ return $ Just (a, sourcepos))
<|> return Nothing
-- balanceassertion :: Monad m => TextParser m (Maybe MixedAmount)
@@ -524,55 +550,79 @@ fixedlotpricep =
-- seen following the decimal point), the decimal point character used if any,
-- and the digit group style if any.
--
-numberp :: TextParser m (Quantity, Int, Maybe Char, Maybe DigitGroupStyle)
-numberp = do
- -- a number is an optional sign followed by a sequence of digits possibly
- -- interspersed with periods, commas, or both
- -- ptrace "numberp"
- sign <- signp
- parts <- some $ choice' [some digitChar, some $ char ',', some $ char '.']
- dbg8 "numberp parsed" (sign,parts) `seq` return ()
-
- -- check the number is well-formed and identify the decimal point and digit
- -- group separator characters used, if any
- let (numparts, puncparts) = partition numeric parts
- (ok, mdecimalpoint, mseparator) =
- case (numparts, puncparts) of
- ([],_) -> (False, Nothing, Nothing) -- no digits, not ok
- (_,[]) -> (True, Nothing, Nothing) -- digits with no punctuation, ok
- (_,[[d]]) -> (True, Just d, Nothing) -- just a single punctuation of length 1, assume it's a decimal point
- (_,[_]) -> (False, Nothing, Nothing) -- a single punctuation of some other length, not ok
- (_,_:_:_) -> -- two or more punctuations
- let (s:ss, d) = (init puncparts, last puncparts) -- the leftmost is a separator and the rightmost may be a decimal point
- in if any ((/=1).length) puncparts -- adjacent punctuation chars, not ok
- || any (s/=) ss -- separator chars vary, not ok
- || head parts == s -- number begins with a separator char, not ok
- then (False, Nothing, Nothing)
- else if s == d
- then (True, Nothing, Just $ head s) -- just one kind of punctuation - must be separators
- else (True, Just $ head d, Just $ head s) -- separator(s) and a decimal point
- unless ok $ fail $ "number seems ill-formed: "++concat parts
-
- -- get the digit group sizes and digit group style if any
- let (intparts',fracparts') = span ((/= mdecimalpoint) . Just . head) parts
- (intparts, fracpart) = (filter numeric intparts', filter numeric fracparts')
- groupsizes = reverse $ case map length intparts of
+numberp :: Maybe AmountStyle -> TextParser m (Quantity, Int, Maybe Char, Maybe DigitGroupStyle)
+numberp suggestedStyle = do
+ -- a number is an optional sign followed by a sequence of digits possibly
+ -- interspersed with periods, commas, or both
+ -- ptrace "numberp"
+ sign <- signp
+ raw <- rawnumberp
+ dbg8 "numberp parsed" raw `seq` return ()
+ return $ dbg8 "numberp quantity,precision,mdecimalpoint,mgrps" (fromRawNumber suggestedStyle (sign == "-") raw)
+ <?> "numberp"
+
+fromRawNumber :: Maybe AmountStyle -> Bool -> (Maybe Char, [String], Maybe (Char, String)) -> (Quantity, Int, Maybe Char, Maybe DigitGroupStyle)
+fromRawNumber suggestedStyle negated raw = (quantity, precision, mdecimalpoint, mgrps) where
+ -- unpack with a hint if useful
+ (mseparator, intparts, mdecimalpoint, frac) =
+ case raw of
+ -- just a single punctuation between two digits groups, assume it's a decimal point
+ (Just s, [firstGroup, lastGroup], Nothing)
+        -- if we have a decimal-point hint, restrict this assumption to a matching separator
+ | maybe True (`asdecimalcheck` s) suggestedStyle -> (Nothing, [firstGroup], Just s, lastGroup)
+
+ (firstSep, digitGroups, Nothing) -> (firstSep, digitGroups, Nothing, [])
+ (firstSep, digitGroups, Just (d, frac)) -> (firstSep, digitGroups, Just d, frac)
+
+ -- get the digit group sizes and digit group style if any
+ groupsizes = reverse $ case map length intparts of
(a:b:cs) | a < b -> b:cs
gs -> gs
- mgrps = (`DigitGroups` groupsizes) <$> mseparator
-
- -- put the parts back together without digit group separators, get the precision and parse the value
- let int = concat $ "":intparts
- frac = concat $ "":fracpart
- precision = length frac
- int' = if null int then "0" else int
- frac' = if null frac then "0" else frac
- quantity = read $ sign++int'++"."++frac' -- this read should never fail
-
- return $ dbg8 "numberp quantity,precision,mdecimalpoint,mgrps" (quantity,precision,mdecimalpoint,mgrps)
- <?> "numberp"
- where
- numeric = isNumber . headDef '_'
+ mgrps = (`DigitGroups` groupsizes) <$> mseparator
+
+ -- put the parts back together without digit group separators, get the precision and parse the value
+ repr = (if negated then "-" else "") ++ "0" ++ concat intparts ++ (if null frac then "" else "." ++ frac)
+ quantity = read repr
+ precision = length frac
+
+ asdecimalcheck :: AmountStyle -> Char -> Bool
+ asdecimalcheck = \case
+ AmountStyle{asdecimalpoint = Just d} -> (d ==)
+ AmountStyle{asdigitgroups = Just (DigitGroups g _)} -> (g /=)
+ AmountStyle{asprecision = 0} -> const False
+ _ -> const True
+
+
+rawnumberp :: TextParser m (Maybe Char, [String], Maybe (Char, String))
+rawnumberp = do
+ let sepChars = ['.', ','] -- all allowed punctuation characters
+
+ (firstSep, groups) <- option (Nothing, []) $ do
+ leadingDigits <- some digitChar
+ option (Nothing, [leadingDigits]) . try $ do
+ firstSep <- oneOf sepChars <|> whitespaceChar
+ groups <- some digitChar `sepBy1` char firstSep
+ return (Just firstSep, leadingDigits : groups)
+
+ let remSepChars = maybe sepChars (`delete` sepChars) firstSep
+ modifier
+ | null groups = fmap Just -- if no digits so far, we require at least some decimals
+ | otherwise = optional
+
+ extraGroup <- modifier $ do
+ lastSep <- oneOf remSepChars
+    digits <- modifier $ some digitChar -- the decimal separator may be followed by no digits if some were seen before
+ return (lastSep, fromMaybe [] digits)
+
+  -- make sure we didn't parse only the leading part of a mistyped number
+ notFollowedBy $ oneOf sepChars <|> (whitespaceChar >> digitChar)
+
+ return $ dbg8 "rawnumberp" (firstSep, groups, extraGroup)
+ <?> "rawnumberp"
+
+-- | Parse a unicode char that represents any non-control space char (Zs general category).
+whitespaceChar :: TextParser m Char
+whitespaceChar = charCategory Space
-- test_numberp = do
-- let s `is` n = assertParseEqual (parseWithState mempty numberp s) n
@@ -609,15 +659,15 @@ multilinecommentp = do
emptyorcommentlinep :: JournalParser m ()
emptyorcommentlinep = do
- lift (many spacenonewline) >> (commentp <|> (lift (many spacenonewline) >> newline >> return ""))
+ lift (many spacenonewline) >> (linecommentp <|> (lift (many spacenonewline) >> newline >> return ""))
return ()
-- | Parse a possibly multi-line comment following a semicolon.
followingcommentp :: JournalParser m Text
followingcommentp =
-- ptrace "followingcommentp"
- do samelinecomment <- lift (many spacenonewline) >> (try semicoloncommentp <|> (newline >> return ""))
- newlinecomments <- many (try (lift (some spacenonewline) >> semicoloncommentp))
+ do samelinecomment <- lift (many spacenonewline) >> (try commentp <|> (newline >> return ""))
+ newlinecomments <- many (try (lift (some spacenonewline) >> commentp))
return $ T.unlines $ samelinecomment:newlinecomments
-- | Parse a possibly multi-line comment following a semicolon, and
@@ -649,10 +699,10 @@ followingcommentandtagsp mdefdate = do
-- to get good error positions.
startpos <- getPosition
commentandwhitespace :: String <- do
- let semicoloncommentp' = (:) <$> char ';' <*> anyChar `manyTill` eolof
+ let commentp' = (:) <$> char ';' <*> anyChar `manyTill` eolof
sp1 <- lift (many spacenonewline)
- l1 <- try (lift semicoloncommentp') <|> (newline >> return "")
- ls <- lift . many $ try ((++) <$> some spacenonewline <*> semicoloncommentp')
+ l1 <- try (lift commentp') <|> (newline >> return "")
+ ls <- lift . many $ try ((++) <$> some spacenonewline <*> commentp')
return $ unlines $ (sp1 ++ l1) : ls
let comment = T.pack $ unlines $ map (lstrip . dropWhile (==';') . strip) $ lines commentandwhitespace
-- pdbg 0 $ "commentws:"++show commentandwhitespace
@@ -675,14 +725,15 @@ followingcommentandtagsp mdefdate = do
return (comment, tags, mdate, mdate2)
+-- A transaction/posting comment must start with a semicolon.
+-- This parser ignores leading whitespace.
commentp :: JournalParser m Text
-commentp = commentStartingWithp commentchars
-
-commentchars :: [Char]
-commentchars = "#;*"
+commentp = commentStartingWithp ";"
-semicoloncommentp :: JournalParser m Text
-semicoloncommentp = commentStartingWithp ";"
+-- A line (file-level) comment can start with a semicolon, hash,
+-- or star (allowing org nodes). This parser ignores leading whitespace.
+linecommentp :: JournalParser m Text
+linecommentp = commentStartingWithp ";#*"
commentStartingWithp :: [Char] -> JournalParser m Text
commentStartingWithp cs = do
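
The numberp rewrite above splits number parsing in two: rawnumberp lexes a number into (first separator, digit groups, optional decimal mark and decimals), and fromRawNumber interprets that, optionally guided by a previously declared AmountStyle. A simplified standalone sketch of the interpretation step only, without the style hint and with plain tuples in place of hledger's types:

-- A raw number: optional group separator, digit groups, optional (decimal mark, decimals).
type RawNumber = (Maybe Char, [String], Maybe (Char, String))

-- Interpret a raw number as (quantity, precision). A single lone mark between
-- two digit groups is treated as a decimal point, a simplification of the
-- style-driven disambiguation done by fromRawNumber above.
interpret :: RawNumber -> (Double, Int)
interpret raw = (read repr, length frac)
  where
    (intparts, frac) = case raw of
      (Just _, [intpart, lastgroup], Nothing) -> ([intpart], lastgroup)
      (_, groups, Nothing)                    -> (groups, "")
      (_, groups, Just (_, ds))               -> (groups, ds)
    repr = "0" ++ concat intparts ++ (if null frac then "" else "." ++ frac)

main :: IO ()
main = do
  print $ interpret (Just ',', ["1", "000"], Just ('.', "50"))  -- (1000.5,2)
  print $ interpret (Just '.', ["3", "14"], Nothing)            -- (3.14,2)
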
diff --git a/Hledger/Read/CsvReader.hs b/Hledger/Read/CsvReader.hs
index 3c23615..33a7e0b 100644
--- a/Hledger/Read/CsvReader.hs
+++ b/Hledger/Read/CsvReader.hs
@@ -649,10 +649,10 @@ transactionFromCsvRecord sourcepos rules record = t
comment = maybe "" render $ mfieldtemplate "comment"
precomment = maybe "" render $ mfieldtemplate "precomment"
currency = maybe (fromMaybe "" mdefaultcurrency) render $ mfieldtemplate "currency"
- amountstr = (currency++) $ simplifySign $ getAmountStr rules record
- amount = either amounterror (Mixed . (:[])) $ runParser (evalStateT (amountp <* eof) mempty) "" $ T.pack amountstr
+ amountstr = (currency++) <$> simplifySign <$> getAmountStr rules record
+ maybeamount = either amounterror (Mixed . (:[])) <$> runParser (evalStateT (amountp <* eof) mempty) "" <$> T.pack <$> amountstr
amounterror err = error' $ unlines
- ["error: could not parse \""++amountstr++"\" as an amount"
+ ["error: could not parse \""++fromJust amountstr++"\" as an amount"
,showRecord record
,"the amount rule is: "++(fromMaybe "" $ mfieldtemplate "amount")
,"the currency rule is: "++(fromMaybe "unspecified" $ mfieldtemplate "currency")
@@ -662,10 +662,13 @@ transactionFromCsvRecord sourcepos rules record = t
++"change your amount or currency rules, "
++"or "++maybe "add a" (const "change your") mskip++" skip rule"
]
- amount1 = amount
- -- convert balancing amount to cost like hledger print, so eg if
+ amount1 = case maybeamount of
+ Just a -> a
+ Nothing | balance /= Nothing -> nullmixedamt
+ Nothing -> error' $ "amount and balance have no value\n"++showRecord record
+ -- convert balancing amount to cost like hledger print, so eg if
-- amount1 is "10 GBP @@ 15 USD", amount2 will be "-15 USD".
- amount2 = costOfMixedAmount (-amount)
+ amount2 = costOfMixedAmount (-amount1)
s `or` def = if null s then def else s
defaccount1 = fromMaybe "unknown" $ mdirective "default-account1"
defaccount2 = case isNegativeMixedAmount amount2 of
@@ -676,7 +679,7 @@ transactionFromCsvRecord sourcepos rules record = t
balance = maybe Nothing (parsebalance.render) $ mfieldtemplate "balance"
parsebalance str
| all isSpace str = Nothing
- | otherwise = Just $ either (balanceerror str) id $ runParser (evalStateT (amountp <* eof) mempty) "" $ T.pack $ (currency++) $ simplifySign str
+ | otherwise = Just $ (either (balanceerror str) id $ runParser (evalStateT (amountp <* eof) mempty) "" $ T.pack $ (currency++) $ simplifySign str, nullsourcepos)
balanceerror str err = error' $ unlines
["error: could not parse \""++str++"\" as balance amount"
,showRecord record
@@ -702,7 +705,7 @@ transactionFromCsvRecord sourcepos rules record = t
]
}
-getAmountStr :: CsvRules -> CsvRecord -> String
+getAmountStr :: CsvRules -> CsvRecord -> Maybe String
getAmountStr rules record =
let
mamount = getEffectiveAssignment rules record "amount"
@@ -711,11 +714,11 @@ getAmountStr rules record =
render = fmap (strip . renderTemplate rules record)
in
case (render mamount, render mamountin, render mamountout) of
- (Just "", Nothing, Nothing) -> error' $ "amount has no value\n"++showRecord record
- (Just a, Nothing, Nothing) -> a
+ (Just "", Nothing, Nothing) -> Nothing
+ (Just a, Nothing, Nothing) -> Just a
(Nothing, Just "", Just "") -> error' $ "neither amount-in or amount-out has a value\n"++showRecord record
- (Nothing, Just i, Just "") -> i
- (Nothing, Just "", Just o) -> negateStr o
+ (Nothing, Just i, Just "") -> Just i
+ (Nothing, Just "", Just o) -> Just $ negateStr o
(Nothing, Just _, Just _) -> error' $ "both amount-in and amount-out have a value\n"++showRecord record
_ -> error' $ "found values for amount and for amount-in/amount-out - please use either amount or amount-in/amount-out\n"++showRecord record
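
getAmountStr now returns Maybe so that a record with only a balance field can still produce a transaction (amount1 falls back to nullmixedamt above). A small sketch of the field-selection logic for "amount" vs "amount-in"/"amount-out"; the simple '-' prefix here stands in for the real negateStr, which also handles already-signed values:

-- Choose a CSV record's amount string, or Nothing if only a balance is given.
chooseAmount :: Maybe String -> Maybe String -> Maybe String -> Either String (Maybe String)
chooseAmount amount amountIn amountOut =
  case (amount, amountIn, amountOut) of
    (Just "", Nothing, Nothing) -> Right Nothing
    (Just a,  Nothing, Nothing) -> Right (Just a)
    (Nothing, Just "", Just "") -> Left "neither amount-in nor amount-out has a value"
    (Nothing, Just i,  Just "") -> Right (Just i)
    (Nothing, Just "", Just o)  -> Right (Just ('-' : o))
    (Nothing, Just _,  Just _)  -> Left "both amount-in and amount-out have a value"
    _                           -> Left "use either amount or amount-in/amount-out"

main :: IO ()
main = do
  print $ chooseAmount (Just "5.00") Nothing Nothing    -- Right (Just "5.00")
  print $ chooseAmount Nothing (Just "") (Just "7.25")  -- Right (Just "-7.25")
  print $ chooseAmount (Just "") Nothing Nothing        -- Right Nothing
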
diff --git a/Hledger/Read/JournalReader.hs b/Hledger/Read/JournalReader.hs
index a41539b..a66d57b 100644
--- a/Hledger/Read/JournalReader.hs
+++ b/Hledger/Read/JournalReader.hs
@@ -81,6 +81,8 @@ import Control.Monad.State.Strict
import qualified Data.Map.Strict as M
import Data.Monoid
import Data.Text (Text)
+import Data.String
+import Data.List
import qualified Data.Text as T
import Data.Time.Calendar
import Data.Time.LocalTime
@@ -157,7 +159,7 @@ addJournalItemP =
directivep :: MonadIO m => ErroringJournalParser m ()
directivep = (do
optional $ char '!'
- choiceInState [
+ choice [
includedirectivep
,aliasdirectivep
,endaliasesdirectivep
@@ -201,7 +203,7 @@ includedirectivep = do
either
(throwError
. ((show parentpos ++ " in included file " ++ show filename ++ ":\n") ++)
- . show)
+ . parseErrorPretty)
(return . journalAddFile (filepath, txt))
ej1
case ej of
@@ -215,6 +217,7 @@ newJournalWithParseStateFrom j = mempty{
,jparsedefaultcommodity = jparsedefaultcommodity j
,jparseparentaccounts = jparseparentaccounts j
,jparsealiases = jparsealiases j
+ ,jcommodities = jcommodities j
-- ,jparsetransactioncount = jparsetransactioncount j
,jparsetimeclockentries = jparsetimeclockentries j
}
@@ -292,9 +295,19 @@ formatdirectivep expectedsym = do
else parserErrorAt pos $
printf "commodity directive symbol \"%s\" and format directive symbol \"%s\" should be the same" expectedsym acommodity
+keywordp :: String -> JournalParser m ()
+keywordp = (() <$) . string . fromString
+
+spacesp :: JournalParser m ()
+spacesp = () <$ lift (some spacenonewline)
+
+-- | Backtracking parser similar to string, but allows a varying amount of space between words
+keywordsp :: String -> JournalParser m ()
+keywordsp = try . sequence_ . intersperse spacesp . map keywordp . words
+
applyaccountdirectivep :: JournalParser m ()
applyaccountdirectivep = do
- string "apply" >> lift (some spacenonewline) >> string "account"
+ keywordsp "apply account" <?> "apply account directive"
lift (some spacenonewline)
parent <- lift accountnamep
newline
@@ -302,7 +315,7 @@ applyaccountdirectivep = do
endapplyaccountdirectivep :: JournalParser m ()
endapplyaccountdirectivep = do
- string "end" >> lift (some spacenonewline) >> string "apply" >> lift (some spacenonewline) >> string "account"
+ keywordsp "end apply account" <?> "end apply account directive"
popParentAccount
aliasdirectivep :: JournalParser m ()
@@ -338,7 +351,7 @@ regexaliasp = do
endaliasesdirectivep :: JournalParser m ()
endaliasesdirectivep = do
- string "end aliases"
+ keywordsp "end aliases" <?> "end aliases directive"
clearAccountAliases
tagdirectivep :: JournalParser m ()
@@ -351,7 +364,7 @@ tagdirectivep = do
endtagdirectivep :: JournalParser m ()
endtagdirectivep = do
- (string "end tag" <|> string "pop") <?> "end tag or pop directive"
+ (keywordsp "end tag" <|> keywordp "pop") <?> "end tag or pop directive"
lift restofline
return ()
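
As a standalone illustration of the keywordsp technique introduced above (a sketch, not part of the patch; megaparsec 7+ style imports are assumed rather than hledger's JournalParser): each word is matched with string, any run of non-newline whitespace is accepted between words, and try makes the whole phrase backtrack as a unit.

    import Data.List (intersperse)
    import Data.Void (Void)
    import Text.Megaparsec
    import Text.Megaparsec.Char

    type P = Parsec Void String

    keywordP :: String -> P ()
    keywordP = (() <$) . string

    -- one or more spaces or tabs, but not newlines
    spacesP :: P ()
    spacesP = () <$ some (oneOf " \t")

    -- match the words of a phrase, allowing varying whitespace between them
    keywordsP :: String -> P ()
    keywordsP = try . sequence_ . intersperse spacesP . map keywordP . words

    -- e.g. parseTest (keywordsP "end apply account") "end   apply\taccount"
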
diff --git a/Hledger/Read/TimedotReader.hs b/Hledger/Read/TimedotReader.hs
index cfea7a3..9a433a5 100644
--- a/Hledger/Read/TimedotReader.hs
+++ b/Hledger/Read/TimedotReader.hs
@@ -141,7 +141,7 @@ timedotdurationp = try timedotnumericp <|> timedotdotsp
-- @
timedotnumericp :: JournalParser m Quantity
timedotnumericp = do
- (q, _, _, _) <- lift numberp
+ (q, _, _, _) <- lift $ numberp Nothing
msymbol <- optional $ choice $ map (string . fst) timeUnits
lift (many spacenonewline)
let q' =
diff --git a/Hledger/Reports/BalanceReport.hs b/Hledger/Reports/BalanceReport.hs
index 26c3e7f..6bbec3e 100644
--- a/Hledger/Reports/BalanceReport.hs
+++ b/Hledger/Reports/BalanceReport.hs
@@ -17,9 +17,6 @@ module Hledger.Reports.BalanceReport (
BalanceReport,
BalanceReportItem,
balanceReport,
- balanceReportValue,
- mixedAmountValue,
- amountValue,
flatShowsExclusiveBalance,
-- * Tests
@@ -32,7 +29,6 @@ import Data.Ord
import Data.Maybe
import Data.Time.Calendar
import Test.HUnit
-import qualified Data.Text as T
import Hledger.Data
import Hledger.Read (mamountp')
@@ -152,57 +148,6 @@ balanceReportItem opts q a
-- items = [(a,a',n, headDef 0 bs) | ((a,a',n), bs) <- mbrrows]
-- total = headDef 0 mbrtotals
--- | Convert all the amounts in a single-column balance report to
--- their value on the given date in their default valuation
--- commodities.
-balanceReportValue :: Journal -> Day -> BalanceReport -> BalanceReport
-balanceReportValue j d r = r'
- where
- (items,total) = r
- r' =
- dbg8 "known market prices" (jmarketprices j) `seq`
- dbg8 "report end date" d `seq`
- dbg8 "balanceReportValue"
- ([(n, n', i, mixedAmountValue j d a) |(n,n',i,a) <- items], mixedAmountValue j d total)
-
-mixedAmountValue :: Journal -> Day -> MixedAmount -> MixedAmount
-mixedAmountValue j d (Mixed as) = Mixed $ map (amountValue j d) as
-
--- | Find the market value of this amount on the given date, in it's
--- default valuation commodity, based on recorded market prices.
--- If no default valuation commodity can be found, the amount is left
--- unchanged.
-amountValue :: Journal -> Day -> Amount -> Amount
-amountValue j d a =
- case commodityValue j d (acommodity a) of
- Just v -> v{aquantity=aquantity v * aquantity a
- ,aprice=aprice a
- }
- Nothing -> a
-
--- | Find the market value, if known, of one unit of this commodity (A) on
--- the given valuation date, in the commodity (B) mentioned in the latest
--- applicable market price. The latest applicable market price is the market
--- price directive for commodity A with the latest date that is on or before
--- the valuation date; or if there are multiple such prices with the same date,
--- the last parsed.
-commodityValue :: Journal -> Day -> CommoditySymbol -> Maybe Amount
-commodityValue j valuationdate c
- | null applicableprices = dbg Nothing
- | otherwise = dbg $ Just $ mpamount $ last applicableprices
- where
- dbg = dbg8 ("using market price for "++T.unpack c)
- applicableprices =
- [p | p <- sortBy (comparing mpdate) $ jmarketprices j
- , mpcommodity p == c
- , mpdate p <= valuationdate
- ]
-
-
-
-
-
-
tests_balanceReport =
let
diff --git a/Hledger/Reports/MultiBalanceReports.hs b/Hledger/Reports/MultiBalanceReports.hs
index eaf8c72..e3ba1e6 100644
--- a/Hledger/Reports/MultiBalanceReports.hs
+++ b/Hledger/Reports/MultiBalanceReports.hs
@@ -9,7 +9,6 @@ module Hledger.Reports.MultiBalanceReports (
MultiBalanceReport(..),
MultiBalanceReportRow,
multiBalanceReport,
- multiBalanceReportValue,
singleBalanceReport,
-- -- * Tests
@@ -234,18 +233,6 @@ multiBalanceReport opts q j = MultiBalanceReport (displayspans, sorteditems, tot
dbg1 s = let p = "multiBalanceReport" in Hledger.Utils.dbg1 (p++" "++s) -- add prefix in this function's debug output
-- dbg1 = const id -- exclude this function from debug output
--- | Convert all the amounts in a multi-column balance report to their
--- value on the given date in their default valuation commodities
--- (which are determined as of that date, not the report interval dates).
-multiBalanceReportValue :: Journal -> Day -> MultiBalanceReport -> MultiBalanceReport
-multiBalanceReportValue j d r = r'
- where
- MultiBalanceReport (spans, rows, (coltotals, rowtotaltotal, rowavgtotal)) = r
- r' = MultiBalanceReport
- (spans,
- [(acct, acct', depth, map convert rowamts, convert rowtotal, convert rowavg) | (acct, acct', depth, rowamts, rowtotal, rowavg) <- rows],
- (map convert coltotals, convert rowtotaltotal, convert rowavgtotal))
- convert = mixedAmountValue j d
tests_multiBalanceReport =
let
diff --git a/Hledger/Reports/ReportOptions.hs b/Hledger/Reports/ReportOptions.hs
index cd02e35..dcbd569 100644
--- a/Hledger/Reports/ReportOptions.hs
+++ b/Hledger/Reports/ReportOptions.hs
@@ -24,9 +24,12 @@ module Hledger.Reports.ReportOptions (
queryOptsFromOpts,
transactionDateFn,
postingDateFn,
+ reportStartEndDates,
reportStartDate,
reportEndDate,
- reportStartEndDates,
+ specifiedStartEndDates,
+ specifiedStartDate,
+ specifiedEndDate,
tests_Hledger_Reports_ReportOptions
)
@@ -104,6 +107,8 @@ data ReportOpts = ReportOpts {
-- eg in the income section of an income statement, this helps --sort-amount know
-- how to sort negative numbers.
,color_ :: Bool
+ ,forecast_ :: Bool
+ ,auto_ :: Bool
} deriving (Show, Data, Typeable)
instance Default ReportOpts where def = defreportopts
@@ -134,6 +139,8 @@ defreportopts = ReportOpts
def
def
def
+ def
+ def
rawOptsToReportOpts :: RawOpts -> IO ReportOpts
rawOptsToReportOpts rawopts = checkReportOpts <$> do
@@ -164,6 +171,8 @@ rawOptsToReportOpts rawopts = checkReportOpts <$> do
,sort_amount_ = boolopt "sort-amount" rawopts'
,pretty_tables_ = boolopt "pretty-tables" rawopts'
,color_ = color
+ ,forecast_ = boolopt "forecast" rawopts'
+ ,auto_ = boolopt "auto" rawopts'
}
-- | Do extra validation of raw option values, raising an error if there's a problem.
@@ -393,32 +402,41 @@ tests_queryOptsFromOpts = [
})
]
--- | The effective report start date is the one specified by options or queries,
--- otherwise the earliest transaction or posting date in the journal,
+-- | The effective report start/end dates are the dates specified by options or queries,
+-- otherwise the earliest/latest transaction or posting date in the journal,
-- otherwise (for an empty journal) nothing.
-- Needs IO to parse smart dates in options/queries.
+reportStartEndDates :: Journal -> ReportOpts -> IO (Maybe (Day,Day))
+reportStartEndDates j ropts = do
+ (mspecifiedstartdate, mspecifiedenddate) <- specifiedStartEndDates ropts
+ return $
+ case journalDateSpan False j of -- don't bother with secondary dates
+ DateSpan (Just journalstartdate) (Just journalenddate) ->
+ Just (fromMaybe journalstartdate mspecifiedstartdate, fromMaybe journalenddate mspecifiedenddate)
+ _ -> Nothing
+
reportStartDate :: Journal -> ReportOpts -> IO (Maybe Day)
reportStartDate j ropts = (fst <$>) <$> reportStartEndDates j ropts
--- | The effective report end date is the one specified by options or queries,
--- otherwise the latest transaction or posting date in the journal,
--- otherwise (for an empty journal) nothing.
--- Needs IO to parse smart dates in options/queries.
reportEndDate :: Journal -> ReportOpts -> IO (Maybe Day)
reportEndDate j ropts = (snd <$>) <$> reportStartEndDates j ropts
-reportStartEndDates :: Journal -> ReportOpts -> IO (Maybe (Day,Day))
-reportStartEndDates j ropts = do
+-- | The specified report start/end dates are the dates specified by options or queries, if any.
+-- Needs IO to parse smart dates in options/queries.
+specifiedStartEndDates :: ReportOpts -> IO (Maybe Day, Maybe Day)
+specifiedStartEndDates ropts = do
today <- getCurrentDay
let
q = queryFromOpts today ropts
- mrequestedstartdate = queryStartDate False q
- mrequestedenddate = queryEndDate False q
- return $
- case journalDateSpan False j of -- don't bother with secondary dates
- DateSpan (Just journalstartdate) (Just journalenddate) ->
- Just (fromMaybe journalstartdate mrequestedstartdate, fromMaybe journalenddate mrequestedenddate)
- _ -> Nothing
+ mspecifiedstartdate = queryStartDate False q
+ mspecifiedenddate = queryEndDate False q
+ return (mspecifiedstartdate, mspecifiedenddate)
+
+specifiedStartDate :: ReportOpts -> IO (Maybe Day)
+specifiedStartDate ropts = fst <$> specifiedStartEndDates ropts
+
+specifiedEndDate :: ReportOpts -> IO (Maybe Day)
+specifiedEndDate ropts = snd <$> specifiedStartEndDates ropts
tests_Hledger_Reports_ReportOptions :: Test
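
A hypothetical usage sketch (not part of this patch) showing how the split between specified dates and journal-derived fallbacks might be consumed by a report, with a final fallback to today; only reportStartDate and getCurrentDay come from the code above, the helper name is an assumption.

    -- Hypothetical helper built on the functions added above.
    effectiveStartDate :: Journal -> ReportOpts -> IO Day
    effectiveStartDate j ropts = do
      mstart <- reportStartDate j ropts   -- options/query start, else journal's earliest date
      maybe getCurrentDay return mstart   -- else fall back to today
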
diff --git a/Hledger/Utils/Parse.hs b/Hledger/Utils/Parse.hs
index f5041ba..de69798 100644
--- a/Hledger/Utils/Parse.hs
+++ b/Hledger/Utils/Parse.hs
@@ -38,6 +38,9 @@ choice' = choice . map try
choiceInState :: [StateT s (ParsecT MPErr Text m) a] -> StateT s (ParsecT MPErr Text m) a
choiceInState = choice . map try
+surroundedBy :: Applicative m => m openclose -> m a -> m a
+surroundedBy p = between p p
+
parsewith :: Parsec e Text a -> Text -> Either (ParseError Char e) a
parsewith p = runParser p ""
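
A quick usage sketch for the new surroundedBy combinator (an assumed example, not from the patch): since between p p uses the same parser on both sides, it is handy for symmetric delimiters such as quotes.

    -- e.g. a double-quoted word, in the same parser types used in this module:
    quotedWord :: Parsec MPErr Text String
    quotedWord = surroundedBy (char '"') (some (noneOf "\""))
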
diff --git a/hledger-lib.cabal b/hledger-lib.cabal
index 5d78f52..f7152f3 100644
--- a/hledger-lib.cabal
+++ b/hledger-lib.cabal
@@ -1,9 +1,11 @@
--- This file has been generated from package.yaml by hpack version 0.17.1.
+-- This file has been generated from package.yaml by hpack version 0.20.0.
--
-- see: https://github.com/sol/hpack
+--
+-- hash: c0497f13da483640b446c596007d9e15f8235a01f8b2ae8ffa96f89dbf59ffb7
name: hledger-lib
-version: 1.4
+version: 1.5
synopsis: Core data types, parsers and functionality for the hledger accounting tools
description: This is a reusable library containing hledger's core functionality.
.
@@ -30,18 +32,18 @@ extra-source-files:
README
data-files:
- doc/hledger_csv.5
- doc/hledger_csv.5.info
- doc/hledger_csv.5.txt
- doc/hledger_journal.5
- doc/hledger_journal.5.info
- doc/hledger_journal.5.txt
- doc/hledger_timeclock.5
- doc/hledger_timeclock.5.info
- doc/hledger_timeclock.5.txt
- doc/hledger_timedot.5
- doc/hledger_timedot.5.info
- doc/hledger_timedot.5.txt
+ hledger_csv.5
+ hledger_csv.info
+ hledger_csv.txt
+ hledger_journal.5
+ hledger_journal.info
+ hledger_journal.txt
+ hledger_timeclock.5
+ hledger_timeclock.info
+ hledger_timeclock.txt
+ hledger_timedot.5
+ hledger_timedot.info
+ hledger_timedot.txt
source-repository head
type: git
@@ -52,37 +54,38 @@ library
./.
ghc-options: -Wall -fno-warn-unused-do-bind -fno-warn-name-shadowing -fno-warn-missing-signatures -fno-warn-type-defaults -fno-warn-orphans
build-depends:
- base >=4.8 && <5
- , base-compat >=0.8.1
- , ansi-terminal >= 0.6.2.3 && < 0.8
+ Decimal
+ , HUnit
+ , ansi-terminal >=0.6.2.3
, array
+ , base >=4.8 && <5
+ , base-compat >=0.8.1
, blaze-markup >=0.5.1
, bytestring
- , cmdargs >=0.10 && <0.11
+ , cmdargs >=0.10
, containers
, csv
, data-default >=0.5
- , Decimal
, deepseq
, directory
+ , extra
, filepath
- , hashtables >= 1.2
- , megaparsec >=5.0 && < 6.2
+ , hashtables >=1.2
+ , megaparsec >=5.0
, mtl
, mtl-compat
, old-time
- , parsec >= 3
+ , parsec >=3
, pretty-show >=1.6.4
, regex-tdfa
, safe >=0.2
, semigroups
- , split >=0.1 && <0.3
- , text >=1.2 && <1.3
+ , split >=0.1
+ , text >=1.2
, time >=1.5
- , transformers >=0.2 && <0.6
+ , transformers >=0.2
, uglymemo
- , utf8-string >=0.3.5 && <1.1
- , HUnit
+ , utf8-string >=0.3.5
exposed-modules:
Hledger
Hledger.Data
@@ -140,39 +143,40 @@ test-suite doctests
tests
ghc-options: -Wall -fno-warn-unused-do-bind -fno-warn-name-shadowing -fno-warn-missing-signatures -fno-warn-type-defaults -fno-warn-orphans
build-depends:
- base >=4.8 && <5
- , base-compat >=0.8.1
- , ansi-terminal >= 0.6.2.3 && < 0.8
+ Decimal
+ , Glob >=0.7
+ , HUnit
+ , ansi-terminal >=0.6.2.3
, array
+ , base >=4.8 && <5
+ , base-compat >=0.8.1
, blaze-markup >=0.5.1
, bytestring
- , cmdargs >=0.10 && <0.11
+ , cmdargs >=0.10
, containers
, csv
, data-default >=0.5
- , Decimal
, deepseq
, directory
+ , doctest >=0.8
+ , extra
, filepath
- , hashtables >= 1.2
- , megaparsec >=5.0 && < 6.2
+ , hashtables >=1.2
+ , megaparsec >=5.0
, mtl
, mtl-compat
, old-time
- , parsec >= 3
+ , parsec >=3
, pretty-show >=1.6.4
, regex-tdfa
, safe >=0.2
, semigroups
- , split >=0.1 && <0.3
- , text >=1.2 && <1.3
+ , split >=0.1
+ , text >=1.2
, time >=1.5
- , transformers >=0.2 && <0.6
+ , transformers >=0.2
, uglymemo
- , utf8-string >=0.3.5 && <1.1
- , HUnit
- , doctest >=0.8
- , Glob >=0.7
+ , utf8-string >=0.3.5
other-modules:
Hledger
Hledger.Data
@@ -218,6 +222,7 @@ test-suite doctests
Hledger.Utils.Tree
Hledger.Utils.UTF8IOCompat
Text.Megaparsec.Compat
+ Paths_hledger_lib
default-language: Haskell2010
test-suite hunittests
@@ -228,40 +233,41 @@ test-suite hunittests
tests
ghc-options: -Wall -fno-warn-unused-do-bind -fno-warn-name-shadowing -fno-warn-missing-signatures -fno-warn-type-defaults -fno-warn-orphans
build-depends:
- base >=4.8 && <5
- , base-compat >=0.8.1
- , ansi-terminal >= 0.6.2.3 && < 0.8
+ Decimal
+ , HUnit
+ , ansi-terminal >=0.6.2.3
, array
+ , base >=4.8 && <5
+ , base-compat >=0.8.1
, blaze-markup >=0.5.1
, bytestring
- , cmdargs >=0.10 && <0.11
+ , cmdargs >=0.10
, containers
, csv
, data-default >=0.5
- , Decimal
, deepseq
, directory
+ , extra
, filepath
- , hashtables >= 1.2
- , megaparsec >=5.0 && < 6.2
+ , hashtables >=1.2
+ , hledger-lib
+ , megaparsec >=5.0
, mtl
, mtl-compat
, old-time
- , parsec >= 3
+ , parsec >=3
, pretty-show >=1.6.4
, regex-tdfa
, safe >=0.2
, semigroups
- , split >=0.1 && <0.3
- , text >=1.2 && <1.3
- , time >=1.5
- , transformers >=0.2 && <0.6
- , uglymemo
- , utf8-string >=0.3.5 && <1.1
- , HUnit
- , hledger-lib
+ , split >=0.1
, test-framework
, test-framework-hunit
+ , text >=1.2
+ , time >=1.5
+ , transformers >=0.2
+ , uglymemo
+ , utf8-string >=0.3.5
other-modules:
Hledger
Hledger.Data
@@ -307,4 +313,5 @@ test-suite hunittests
Hledger.Utils.Tree
Hledger.Utils.UTF8IOCompat
Text.Megaparsec.Compat
+ Paths_hledger_lib
default-language: Haskell2010
diff --git a/doc/hledger_csv.5 b/hledger_csv.5
index 7df3714..7b01eba 100644
--- a/doc/hledger_csv.5
+++ b/hledger_csv.5
@@ -1,5 +1,5 @@
-.TH "hledger_csv" "5" "September 2017" "hledger 1.4" "hledger User Manuals"
+.TH "hledger_csv" "5" "December 2017" "hledger 1.5" "hledger User Manuals"
@@ -8,19 +8,77 @@
CSV \- how hledger reads CSV data, and the CSV rules file format
.SH DESCRIPTION
.PP
-hledger can read CSV files, converting each CSV record into a journal
-entry (transaction), if you provide some conversion hints in a "rules
-file".
-This file should be named like the CSV file with an additional
-\f[C]\&.rules\f[] suffix (eg: \f[C]mybank.csv.rules\f[]); or, you can
-specify the file with \f[C]\-\-rules\-file\ PATH\f[].
-hledger will create it if necessary, with some default rules which
-you\[aq]ll need to adjust.
-At minimum, the rules file must specify the \f[C]date\f[] and
+hledger can read CSV (comma\-separated value) files as if they were
+journal files, automatically converting each CSV record into a
+transaction.
+(To learn about \f[I]writing\f[] CSV, see CSV output.)
+.PP
+Converting CSV to transactions requires some special conversion rules.
+These do several things:
+.IP \[bu] 2
+they describe the layout and format of the CSV data
+.IP \[bu] 2
+they can customize the generated journal entries using a simple
+templating language
+.IP \[bu] 2
+they can add refinements based on patterns in the CSV data, eg
+categorizing transactions with more detailed account names.
+.PP
+When reading a CSV file named \f[C]FILE.csv\f[], hledger looks for a
+conversion rules file named \f[C]FILE.csv.rules\f[] in the same
+directory.
+You can override this with the \f[C]\-\-rules\-file\f[] option.
+If the rules file does not exist, hledger will auto\-create one with
+some example rules, which you'll need to adjust.
+.PP
+At minimum, the rules file must identify the \f[C]date\f[] and
\f[C]amount\f[] fields.
-For an example, see Cookbook: convert CSV files.
+It may also be necessary to specify the date format, and the number of
+header lines to skip.
+Eg:
+.IP
+.nf
+\f[C]
+fields\ date,\ _,\ _,\ amount
+date\-format\ \ %d/%m/%Y
+skip\ 1
+\f[]
+.fi
+.PP
+A more complete example:
+.IP
+.nf
+\f[C]
+#\ hledger\ CSV\ rules\ for\ amazon.com\ order\ history
+
+#\ sample:
+#\ "Date","Type","To/From","Name","Status","Amount","Fees","Transaction\ ID"
+#\ "Jul\ 29,\ 2012","Payment","To","Adapteva,\ Inc.","Completed","$25.00","$0.00","17LA58JSK6PRD4HDGLNJQPI1PB9N8DKPVHL"
+
+#\ skip\ one\ header\ line
+skip\ 1
+
+#\ name\ the\ csv\ fields\ (and\ assign\ the\ transaction\[aq]s\ date,\ amount\ and\ code)
+fields\ date,\ _,\ toorfrom,\ name,\ amzstatus,\ amount,\ fees,\ code
+
+#\ how\ to\ parse\ the\ date
+date\-format\ %b\ %\-d,\ %Y
+
+#\ combine\ two\ fields\ to\ make\ the\ description
+description\ %toorfrom\ %name
+
+#\ save\ these\ fields\ as\ tags
+comment\ \ \ \ \ status:%amzstatus,\ fees:%fees
+
+#\ set\ the\ base\ account\ for\ all\ transactions
+account1\ \ \ \ assets:amazon
+
+#\ flip\ the\ sign\ on\ the\ amount
+amount\ \ \ \ \ \ \-%amount
+\f[]
+.fi
.PP
-To learn about \f[I]exporting\f[] CSV, see CSV output.
+For more examples, see Convert CSV files.
.SH CSV RULES
.PP
The following seven kinds of rule can appear in the rules file, in any
@@ -29,10 +87,10 @@ Blank lines and lines beginning with \f[C]#\f[] or \f[C];\f[] are
ignored.
.SS skip
.PP
-\f[C]skip\f[]\f[I]\f[C]N\f[]\f[]
+\f[C]skip\f[]\f[I]\f[CI]N\f[I]\f[]
.PP
Skip this number of CSV records at the beginning.
-You\[aq]ll need this whenever your CSV data contains header lines.
+You'll need this whenever your CSV data contains header lines.
Eg:
.IP
.nf
@@ -43,11 +101,11 @@ skip\ 1
.fi
.SS date\-format
.PP
-\f[C]date\-format\f[]\f[I]\f[C]DATEFMT\f[]\f[]
+\f[C]date\-format\f[]\f[I]\f[CI]DATEFMT\f[I]\f[]
.PP
When your CSV date fields are not formatted like \f[C]YYYY/MM/DD\f[] (or
-\f[C]YYYY\-MM\-DD\f[] or \f[C]YYYY.MM.DD\f[]), you\[aq]ll need to
-specify the format.
+\f[C]YYYY\-MM\-DD\f[] or \f[C]YYYY.MM.DD\f[]), you'll need to specify
+the format.
DATEFMT is a strptime\-like date parsing pattern, which must parse the
date field values completely.
Examples:
@@ -81,8 +139,8 @@ date\-format\ %\-m/%\-d/%Y\ %l:%M\ %p
.fi
.SS field list
.PP
-\f[C]fields\f[]\f[I]\f[C]FIELDNAME1\f[]\f[],
-\f[I]\f[C]FIELDNAME2\f[]\f[]...
+\f[C]fields\f[]\f[I]\f[CI]FIELDNAME1\f[I]\f[],
+\f[I]\f[CI]FIELDNAME2\f[I]\f[]\&...
.PP
This (a) names the CSV fields, in order (names may not contain
whitespace; uninteresting names may be left blank), and (b) assigns them
@@ -106,7 +164,7 @@ fields\ date,\ description,\ ,\ amount,\ ,\ ,\ somefield,\ anotherfield
.fi
.SS field assignment
.PP
-\f[I]\f[C]ENTRYFIELDNAME\f[]\f[] \f[I]\f[C]FIELDVALUE\f[]\f[]
+\f[I]\f[CI]ENTRYFIELDNAME\f[I]\f[] \f[I]\f[CI]FIELDVALUE\f[I]\f[]
.PP
This sets a journal entry field (one of the standard names above) to the
given text value, which can include CSV field values interpolated by
@@ -130,30 +188,30 @@ comment\ note:\ %somefield\ \-\ %anotherfield,\ date:\ %1
Field assignments can be used instead of or in addition to a field list.
.SS conditional block
.PP
-\f[C]if\f[] \f[I]\f[C]PATTERN\f[]\f[]
+\f[C]if\f[] \f[I]\f[CI]PATTERN\f[I]\f[]
.PD 0
.P
.PD
-\ \ \ \ \f[I]\f[C]FIELDASSIGNMENTS\f[]\f[]...
+\ \ \ \ \f[I]\f[CI]FIELDASSIGNMENTS\f[I]\f[]\&...
.PP
\f[C]if\f[]
.PD 0
.P
.PD
-\f[I]\f[C]PATTERN\f[]\f[]
+\f[I]\f[CI]PATTERN\f[I]\f[]
.PD 0
.P
.PD
-\f[I]\f[C]PATTERN\f[]\f[]...
+\f[I]\f[CI]PATTERN\f[I]\f[]\&...
.PD 0
.P
.PD
-\ \ \ \ \f[I]\f[C]FIELDASSIGNMENTS\f[]\f[]...
+\ \ \ \ \f[I]\f[CI]FIELDASSIGNMENTS\f[I]\f[]\&...
.PP
This applies one or more field assignments, only to those CSV records
matched by one of the PATTERNs.
The patterns are case\-insensitive regular expressions which match
-anywhere within the whole CSV record (it\[aq]s not yet possible to match
+anywhere within the whole CSV record (it's not yet possible to match
within a specific field).
When there are multiple patterns they can be written on separate lines,
unindented.
@@ -182,11 +240,11 @@ banking\ thru\ software
.fi
.SS include
.PP
-\f[C]include\f[]\f[I]\f[C]RULESFILE\f[]\f[]
+\f[C]include\f[]\f[I]\f[CI]RULESFILE\f[I]\f[]
.PP
Include another rules file at this point.
\f[C]RULESFILE\f[] is either an absolute file path or a path relative to
-the current file\[aq]s directory.
+the current file's directory.
Eg:
.IP
.nf
@@ -203,9 +261,9 @@ Consider adding this rule if all of the following are true: you might be
processing just one day of data, your CSV records are in reverse
chronological order (newest first), and you care about preserving the
order of same\-day transactions.
-It usually isn\[aq]t needed, because hledger autodetects the CSV order,
-but when all CSV records have the same date it will assume they are
-oldest first.
+It usually isn't needed, because hledger autodetects the CSV order, but
+when all CSV records have the same date it will assume they are oldest
+first.
.SH CSV TIPS
.SS CSV ordering
.PP
@@ -216,9 +274,8 @@ case where you might need \f[C]newest\-first\f[], see above).
.PP
Each journal entry will have two postings, to \f[C]account1\f[] and
\f[C]account2\f[] respectively.
-It\[aq]s not yet possible to generate entries with more than two
-postings.
-It\[aq]s conventional and recommended to use \f[C]account1\f[] for the
+It's not yet possible to generate entries with more than two postings.
+It's conventional and recommended to use \f[C]account1\f[] for the
account whose CSV we are reading.
.SS CSV amounts
.PP
diff --git a/doc/hledger_csv.5.info b/hledger_csv.info
index 15cb6ce..b420094 100644
--- a/doc/hledger_csv.5.info
+++ b/hledger_csv.info
@@ -1,28 +1,75 @@
-This is hledger_csv.5.info, produced by makeinfo version 6.0 from stdin.
+This is hledger_csv.info, produced by makeinfo version 6.5 from stdin.

-File: hledger_csv.5.info, Node: Top, Next: CSV RULES, Up: (dir)
+File: hledger_csv.info, Node: Top, Next: CSV RULES, Up: (dir)
-hledger_csv(5) hledger 1.4
+hledger_csv(5) hledger 1.5
**************************
-hledger can read CSV files, converting each CSV record into a journal
-entry (transaction), if you provide some conversion hints in a "rules
-file". This file should be named like the CSV file with an additional
-'.rules' suffix (eg: 'mybank.csv.rules'); or, you can specify the file
-with '--rules-file PATH'. hledger will create it if necessary, with
-some default rules which you'll need to adjust. At minimum, the rules
-file must specify the 'date' and 'amount' fields. For an example, see
-Cookbook: convert CSV files.
+hledger can read CSV (comma-separated value) files as if they were
+journal files, automatically converting each CSV record into a
+transaction. (To learn about _writing_ CSV, see CSV output.)
- To learn about _exporting_ CSV, see CSV output.
+ Converting CSV to transactions requires some special conversion
+rules. These do several things:
+
+ * they describe the layout and format of the CSV data
+ * they can customize the generated journal entries using a simple
+ templating language
+ * they can add refinements based on patterns in the CSV data, eg
+ categorizing transactions with more detailed account names.
+
+ When reading a CSV file named 'FILE.csv', hledger looks for a
+conversion rules file named 'FILE.csv.rules' in the same directory. You
+can override this with the '--rules-file' option. If the rules file
+does not exist, hledger will auto-create one with some example rules,
+which you'll need to adjust.
+
+ At minimum, the rules file must identify the 'date' and 'amount'
+fields. It may also be necessary to specify the date format, and the
+number of header lines to skip. Eg:
+
+fields date, _, _, amount
+date-format %d/%m/%Y
+skip 1
+
+ A more complete example:
+
+# hledger CSV rules for amazon.com order history
+
+# sample:
+# "Date","Type","To/From","Name","Status","Amount","Fees","Transaction ID"
+# "Jul 29, 2012","Payment","To","Adapteva, Inc.","Completed","$25.00","$0.00","17LA58JSK6PRD4HDGLNJQPI1PB9N8DKPVHL"
+
+# skip one header line
+skip 1
+
+# name the csv fields (and assign the transaction's date, amount and code)
+fields date, _, toorfrom, name, amzstatus, amount, fees, code
+
+# how to parse the date
+date-format %b %-d, %Y
+
+# combine two fields to make the description
+description %toorfrom %name
+
+# save these fields as tags
+comment status:%amzstatus, fees:%fees
+
+# set the base account for all transactions
+account1 assets:amazon
+
+# flip the sign on the amount
+amount -%amount
+
+ For more examples, see Convert CSV files.
* Menu:
* CSV RULES::
* CSV TIPS::

-File: hledger_csv.5.info, Node: CSV RULES, Next: CSV TIPS, Prev: Top, Up: Top
+File: hledger_csv.info, Node: CSV RULES, Next: CSV TIPS, Prev: Top, Up: Top
1 CSV RULES
***********
@@ -40,7 +87,7 @@ order. Blank lines and lines beginning with '#' or ';' are ignored.
* newest-first::

-File: hledger_csv.5.info, Node: skip, Next: date-format, Up: CSV RULES
+File: hledger_csv.info, Node: skip, Next: date-format, Up: CSV RULES
1.1 skip
========
@@ -54,7 +101,7 @@ whenever your CSV data contains header lines. Eg:
skip 1

-File: hledger_csv.5.info, Node: date-format, Next: field list, Prev: skip, Up: CSV RULES
+File: hledger_csv.info, Node: date-format, Next: field list, Prev: skip, Up: CSV RULES
1.2 date-format
===============
@@ -79,7 +126,7 @@ date-format %Y-%h-%d
date-format %-m/%-d/%Y %l:%M %p

-File: hledger_csv.5.info, Node: field list, Next: field assignment, Prev: date-format, Up: CSV RULES
+File: hledger_csv.info, Node: field list, Next: field assignment, Prev: date-format, Up: CSV RULES
1.3 field list
==============
@@ -102,7 +149,7 @@ Eg:
fields date, description, , amount, , , somefield, anotherfield

-File: hledger_csv.5.info, Node: field assignment, Next: conditional block, Prev: field list, Up: CSV RULES
+File: hledger_csv.info, Node: field assignment, Next: conditional block, Prev: field list, Up: CSV RULES
1.4 field assignment
====================
@@ -123,7 +170,7 @@ comment note: %somefield - %anotherfield, date: %1
list.

-File: hledger_csv.5.info, Node: conditional block, Next: include, Prev: field assignment, Up: CSV RULES
+File: hledger_csv.info, Node: conditional block, Next: include, Prev: field assignment, Up: CSV RULES
1.5 conditional block
=====================
@@ -157,7 +204,7 @@ banking thru software
comment XXX deductible ? check it

-File: hledger_csv.5.info, Node: include, Next: newest-first, Prev: conditional block, Up: CSV RULES
+File: hledger_csv.info, Node: include, Next: newest-first, Prev: conditional block, Up: CSV RULES
1.6 include
===========
@@ -172,7 +219,7 @@ Eg:
include common.rules

-File: hledger_csv.5.info, Node: newest-first, Prev: include, Up: CSV RULES
+File: hledger_csv.info, Node: newest-first, Prev: include, Up: CSV RULES
1.7 newest-first
================
@@ -187,7 +234,7 @@ hledger autodetects the CSV order, but when all CSV records have the
same date it will assume they are oldest first.

-File: hledger_csv.5.info, Node: CSV TIPS, Prev: CSV RULES, Up: Top
+File: hledger_csv.info, Node: CSV TIPS, Prev: CSV RULES, Up: Top
2 CSV TIPS
**********
@@ -201,7 +248,7 @@ File: hledger_csv.5.info, Node: CSV TIPS, Prev: CSV RULES, Up: Top
* Reading multiple CSV files::

-File: hledger_csv.5.info, Node: CSV ordering, Next: CSV accounts, Up: CSV TIPS
+File: hledger_csv.info, Node: CSV ordering, Next: CSV accounts, Up: CSV TIPS
2.1 CSV ordering
================
@@ -211,7 +258,7 @@ same-day entries will be preserved (except in the special case where you
might need 'newest-first', see above).

-File: hledger_csv.5.info, Node: CSV accounts, Next: CSV amounts, Prev: CSV ordering, Up: CSV TIPS
+File: hledger_csv.info, Node: CSV accounts, Next: CSV amounts, Prev: CSV ordering, Up: CSV TIPS
2.2 CSV accounts
================
@@ -222,7 +269,7 @@ two postings. It's conventional and recommended to use 'account1' for
the account whose CSV we are reading.

-File: hledger_csv.5.info, Node: CSV amounts, Next: CSV balance assertions, Prev: CSV accounts, Up: CSV TIPS
+File: hledger_csv.info, Node: CSV amounts, Next: CSV balance assertions, Prev: CSV accounts, Up: CSV TIPS
2.3 CSV amounts
===============
@@ -247,7 +294,7 @@ fields (giving more control, eg to put the currency symbol on the
right).

-File: hledger_csv.5.info, Node: CSV balance assertions, Next: Reading multiple CSV files, Prev: CSV amounts, Up: CSV TIPS
+File: hledger_csv.info, Node: CSV balance assertions, Next: Reading multiple CSV files, Prev: CSV amounts, Up: CSV TIPS
2.4 CSV balance assertions
==========================
@@ -257,7 +304,7 @@ If the CSV includes a running balance, you can assign that to the
it will be asserted as the balance after the 'account1' posting.

-File: hledger_csv.5.info, Node: Reading multiple CSV files, Prev: CSV balance assertions, Up: CSV TIPS
+File: hledger_csv.info, Node: Reading multiple CSV files, Prev: CSV balance assertions, Up: CSV TIPS
2.5 Reading multiple CSV files
==============================
@@ -269,34 +316,34 @@ one rules file will be used for all the CSV files being read.

Tag Table:
-Node: Top74
-Node: CSV RULES810
-Ref: #csv-rules920
-Node: skip1182
-Ref: #skip1278
-Node: date-format1450
-Ref: #date-format1579
-Node: field list2085
-Ref: #field-list2224
-Node: field assignment2929
-Ref: #field-assignment3086
-Node: conditional block3590
-Ref: #conditional-block3746
-Node: include4642
-Ref: #include4774
-Node: newest-first5005
-Ref: #newest-first5121
-Node: CSV TIPS5532
-Ref: #csv-tips5628
-Node: CSV ordering5746
-Ref: #csv-ordering5866
-Node: CSV accounts6047
-Ref: #csv-accounts6187
-Node: CSV amounts6441
-Ref: #csv-amounts6589
-Node: CSV balance assertions7364
-Ref: #csv-balance-assertions7548
-Node: Reading multiple CSV files7753
-Ref: #reading-multiple-csv-files7925
+Node: Top72
+Node: CSV RULES2161
+Ref: #csv-rules2269
+Node: skip2531
+Ref: #skip2625
+Node: date-format2797
+Ref: #date-format2924
+Node: field list3430
+Ref: #field-list3567
+Node: field assignment4272
+Ref: #field-assignment4427
+Node: conditional block4931
+Ref: #conditional-block5085
+Node: include5981
+Ref: #include6111
+Node: newest-first6342
+Ref: #newest-first6456
+Node: CSV TIPS6867
+Ref: #csv-tips6961
+Node: CSV ordering7079
+Ref: #csv-ordering7197
+Node: CSV accounts7378
+Ref: #csv-accounts7516
+Node: CSV amounts7770
+Ref: #csv-amounts7916
+Node: CSV balance assertions8691
+Ref: #csv-balance-assertions8873
+Node: Reading multiple CSV files9078
+Ref: #reading-multiple-csv-files9248

End Tag Table
diff --git a/doc/hledger_csv.5.txt b/hledger_csv.txt
index 166c008..fa19627 100644
--- a/doc/hledger_csv.5.txt
+++ b/hledger_csv.txt
@@ -7,16 +7,65 @@ NAME
CSV - how hledger reads CSV data, and the CSV rules file format
DESCRIPTION
- hledger can read CSV files, converting each CSV record into a journal
- entry (transaction), if you provide some conversion hints in a "rules
- file". This file should be named like the CSV file with an additional
- .rules suffix (eg: mybank.csv.rules); or, you can specify the file with
- --rules-file PATH. hledger will create it if necessary, with some
- default rules which you'll need to adjust. At minimum, the rules file
- must specify the date and amount fields. For an example, see Cookbook:
- convert CSV files.
+ hledger can read CSV (comma-separated value) files as if they were
+ journal files, automatically converting each CSV record into a transac-
+ tion. (To learn about writing CSV, see CSV output.)
- To learn about exporting CSV, see CSV output.
+ Converting CSV to transactions requires some special conversion rules.
+ These do several things:
+
+ o they describe the layout and format of the CSV data
+
+ o they can customize the generated journal entries using a simple tem-
+ plating language
+
+ o they can add refinements based on patterns in the CSV data, eg cate-
+ gorizing transactions with more detailed account names.
+
+ When reading a CSV file named FILE.csv, hledger looks for a conversion
+ rules file named FILE.csv.rules in the same directory. You can over-
+ ride this with the --rules-file option. If the rules file does not
+ exist, hledger will auto-create one with some example rules, which
+ you'll need to adjust.
+
+ At minimum, the rules file must identify the date and amount fields.
+ It may also be necessary to specify the date format, and the number of
+ header lines to skip. Eg:
+
+ fields date, _, _, amount
+ date-format %d/%m/%Y
+ skip 1
+
+ A more complete example:
+
+ # hledger CSV rules for amazon.com order history
+
+ # sample:
+ # "Date","Type","To/From","Name","Status","Amount","Fees","Transaction ID"
+ # "Jul 29, 2012","Payment","To","Adapteva, Inc.","Completed","$25.00","$0.00","17LA58JSK6PRD4HDGLNJQPI1PB9N8DKPVHL"
+
+ # skip one header line
+ skip 1
+
+ # name the csv fields (and assign the transaction's date, amount and code)
+ fields date, _, toorfrom, name, amzstatus, amount, fees, code
+
+ # how to parse the date
+ date-format %b %-d, %Y
+
+ # combine two fields to make the description
+ description %toorfrom %name
+
+ # save these fields as tags
+ comment status:%amzstatus, fees:%fees
+
+ # set the base account for all transactions
+ account1 assets:amazon
+
+ # flip the sign on the amount
+ amount -%amount
+
+ For more examples, see Convert CSV files.
CSV RULES
The following seven kinds of rule can appear in the rules file, in any
@@ -200,4 +249,4 @@ SEE ALSO
-hledger 1.4 September 2017 hledger_csv(5)
+hledger 1.5 December 2017 hledger_csv(5)
diff --git a/doc/hledger_journal.5 b/hledger_journal.5
index 6b868f7..ae43d51 100644
--- a/doc/hledger_journal.5
+++ b/hledger_journal.5
@@ -1,36 +1,34 @@
.\"t
-.TH "hledger_journal" "5" "September 2017" "hledger 1.4" "hledger User Manuals"
+.TH "hledger_journal" "5" "December 2017" "hledger 1.5" "hledger User Manuals"
.SH NAME
.PP
-Journal \- hledger\[aq]s default file format, representing a General
-Journal
+Journal \- hledger's default file format, representing a General Journal
.SH DESCRIPTION
.PP
-hledger\[aq]s usual data source is a plain text file containing journal
+hledger's usual data source is a plain text file containing journal
entries in hledger journal format.
This file represents a standard accounting general journal.
-I use file names ending in \f[C]\&.journal\f[], but that\[aq]s not
-required.
+I use file names ending in \f[C]\&.journal\f[], but that's not required.
The journal file contains a number of transaction entries, each
describing a transfer of money (or any commodity) between two or more
named accounts, in a simple format readable by both hledger and humans.
.PP
-hledger\[aq]s journal format is a compatible subset, mostly, of
-ledger\[aq]s journal format, so hledger can work with compatible ledger
-journal files as well.
-It\[aq]s safe, and encouraged, to run both hledger and ledger on the
-same journal file, eg to validate the results you\[aq]re getting.
+hledger's journal format is a compatible subset, mostly, of ledger's
+journal format, so hledger can work with compatible ledger journal files
+as well.
+It's safe, and encouraged, to run both hledger and ledger on the same
+journal file, eg to validate the results you're getting.
.PP
You can use hledger without learning any more about this file; just use
the add or web commands to create and update it.
Many users, though, also edit the journal file directly with a text
editor, perhaps assisted by the helper modes for emacs or vim.
.PP
-Here\[aq]s an example:
+Here's an example:
.IP
.nf
\f[C]
@@ -83,7 +81,7 @@ line or a semicolon)
semicolon until end of line)
.PP
Then comes zero or more (but usually at least 2) indented lines
-representing...
+representing\&...
.SS Postings
.PP
A posting is an addition of some amount to, or removal of some amount
@@ -136,12 +134,12 @@ The primary date, on the left, is used by default; the secondary date,
on the right, is used when the \f[C]\-\-date2\f[] flag is specified
(\f[C]\-\-aux\-date\f[] or \f[C]\-\-effective\f[] also work).
.PP
-The meaning of secondary dates is up to you, but it\[aq]s best to follow
-a consistent rule.
-Eg write the bank\[aq]s clearing date as primary, and when needed, the
-date the transaction was initiated as secondary.
+The meaning of secondary dates is up to you, but it's best to follow a
+consistent rule.
+Eg write the bank's clearing date as primary, and when needed, the date
+the transaction was initiated as secondary.
.PP
-Here\[aq]s an example.
+Here's an example.
Note that a secondary date will use the year of the primary date if
unspecified.
.IP
@@ -205,14 +203,14 @@ $\ hledger\ \-f\ t.j\ register\ checking
.fi
.PP
DATE should be a simple date; if the year is not specified it will use
-the year of the transaction\[aq]s date.
+the year of the transaction's date.
You can set the secondary date similarly, with \f[C]date2:DATE2\f[].
The \f[C]date:\f[] or \f[C]date2:\f[] tags must have a valid simple date
value if they are present, eg a \f[C]date:\f[] tag with no value is not
allowed.
.PP
-Ledger\[aq]s earlier, more compact bracketed date syntax is also
-supported: \f[C][DATE]\f[], \f[C][DATE=DATE2]\f[] or \f[C][=DATE2]\f[].
+Ledger's earlier, more compact bracketed date syntax is also supported:
+\f[C][DATE]\f[], \f[C][DATE=DATE2]\f[] or \f[C][=DATE2]\f[].
hledger will attempt to parse any square\-bracketed sequence of the
\f[C]0123456789/\-.=\f[] characters in this way.
With this syntax, DATE infers its year from the transaction and DATE2
@@ -256,11 +254,11 @@ When reporting, you can filter by status with the
\f[C]status:!\f[], and \f[C]status:*\f[] queries; or the U, P, C keys in
hledger\-ui.
.PP
-Note, in Ledger and in older versions of hledger, the "unmarked" state
-is called "uncleared".
+Note, in Ledger and in older versions of hledger, the \[lq]unmarked\[rq]
+state is called \[lq]uncleared\[rq].
As of hledger 1.3 we have renamed it to unmarked for clarity.
.PP
-To replicate Ledger and old hledger\[aq]s behaviour of also matching
+To replicate Ledger and old hledger's behaviour of also matching
pending, combine \-U and \-P.
.PP
Status marks are optional, but can be helpful eg for reconciling with
@@ -270,12 +268,13 @@ status.
Eg in Emacs ledger\-mode, you can toggle transaction status with C\-c
C\-e, or posting status with C\-c C\-c.
.PP
-What "uncleared", "pending", and "cleared" actually mean is up to you.
-Here\[aq]s one suggestion:
+What \[lq]uncleared\[rq], \[lq]pending\[rq], and \[lq]cleared\[rq]
+actually mean is up to you.
+Here's one suggestion:
.PP
.TS
tab(@);
-lw(10.5n) lw(59.5n).
+lw(9.9n) lw(60.1n).
T{
status
T}@T{
@@ -305,10 +304,10 @@ bank soon (like uncashed checks), and no flags to see the most
up\-to\-date state of your finances.
.SS Description
.PP
-A transaction\[aq]s description is the rest of the line following the
-date and status mark (or until a comment begins).
-Sometimes called the "narration" in traditional bookkeeping, it can be
-used for whatever you wish, or left blank.
+A transaction's description is the rest of the line following the date
+and status mark (or until a comment begins).
+Sometimes called the \[lq]narration\[rq] in traditional bookkeeping, it
+can be used for whatever you wish, or left blank.
Transaction descriptions can be queried, unlike comments.
.SS Payee and note
.PP
@@ -366,11 +365,15 @@ Some examples:
.P
.PD
\f[C]EUR\ \-2.000.000,00\f[]
+.PD 0
+.P
+.PD
+\f[C]1\ 999\ 999.9455\f[]
.PP
As you can see, the amount format is somewhat flexible:
.IP \[bu] 2
-amounts are a number (the "quantity") and optionally a currency
-symbol/commodity name (the "commodity").
+amounts are a number (the \[lq]quantity\[rq]) and optionally a currency
+symbol/commodity name (the \[lq]commodity\[rq]).
.IP \[bu] 2
the commodity is a symbol, word, or phrase, on the left or right, with
or without a separating space.
@@ -381,10 +384,32 @@ negative amounts with a commodity on the left can have the minus sign
before or after it
.IP \[bu] 2
digit groups (thousands, or any other grouping) can be separated by
-commas (in which case period is used for decimal point) or periods (in
-which case comma is used for decimal point)
+spaces, commas, or periods, and the same separator should be used
+between all groups
+.IP \[bu] 2
+the decimal part can be separated by a comma or period, which must
+differ from the digit group separator
+.PP
+You can use any of these variations when recording data.
+However, some representations are ambiguous: \f[C]$1.000\f[] and
+\f[C]$1,000\f[] may each mean either one thousand dollars or one dollar.
+By default hledger assumes that a sole delimiter is a decimal point.
+A commodity format declared earlier in the file can resolve the
+ambiguity differently:
+.IP
+.nf
+\f[C]
+commodity\ $1,000.00
+
+2017/12/25\ New\ life\ of\ Scrooge
+\ \ \ \ expenses:gifts\ \ $1,000
+\ \ \ \ assets
+\f[]
+.fi
.PP
-You can use any of these variations when recording data, but when
+Though the journal may contain mixed amount styles, when
hledger displays amounts, it will choose a consistent format for each
commodity.
(Except for price amounts, which are always formatted as written).
@@ -399,13 +424,12 @@ will be the maximum from all posting amounts in that commmodity
or if there are no such amounts in the journal, a default format is used
(like \f[C]$1000.00\f[]).
.PP
-Price amounts and amounts in D directives usually don\[aq]t affect
-amount format inference, but in some situations they can do so
-indirectly.
-(Eg when D\[aq]s default commodity is applied to a commodity\-less
-amount, or when an amountless posting is balanced using a price\[aq]s
-commodity, or when \-V is used.) If you find this causing problems, set
-the desired format with a commodity directive.
+Price amounts and amounts in D directives usually don't affect amount
+format inference, but in some situations they can do so indirectly.
+(Eg when D's default commodity is applied to a commodity\-less amount,
+or when an amountless posting is balanced using a price's commodity, or
+when \-V is used.) If you find this causing problems, set the desired
+format with a commodity directive.
.SS Virtual Postings
.PP
When you parenthesise the account name in a posting, we call that a
@@ -416,7 +440,7 @@ it is ignored when checking that the transaction is balanced
it is excluded from reports when the \f[C]\-\-real/\-R\f[] flag is used,
or the \f[C]real:1\f[] query.
.PP
-You could use this, eg, to set an account\[aq]s opening balance without
+You could use this, eg, to set an account's opening balance without
needing to use the \f[C]equity:opening\ balances\f[] account:
.IP
.nf
@@ -450,8 +474,7 @@ which is more correct and provides better error checking.
.SS Balance Assertions
.PP
hledger supports Ledger\-style balance assertions in journal files.
-These look like \f[C]=EXPECTEDBALANCE\f[] following a posting\[aq]s
-amount.
+These look like \f[C]=EXPECTEDBALANCE\f[] following a posting's amount.
Eg in this example we assert the expected dollar balance in accounts a
and b after each posting:
.IP
@@ -476,7 +499,7 @@ You can disable them temporarily with the
troubleshooting or for reading Ledger files.
.SS Assertions and ordering
.PP
-hledger sorts an account\[aq]s postings and assertions first by date and
+hledger sorts an account's postings and assertions first by date and
then (for postings on the same day) by parse order.
Note this is different from Ledger, which sorts assertions only by parse
order.
@@ -495,33 +518,33 @@ intra\-day balances.
With included files, things are a little more complicated.
Including preserves the ordering of postings and assertions.
If you have multiple postings to an account on the same day, split
-across different files, and you also want to assert the account\[aq]s
-balance on the same day, you\[aq]ll have to put the assertion in the
-right file.
+across different files, and you also want to assert the account's
+balance on the same day, you'll have to put the assertion in the right
+file.
.SS Assertions and multiple \-f options
.PP
-Balance assertions don\[aq]t work well across files specified with
-multiple \-f options.
+Balance assertions don't work well across files specified with multiple
+\-f options.
Use include or concatenate the files instead.
.SS Assertions and commodities
.PP
The asserted balance must be a simple single\-commodity amount, and in
-fact the assertion checks only this commodity\[aq]s balance within the
+fact the assertion checks only this commodity's balance within the
(possibly multi\-commodity) account balance.
We could call this a partial balance assertion.
This is compatible with Ledger, and makes it possible to make assertions
about accounts containing multiple commodities.
.PP
-To assert each commodity\[aq]s balance in such a multi\-commodity
-account, you can add multiple postings (with amount 0 if necessary).
-But note that no matter how many assertions you add, you can\[aq]t be
-sure the account does not contain some unexpected commodity.
-(We\[aq]ll add support for this kind of total balance assertion if
-there\[aq]s demand.)
+To assert each commodity's balance in such a multi\-commodity account,
+you can add multiple postings (with amount 0 if necessary).
+But note that no matter how many assertions you add, you can't be sure
+the account does not contain some unexpected commodity.
+(We'll add support for this kind of total balance assertion if there's
+demand.)
.SS Assertions and subaccounts
.PP
Balance assertions do not count the balance from subaccounts; they check
-the posted account\[aq]s exclusive balance.
+the posted account's exclusive balance.
For example:
.IP
.nf
@@ -533,7 +556,7 @@ For example:
\f[]
.fi
.PP
-The balance report\[aq]s flat mode shows these exclusive balances more
+The balance report's flat mode shows these exclusive balances more
clearly:
.IP
.nf
@@ -582,9 +605,9 @@ or when adjusting a balance to reality:
\f[]
.fi
.PP
-The calculated amount depends on the account\[aq]s balance in the
-commodity at that point (which depends on the previously\-dated postings
-of the commodity to that account since the last balance assertion or
+The calculated amount depends on the account's balance in the commodity
+at that point (which depends on the previously\-dated postings of the
+commodity to that account since the last balance assertion or
assignment).
Note that using balance assignments makes your journal a little less
explicit; to know the exact amount posted, you have to run hledger or do
@@ -592,7 +615,7 @@ the calculations yourself, instead of just reading it.
.SS Prices
.SS Transaction prices
.PP
-Within a transaction, you can note an amount\[aq]s price in another
+Within a transaction, you can note an amount's price in another
commodity.
This can be used to document the cost (in a purchase) or selling price
(in a sale).
@@ -643,8 +666,8 @@ hledger infer the price that balances the transaction:
.RE
.PP
Amounts with transaction prices can be displayed in the transaction
-price\[aq]s commodity by using the \f[C]\-B/\-\-cost\f[] flag (except
-for #551) ("B" is from "cost Basis").
+price's commodity by using the \f[C]\-B/\-\-cost\f[] flag (except for
+#551) (\[lq]B\[rq] is from \[lq]cost Basis\[rq]).
Eg for the above, here is how \-B affects the balance report:
.IP
.nf
@@ -661,7 +684,7 @@ $\ hledger\ bal\ \-N\ \-\-flat\ \-B
Note \-B is sensitive to the order of postings when a transaction price
is inferred: the inferred price will be in the commodity of the last
amount.
-So if example 3\[aq]s postings are reversed, while the transaction is
+So if example 3's postings are reversed, while the transaction is
equivalent, \-B shows something different:
.IP
.nf
@@ -716,9 +739,9 @@ P\ 2010/1/1\ €\ $1.40
.SS Comments
.PP
Lines in the journal beginning with a semicolon (\f[C];\f[]) or hash
-(\f[C]#\f[]) or asterisk (\f[C]*\f[]) are comments, and will be ignored.
-(Asterisk comments make it easy to treat your journal like an org\-mode
-outline in emacs.)
+(\f[C]#\f[]) or star (\f[C]*\f[]) are comments, and will be ignored.
+(Star comments cause org\-mode nodes to be ignored, allowing emacs users
+to fold and navigate their journals with org\-mode or orgstruct\-mode.)
.PP
Also, anything between \f[C]comment\f[] and \f[C]end\ comment\f[]
directives is a (multi\-line) comment.
@@ -730,20 +753,22 @@ description and/or indented on the following lines (before the
postings).
Similarly, you can attach comments to an individual posting by writing
them after the amount and/or indented on the following lines.
+Transaction and posting comments must begin with a semicolon
+(\f[C];\f[]).
.PP
Some examples:
.IP
.nf
\f[C]
-#\ a\ journal\ comment
+#\ a\ file\ comment
-;\ also\ a\ journal\ comment
+;\ also\ a\ file\ comment
comment
-This\ is\ a\ multiline\ comment,
+This\ is\ a\ multiline\ file\ comment,
which\ continues\ until\ a\ line
where\ the\ "end\ comment"\ string
-appears\ on\ its\ own.
+appears\ on\ its\ own\ (or\ end\ of\ file).
end\ comment
2012/5/14\ something\ \ ;\ a\ transaction\ comment
@@ -752,7 +777,7 @@ end\ comment
\ \ \ \ posting2
\ \ \ \ ;\ a\ comment\ for\ posting\ 2
\ \ \ \ ;\ another\ comment\ line\ for\ posting\ 2
-;\ a\ journal\ comment\ (because\ not\ indented)
+;\ a\ file\ comment\ (because\ not\ indented)
\f[]
.fi
.SS Tags
@@ -778,8 +803,7 @@ comma or end of line, with leading/trailing whitespace removed:
\f[]
.fi
.PP
-Note this means hledger\[aq]s tag values can not contain commas or
-newlines.
+Note this means hledger's tag values can not contain commas or newlines.
Ending at commas means you can write multiple short tags on one line,
comma separated:
.IP
@@ -791,12 +815,13 @@ comma separated:
.PP
Here,
.IP \[bu] 2
-"\f[C]a\ comment\ containing\f[]" is just comment text, not a tag
+\[lq]\f[C]a\ comment\ containing\f[]\[rq] is just comment text, not a
+tag
.IP \[bu] 2
-"\f[C]tag1\f[]" is a tag with no value
+\[lq]\f[C]tag1\f[]\[rq] is a tag with no value
.IP \[bu] 2
-"\f[C]tag2\f[]" is another tag, whose value is
-"\f[C]some\ value\ ...\f[]"
+\[lq]\f[C]tag2\f[]\[rq] is another tag, whose value is
+\[lq]\f[C]some\ value\ ...\f[]\[rq]
.PP
Tags in a transaction comment affect the transaction and all of its
postings, while tags in a posting comment affect only that posting.
@@ -812,14 +837,14 @@ For example, the following transaction has three tags (\f[C]A\f[],
\f[]
.fi
.PP
-Tags are like Ledger\[aq]s metadata feature, except hledger\[aq]s tag
-values are simple strings.
+Tags are like Ledger's metadata feature, except hledger's tag values are
+simple strings.
.SS Directives
.SS Account aliases
.PP
You can define aliases which rewrite your account names (after reading
the journal, before generating reports).
-hledger\[aq]s account aliases can be useful for:
+hledger's account aliases can be useful for:
.IP \[bu] 2
expanding shorthand account names to their full form, allowing easier
data entry and a less verbose journal
@@ -849,7 +874,7 @@ alias\ OLD\ =\ NEW
Or, you can use the \f[C]\-\-alias\ \[aq]OLD=NEW\[aq]\f[] option on the
command line.
This affects all entries.
-It\[aq]s useful for trying out aliases interactively.
+It's useful for trying out aliases interactively.
.PP
OLD and NEW are full account names.
hledger will replace any occurrence of the old account name with the new
@@ -920,8 +945,8 @@ end\ aliases
.PP
The \f[C]account\f[] directive predefines account names, as in Ledger
and Beancount.
-This may be useful for your own documentation; hledger doesn\[aq]t make
-use of it yet.
+This may be useful for your own documentation; hledger doesn't make use
+of it yet.
.IP
.nf
\f[C]
@@ -1007,7 +1032,7 @@ commodity\ 1,000.0000\ AAAA
\f[]
.fi
.PP
-or on multiple lines, using the "format" subdirective.
+or on multiple lines, using the \[lq]format\[rq] subdirective.
In this case the commodity symbol appears twice and should be the same
in both places:
.IP
@@ -1027,7 +1052,7 @@ commodity\ INR
.PP
The D directive sets a default commodity (and display format), to be
used for amounts without a commodity symbol (ie, plain numbers).
-(Note this differs from Ledger\[aq]s default commodity directive.) The
+(Note this differs from Ledger's default commodity directive.) The
commodity and display format will be applied to all subsequent
commodity\-less amounts, or until the next D directive.
.IP
@@ -1038,14 +1063,14 @@ commodity\-less amounts, or until the next D directive.
D\ $1,000.00
1/1
-\ \ a\ \ \ \ \ 5\ \ \ \ #\ <\-\ commodity\-less\ amount,\ becomes\ $1
+\ \ a\ \ \ \ \ 5\ \ \ \ ;\ <\-\ commodity\-less\ amount,\ becomes\ $1
\ \ b
\f[]
.fi
.SS Default year
.PP
-You can set a default year to be used for subsequent dates which
-don\[aq]t specify a year.
+You can set a default year to be used for subsequent dates which don't
+specify a year.
This is a line beginning with \f[C]Y\f[] followed by the year.
Eg:
.IP
@@ -1085,6 +1110,77 @@ Glob patterns (\f[C]*\f[]) are not currently supported.
.PP
The \f[C]include\f[] directive can only be used in journal files.
It can include journal, timeclock or timedot files, but not CSV files.
+.SH Periodic transactions
+.PP
+A periodic transaction starts with a tilde `~' in place of a date,
+followed by a period expression:
+.IP
+.nf
+\f[C]
+~\ weekly
+\ \ assets:bank:checking\ \ \ $400\ ;\ paycheck
+\ \ income:acme\ inc
+\f[]
+.fi
+.PP
+Periodic transactions are used for forecasting and budgeting only; they
+have no effect unless the \f[C]\-\-forecast\f[] or \f[C]\-\-budget\f[]
+flag is used.
+With \f[C]\-\-forecast\f[], each periodic transaction rule generates
+recurring forecast transactions at the specified interval, beginning the
+day after the last recorded journal transaction and ending 6 months from
+today, or at the specified report end date.
+With \f[C]balance\ \-\-budget\f[], each periodic transaction declares
+recurring budget goals for one or more accounts.
+.PD 0
+.P
+.PD
+For more details, see: balance > Budgeting, Budgeting and Forecasting.
+.SH Automated posting rules
+.PP
+An automated posting rule starts with an equal sign `=' in place of a
+date, followed by a query:
+followed by a query:
+.IP
+.nf
+\f[C]
+=\ expenses:gifts
+\ \ \ \ budget:gifts\ \ *\-1
+\ \ \ \ assets:budget\ \ *1
+\f[]
+.fi
+.PP
+When the \f[C]\-\-auto\f[] option is specified on the command line,
+each automated posting rule adds its postings to all transactions that
+match its query.
+.PP
+If the amount in the automated posting rule includes a commodity name,
+the new posting is made in that commodity; otherwise, the commodity of
+the matched transaction is used.
+.PP
+When the amount in the automated posting rule begins with '*', it is
+treated as a multiplier applied to the amount of the first posting in
+the matched transaction.
+.PP
+In the example above, every transaction posting to the
+\f[C]expenses:gifts\f[] account has two additional postings added to
+it: the amount of the original gift is debited from
+\f[C]budget:gifts\f[] and credited to \f[C]assets:budget\f[]:
+.IP
+.nf
+\f[C]
+;\ Original\ transaction
+2017\-12\-14
+\ \ expenses:gifts\ \ $20
+\ \ assets
+
+;\ With\ automated\ postings\ applied
+2017/12/14
+\ \ \ \ expenses:gifts\ \ \ \ \ \ \ \ \ \ \ \ \ $20
+\ \ \ \ assets
+\ \ \ \ budget:gifts\ \ \ \ \ \ \ \ \ \ \ \ \ \ $\-20
+\ \ \ \ assets:budget\ \ \ \ \ \ \ \ \ \ \ \ \ \ $20
+\f[]
+.fi
.SH EDITOR SUPPORT
.PP
Add\-on modes exist for various text editors, to make working with
@@ -1098,7 +1194,7 @@ files:
.PP
.TS
tab(@);
-lw(16.5n) lw(51.5n).
+lw(16.5n) lw(53.5n).
T{
Emacs
T}@T{
diff --git a/doc/hledger_journal.5.info b/hledger_journal.info
index f71cc46..29fa944 100644
--- a/doc/hledger_journal.5.info
+++ b/hledger_journal.info
@@ -1,10 +1,10 @@
-This is hledger_journal.5.info, produced by makeinfo version 6.0 from
+This is hledger_journal.info, produced by makeinfo version 6.5 from
stdin.

-File: hledger_journal.5.info, Node: Top, Next: FILE FORMAT, Up: (dir)
+File: hledger_journal.info, Node: Top, Next: FILE FORMAT, Up: (dir)
-hledger_journal(5) hledger 1.4
+hledger_journal(5) hledger 1.5
******************************
hledger's usual data source is a plain text file containing journal
@@ -57,10 +57,12 @@ assisted by the helper modes for emacs or vim.
* Menu:
* FILE FORMAT::
+* Periodic transactions::
+* Automated posting rules::
* EDITOR SUPPORT::

-File: hledger_journal.5.info, Node: FILE FORMAT, Next: EDITOR SUPPORT, Prev: Top, Up: Top
+File: hledger_journal.info, Node: FILE FORMAT, Next: Periodic transactions, Prev: Top, Up: Top
1 FILE FORMAT
*************
@@ -83,7 +85,7 @@ File: hledger_journal.5.info, Node: FILE FORMAT, Next: EDITOR SUPPORT, Prev:
* Directives::

-File: hledger_journal.5.info, Node: Transactions, Next: Postings, Up: FILE FORMAT
+File: hledger_journal.info, Node: Transactions, Next: Postings, Up: FILE FORMAT
1.1 Transactions
================
@@ -105,7 +107,7 @@ following, separated by spaces:
representing...

-File: hledger_journal.5.info, Node: Postings, Next: Dates, Prev: Transactions, Up: FILE FORMAT
+File: hledger_journal.info, Node: Postings, Next: Dates, Prev: Transactions, Up: FILE FORMAT
1.2 Postings
============
@@ -133,7 +135,7 @@ spaces. But if you accidentally leave only one space (or tab) before
the amount, the amount will be considered part of the account name.

-File: hledger_journal.5.info, Node: Dates, Next: Status, Prev: Postings, Up: FILE FORMAT
+File: hledger_journal.info, Node: Dates, Next: Status, Prev: Postings, Up: FILE FORMAT
1.3 Dates
=========
@@ -145,7 +147,7 @@ File: hledger_journal.5.info, Node: Dates, Next: Status, Prev: Postings, Up:
* Posting dates::

-File: hledger_journal.5.info, Node: Simple dates, Next: Secondary dates, Up: Dates
+File: hledger_journal.info, Node: Simple dates, Next: Secondary dates, Up: Dates
1.3.1 Simple dates
------------------
@@ -158,7 +160,7 @@ command is run. Some examples: '2010/01/31', '1/31', '2010-01-31',
'2010.1.31'.

-File: hledger_journal.5.info, Node: Secondary dates, Next: Posting dates, Prev: Simple dates, Up: Dates
+File: hledger_journal.info, Node: Secondary dates, Next: Posting dates, Prev: Simple dates, Up: Dates
1.3.2 Secondary dates
---------------------
@@ -199,7 +201,7 @@ Ledger compatibility, but posting dates are a more powerful and less
confusing alternative.

-File: hledger_journal.5.info, Node: Posting dates, Prev: Secondary dates, Up: Dates
+File: hledger_journal.info, Node: Posting dates, Prev: Secondary dates, Up: Dates
1.3.3 Posting dates
-------------------
@@ -234,7 +236,7 @@ characters in this way. With this syntax, DATE infers its year from the
transaction and DATE2 infers its year from DATE.

-File: hledger_journal.5.info, Node: Status, Next: Description, Prev: Dates, Up: FILE FORMAT
+File: hledger_journal.info, Node: Status, Next: Description, Prev: Dates, Up: FILE FORMAT
1.4 Status
==========
@@ -270,13 +272,13 @@ toggle transaction status with C-c C-e, or posting status with C-c C-c.
What "uncleared", "pending", and "cleared" actually mean is up to
you. Here's one suggestion:
-status meaning
+status meaning
--------------------------------------------------------------------------
-uncleared recorded but not yet reconciled; needs review
-pending tentatively reconciled (if needed, eg during a big
- reconciliation)
-cleared complete, reconciled as far as possible, and considered
- correct
+uncleared recorded but not yet reconciled; needs review
+pending tentatively reconciled (if needed, eg during a big
+ reconciliation)
+cleared complete, reconciled as far as possible, and considered
+ correct
With this scheme, you would use '-PC' to see the current balance at
your bank, '-U' to see things which will probably hit your bank soon
@@ -284,7 +286,7 @@ your bank, '-U' to see things which will probably hit your bank soon
your finances.

-File: hledger_journal.5.info, Node: Description, Next: Account names, Prev: Status, Up: FILE FORMAT
+File: hledger_journal.info, Node: Description, Next: Account names, Prev: Status, Up: FILE FORMAT
1.5 Description
===============
@@ -299,7 +301,7 @@ comments.
* Payee and note::

-File: hledger_journal.5.info, Node: Payee and note, Up: Description
+File: hledger_journal.info, Node: Payee and note, Up: Description
1.5.1 Payee and note
--------------------
@@ -310,7 +312,7 @@ the right. This may be worthwhile if you need to do more precise
querying and pivoting by payee.

-File: hledger_journal.5.info, Node: Account names, Next: Amounts, Prev: Description, Up: FILE FORMAT
+File: hledger_journal.info, Node: Account names, Next: Amounts, Prev: Description, Up: FILE FORMAT
1.6 Account names
=================
@@ -328,7 +330,7 @@ more spaces* (or newline).
Account names can be aliased.

-File: hledger_journal.5.info, Node: Amounts, Next: Virtual Postings, Prev: Account names, Up: FILE FORMAT
+File: hledger_journal.info, Node: Amounts, Next: Virtual Postings, Prev: Account names, Up: FILE FORMAT
1.7 Amounts
===========
@@ -346,6 +348,7 @@ commodity name. Some examples:
'-$1,000,000.00'
'INR 9,99,99,999.00'
'EUR -2.000.000,00'
+'1 999 999.9455'
As you can see, the amount format is somewhat flexible:
@@ -358,10 +361,25 @@ commodity name. Some examples:
* negative amounts with a commodity on the left can have the minus
sign before or after it
* digit groups (thousands, or any other grouping) can be separated by
- commas (in which case period is used for decimal point) or periods
- (in which case comma is used for decimal point)
+ spaces, commas, or periods; the same separator should be used
+ between all groups
+ * the decimal part can be separated by a comma or a period, which
+ should differ from the digit group separator
- You can use any of these variations when recording data, but when
+ You can use any of these variations when recording data. However,
+some representations are ambiguous: '$1.000' and '$1,000' could each
+mean either one thousand or one dollar. By default, hledger assumes
+that a sole separator marks the decimal part. A commodity directive
+declared earlier in the file can resolve the ambiguity differently:
+
+commodity $1,000.00
+
+2017/12/25 New life of Scrooge
+ expenses:gifts $1,000
+ assets
+
+ Though the journal may contain mixed amount styles, when
hledger displays amounts, it will choose a consistent format for each
commodity. (Except for price amounts, which are always formatted as
written). The display format is chosen as follows:
@@ -383,7 +401,7 @@ when -V is used.) If you find this causing problems, set the desired
format with a commodity directive.

-File: hledger_journal.5.info, Node: Virtual Postings, Next: Balance Assertions, Prev: Amounts, Up: FILE FORMAT
+File: hledger_journal.info, Node: Virtual Postings, Next: Balance Assertions, Prev: Amounts, Up: FILE FORMAT
1.8 Virtual Postings
====================
@@ -418,7 +436,7 @@ can usually find an equivalent journal entry using real postings, which
is more correct and provides better error checking.

-File: hledger_journal.5.info, Node: Balance Assertions, Next: Balance Assignments, Prev: Virtual Postings, Up: FILE FORMAT
+File: hledger_journal.info, Node: Balance Assertions, Next: Balance Assignments, Prev: Virtual Postings, Up: FILE FORMAT
1.9 Balance Assertions
======================
@@ -452,7 +470,7 @@ or for reading Ledger files.
* Assertions and virtual postings::

-File: hledger_journal.5.info, Node: Assertions and ordering, Next: Assertions and included files, Up: Balance Assertions
+File: hledger_journal.info, Node: Assertions and ordering, Next: Assertions and included files, Up: Balance Assertions
1.9.1 Assertions and ordering
-----------------------------
@@ -471,7 +489,7 @@ control over the order of postings and assertions within a day, so you
can assert intra-day balances.

-File: hledger_journal.5.info, Node: Assertions and included files, Next: Assertions and multiple -f options, Prev: Assertions and ordering, Up: Balance Assertions
+File: hledger_journal.info, Node: Assertions and included files, Next: Assertions and multiple -f options, Prev: Assertions and ordering, Up: Balance Assertions
1.9.2 Assertions and included files
-----------------------------------
@@ -483,7 +501,7 @@ and you also want to assert the account's balance on the same day,
you'll have to put the assertion in the right file.

-File: hledger_journal.5.info, Node: Assertions and multiple -f options, Next: Assertions and commodities, Prev: Assertions and included files, Up: Balance Assertions
+File: hledger_journal.info, Node: Assertions and multiple -f options, Next: Assertions and commodities, Prev: Assertions and included files, Up: Balance Assertions
1.9.3 Assertions and multiple -f options
----------------------------------------
@@ -492,7 +510,7 @@ Balance assertions don't work well across files specified with multiple
-f options. Use include or concatenate the files instead.

-File: hledger_journal.5.info, Node: Assertions and commodities, Next: Assertions and subaccounts, Prev: Assertions and multiple -f options, Up: Balance Assertions
+File: hledger_journal.info, Node: Assertions and commodities, Next: Assertions and subaccounts, Prev: Assertions and multiple -f options, Up: Balance Assertions
1.9.4 Assertions and commodities
--------------------------------
@@ -511,7 +529,7 @@ account does not contain some unexpected commodity. (We'll add support
for this kind of total balance assertion if there's demand.)

-File: hledger_journal.5.info, Node: Assertions and subaccounts, Next: Assertions and virtual postings, Prev: Assertions and commodities, Up: Balance Assertions
+File: hledger_journal.info, Node: Assertions and subaccounts, Next: Assertions and virtual postings, Prev: Assertions and commodities, Up: Balance Assertions
1.9.5 Assertions and subaccounts
--------------------------------
@@ -534,7 +552,7 @@ $ hledger bal checking --flat
2

-File: hledger_journal.5.info, Node: Assertions and virtual postings, Prev: Assertions and subaccounts, Up: Balance Assertions
+File: hledger_journal.info, Node: Assertions and virtual postings, Prev: Assertions and subaccounts, Up: Balance Assertions
1.9.6 Assertions and virtual postings
-------------------------------------
@@ -544,7 +562,7 @@ virtual. They are not affected by the '--real/-R' flag or 'real:'
query.

-File: hledger_journal.5.info, Node: Balance Assignments, Next: Prices, Prev: Balance Assertions, Up: FILE FORMAT
+File: hledger_journal.info, Node: Balance Assignments, Next: Prices, Prev: Balance Assertions, Up: FILE FORMAT
1.10 Balance Assignments
========================
@@ -555,7 +573,7 @@ equals sign; instead it is calculated automatically so as to satisfy the
assertion. This can be a convenience during data entry, eg when setting
opening balances:
-; starting a new journal, set asset account balances
+; starting a new journal, set asset account balances
2016/1/1 opening balances
assets:checking = $409.32
assets:savings = $735.24
@@ -577,7 +595,7 @@ little less explicit; to know the exact amount posted, you have to run
hledger or do the calculations yourself, instead of just reading it.

-File: hledger_journal.5.info, Node: Prices, Next: Comments, Prev: Balance Assignments, Up: FILE FORMAT
+File: hledger_journal.info, Node: Prices, Next: Comments, Prev: Balance Assignments, Up: FILE FORMAT
1.11 Prices
===========
@@ -588,7 +606,7 @@ File: hledger_journal.5.info, Node: Prices, Next: Comments, Prev: Balance Ass
* Market prices::

-File: hledger_journal.5.info, Node: Transaction prices, Next: Market prices, Up: Prices
+File: hledger_journal.info, Node: Transaction prices, Next: Market prices, Up: Prices
1.11.1 Transaction prices
-------------------------
@@ -649,7 +667,7 @@ $ hledger bal -N --flat -B
€100 assets:euros

-File: hledger_journal.5.info, Node: Market prices, Prev: Transaction prices, Up: Prices
+File: hledger_journal.info, Node: Market prices, Prev: Transaction prices, Up: Prices
1.11.2 Market prices
--------------------
@@ -678,14 +696,15 @@ P 2009/1/1 € $1.35
P 2010/1/1 € $1.40

-File: hledger_journal.5.info, Node: Comments, Next: Tags, Prev: Prices, Up: FILE FORMAT
+File: hledger_journal.info, Node: Comments, Next: Tags, Prev: Prices, Up: FILE FORMAT
1.12 Comments
=============
Lines in the journal beginning with a semicolon (';') or hash ('#') or
-asterisk ('*') are comments, and will be ignored. (Asterisk comments
-make it easy to treat your journal like an org-mode outline in emacs.)
+star ('*') are comments, and will be ignored. (Star comments cause
+org-mode nodes to be ignored, allowing emacs users to fold and navigate
+their journals with org-mode or orgstruct-mode.)
Also, anything between 'comment' and 'end comment' directives is a
(multi-line) comment. If there is no 'end comment', the comment extends
@@ -695,18 +714,19 @@ to the end of the file.
description and/or indented on the following lines (before the
postings). Similarly, you can attach comments to an individual posting
by writing them after the amount and/or indented on the following lines.
+Transaction and posting comments must begin with a semicolon (';').
Some examples:
-# a journal comment
+# a file comment
-; also a journal comment
+; also a file comment
comment
-This is a multiline comment,
+This is a multiline file comment,
which continues until a line
where the "end comment" string
-appears on its own.
+appears on its own (or end of file).
end comment
2012/5/14 something ; a transaction comment
@@ -715,10 +735,10 @@ end comment
posting2
; a comment for posting 2
; another comment line for posting 2
-; a journal comment (because not indented)
+; a file comment (because not indented)

-File: hledger_journal.5.info, Node: Tags, Next: Directives, Prev: Comments, Up: FILE FORMAT
+File: hledger_journal.info, Node: Tags, Next: Directives, Prev: Comments, Up: FILE FORMAT
1.13 Tags
=========
@@ -761,7 +781,7 @@ example, the following transaction has three tags ('A', 'TAG2',
are simple strings.

-File: hledger_journal.5.info, Node: Directives, Prev: Tags, Up: FILE FORMAT
+File: hledger_journal.info, Node: Directives, Prev: Tags, Up: FILE FORMAT
1.14 Directives
===============
@@ -778,7 +798,7 @@ File: hledger_journal.5.info, Node: Directives, Prev: Tags, Up: FILE FORMAT
* Including other files::

-File: hledger_journal.5.info, Node: Account aliases, Next: account directive, Up: Directives
+File: hledger_journal.info, Node: Account aliases, Next: account directive, Up: Directives
1.14.1 Account aliases
----------------------
@@ -803,7 +823,7 @@ be useful for:
* end aliases::

-File: hledger_journal.5.info, Node: Basic aliases, Next: Regex aliases, Up: Account aliases
+File: hledger_journal.info, Node: Basic aliases, Next: Regex aliases, Up: Account aliases
1.14.1.1 Basic aliases
......................
@@ -826,7 +846,7 @@ alias checking = assets:bank:wells fargo:checking
# rewrites "checking" to "assets:bank:wells fargo:checking", or "checking:a" to "assets:bank:wells fargo:checking:a"

-File: hledger_journal.5.info, Node: Regex aliases, Next: Multiple aliases, Prev: Basic aliases, Up: Account aliases
+File: hledger_journal.info, Node: Regex aliases, Next: Multiple aliases, Prev: Basic aliases, Up: Account aliases
1.14.1.2 Regex aliases
......................
@@ -851,7 +871,7 @@ command line, to end of option argument), so it can contain trailing
whitespace.

-File: hledger_journal.5.info, Node: Multiple aliases, Next: end aliases, Prev: Regex aliases, Up: Account aliases
+File: hledger_journal.info, Node: Multiple aliases, Next: end aliases, Prev: Regex aliases, Up: Account aliases
1.14.1.3 Multiple aliases
.........................
@@ -867,7 +887,7 @@ following order:
2. alias options, in the order they appear on the command line

-File: hledger_journal.5.info, Node: end aliases, Prev: Multiple aliases, Up: Account aliases
+File: hledger_journal.info, Node: end aliases, Prev: Multiple aliases, Up: Account aliases
1.14.1.4 end aliases
....................
@@ -878,7 +898,7 @@ aliases' directive:
end aliases

-File: hledger_journal.5.info, Node: account directive, Next: apply account directive, Prev: Account aliases, Up: Directives
+File: hledger_journal.info, Node: account directive, Next: apply account directive, Prev: Account aliases, Up: Directives
1.14.2 account directive
------------------------
@@ -899,7 +919,7 @@ account expenses:food
; etc.

-File: hledger_journal.5.info, Node: apply account directive, Next: Multi-line comments, Prev: account directive, Up: Directives
+File: hledger_journal.info, Node: apply account directive, Next: Multi-line comments, Prev: account directive, Up: Directives
1.14.3 apply account directive
------------------------------
@@ -935,7 +955,7 @@ include personal.journal
supported.

-File: hledger_journal.5.info, Node: Multi-line comments, Next: commodity directive, Prev: apply account directive, Up: Directives
+File: hledger_journal.info, Node: Multi-line comments, Next: commodity directive, Prev: apply account directive, Up: Directives
1.14.4 Multi-line comments
--------------------------
@@ -944,7 +964,7 @@ A line containing just 'comment' starts a multi-line comment, and a line
containing just 'end comment' ends it. See comments.

-File: hledger_journal.5.info, Node: commodity directive, Next: Default commodity, Prev: Multi-line comments, Up: Directives
+File: hledger_journal.info, Node: commodity directive, Next: Default commodity, Prev: Multi-line comments, Up: Directives
1.14.5 commodity directive
--------------------------
@@ -976,7 +996,7 @@ commodity INR
format INR 9,99,99,999.00

-File: hledger_journal.5.info, Node: Default commodity, Next: Default year, Prev: commodity directive, Up: Directives
+File: hledger_journal.info, Node: Default commodity, Next: Default year, Prev: commodity directive, Up: Directives
1.14.6 Default commodity
------------------------
@@ -992,11 +1012,11 @@ amounts, or until the next D directive.
D $1,000.00
1/1
- a 5 # <- commodity-less amount, becomes $1
+ a 5 ; <- commodity-less amount, becomes $1
b

-File: hledger_journal.5.info, Node: Default year, Next: Including other files, Prev: Default commodity, Up: Directives
+File: hledger_journal.info, Node: Default year, Next: Including other files, Prev: Default commodity, Up: Directives
1.14.7 Default year
-------------------
@@ -1022,7 +1042,7 @@ Y2010 ; change default year to 2010
assets

-File: hledger_journal.5.info, Node: Including other files, Prev: Default year, Up: Directives
+File: hledger_journal.info, Node: Including other files, Prev: Default year, Up: Directives
1.14.8 Including other files
----------------------------
@@ -1039,9 +1059,73 @@ current file. Glob patterns ('*') are not currently supported.
include journal, timeclock or timedot files, but not CSV files.

-File: hledger_journal.5.info, Node: EDITOR SUPPORT, Prev: FILE FORMAT, Up: Top
+File: hledger_journal.info, Node: Periodic transactions, Next: Automated posting rules, Prev: FILE FORMAT, Up: Top
+
+2 Periodic transactions
+***********************
+
+A periodic transaction starts with a tilde '~' in place of a date,
+followed by a period expression:
+
+~ weekly
+ assets:bank:checking $400 ; paycheck
+ income:acme inc
+
+ Periodic transactions are used for forecasting and budgeting only;
+they have no effect unless the '--forecast' or '--budget' flag is used.
+With '--forecast', each periodic transaction rule generates recurring
+forecast transactions at the specified interval, beginning the day after
+the last recorded journal transaction and ending 6 months from today, or
+at the specified report end date. With 'balance --budget', each
+periodic transaction declares recurring budget goals for one or more
+accounts.
+For more details, see: balance > Budgeting, Budgeting and Forecasting.
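+
+ For example (illustrative invocations; these are the flags described
+above):
+
+$ hledger print --forecast    # show recorded plus forecast transactions
+$ hledger balance --budget    # compare balances with the declared budget goals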
+
+
+File: hledger_journal.info, Node: Automated posting rules, Next: EDITOR SUPPORT, Prev: Periodic transactions, Up: Top
+
+3 Automated posting rules
+*************************
+
+An automated posting rule starts with an equal sign '=' in place of a
+date, followed by a query:
+
+= expenses:gifts
+ budget:gifts *-1
+ assets:budget *1
+
+ When the '--auto' option is specified on the command line, each
+automated posting rule adds its postings to all transactions that match
+the query.
+
+ If the amount in the automated posting rule includes a commodity
+name, the new posting will be made in that commodity; otherwise, the
+commodity of the matched transaction will be used.
+
+ When the amount in the automated posting rule begins with '*', it is
+treated as a multiplier that is applied to the amount of the first
+posting in the matched transaction.
+
+ In the example above, every transaction in the 'expenses:gifts'
+account will have two additional postings added to it: the amount of
+the original gift will be debited from 'budget:gifts' and credited to
+'assets:budget':
+
+; Original transaction
+2017-12-14
+ expenses:gifts $20
+ assets
+
+; With automated postings applied
+2017/12/14
+ expenses:gifts $20
+ assets
+ budget:gifts $-20
+ assets:budget $20
+
+
+File: hledger_journal.info, Node: EDITOR SUPPORT, Prev: Automated posting rules, Up: Top
-2 EDITOR SUPPORT
+4 EDITOR SUPPORT
****************
Add-on modes exist for various text editors, to make working with
@@ -1062,86 +1146,90 @@ Code

Tag Table:
-Node: Top78
-Node: FILE FORMAT2374
-Ref: #file-format2500
-Node: Transactions2723
-Ref: #transactions2846
-Node: Postings3530
-Ref: #postings3659
-Node: Dates4654
-Ref: #dates4771
-Node: Simple dates4836
-Ref: #simple-dates4964
-Node: Secondary dates5330
-Ref: #secondary-dates5486
-Node: Posting dates7049
-Ref: #posting-dates7180
-Node: Status8554
-Ref: #status8676
-Node: Description10390
-Ref: #description10530
-Node: Payee and note10849
-Ref: #payee-and-note10965
-Node: Account names11207
-Ref: #account-names11352
-Node: Amounts11839
-Ref: #amounts11977
-Node: Virtual Postings14078
-Ref: #virtual-postings14239
-Node: Balance Assertions15459
-Ref: #balance-assertions15636
-Node: Assertions and ordering16532
-Ref: #assertions-and-ordering16720
-Node: Assertions and included files17420
-Ref: #assertions-and-included-files17663
-Node: Assertions and multiple -f options17996
-Ref: #assertions-and-multiple--f-options18252
-Node: Assertions and commodities18384
-Ref: #assertions-and-commodities18621
-Node: Assertions and subaccounts19317
-Ref: #assertions-and-subaccounts19551
-Node: Assertions and virtual postings20072
-Ref: #assertions-and-virtual-postings20281
-Node: Balance Assignments20423
-Ref: #balance-assignments20594
-Node: Prices21713
-Ref: #prices21848
-Node: Transaction prices21899
-Ref: #transaction-prices22046
-Node: Market prices24202
-Ref: #market-prices24339
-Node: Comments25299
-Ref: #comments25423
-Node: Tags26536
-Ref: #tags26656
-Node: Directives28058
-Ref: #directives28173
-Node: Account aliases28366
-Ref: #account-aliases28512
-Node: Basic aliases29116
-Ref: #basic-aliases29261
-Node: Regex aliases29951
-Ref: #regex-aliases30121
-Node: Multiple aliases30839
-Ref: #multiple-aliases31013
-Node: end aliases31511
-Ref: #end-aliases31653
-Node: account directive31754
-Ref: #account-directive31936
-Node: apply account directive32232
-Ref: #apply-account-directive32430
-Node: Multi-line comments33089
-Ref: #multi-line-comments33281
-Node: commodity directive33409
-Ref: #commodity-directive33595
-Node: Default commodity34467
-Ref: #default-commodity34642
-Node: Default year35179
-Ref: #default-year35346
-Node: Including other files35769
-Ref: #including-other-files35928
-Node: EDITOR SUPPORT36325
-Ref: #editor-support36445
+Node: Top76
+Node: FILE FORMAT2424
+Ref: #file-format2555
+Node: Transactions2778
+Ref: #transactions2899
+Node: Postings3583
+Ref: #postings3710
+Node: Dates4705
+Ref: #dates4820
+Node: Simple dates4885
+Ref: #simple-dates5011
+Node: Secondary dates5377
+Ref: #secondary-dates5531
+Node: Posting dates7094
+Ref: #posting-dates7223
+Node: Status8597
+Ref: #status8717
+Node: Description10425
+Ref: #description10563
+Node: Payee and note10882
+Ref: #payee-and-note10996
+Node: Account names11238
+Ref: #account-names11381
+Node: Amounts11868
+Ref: #amounts12004
+Node: Virtual Postings14684
+Ref: #virtual-postings14843
+Node: Balance Assertions16063
+Ref: #balance-assertions16238
+Node: Assertions and ordering17134
+Ref: #assertions-and-ordering17320
+Node: Assertions and included files18020
+Ref: #assertions-and-included-files18261
+Node: Assertions and multiple -f options18594
+Ref: #assertions-and-multiple--f-options18848
+Node: Assertions and commodities18980
+Ref: #assertions-and-commodities19215
+Node: Assertions and subaccounts19911
+Ref: #assertions-and-subaccounts20143
+Node: Assertions and virtual postings20664
+Ref: #assertions-and-virtual-postings20871
+Node: Balance Assignments21013
+Ref: #balance-assignments21182
+Node: Prices22302
+Ref: #prices22435
+Node: Transaction prices22486
+Ref: #transaction-prices22631
+Node: Market prices24787
+Ref: #market-prices24922
+Node: Comments25882
+Ref: #comments26004
+Node: Tags27246
+Ref: #tags27364
+Node: Directives28766
+Ref: #directives28879
+Node: Account aliases29072
+Ref: #account-aliases29216
+Node: Basic aliases29820
+Ref: #basic-aliases29963
+Node: Regex aliases30653
+Ref: #regex-aliases30821
+Node: Multiple aliases31539
+Ref: #multiple-aliases31711
+Node: end aliases32209
+Ref: #end-aliases32349
+Node: account directive32450
+Ref: #account-directive32630
+Node: apply account directive32926
+Ref: #apply-account-directive33122
+Node: Multi-line comments33781
+Ref: #multi-line-comments33971
+Node: commodity directive34099
+Ref: #commodity-directive34283
+Node: Default commodity35155
+Ref: #default-commodity35328
+Node: Default year35865
+Ref: #default-year36030
+Node: Including other files36453
+Ref: #including-other-files36610
+Node: Periodic transactions37007
+Ref: #periodic-transactions37178
+Node: Automated posting rules37921
+Ref: #automated-posting-rules38099
+Node: EDITOR SUPPORT39208
+Ref: #editor-support39338

End Tag Table
diff --git a/doc/hledger_journal.5.txt b/hledger_journal.txt
index 7377cbd..9df70a9 100644
--- a/doc/hledger_journal.5.txt
+++ b/hledger_journal.txt
@@ -211,10 +211,10 @@ FILE FORMAT
status meaning
--------------------------------------------------------------------------
uncleared recorded but not yet reconciled; needs review
- pending tentatively reconciled (if needed, eg during a big recon-
- ciliation)
- cleared complete, reconciled as far as possible, and considered
- correct
+ pending tentatively reconciled (if needed, eg during a big reconcil-
+ iation)
+ cleared complete, reconciled as far as possible, and considered cor-
+ rect
With this scheme, you would use -PC to see the current balance at your
bank, -U to see things which will probably hit your bank soon (like
@@ -260,6 +260,7 @@ FILE FORMAT
-$1,000,000.00
INR 9,99,99,999.00
EUR -2.000.000,00
+ 1 999 999.9455
As you can see, the amount format is somewhat flexible:
@@ -275,10 +276,26 @@ FILE FORMAT
before or after it
o digit groups (thousands, or any other grouping) can be separated by
- commas (in which case period is used for decimal point) or periods
- (in which case comma is used for decimal point)
+ spaces, commas, or periods; the same separator should be used between
+ all groups
- You can use any of these variations when recording data, but when
+ o the decimal part can be separated by a comma or a period, which
+ should differ from the digit group separator
+
+ You can use any of these variations when recording data. However,
+ some representations are ambiguous: $1.000 and $1,000 could each mean
+ either one thousand or one dollar. By default, hledger assumes that a
+ sole separator marks the decimal part. A commodity directive declared
+ earlier in the file can resolve the ambiguity differently:
+
+ commodity $1,000.00
+
+ 2017/12/25 New life of Scrooge
+ expenses:gifts $1,000
+ assets
+
+ Though the journal may contain mixed amount styles, when
hledger displays amounts, it will choose a consistent format for each
commodity. (Except for price amounts, which are always formatted as
written). The display format is chosen as follows:
@@ -523,9 +540,10 @@ FILE FORMAT
P 2010/1/1 $1.40
Comments
- Lines in the journal beginning with a semicolon (;) or hash (#) or
- asterisk (*) are comments, and will be ignored. (Asterisk comments
- make it easy to treat your journal like an org-mode outline in emacs.)
+ Lines in the journal beginning with a semicolon (;) or hash (#) or star
+ (*) are comments, and will be ignored. (Star comments cause org-mode
+ nodes to be ignored, allowing emacs users to fold and navigate their
+ journals with org-mode or orgstruct-mode.)
Also, anything between comment and end comment directives is a
(multi-line) comment. If there is no end comment, the comment extends
@@ -534,19 +552,20 @@ FILE FORMAT
You can attach comments to a transaction by writing them after the
description and/or indented on the following lines (before the post-
ings). Similarly, you can attach comments to an individual posting by
- writing them after the amount and/or indented on the following lines.
+ writing them after the amount and/or indented on the following lines.
+ Transaction and posting comments must begin with a semicolon (;).
Some examples:
- # a journal comment
+ # a file comment
- ; also a journal comment
+ ; also a file comment
comment
- This is a multiline comment,
+ This is a multiline file comment,
which continues until a line
where the "end comment" string
- appears on its own.
+ appears on its own (or end of file).
end comment
2012/5/14 something ; a transaction comment
@@ -555,23 +574,23 @@ FILE FORMAT
posting2
; a comment for posting 2
; another comment line for posting 2
- ; a journal comment (because not indented)
+ ; a file comment (because not indented)
Tags
- Tags are a way to add extra labels or labelled data to postings and
+ Tags are a way to add extra labels or labelled data to postings and
transactions, which you can then search or pivot on.
- A simple tag is a word (which may contain hyphens) followed by a full
+ A simple tag is a word (which may contain hyphens) followed by a full
colon, written inside a transaction or posting comment line:
2017/1/16 bought groceries ; sometag:
- Tags can have a value, which is the text after the colon, up to the
+ Tags can have a value, which is the text after the colon, up to the
next comma or end of line, with leading/trailing whitespace removed:
expenses:food $10 ; a-posting-tag: the tag value
- Note this means hledger's tag values can not contain commas or new-
+ Note this means hledger's tag values can not contain commas or new-
lines. Ending at commas means you can write multiple short tags on one
line, comma separated:
@@ -585,21 +604,21 @@ FILE FORMAT
o "tag2" is another tag, whose value is "some value ..."
- Tags in a transaction comment affect the transaction and all of its
- postings, while tags in a posting comment affect only that posting.
- For example, the following transaction has three tags (A, TAG2,
+ Tags in a transaction comment affect the transaction and all of its
+ postings, while tags in a posting comment affect only that posting.
+ For example, the following transaction has three tags (A, TAG2,
third-tag) and the posting has four (those plus posting-tag):
1/1 a transaction ; A:, TAG2:
; third-tag: a third transaction tag, <- with a value
(a) $1 ; posting-tag:
- Tags are like Ledger's metadata feature, except hledger's tag values
+ Tags are like Ledger's metadata feature, except hledger's tag values
are simple strings.
Directives
Account aliases
- You can define aliases which rewrite your account names (after reading
+ You can define aliases which rewrite your account names (after reading
the journal, before generating reports). hledger's account aliases can
be useful for:
@@ -616,8 +635,8 @@ FILE FORMAT
See also Cookbook: rewrite account names.
Basic aliases
- To set an account alias, use the alias directive in your journal file.
- This affects all subsequent journal entries in the current file or its
+ To set an account alias, use the alias directive in your journal file.
+ This affects all subsequent journal entries in the current file or its
included files. The spaces around the = are optional:
alias OLD = NEW
@@ -625,54 +644,54 @@ FILE FORMAT
Or, you can use the --alias 'OLD=NEW' option on the command line. This
affects all entries. It's useful for trying out aliases interactively.
- OLD and NEW are full account names. hledger will replace any occur-
- rence of the old account name with the new one. Subaccounts are also
+ OLD and NEW are full account names. hledger will replace any occur-
+ rence of the old account name with the new one. Subaccounts are also
affected. Eg:
alias checking = assets:bank:wells fargo:checking
# rewrites "checking" to "assets:bank:wells fargo:checking", or "checking:a" to "assets:bank:wells fargo:checking:a"
Regex aliases
- There is also a more powerful variant that uses a regular expression,
+ There is also a more powerful variant that uses a regular expression,
indicated by the forward slashes:
alias /REGEX/ = REPLACEMENT
or --alias '/REGEX/=REPLACEMENT'.
- REGEX is a case-insensitive regular expression. Anywhere it matches
- inside an account name, the matched part will be replaced by REPLACE-
- MENT. If REGEX contains parenthesised match groups, these can be ref-
+ REGEX is a case-insensitive regular expression. Anywhere it matches
+ inside an account name, the matched part will be replaced by REPLACE-
+ MENT. If REGEX contains parenthesised match groups, these can be ref-
erenced by the usual numeric backreferences in REPLACEMENT. Eg:
alias /^(.+):bank:([^:]+)(.*)/ = \1:\2 \3
# rewrites "assets:bank:wells fargo:checking" to "assets:wells fargo checking"
- Also note that REPLACEMENT continues to the end of line (or on command
- line, to end of option argument), so it can contain trailing white-
+ Also note that REPLACEMENT continues to the end of line (or on command
+ line, to end of option argument), so it can contain trailing white-
space.
Multiple aliases
- You can define as many aliases as you like using directives or com-
- mand-line options. Aliases are recursive - each alias sees the result
- of applying previous ones. (This is different from Ledger, where
+ You can define as many aliases as you like using directives or com-
+ mand-line options. Aliases are recursive - each alias sees the result
+ of applying previous ones. (This is different from Ledger, where
aliases are non-recursive by default). Aliases are applied in the fol-
lowing order:
- 1. alias directives, most recently seen first (recent directives take
+ 1. alias directives, most recently seen first (recent directives take
precedence over earlier ones; directives not yet seen are ignored)
2. alias options, in the order they appear on the command line
end aliases
- You can clear (forget) all currently defined aliases with the
+ You can clear (forget) all currently defined aliases with the
end aliases directive:
end aliases
account directive
- The account directive predefines account names, as in Ledger and Bean-
- count. This may be useful for your own documentation; hledger doesn't
+ The account directive predefines account names, as in Ledger and Bean-
+ count. This may be useful for your own documentation; hledger doesn't
make use of it yet.
; account ACCT
@@ -687,8 +706,8 @@ FILE FORMAT
; etc.
apply account directive
- You can specify a parent account which will be prepended to all
- accounts within a section of the journal. Use the apply account and
+ You can specify a parent account which will be prepended to all
+ accounts within a section of the journal. Use the apply account and
end apply account directives like so:
apply account home
@@ -705,7 +724,7 @@ FILE FORMAT
home:food $10
home:cash $-10
- If end apply account is omitted, the effect lasts to the end of the
+ If end apply account is omitted, the effect lasts to the end of the
file. Included files are also affected, eg:
apply account business
@@ -714,16 +733,16 @@ FILE FORMAT
apply account personal
include personal.journal
- Prior to hledger 1.0, legacy account and end spellings were also sup-
+ Prior to hledger 1.0, legacy account and end spellings were also sup-
ported.
Multi-line comments
- A line containing just comment starts a multi-line comment, and a line
+ A line containing just comment starts a multi-line comment, and a line
containing just end comment ends it. See comments.
commodity directive
- The commodity directive predefines commodities (currently this is just
- informational), and also it may define the display format for amounts
+ The commodity directive predefines commodities (currently this is just
+ informational), and also it may define the display format for amounts
in this commodity (overriding the automatically inferred format).
It may be written on a single line, like this:
@@ -735,8 +754,8 @@ FILE FORMAT
; separating thousands with comma.
commodity 1,000.0000 AAAA
- or on multiple lines, using the "format" subdirective. In this case
- the commodity symbol appears twice and should be the same in both
+ or on multiple lines, using the "format" subdirective. In this case
+ the commodity symbol appears twice and should be the same in both
places:
; commodity SYMBOL
@@ -749,10 +768,10 @@ FILE FORMAT
format INR 9,99,99,999.00
Default commodity
- The D directive sets a default commodity (and display format), to be
+ The D directive sets a default commodity (and display format), to be
used for amounts without a commodity symbol (ie, plain numbers). (Note
- this differs from Ledger's default commodity directive.) The commodity
- and display format will be applied to all subsequent commodity-less
+ this differs from Ledger's default commodity directive.) The commodity
+ and display format will be applied to all subsequent commodity-less
amounts, or until the next D directive.
# commodity-less amounts should be treated as dollars
@@ -760,12 +779,12 @@ FILE FORMAT
D $1,000.00
1/1
- a 5 # <- commodity-less amount, becomes $1
+ a 5 ; <- commodity-less amount, becomes $1
b
Default year
- You can set a default year to be used for subsequent dates which don't
- specify a year. This is a line beginning with Y followed by the year.
+ You can set a default year to be used for subsequent dates which don't
+ specify a year. This is a line beginning with Y followed by the year.
Eg:
Y2009 ; set default year to 2009
@@ -785,45 +804,96 @@ FILE FORMAT
assets
Including other files
- You can pull in the content of additional journal files by writing an
+ You can pull in the content of additional journal files by writing an
include directive, like this:
include path/to/file.journal
- If the path does not begin with a slash, it is relative to the current
+ If the path does not begin with a slash, it is relative to the current
file. Glob patterns (*) are not currently supported.
- The include directive can only be used in journal files. It can
+ The include directive can only be used in journal files. It can
include journal, timeclock or timedot files, but not CSV files.
+Periodic transactions
+ A periodic transaction starts with a tilde `~' in place of a date,
+ followed by a period expression:
+
+ ~ weekly
+ assets:bank:checking $400 ; paycheck
+ income:acme inc
+
+ Periodic transactions are used for forecasting and budgeting only; they
+ have no effect unless the --forecast or --budget flag is used. With
+ --forecast, each periodic transaction rule generates recurring forecast
+ transactions at the specified interval, beginning the day after the
+ last recorded journal transaction and ending 6 months from today, or at
+ the specified report end date. With balance --budget, each periodic
+ transaction declares recurring budget goals for one or more accounts.
+ For more details, see: balance > Budgeting, Budgeting and Forecasting.
+
+Automated posting rules
+ An automated posting rule starts with an equal sign `=' in place of a
+ date, followed by a query:
+
+ = expenses:gifts
+ budget:gifts *-1
+ assets:budget *1
+
+ When the --auto option is specified on the command line, each automated
+ posting rule adds its postings to all transactions that match the query.
+
+ If the amount in the automated posting rule includes a commodity name,
+ the new posting will be made in that commodity; otherwise, the
+ commodity of the matched transaction will be used.
+
+ When the amount in the automated posting rule begins with '*', it is
+ treated as a multiplier that is applied to the amount of the first
+ posting in the matched transaction.
+
+ In the example above, every transaction in the expenses:gifts account
+ will have two additional postings added to it: the amount of the
+ original gift will be debited from budget:gifts and credited to
+ assets:budget:
+
+ ; Original transaction
+ 2017-12-14
+ expenses:gifts $20
+ assets
+
+ ; With automated postings applied
+ 2017/12/14
+ expenses:gifts $20
+ assets
+ budget:gifts $-20
+ assets:budget $20
+
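+ To see the effect of automated posting rules, run a report with the
+ --auto flag, for example (an illustrative invocation):
+
+ $ hledger print --auto
+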
EDITOR SUPPORT
Add-on modes exist for various text editors, to make working with jour-
- nal files easier. They add colour, navigation aids and helpful com-
- mands. For hledger users who edit the journal file directly (the
+ nal files easier. They add colour, navigation aids and helpful com-
+ mands. For hledger users who edit the journal file directly (the
majority), using one of these modes is quite recommended.
- These were written with Ledger in mind, but also work with hledger
+ These were written with Ledger in mind, but also work with hledger
files:
Emacs http://www.ledger-cli.org/3.0/doc/ledger-mode.html
- Vim https://github.com/ledger/ledger/wiki/Get-
- ting-started
+ Vim https://github.com/ledger/ledger/wiki/Getting-started
+
+
Sublime Text https://github.com/ledger/ledger/wiki/Using-Sub-
lime-Text
Textmate https://github.com/ledger/ledger/wiki/Using-Text-
Mate-2
Text Wrangler https://github.com/ledger/ledger/wiki/Edit-
ing-Ledger-files-with-TextWrangler
-
-
Visual Studio https://marketplace.visualstudio.com/items?item-
Code Name=mark-hansen.hledger-vscode
REPORTING BUGS
- Report bugs at http://bugs.hledger.org (or on the #hledger IRC channel
+ Report bugs at http://bugs.hledger.org (or on the #hledger IRC channel
or hledger mail list)
@@ -837,7 +907,7 @@ COPYRIGHT
SEE ALSO
- hledger(1), hledger-ui(1), hledger-web(1), hledger-api(1),
+ hledger(1), hledger-ui(1), hledger-web(1), hledger-api(1),
hledger_csv(5), hledger_journal(5), hledger_timeclock(5), hledger_time-
dot(5), ledger(1)
@@ -845,4 +915,4 @@ SEE ALSO
-hledger 1.4 September 2017 hledger_journal(5)
+hledger 1.5 December 2017 hledger_journal(5)
diff --git a/doc/hledger_timeclock.5 b/hledger_timeclock.5
index 0ba0757..d0fe986 100644
--- a/doc/hledger_timeclock.5
+++ b/hledger_timeclock.5
@@ -1,5 +1,5 @@
-.TH "hledger_timeclock" "5" "September 2017" "hledger 1.4" "hledger User Manuals"
+.TH "hledger_timeclock" "5" "December 2017" "hledger 1.5" "hledger User Manuals"
@@ -9,7 +9,7 @@ Timeclock \- the time logging format of timeclock.el, as read by hledger
.SH DESCRIPTION
.PP
hledger can read timeclock files.
-As with Ledger, these are (a subset of) timeclock.el\[aq]s format,
+As with Ledger, these are (a subset of) timeclock.el's format,
containing clock\-in and clock\-out entries as in the example below.
The date is a simple date.
The time format is HH:MM[:SS][+\-ZZZZ].
@@ -63,20 +63,12 @@ use emacs and the built\-in timeclock.el, or the extended
timeclock\-x.el and perhaps the extras in ledgerutils.el
.IP \[bu] 2
at the command line, use these bash aliases:
-.RS 2
-.IP
-.nf
-\f[C]
-alias\ ti="echo\ i\ `date\ \[aq]+%Y\-%m\-%d\ %H:%M:%S\[aq]`\ \\$*\ >>$TIMELOG"
-alias\ to="echo\ o\ `date\ \[aq]+%Y\-%m\-%d\ %H:%M:%S\[aq]`\ >>$TIMELOG"
-\f[]
-.fi
-.RE
+\f[C]shell\ \ \ alias\ ti="echo\ i\ `date\ \[aq]+%Y\-%m\-%d\ %H:%M:%S\[aq]`\ \\$*\ >>$TIMELOG"\ \ \ alias\ to="echo\ o\ `date\ \[aq]+%Y\-%m\-%d\ %H:%M:%S\[aq]`\ >>$TIMELOG"\f[]
.IP \[bu] 2
or use the old \f[C]ti\f[] and \f[C]to\f[] scripts in the ledger 2.x
repository.
-These rely on a "timeclock" executable which I think is just the ledger
-2 executable renamed.
+These rely on a \[lq]timeclock\[rq] executable which I think is just the
+ledger 2 executable renamed.
.SH "REPORTING BUGS"
diff --git a/doc/hledger_timeclock.5.info b/hledger_timeclock.info
index 6ba2a52..3403f6a 100644
--- a/doc/hledger_timeclock.5.info
+++ b/hledger_timeclock.info
@@ -1,10 +1,10 @@
-This is hledger_timeclock.5.info, produced by makeinfo version 6.0 from
+This is hledger_timeclock.info, produced by makeinfo version 6.5 from
stdin.

-File: hledger_timeclock.5.info, Node: Top, Up: (dir)
+File: hledger_timeclock.info, Node: Top, Up: (dir)
-hledger_timeclock(5) hledger 1.4
+hledger_timeclock(5) hledger 1.5
********************************
hledger can read timeclock files. As with Ledger, these are (a subset
@@ -45,11 +45,9 @@ $ hledger -f sample.timeclock register -p weekly --depth 1 --empty # time summa
* use emacs and the built-in timeclock.el, or the extended
timeclock-x.el and perhaps the extras in ledgerutils.el
- * at the command line, use these bash aliases:
-
- alias ti="echo i `date '+%Y-%m-%d %H:%M:%S'` \$* >>$TIMELOG"
- alias to="echo o `date '+%Y-%m-%d %H:%M:%S'` >>$TIMELOG"
-
+ * at the command line, use these bash aliases: 'shell alias ti="echo
+ i `date '+%Y-%m-%d %H:%M:%S'` \$* >>$TIMELOG" alias to="echo o
+ `date '+%Y-%m-%d %H:%M:%S'` >>$TIMELOG"'
* or use the old 'ti' and 'to' scripts in the ledger 2.x repository.
These rely on a "timeclock" executable which I think is just the
ledger 2 executable renamed.
@@ -57,6 +55,6 @@ $ hledger -f sample.timeclock register -p weekly --depth 1 --empty # time summa

Tag Table:
-Node: Top80
+Node: Top78

End Tag Table
diff --git a/doc/hledger_timeclock.5.txt b/hledger_timeclock.txt
index 5840820..a0d9878 100644
--- a/doc/hledger_timeclock.5.txt
+++ b/hledger_timeclock.txt
@@ -45,10 +45,8 @@ DESCRIPTION
o use emacs and the built-in timeclock.el, or the extended time-
clock-x.el and perhaps the extras in ledgerutils.el
- o at the command line, use these bash aliases:
-
- alias ti="echo i `date '+%Y-%m-%d %H:%M:%S'` \$* >>$TIMELOG"
- alias to="echo o `date '+%Y-%m-%d %H:%M:%S'` >>$TIMELOG"
+ o at the command line, use these bash aliases:
+ shell alias ti="echo i `date '+%Y-%m-%d %H:%M:%S'` \$* >>$TIMELOG" alias to="echo o `date '+%Y-%m-%d %H:%M:%S'` >>$TIMELOG"
o or use the old ti and to scripts in the ledger 2.x repository. These
rely on a "timeclock" executable which I think is just the ledger 2
@@ -79,4 +77,4 @@ SEE ALSO
-hledger 1.4 September 2017 hledger_timeclock(5)
+hledger 1.5 December 2017 hledger_timeclock(5)
diff --git a/doc/hledger_timedot.5 b/hledger_timedot.5
index 13a7324..c516289 100644
--- a/doc/hledger_timedot.5
+++ b/hledger_timedot.5
@@ -1,11 +1,11 @@
-.TH "hledger_timedot" "5" "September 2017" "hledger 1.4" "hledger User Manuals"
+.TH "hledger_timedot" "5" "December 2017" "hledger 1.5" "hledger User Manuals"
.SH NAME
.PP
-Timedot \- hledger\[aq]s human\-friendly time logging format
+Timedot \- hledger's human\-friendly time logging format
.SH DESCRIPTION
.PP
Timedot is a plain text format for logging dated, categorised quantities
@@ -16,10 +16,10 @@ precise or too interruptive.
It can be formatted like a bar chart, making clear at a glance where
time was spent.
.PP
-Though called "timedot", this format is read by hledger as commodityless
-quantities, so it could be used to represent dated quantities other than
-time.
-In the docs below we\[aq]ll assume it\[aq]s time.
+Though called \[lq]timedot\[rq], this format is read by hledger as
+commodityless quantities, so it could be used to represent dated
+quantities other than time.
+In the docs below we'll assume it's time.
.SH FILE FORMAT
.PP
A timedot file contains a series of day entries.
@@ -34,7 +34,7 @@ Quantities can be written as:
.IP \[bu] 2
a sequence of dots (.) representing quarter hours.
Spaces may optionally be used for grouping and readability.
-Eg: ....
+Eg: \&....
\&..
.IP \[bu] 2
an integral or decimal number, representing hours.
diff --git a/doc/hledger_timedot.5.info b/hledger_timedot.info
index e3e9fa2..ac76544 100644
--- a/doc/hledger_timedot.5.info
+++ b/hledger_timedot.info
@@ -1,10 +1,10 @@
-This is hledger_timedot.5.info, produced by makeinfo version 6.0 from
+This is hledger_timedot.info, produced by makeinfo version 6.5 from
stdin.

-File: hledger_timedot.5.info, Node: Top, Next: FILE FORMAT, Up: (dir)
+File: hledger_timedot.info, Node: Top, Next: FILE FORMAT, Up: (dir)
-hledger_timedot(5) hledger 1.4
+hledger_timedot(5) hledger 1.5
******************************
Timedot is a plain text format for logging dated, categorised quantities
@@ -22,7 +22,7 @@ quantities other than time. In the docs below we'll assume it's time.
* FILE FORMAT::

-File: hledger_timedot.5.info, Node: FILE FORMAT, Prev: Top, Up: Top
+File: hledger_timedot.info, Node: FILE FORMAT, Prev: Top, Up: Top
1 FILE FORMAT
*************
@@ -53,7 +53,7 @@ example:
# on this day, 6h was spent on client work, 1.5h on haskell FOSS work, etc.
2016/2/1
inc:client1 .... .... .... .... .... ....
-fos:haskell .... ..
+fos:haskell .... ..
biz:research .
2016/2/2
@@ -79,17 +79,17 @@ $ hledger -f t.timedot print date:2016/2/2
$ hledger -f t.timedot bal --daily --tree
Balance changes in 2016/02/01-2016/02/03:
- || 2016/02/01d 2016/02/02d 2016/02/03d
+ || 2016/02/01d 2016/02/02d 2016/02/03d
============++========================================
- biz || 0.25 0.25 1.00
- research || 0.25 0.25 1.00
- fos || 1.50 0 3.00
- haskell || 1.50 0 0
- hledger || 0 0 3.00
- inc || 6.00 2.00 4.00
- client1 || 6.00 2.00 4.00
+ biz || 0.25 0.25 1.00
+ research || 0.25 0.25 1.00
+ fos || 1.50 0 3.00
+ haskell || 1.50 0 0
+ hledger || 0 0 3.00
+ inc || 6.00 2.00 4.00
+ client1 || 6.00 2.00 4.00
------------++----------------------------------------
- || 7.75 2.25 8.00
+ || 7.75 2.25 8.00
I prefer to use period for separating account components. We can
make this work with an account alias:
@@ -109,8 +109,8 @@ $ hledger -f t.timedot --alias /\\./=: bal date:2016/2/4

Tag Table:
-Node: Top78
-Node: FILE FORMAT809
-Ref: #file-format912
+Node: Top76
+Node: FILE FORMAT805
+Ref: #file-format906

End Tag Table
diff --git a/doc/hledger_timedot.5.txt b/hledger_timedot.txt
index fa7d582..757f679 100644
--- a/doc/hledger_timedot.5.txt
+++ b/hledger_timedot.txt
@@ -124,4 +124,4 @@ SEE ALSO
-hledger 1.4 September 2017 hledger_timedot(5)
+hledger 1.5 December 2017 hledger_timedot(5)