author    AlexMason <>                      2019-10-09 00:02:00 (GMT)
committer hdiff <hdiff@hdiff.luite.com>     2019-10-09 00:02:00 (GMT)
commit    a81d2e17f7701ea575792c00e2d397b12af97411 (patch)
tree      6cded4f07b6d0184a9a2fa684f676eb266d76231
parent    580c1f38cca655d7c5784f1b9a25d709dd365213 (diff)

version 1.1.0.0 (HEAD, 1.1.0.0, master)
-rw-r--r--  Changelog.md                            7
-rw-r--r--  amazonka-s3-streaming.cabal             6
-rw-r--r--  src/Network/AWS/S3/StreamingUpload.hs   9
3 files changed, 13 insertions, 9 deletions
diff --git a/Changelog.md b/Changelog.md
index e67eae7..8bd897b 100644
--- a/Changelog.md
+++ b/Changelog.md
@@ -1,5 +1,8 @@
# Changelog - amazonka-s3-streaming
+## 1.1.0.0
+ - Adds MonadFail constraints, thanks @utdemir
+
## 1.0.0.2
- allow http-client 0.6
@@ -7,7 +10,7 @@
- Update to support conduit >= 1.3 only.
## 0.2.0.5
-- Fix compatibility with
+- Fix compatibility with
## 0.2.0.4
- Make building s3upload executable optional
@@ -29,4 +32,4 @@
more about the data than we do.
* Allow the user to specify how many concurrent threads to use for `concurrentUpload`,
as well as chunk size (#4).
- * Better specify cabal dependency ranges.
\ No newline at end of file
+ * Better specify cabal dependency ranges.
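
For context on the MonadFail entry above: MonadFail is the class from Control.Monad.Fail (shipped since base 4.9) that carries the fail method separately from Monad, so that only monads with a sensible failure mode opt in. Its definition, for reference:

    class Monad m => MonadFail m where
      fail :: String -> m a

In IO, fail throws an IOError; in Maybe it produces Nothing; types without an instance reject fail at compile time.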
diff --git a/amazonka-s3-streaming.cabal b/amazonka-s3-streaming.cabal
index bb647ad..7e45364 100644
--- a/amazonka-s3-streaming.cabal
+++ b/amazonka-s3-streaming.cabal
@@ -1,5 +1,5 @@
name: amazonka-s3-streaming
-version: 1.0.0.2
+version: 1.1.0.0
synopsis: Provides conduits to upload data to S3 using the Multipart API
description: Provides a conduit-based streaming interface and a concurrent interface to
uploading data to S3 using the Multipart API. Also provides a method to upload
@@ -14,13 +14,13 @@ category: Network, AWS, Cloud, Distributed Computing
build-type: Simple
extra-source-files: README.md, Changelog.md
cabal-version: >=1.10
-tested-with: GHC == 8.0.* || == 8.2.2 || == 8.4.* || == 8.6.*
+tested-with: GHC == 8.0.* || == 8.2.2 || == 8.4.* || == 8.6.* || == 8.8.*
library
hs-source-dirs: src
exposed-modules: Network.AWS.S3.StreamingUpload
default-language: Haskell2010
- build-depends: base >= 4.6 && < 5
+ build-depends: base >= 4.9 && < 5
, amazonka >= 1.6 && < 1.7
, amazonka-core >= 1.6 && < 1.7
, amazonka-s3 >= 1.6 && < 1.7
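
The new base lower bound of 4.9 corresponds to GHC 8.0, the oldest compiler in tested-with, and is the first base release that ships Control.Monad.Fail, so the unconditional import added in the module below resolves on every supported compiler. A common alternative, not used in this patch, is a CPP guard, sketched here for illustration:

    {-# LANGUAGE CPP #-}
    -- base >= 4.13 (GHC 8.8) re-exports MonadFail from the Prelude,
    -- so the explicit import is only needed on older versions.
    #if !MIN_VERSION_base(4,13,0)
    import Control.Monad.Fail (MonadFail)
    #endif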
diff --git a/src/Network/AWS/S3/StreamingUpload.hs b/src/Network/AWS/S3/StreamingUpload.hs
index ea1cb21..fd4ae08 100644
--- a/src/Network/AWS/S3/StreamingUpload.hs
+++ b/src/Network/AWS/S3/StreamingUpload.hs
@@ -33,6 +33,7 @@ import Network.AWS.S3.UploadPart
import Control.Applicative
import Control.Category ( (>>>) )
import Control.Monad ( forM_, when, (>=>) )
+import Control.Monad.Fail ( MonadFail )
import Control.Monad.IO.Class ( MonadIO, liftIO )
import Control.Monad.Morph ( lift )
import Control.Monad.Reader.Class ( local )
@@ -84,7 +85,7 @@ See the AWS documentation for more details.
May throw 'Network.AWS.Error'
-}
-streamUpload :: (MonadUnliftIO m, MonadAWS m)
+streamUpload :: (MonadUnliftIO m, MonadAWS m, MonadFail m)
=> Maybe ChunkSize -- ^ Optional chunk size
-> CreateMultipartUpload -- ^ Upload location
-> ConduitT ByteString Void m (Either (AbortMultipartUploadResponse, SomeException) CompleteMultipartUploadResponse)
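
A minimal usage sketch of the new signature, assuming amazonka 1.6's AWS monad supplies the MonadUnliftIO, MonadAWS, and MonadFail instances; the bucket, key, and file names are hypothetical:

    {-# LANGUAGE OverloadedStrings #-}
    import Data.Conduit (runConduit, (.|))
    import Data.Conduit.Binary (sourceFile)
    import Network.AWS (Credentials (Discover), newEnv, runAWS, runResourceT)
    import Network.AWS.S3.CreateMultipartUpload (createMultipartUpload)
    import Network.AWS.S3.StreamingUpload (streamUpload)

    main :: IO ()
    main = do
      env <- newEnv Discover
      result <- runResourceT . runAWS env . runConduit $
           sourceFile "payload.bin"                              -- hypothetical input file
        .| streamUpload Nothing                                  -- Nothing = default chunk size
             (createMultipartUpload "my-bucket" "my-object-key") -- hypothetical destination
      either (print . snd) (const (putStrLn "upload complete")) result

On success the Right branch carries the CompleteMultipartUploadResponse; on failure the Left branch pairs the abort response with the exception that caused it.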
@@ -133,7 +134,7 @@ streamUpload mChunkSize multiPartUploadDesc = do
& cMultipartUpload ?~ set cmuParts prts completedMultipartUpload
- performUpload :: MonadAWS m => Int -> Int -> Digest SHA256 -> D.DList ByteString -> m UploadPartResponse
+ performUpload :: (MonadAWS m, MonadFail m) => Int -> Int -> Digest SHA256 -> D.DList ByteString -> m UploadPartResponse
performUpload pnum size digest =
D.toList
>>> sourceList
@@ -143,7 +144,7 @@ streamUpload mChunkSize multiPartUploadDesc = do
>>> send
>=> checkUpload
- checkUpload :: (Monad m) => UploadPartResponse -> m UploadPartResponse
+ checkUpload :: (Monad m, MonadFail m) => UploadPartResponse -> m UploadPartResponse
checkUpload upr = do
when (upr ^. uprsResponseStatus /= 200) $ fail "Failed to upload piece"
return upr
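
The fail call in checkUpload above is what forces the new constraints throughout: on base 4.13 (GHC 8.8) the Prelude's fail belongs to MonadFail rather than Monad, so a bare Monad constraint no longer compiles. The same pattern in isolation, as a sketch (checkStatus is an illustrative name, not part of the library):

    import Control.Monad (when)
    import Control.Monad.Fail (MonadFail)

    -- Mirrors checkUpload: pass the value through, or abort via MonadFail.
    checkStatus :: MonadFail m => Int -> a -> m a
    checkStatus status x = do
      when (status /= 200) $ fail "Failed to upload piece"
      return x

In IO this throws an IOError carrying the message; in Maybe it yields Nothing.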
@@ -173,7 +174,7 @@ May throw `Network.AWS.Error`, or `IOError`; an attempt is made to cancel the
multipart upload on any error, but this may also fail if, for example, the network
connection has been broken. See `abortAllUploads` for a crude cleanup method.
-}
-concurrentUpload :: (MonadAWS m)
+concurrentUpload :: (MonadAWS m, MonadFail m)
=> Maybe ChunkSize -- ^ Optional chunk size
-> Maybe NumThreads -- ^ Optional number of threads to upload with
-> UploadLocation -- ^ Whether to upload a file on disk or a `ByteString` that's already in memory.
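
A corresponding sketch for the concurrent path, assuming the rest of the signature (cut off by the diff context) follows streamUpload's pattern: a CreateMultipartUpload for the destination, returning the CompleteMultipartUploadResponse. The UploadLocation is taken as a parameter here rather than constructed, and the bucket and key names are hypothetical:

    {-# LANGUAGE OverloadedStrings #-}
    import Network.AWS (Credentials (Discover), newEnv, runAWS, runResourceT)
    import Network.AWS.S3.CreateMultipartUpload (createMultipartUpload)
    import Network.AWS.S3.StreamingUpload (UploadLocation, concurrentUpload)

    -- Upload the given source, with library defaults for chunk size and thread count.
    uploadConcurrently :: UploadLocation -> IO ()
    uploadConcurrently source = do
      env <- newEnv Discover
      _ <- runResourceT . runAWS env $
             concurrentUpload Nothing Nothing source
               (createMultipartUpload "my-bucket" "my-object-key")
      putStrLn "upload complete"

Both Nothing arguments fall back to the library defaults, matching the optional chunk size and thread count documented in the signature above.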