author    BrendanHay <>  2016-05-09 19:13:00 (GMT)
committer hdiff <hdiff@hdiff.luite.com>  2016-05-09 19:13:00 (GMT)
commit    8601db639de8335a62aa7922e0817c118540bef2 (patch)
tree      dc2fd2f3e7460f45249d349ac2340a46a12868a6
parent    df14729206fa1c150cda8f12b3d391463f931c19 (diff)
version 1.4.1
-rw-r--r--  README.md                                                   8
-rw-r--r--  amazonka-kinesis.cabal                                     18
-rw-r--r--  fixture/DisableEnhancedMonitoring.yaml                      0
-rw-r--r--  fixture/DisableEnhancedMonitoringResponse.proto             0
-rw-r--r--  fixture/EnableEnhancedMonitoring.yaml                       0
-rw-r--r--  fixture/EnableEnhancedMonitoringResponse.proto              0
-rw-r--r--  gen/Network/AWS/Kinesis.hs                                 30
-rw-r--r--  gen/Network/AWS/Kinesis/AddTagsToStream.hs                  4
-rw-r--r--  gen/Network/AWS/Kinesis/CreateStream.hs                    21
-rw-r--r--  gen/Network/AWS/Kinesis/DecreaseStreamRetentionPeriod.hs   10
-rw-r--r--  gen/Network/AWS/Kinesis/DeleteStream.hs                    12
-rw-r--r--  gen/Network/AWS/Kinesis/DescribeStream.hs                  15
-rw-r--r--  gen/Network/AWS/Kinesis/DisableEnhancedMonitoring.hs      128
-rw-r--r--  gen/Network/AWS/Kinesis/EnableEnhancedMonitoring.hs       128
-rw-r--r--  gen/Network/AWS/Kinesis/GetRecords.hs                      50
-rw-r--r--  gen/Network/AWS/Kinesis/GetShardIterator.hs                93
-rw-r--r--  gen/Network/AWS/Kinesis/IncreaseStreamRetentionPeriod.hs   15
-rw-r--r--  gen/Network/AWS/Kinesis/ListStreams.hs                      6
-rw-r--r--  gen/Network/AWS/Kinesis/ListTagsForStream.hs                4
-rw-r--r--  gen/Network/AWS/Kinesis/MergeShards.hs                     24
-rw-r--r--  gen/Network/AWS/Kinesis/PutRecord.hs                       37
-rw-r--r--  gen/Network/AWS/Kinesis/PutRecords.hs                      24
-rw-r--r--  gen/Network/AWS/Kinesis/RemoveTagsFromStream.hs             8
-rw-r--r--  gen/Network/AWS/Kinesis/SplitShard.hs                      36
-rw-r--r--  gen/Network/AWS/Kinesis/Types.hs                           26
-rw-r--r--  gen/Network/AWS/Kinesis/Types/Product.hs                  150
-rw-r--r--  gen/Network/AWS/Kinesis/Types/Sum.hs                       54
-rw-r--r--  test/Test/AWS/Gen/Kinesis.hs                               36
28 files changed, 774 insertions, 163 deletions
diff --git a/README.md b/README.md
index ace4112..9dde665 100644
--- a/README.md
+++ b/README.md
@@ -8,15 +8,15 @@
## Version
-`1.4.0`
+`1.4.1`
## Description
-Amazon Kinesis Service API Reference
+Amazon Kinesis Streams Service API Reference
-Amazon Kinesis is a managed service that scales elastically for real
-time processing of streaming big data.
+Amazon Kinesis Streams is a managed service that scales elastically for
+real time processing of streaming big data.
Documentation is available via [Hackage](http://hackage.haskell.org/package/amazonka-kinesis)
and the [AWS API Reference](https://aws.amazon.com/documentation/).
diff --git a/amazonka-kinesis.cabal b/amazonka-kinesis.cabal
index 81bb2e2..28066d0 100644
--- a/amazonka-kinesis.cabal
+++ b/amazonka-kinesis.cabal
@@ -1,5 +1,5 @@
name: amazonka-kinesis
-version: 1.4.0
+version: 1.4.1
synopsis: Amazon Kinesis SDK.
homepage: https://github.com/brendanhay/amazonka
bug-reports: https://github.com/brendanhay/amazonka/issues
@@ -13,10 +13,10 @@ build-type: Simple
cabal-version: >= 1.10
extra-source-files: README.md fixture/*.yaml fixture/*.proto
description:
- Amazon Kinesis Service API Reference
+ Amazon Kinesis Streams Service API Reference
- Amazon Kinesis is a managed service that scales elastically for real
- time processing of streaming big data.
+ Amazon Kinesis Streams is a managed service that scales elastically for
+ real time processing of streaming big data.
.
The types from this library are intended to be used with
<http://hackage.haskell.org/package/amazonka amazonka>, which provides
@@ -49,6 +49,8 @@ library
, Network.AWS.Kinesis.DecreaseStreamRetentionPeriod
, Network.AWS.Kinesis.DeleteStream
, Network.AWS.Kinesis.DescribeStream
+ , Network.AWS.Kinesis.DisableEnhancedMonitoring
+ , Network.AWS.Kinesis.EnableEnhancedMonitoring
, Network.AWS.Kinesis.GetRecords
, Network.AWS.Kinesis.GetShardIterator
, Network.AWS.Kinesis.IncreaseStreamRetentionPeriod
@@ -67,7 +69,7 @@ library
, Network.AWS.Kinesis.Types.Sum
build-depends:
- amazonka-core == 1.4.0.*
+ amazonka-core == 1.4.1.*
, base >= 4.7 && < 5
test-suite amazonka-kinesis-test
@@ -87,9 +89,9 @@ test-suite amazonka-kinesis-test
, Test.AWS.Kinesis.Internal
build-depends:
- amazonka-core == 1.4.0.*
- , amazonka-test == 1.4.0.*
- , amazonka-kinesis == 1.4.0.*
+ amazonka-core == 1.4.1.*
+ , amazonka-test == 1.4.1.*
+ , amazonka-kinesis == 1.4.1.*
, base
, bytestring
, tasty
diff --git a/fixture/DisableEnhancedMonitoring.yaml b/fixture/DisableEnhancedMonitoring.yaml
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/fixture/DisableEnhancedMonitoring.yaml
diff --git a/fixture/DisableEnhancedMonitoringResponse.proto b/fixture/DisableEnhancedMonitoringResponse.proto
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/fixture/DisableEnhancedMonitoringResponse.proto
diff --git a/fixture/EnableEnhancedMonitoring.yaml b/fixture/EnableEnhancedMonitoring.yaml
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/fixture/EnableEnhancedMonitoring.yaml
diff --git a/fixture/EnableEnhancedMonitoringResponse.proto b/fixture/EnableEnhancedMonitoringResponse.proto
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/fixture/EnableEnhancedMonitoringResponse.proto
diff --git a/gen/Network/AWS/Kinesis.hs b/gen/Network/AWS/Kinesis.hs
index 786b042..b436736 100644
--- a/gen/Network/AWS/Kinesis.hs
+++ b/gen/Network/AWS/Kinesis.hs
@@ -11,10 +11,10 @@
-- Stability : auto-generated
-- Portability : non-portable (GHC extensions)
--
--- Amazon Kinesis Service API Reference
+-- Amazon Kinesis Streams Service API Reference
--
--- Amazon Kinesis is a managed service that scales elastically for real
--- time processing of streaming big data.
+-- Amazon Kinesis Streams is a managed service that scales elastically for
+-- real time processing of streaming big data.
module Network.AWS.Kinesis
(
-- * Service Configuration
@@ -68,6 +68,12 @@ module Network.AWS.Kinesis
-- ** GetRecords
, module Network.AWS.Kinesis.GetRecords
+ -- ** EnableEnhancedMonitoring
+ , module Network.AWS.Kinesis.EnableEnhancedMonitoring
+
+ -- ** DisableEnhancedMonitoring
+ , module Network.AWS.Kinesis.DisableEnhancedMonitoring
+
-- ** ListTagsForStream
, module Network.AWS.Kinesis.ListTagsForStream
@@ -100,12 +106,27 @@ module Network.AWS.Kinesis
-- * Types
+ -- ** MetricsName
+ , MetricsName (..)
+
-- ** ShardIteratorType
, ShardIteratorType (..)
-- ** StreamStatus
, StreamStatus (..)
+ -- ** EnhancedMetrics
+ , EnhancedMetrics
+ , enhancedMetrics
+ , emShardLevelMetrics
+
+ -- ** EnhancedMonitoringOutput
+ , EnhancedMonitoringOutput
+ , enhancedMonitoringOutput
+ , emoDesiredShardLevelMetrics
+ , emoCurrentShardLevelMetrics
+ , emoStreamName
+
-- ** HashKeyRange
, HashKeyRange
, hashKeyRange
@@ -159,6 +180,7 @@ module Network.AWS.Kinesis
, sdShards
, sdHasMoreShards
, sdRetentionPeriodHours
+ , sdEnhancedMonitoring
-- ** Tag
, Tag
@@ -172,6 +194,8 @@ import Network.AWS.Kinesis.CreateStream
import Network.AWS.Kinesis.DecreaseStreamRetentionPeriod
import Network.AWS.Kinesis.DeleteStream
import Network.AWS.Kinesis.DescribeStream
+import Network.AWS.Kinesis.DisableEnhancedMonitoring
+import Network.AWS.Kinesis.EnableEnhancedMonitoring
import Network.AWS.Kinesis.GetRecords
import Network.AWS.Kinesis.GetShardIterator
import Network.AWS.Kinesis.IncreaseStreamRetentionPeriod
diff --git a/gen/Network/AWS/Kinesis/AddTagsToStream.hs b/gen/Network/AWS/Kinesis/AddTagsToStream.hs
index f1ba7ca..0ebd4cc 100644
--- a/gen/Network/AWS/Kinesis/AddTagsToStream.hs
+++ b/gen/Network/AWS/Kinesis/AddTagsToStream.hs
@@ -83,6 +83,8 @@ instance AWSRequest AddTagsToStream where
instance Hashable AddTagsToStream
+instance NFData AddTagsToStream
+
instance ToHeaders AddTagsToStream where
toHeaders
= const
@@ -115,3 +117,5 @@ data AddTagsToStreamResponse =
addTagsToStreamResponse
:: AddTagsToStreamResponse
addTagsToStreamResponse = AddTagsToStreamResponse'
+
+instance NFData AddTagsToStreamResponse
diff --git a/gen/Network/AWS/Kinesis/CreateStream.hs b/gen/Network/AWS/Kinesis/CreateStream.hs
index 46202eb..7eaa9d4 100644
--- a/gen/Network/AWS/Kinesis/CreateStream.hs
+++ b/gen/Network/AWS/Kinesis/CreateStream.hs
@@ -18,11 +18,11 @@
-- Stability : auto-generated
-- Portability : non-portable (GHC extensions)
--
--- Creates a Amazon Kinesis stream. A stream captures and transports data
+-- Creates an Amazon Kinesis stream. A stream captures and transports data
-- records that are continuously emitted from different data sources or
--- /producers/. Scale-out within an Amazon Kinesis stream is explicitly
--- supported by means of shards, which are uniquely identified groups of
--- data records in an Amazon Kinesis stream.
+-- /producers/. Scale-out within a stream is explicitly supported by means
+-- of shards, which are uniquely identified groups of data records in a
+-- stream.
--
-- You specify and control the number of shards that a stream is composed
-- of. Each shard can support reads up to 5 transactions per second, up to
@@ -52,8 +52,9 @@
-- - Create more shards than are authorized for your account.
--
-- For the default shard limit for an AWS account, see
--- <http://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html Amazon Kinesis Limits>.
--- If you need to increase this limit,
+-- <http://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html Streams Limits>
+-- in the /Amazon Kinesis Streams Developer Guide/. If you need to increase
+-- this limit,
-- <http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html contact AWS Support>.
--
-- You can use 'DescribeStream' to check the stream status, which is
@@ -109,8 +110,8 @@ createStream pStreamName_ pShardCount_ =
-- | A name to identify the stream. The stream name is scoped to the AWS
-- account used by the application that creates the stream. It is also
-- scoped by region. That is, two streams in two different AWS accounts can
--- have the same name, and two streams in the same AWS account, but in two
--- different regions, can have the same name.
+-- have the same name, and two streams in the same AWS account but in two
+-- different regions can have the same name.
csStreamName :: Lens' CreateStream Text
csStreamName = lens _csStreamName (\ s a -> s{_csStreamName = a});
@@ -129,6 +130,8 @@ instance AWSRequest CreateStream where
instance Hashable CreateStream
+instance NFData CreateStream
+
instance ToHeaders CreateStream where
toHeaders
= const
@@ -161,3 +164,5 @@ data CreateStreamResponse =
createStreamResponse
:: CreateStreamResponse
createStreamResponse = CreateStreamResponse'
+
+instance NFData CreateStreamResponse
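The shard count above is the stream's capacity knob: each shard ingests up to 1 MB per second and serves reads up to 2 MB per second. A minimal sizing sketch (the helper name and the rule-of-thumb framing are illustrative, not part of the generated API):

    -- Provision for whichever of write or read throughput needs more
    -- shards; each shard handles roughly 1 MB/s in and 2 MB/s out.
    shardCountFor :: Double -> Double -> Int
    shardCountFor writeMBps readMBps =
      max 1 (ceiling (max writeMBps (readMBps / 2)))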
diff --git a/gen/Network/AWS/Kinesis/DecreaseStreamRetentionPeriod.hs b/gen/Network/AWS/Kinesis/DecreaseStreamRetentionPeriod.hs
index bde1797..54276d0 100644
--- a/gen/Network/AWS/Kinesis/DecreaseStreamRetentionPeriod.hs
+++ b/gen/Network/AWS/Kinesis/DecreaseStreamRetentionPeriod.hs
@@ -18,9 +18,9 @@
-- Stability : auto-generated
-- Portability : non-portable (GHC extensions)
--
--- Decreases the stream\'s retention period, which is the length of time
--- data records are accessible after they are added to the stream. The
--- minimum value of a stream’s retention period is 24 hours.
+-- Decreases the Amazon Kinesis stream\'s retention period, which is the
+-- length of time data records are accessible after they are added to the
+-- stream. The minimum value of a stream\'s retention period is 24 hours.
--
-- This operation may result in lost data. For example, if the stream\'s
-- retention period is 48 hours and is decreased to 24 hours, any data
@@ -90,6 +90,8 @@ instance AWSRequest DecreaseStreamRetentionPeriod
instance Hashable DecreaseStreamRetentionPeriod
+instance NFData DecreaseStreamRetentionPeriod
+
instance ToHeaders DecreaseStreamRetentionPeriod
where
toHeaders
@@ -126,3 +128,5 @@ data DecreaseStreamRetentionPeriodResponse =
decreaseStreamRetentionPeriodResponse
:: DecreaseStreamRetentionPeriodResponse
decreaseStreamRetentionPeriodResponse = DecreaseStreamRetentionPeriodResponse'
+
+instance NFData DecreaseStreamRetentionPeriodResponse
diff --git a/gen/Network/AWS/Kinesis/DeleteStream.hs b/gen/Network/AWS/Kinesis/DeleteStream.hs
index e71c0f6..9c56a68 100644
--- a/gen/Network/AWS/Kinesis/DeleteStream.hs
+++ b/gen/Network/AWS/Kinesis/DeleteStream.hs
@@ -18,10 +18,10 @@
-- Stability : auto-generated
-- Portability : non-portable (GHC extensions)
--
--- Deletes a stream and all its shards and data. You must shut down any
--- applications that are operating on the stream before you delete the
--- stream. If an application attempts to operate on a deleted stream, it
--- will receive the exception 'ResourceNotFoundException'.
+-- Deletes an Amazon Kinesis stream and all its shards and data. You must
+-- shut down any applications that are operating on the stream before you
+-- delete the stream. If an application attempts to operate on a deleted
+-- stream, it will receive the exception 'ResourceNotFoundException'.
--
-- If the stream is in the 'ACTIVE' state, you can delete it. After a
-- 'DeleteStream' request, the specified stream is in the 'DELETING' state
@@ -89,6 +89,8 @@ instance AWSRequest DeleteStream where
instance Hashable DeleteStream
+instance NFData DeleteStream
+
instance ToHeaders DeleteStream where
toHeaders
= const
@@ -119,3 +121,5 @@ data DeleteStreamResponse =
deleteStreamResponse
:: DeleteStreamResponse
deleteStreamResponse = DeleteStreamResponse'
+
+instance NFData DeleteStreamResponse
diff --git a/gen/Network/AWS/Kinesis/DescribeStream.hs b/gen/Network/AWS/Kinesis/DescribeStream.hs
index 48b3a1a..b2a21ab 100644
--- a/gen/Network/AWS/Kinesis/DescribeStream.hs
+++ b/gen/Network/AWS/Kinesis/DescribeStream.hs
@@ -18,16 +18,15 @@
-- Stability : auto-generated
-- Portability : non-portable (GHC extensions)
--
--- Describes the specified stream.
+-- Describes the specified Amazon Kinesis stream.
--
-- The information about the stream includes its current status, its Amazon
-- Resource Name (ARN), and an array of shard objects. For each shard
-- object, there is information about the hash key and sequence number
-- ranges that the shard spans, and the IDs of any earlier shards that
-- played in a role in creating the shard. A sequence number is the
--- identifier associated with every record ingested in the Amazon Kinesis
--- stream. The sequence number is assigned when a record is put into the
--- stream.
+-- identifier associated with every record ingested in the stream. The
+-- sequence number is assigned when a record is put into the stream.
--
-- You can limit the number of returned shards using the 'Limit' parameter.
-- The number of shards in a stream may be too large to return from a
@@ -40,6 +39,10 @@
-- returned. Specify this ID in the 'ExclusiveStartShardId' parameter in a
-- subsequent request to 'DescribeStream'.
--
+-- There are no guarantees about the chronological order shards returned in
+-- 'DescribeStream' results. If you want to process shards in chronological
+-- order, use 'ParentShardId' to track lineage to the oldest shard.
+--
-- < DescribeStream> has a limit of 10 transactions per second per account.
--
-- This operation returns paginated results.
@@ -135,6 +138,8 @@ instance AWSRequest DescribeStream where
instance Hashable DescribeStream
+instance NFData DescribeStream
+
instance ToHeaders DescribeStream where
toHeaders
= const
@@ -193,3 +198,5 @@ dsrsResponseStatus = lens _dsrsResponseStatus (\ s a -> s{_dsrsResponseStatus =
-- shards available.
dsrsStreamDescription :: Lens' DescribeStreamResponse StreamDescription
dsrsStreamDescription = lens _dsrsStreamDescription (\ s a -> s{_dsrsStreamDescription = a});
+
+instance NFData DescribeStreamResponse
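Because 'DescribeStream' pages through shards via 'ExclusiveStartShardId', the amazonka 'paginate' combinator can drain all pages for you. A minimal sketch using the conduit operators of the 1.4-era API ('Env' setup and the stream name are illustrative assumptions; 'dsrsStreamDescription' and 'sdShards' are the lenses exported above):

    {-# LANGUAGE OverloadedStrings #-}
    import           Control.Lens        (view)
    import           Data.Conduit        (($$), (=$))
    import qualified Data.Conduit.List   as CL
    import           Network.AWS
    import           Network.AWS.Kinesis

    -- Collect every shard of a stream, following pagination automatically.
    allShards :: Env -> IO [Shard]
    allShards env =
      runResourceT . runAWS env $
        paginate (describeStream "my-stream")
          $$ CL.concatMap (view (dsrsStreamDescription . sdShards))
          =$ CL.consume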
diff --git a/gen/Network/AWS/Kinesis/DisableEnhancedMonitoring.hs b/gen/Network/AWS/Kinesis/DisableEnhancedMonitoring.hs
new file mode 100644
index 0000000..31e2f74
--- /dev/null
+++ b/gen/Network/AWS/Kinesis/DisableEnhancedMonitoring.hs
@@ -0,0 +1,128 @@
+{-# LANGUAGE DeriveDataTypeable #-}
+{-# LANGUAGE DeriveGeneric #-}
+{-# LANGUAGE OverloadedStrings #-}
+{-# LANGUAGE RecordWildCards #-}
+{-# LANGUAGE TypeFamilies #-}
+
+{-# OPTIONS_GHC -fno-warn-unused-imports #-}
+{-# OPTIONS_GHC -fno-warn-unused-binds #-}
+{-# OPTIONS_GHC -fno-warn-unused-matches #-}
+
+-- Derived from AWS service descriptions, licensed under Apache 2.0.
+
+-- |
+-- Module : Network.AWS.Kinesis.DisableEnhancedMonitoring
+-- Copyright : (c) 2013-2016 Brendan Hay
+-- License : Mozilla Public License, v. 2.0.
+-- Maintainer : Brendan Hay <brendan.g.hay@gmail.com>
+-- Stability : auto-generated
+-- Portability : non-portable (GHC extensions)
+--
+-- Disables enhanced monitoring.
+module Network.AWS.Kinesis.DisableEnhancedMonitoring
+ (
+ -- * Creating a Request
+ disableEnhancedMonitoring
+ , DisableEnhancedMonitoring
+ -- * Request Lenses
+ , demStreamName
+ , demShardLevelMetrics
+
+ -- * Destructuring the Response
+ , enhancedMonitoringOutput
+ , EnhancedMonitoringOutput
+ -- * Response Lenses
+ , emoDesiredShardLevelMetrics
+ , emoCurrentShardLevelMetrics
+ , emoStreamName
+ ) where
+
+import Network.AWS.Kinesis.Types
+import Network.AWS.Kinesis.Types.Product
+import Network.AWS.Lens
+import Network.AWS.Prelude
+import Network.AWS.Request
+import Network.AWS.Response
+
+-- | Represents the input for < DisableEnhancedMonitoring>.
+--
+-- /See:/ 'disableEnhancedMonitoring' smart constructor.
+data DisableEnhancedMonitoring = DisableEnhancedMonitoring'
+ { _demStreamName :: !Text
+ , _demShardLevelMetrics :: !(List1 MetricsName)
+ } deriving (Eq,Read,Show,Data,Typeable,Generic)
+
+-- | Creates a value of 'DisableEnhancedMonitoring' with the minimum fields required to make a request.
+--
+-- Use one of the following lenses to modify other fields as desired:
+--
+-- * 'demStreamName'
+--
+-- * 'demShardLevelMetrics'
+disableEnhancedMonitoring
+ :: Text -- ^ 'demStreamName'
+ -> NonEmpty MetricsName -- ^ 'demShardLevelMetrics'
+ -> DisableEnhancedMonitoring
+disableEnhancedMonitoring pStreamName_ pShardLevelMetrics_ =
+ DisableEnhancedMonitoring'
+ { _demStreamName = pStreamName_
+ , _demShardLevelMetrics = _List1 # pShardLevelMetrics_
+ }
+
+-- | The name of the Amazon Kinesis stream for which to disable enhanced
+-- monitoring.
+demStreamName :: Lens' DisableEnhancedMonitoring Text
+demStreamName = lens _demStreamName (\ s a -> s{_demStreamName = a});
+
+-- | List of shard-level metrics to disable.
+--
+-- The following are the valid shard-level metrics. The value \"'ALL'\"
+-- disables every metric.
+--
+-- - 'IncomingBytes'
+-- - 'IncomingRecords'
+-- - 'OutgoingBytes'
+-- - 'OutgoingRecords'
+-- - 'WriteProvisionedThroughputExceeded'
+-- - 'ReadProvisionedThroughputExceeded'
+-- - 'IteratorAgeMilliseconds'
+-- - 'ALL'
+--
+-- For more information, see
+-- <http://docs.aws.amazon.com/kinesis/latest/dev/monitoring-with-cloudwatch.html Monitoring the Amazon Kinesis Streams Service with Amazon CloudWatch>
+-- in the /Amazon Kinesis Streams Developer Guide/.
+demShardLevelMetrics :: Lens' DisableEnhancedMonitoring (NonEmpty MetricsName)
+demShardLevelMetrics = lens _demShardLevelMetrics (\ s a -> s{_demShardLevelMetrics = a}) . _List1;
+
+instance AWSRequest DisableEnhancedMonitoring where
+ type Rs DisableEnhancedMonitoring =
+ EnhancedMonitoringOutput
+ request = postJSON kinesis
+ response = receiveJSON (\ s h x -> eitherParseJSON x)
+
+instance Hashable DisableEnhancedMonitoring
+
+instance NFData DisableEnhancedMonitoring
+
+instance ToHeaders DisableEnhancedMonitoring where
+ toHeaders
+ = const
+ (mconcat
+ ["X-Amz-Target" =#
+ ("Kinesis_20131202.DisableEnhancedMonitoring" ::
+ ByteString),
+ "Content-Type" =#
+ ("application/x-amz-json-1.1" :: ByteString)])
+
+instance ToJSON DisableEnhancedMonitoring where
+ toJSON DisableEnhancedMonitoring'{..}
+ = object
+ (catMaybes
+ [Just ("StreamName" .= _demStreamName),
+ Just ("ShardLevelMetrics" .= _demShardLevelMetrics)])
+
+instance ToPath DisableEnhancedMonitoring where
+ toPath = const "/"
+
+instance ToQuery DisableEnhancedMonitoring where
+ toQuery = const mempty
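Given the smart constructor above, building a request is just supplying the stream name and a non-empty metrics list; the 'ToJSON' instance then produces the documented 'StreamName'/'ShardLevelMetrics' body. A sketch (the 'IncomingBytes' constructor of 'MetricsName' is an assumption based on the metric values listed above):

    {-# LANGUAGE OverloadedStrings #-}
    import Data.List.NonEmpty  (NonEmpty ((:|)))
    import Network.AWS.Kinesis

    -- Turn off a single shard-level metric for one stream.
    -- IncomingBytes is the assumed constructor for that metric value.
    disableReq :: DisableEnhancedMonitoring
    disableReq =
      disableEnhancedMonitoring "my-stream" (IncomingBytes :| [])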
diff --git a/gen/Network/AWS/Kinesis/EnableEnhancedMonitoring.hs b/gen/Network/AWS/Kinesis/EnableEnhancedMonitoring.hs
new file mode 100644
index 0000000..4799b95
--- /dev/null
+++ b/gen/Network/AWS/Kinesis/EnableEnhancedMonitoring.hs
@@ -0,0 +1,128 @@
+{-# LANGUAGE DeriveDataTypeable #-}
+{-# LANGUAGE DeriveGeneric #-}
+{-# LANGUAGE OverloadedStrings #-}
+{-# LANGUAGE RecordWildCards #-}
+{-# LANGUAGE TypeFamilies #-}
+
+{-# OPTIONS_GHC -fno-warn-unused-imports #-}
+{-# OPTIONS_GHC -fno-warn-unused-binds #-}
+{-# OPTIONS_GHC -fno-warn-unused-matches #-}
+
+-- Derived from AWS service descriptions, licensed under Apache 2.0.
+
+-- |
+-- Module : Network.AWS.Kinesis.EnableEnhancedMonitoring
+-- Copyright : (c) 2013-2016 Brendan Hay
+-- License : Mozilla Public License, v. 2.0.
+-- Maintainer : Brendan Hay <brendan.g.hay@gmail.com>
+-- Stability : auto-generated
+-- Portability : non-portable (GHC extensions)
+--
+-- Enables enhanced Amazon Kinesis stream monitoring for shard-level
+-- metrics.
+module Network.AWS.Kinesis.EnableEnhancedMonitoring
+ (
+ -- * Creating a Request
+ enableEnhancedMonitoring
+ , EnableEnhancedMonitoring
+ -- * Request Lenses
+ , eemStreamName
+ , eemShardLevelMetrics
+
+ -- * Destructuring the Response
+ , enhancedMonitoringOutput
+ , EnhancedMonitoringOutput
+ -- * Response Lenses
+ , emoDesiredShardLevelMetrics
+ , emoCurrentShardLevelMetrics
+ , emoStreamName
+ ) where
+
+import Network.AWS.Kinesis.Types
+import Network.AWS.Kinesis.Types.Product
+import Network.AWS.Lens
+import Network.AWS.Prelude
+import Network.AWS.Request
+import Network.AWS.Response
+
+-- | Represents the input for < EnableEnhancedMonitoring>.
+--
+-- /See:/ 'enableEnhancedMonitoring' smart constructor.
+data EnableEnhancedMonitoring = EnableEnhancedMonitoring'
+ { _eemStreamName :: !Text
+ , _eemShardLevelMetrics :: !(List1 MetricsName)
+ } deriving (Eq,Read,Show,Data,Typeable,Generic)
+
+-- | Creates a value of 'EnableEnhancedMonitoring' with the minimum fields required to make a request.
+--
+-- Use one of the following lenses to modify other fields as desired:
+--
+-- * 'eemStreamName'
+--
+-- * 'eemShardLevelMetrics'
+enableEnhancedMonitoring
+ :: Text -- ^ 'eemStreamName'
+ -> NonEmpty MetricsName -- ^ 'eemShardLevelMetrics'
+ -> EnableEnhancedMonitoring
+enableEnhancedMonitoring pStreamName_ pShardLevelMetrics_ =
+ EnableEnhancedMonitoring'
+ { _eemStreamName = pStreamName_
+ , _eemShardLevelMetrics = _List1 # pShardLevelMetrics_
+ }
+
+-- | The name of the stream for which to enable enhanced monitoring.
+eemStreamName :: Lens' EnableEnhancedMonitoring Text
+eemStreamName = lens _eemStreamName (\ s a -> s{_eemStreamName = a});
+
+-- | List of shard-level metrics to enable.
+--
+-- The following are the valid shard-level metrics. The value \"'ALL'\"
+-- enables every metric.
+--
+-- - 'IncomingBytes'
+-- - 'IncomingRecords'
+-- - 'OutgoingBytes'
+-- - 'OutgoingRecords'
+-- - 'WriteProvisionedThroughputExceeded'
+-- - 'ReadProvisionedThroughputExceeded'
+-- - 'IteratorAgeMilliseconds'
+-- - 'ALL'
+--
+-- For more information, see
+-- <http://docs.aws.amazon.com/kinesis/latest/dev/monitoring-with-cloudwatch.html Monitoring the Amazon Kinesis Streams Service with Amazon CloudWatch>
+-- in the /Amazon Kinesis Streams Developer Guide/.
+eemShardLevelMetrics :: Lens' EnableEnhancedMonitoring (NonEmpty MetricsName)
+eemShardLevelMetrics = lens _eemShardLevelMetrics (\ s a -> s{_eemShardLevelMetrics = a}) . _List1;
+
+instance AWSRequest EnableEnhancedMonitoring where
+ type Rs EnableEnhancedMonitoring =
+ EnhancedMonitoringOutput
+ request = postJSON kinesis
+ response = receiveJSON (\ s h x -> eitherParseJSON x)
+
+instance Hashable EnableEnhancedMonitoring
+
+instance NFData EnableEnhancedMonitoring
+
+instance ToHeaders EnableEnhancedMonitoring where
+ toHeaders
+ = const
+ (mconcat
+ ["X-Amz-Target" =#
+ ("Kinesis_20131202.EnableEnhancedMonitoring" ::
+ ByteString),
+ "Content-Type" =#
+ ("application/x-amz-json-1.1" :: ByteString)])
+
+instance ToJSON EnableEnhancedMonitoring where
+ toJSON EnableEnhancedMonitoring'{..}
+ = object
+ (catMaybes
+ [Just ("StreamName" .= _eemStreamName),
+ Just ("ShardLevelMetrics" .= _eemShardLevelMetrics)])
+
+instance ToPath EnableEnhancedMonitoring where
+ toPath = const "/"
+
+instance ToQuery EnableEnhancedMonitoring where
+ toQuery = const mempty
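A round-trip sketch of enabling a metric through the amazonka runtime, under the 1.4-era 'newEnv Region Credentials' signature (the region, stream name, and assumed 'IncomingBytes' constructor are illustrative):

    {-# LANGUAGE OverloadedStrings #-}
    import           Control.Lens        ((^.))
    import           Data.List.NonEmpty  (NonEmpty ((:|)))
    import           Network.AWS
    import           Network.AWS.Kinesis

    main :: IO ()
    main = do
      env <- newEnv Ireland Discover   -- region/credentials are illustrative
      rs  <- runResourceT . runAWS env $
               send (enableEnhancedMonitoring "my-stream" (IncomingBytes :| []))
      -- The response is the shared EnhancedMonitoringOutput shown above.
      print (rs ^. emoStreamName)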
diff --git a/gen/Network/AWS/Kinesis/GetRecords.hs b/gen/Network/AWS/Kinesis/GetRecords.hs
index 0c7b558..6cc45c6 100644
--- a/gen/Network/AWS/Kinesis/GetRecords.hs
+++ b/gen/Network/AWS/Kinesis/GetRecords.hs
@@ -18,7 +18,7 @@
-- Stability : auto-generated
-- Portability : non-portable (GHC extensions)
--
--- Gets data records from a shard.
+-- Gets data records from an Amazon Kinesis stream\'s shard.
--
-- Specify a shard iterator using the 'ShardIterator' parameter. The shard
-- iterator specifies the position in the shard from which you want to
@@ -27,11 +27,14 @@
-- < GetRecords> returns an empty list. Note that it might take multiple
-- calls to get to a portion of the shard that contains records.
--
--- You can scale by provisioning multiple shards. Your application should
--- have one thread per shard, each reading continuously from its stream. To
--- read from a stream continually, call < GetRecords> in a loop. Use
--- < GetShardIterator> to get the shard iterator to specify in the first
--- < GetRecords> call. < GetRecords> returns a new shard iterator in
+-- You can scale by provisioning multiple shards per stream while
+-- considering service limits (for more information, see
+-- <http://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html Streams Limits>
+-- in the /Amazon Kinesis Streams Developer Guide/). Your application
+-- should have one thread per shard, each reading continuously from its
+-- stream. To read from a stream continually, call < GetRecords> in a loop.
+-- Use < GetShardIterator> to get the shard iterator to specify in the
+-- first < GetRecords> call. < GetRecords> returns a new shard iterator in
-- 'NextShardIterator'. Specify the shard iterator returned in
-- 'NextShardIterator' in subsequent calls to < GetRecords>. Note that if
-- the shard has been closed, the shard iterator can\'t return more data
@@ -46,10 +49,10 @@
-- specify the maximum number of records that < GetRecords> can return.
-- Consider your average record size when determining this limit.
--
--- The size of the data returned by < GetRecords> will vary depending on
--- the utilization of the shard. The maximum size of data that
--- < GetRecords> can return is 10 MB. If a call returns this amount of
--- data, subsequent calls made within the next 5 seconds throw
+-- The size of the data returned by < GetRecords> varies depending on the
+-- utilization of the shard. The maximum size of data that < GetRecords>
+-- can return is 10 MB. If a call returns this amount of data, subsequent
+-- calls made within the next 5 seconds throw
-- 'ProvisionedThroughputExceededException'. If there is insufficient
-- provisioned throughput on the shard, subsequent calls made within the
-- next 1 second throw 'ProvisionedThroughputExceededException'. Note that
@@ -60,19 +63,20 @@
--
-- To detect whether the application is falling behind in processing, you
-- can use the 'MillisBehindLatest' response attribute. You can also
--- monitor the stream using CloudWatch metrics (see
--- <http://docs.aws.amazon.com/kinesis/latest/dev/monitoring.html Monitoring Amazon Kinesis>
--- in the /Amazon Kinesis Developer Guide/).
+-- monitor the stream using CloudWatch metrics and other mechanisms (see
+-- <http://docs.aws.amazon.com/kinesis/latest/dev/monitoring.html Monitoring>
+-- in the /Amazon Kinesis Streams Developer Guide/).
--
-- Each Amazon Kinesis record includes a value,
--- 'ApproximateArrivalTimestamp', that is set when an Amazon Kinesis stream
--- successfully receives and stores a record. This is commonly referred to
--- as a server-side timestamp, which is different than a client-side
--- timestamp, where the timestamp is set when a data producer creates or
--- sends the record to a stream. The timestamp has millisecond precision.
--- There are no guarantees about the timestamp accuracy, or that the
--- timestamp is always increasing. For example, records in a shard or
--- across a stream might have timestamps that are out of order.
+-- 'ApproximateArrivalTimestamp', that is set when a stream successfully
+-- receives and stores a record. This is commonly referred to as a
+-- server-side timestamp, whereas a client-side timestamp is set when a
+-- data producer creates or sends the record to a stream (a data producer
+-- is any data source putting data records into a stream, for example with
+-- < PutRecords>). The timestamp has millisecond precision. There are no
+-- guarantees about the timestamp accuracy, or that the timestamp is always
+-- increasing. For example, records in a shard or across a stream might
+-- have timestamps that are out of order.
module Network.AWS.Kinesis.GetRecords
(
-- * Creating a Request
@@ -149,6 +153,8 @@ instance AWSRequest GetRecords where
instance Hashable GetRecords
+instance NFData GetRecords
+
instance ToHeaders GetRecords where
toHeaders
= const
@@ -223,3 +229,5 @@ grrsResponseStatus = lens _grrsResponseStatus (\ s a -> s{_grrsResponseStatus =
-- | The data records retrieved from the shard.
grrsRecords :: Lens' GetRecordsResponse [Record]
grrsRecords = lens _grrsRecords (\ s a -> s{_grrsRecords = a}) . _Coerce;
+
+instance NFData GetRecordsResponse
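The read loop described above (seed with a shard iterator, then keep following 'NextShardIterator') looks roughly like this; 'getRecords' taking the iterator as its argument and the 'grrsNextShardIterator' lens are assumptions about the generated API, mirroring the wire fields named in the documentation:

    import Control.Lens            ((^.))
    import Control.Monad.IO.Class  (liftIO)
    import Data.Text               (Text)
    import Network.AWS
    import Network.AWS.Kinesis

    -- Poll a shard until it is closed and fully drained.  A production
    -- loop would also sleep between calls to respect the per-shard
    -- 5 transactions per second read limit.
    readLoop :: Text -> AWS ()
    readLoop iterator = do
      rs <- send (getRecords iterator)
      mapM_ (liftIO . print) (rs ^. grrsRecords)
      case rs ^. grrsNextShardIterator of
        Nothing   -> pure ()
        Just next -> readLoop next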
diff --git a/gen/Network/AWS/Kinesis/GetShardIterator.hs b/gen/Network/AWS/Kinesis/GetShardIterator.hs
index 48fedd5..ed36792 100644
--- a/gen/Network/AWS/Kinesis/GetShardIterator.hs
+++ b/gen/Network/AWS/Kinesis/GetShardIterator.hs
@@ -18,15 +18,15 @@
-- Stability : auto-generated
-- Portability : non-portable (GHC extensions)
--
--- Gets a shard iterator. A shard iterator expires five minutes after it is
--- returned to the requester.
+-- Gets an Amazon Kinesis shard iterator. A shard iterator expires five
+-- minutes after it is returned to the requester.
--
--- A shard iterator specifies the position in the shard from which to start
--- reading data records sequentially. A shard iterator specifies this
--- position using the sequence number of a data record in a shard. A
--- sequence number is the identifier associated with every record ingested
--- in the Amazon Kinesis stream. The sequence number is assigned when a
--- record is put into the stream.
+-- A shard iterator specifies the shard position from which to start
+-- reading data records sequentially. The position is specified using the
+-- sequence number of a data record in a shard. A sequence number is the
+-- identifier associated with every record ingested in the stream, and is
+-- assigned when a record is put into the stream. Each stream has one or
+-- more shards.
--
-- You must specify the shard iterator type. For example, you can set the
-- 'ShardIteratorType' parameter to read exactly from the position denoted
@@ -34,28 +34,29 @@
-- iterator type, or right after the sequence number by using the
-- 'AFTER_SEQUENCE_NUMBER' shard iterator type, using sequence numbers
-- returned by earlier calls to < PutRecord>, < PutRecords>, < GetRecords>,
--- or < DescribeStream>. You can specify the shard iterator type
--- 'TRIM_HORIZON' in the request to cause 'ShardIterator' to point to the
--- last untrimmed record in the shard in the system, which is the oldest
--- data record in the shard. Or you can point to just after the most recent
--- record in the shard, by using the shard iterator type 'LATEST', so that
--- you always read the most recent data in the shard.
---
--- When you repeatedly read from an Amazon Kinesis stream use a
--- < GetShardIterator> request to get the first shard iterator for use in
--- your first < GetRecords> request and then use the shard iterator
--- returned by the < GetRecords> request in 'NextShardIterator' for
--- subsequent reads. A new shard iterator is returned by every
--- < GetRecords> request in 'NextShardIterator', which you use in the
--- 'ShardIterator' parameter of the next < GetRecords> request.
+-- or < DescribeStream>. In the request, you can specify the shard iterator
+-- type 'AT_TIMESTAMP' to read records from an arbitrary point in time,
+-- 'TRIM_HORIZON' to cause 'ShardIterator' to point to the last untrimmed
+-- record in the shard in the system (the oldest data record in the shard),
+-- or 'LATEST' so that you always read the most recent data in the shard.
+--
+-- When you read repeatedly from a stream, use a < GetShardIterator>
+-- request to get the first shard iterator for use in your first
+-- < GetRecords> request and for subsequent reads use the shard iterator
+-- returned by the < GetRecords> request in 'NextShardIterator'. A new
+-- shard iterator is returned by every < GetRecords> request in
+-- 'NextShardIterator', which you use in the 'ShardIterator' parameter of
+-- the next < GetRecords> request.
--
-- If a < GetShardIterator> request is made too often, you receive a
-- 'ProvisionedThroughputExceededException'. For more information about
--- throughput limits, see < GetRecords>.
+-- throughput limits, see < GetRecords>, and
+-- <http://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html Streams Limits>
+-- in the /Amazon Kinesis Streams Developer Guide/.
--
--- If the shard is closed, the iterator can\'t return more data, and
--- < GetShardIterator> returns 'null' for its 'ShardIterator'. A shard can
--- be closed using < SplitShard> or < MergeShards>.
+-- If the shard is closed, < GetShardIterator> returns a valid iterator for
+-- the last sequence number of the shard. Note that a shard can be closed
+-- as a result of using < SplitShard> or < MergeShards>.
--
-- < GetShardIterator> has a limit of 5 transactions per second per account
-- per open shard.
@@ -66,6 +67,7 @@ module Network.AWS.Kinesis.GetShardIterator
, GetShardIterator
-- * Request Lenses
, gsiStartingSequenceNumber
+ , gsiTimestamp
, gsiStreamName
, gsiShardId
, gsiShardIteratorType
@@ -90,6 +92,7 @@ import Network.AWS.Response
-- /See:/ 'getShardIterator' smart constructor.
data GetShardIterator = GetShardIterator'
{ _gsiStartingSequenceNumber :: !(Maybe Text)
+ , _gsiTimestamp :: !(Maybe POSIX)
, _gsiStreamName :: !Text
, _gsiShardId :: !Text
, _gsiShardIteratorType :: !ShardIteratorType
@@ -101,6 +104,8 @@ data GetShardIterator = GetShardIterator'
--
-- * 'gsiStartingSequenceNumber'
--
+-- * 'gsiTimestamp'
+--
-- * 'gsiStreamName'
--
-- * 'gsiShardId'
@@ -114,33 +119,50 @@ getShardIterator
getShardIterator pStreamName_ pShardId_ pShardIteratorType_ =
GetShardIterator'
{ _gsiStartingSequenceNumber = Nothing
+ , _gsiTimestamp = Nothing
, _gsiStreamName = pStreamName_
, _gsiShardId = pShardId_
, _gsiShardIteratorType = pShardIteratorType_
}
-- | The sequence number of the data record in the shard from which to start
--- reading from.
+-- reading. Used with shard iterator type AT_SEQUENCE_NUMBER and
+-- AFTER_SEQUENCE_NUMBER.
gsiStartingSequenceNumber :: Lens' GetShardIterator (Maybe Text)
gsiStartingSequenceNumber = lens _gsiStartingSequenceNumber (\ s a -> s{_gsiStartingSequenceNumber = a});
--- | The name of the stream.
+-- | The timestamp of the data record from which to start reading. Used with
+-- shard iterator type AT_TIMESTAMP. A timestamp is the Unix epoch date
+-- with precision in milliseconds. For example,
+-- '2016-04-04T19:58:46.480-00:00' or '1459799926.480'. If a record with
+-- this exact timestamp does not exist, the iterator returned is for the
+-- next (later) record. If the timestamp is older than the current trim
+-- horizon, the iterator returned is for the oldest untrimmed data record
+-- (TRIM_HORIZON).
+gsiTimestamp :: Lens' GetShardIterator (Maybe UTCTime)
+gsiTimestamp = lens _gsiTimestamp (\ s a -> s{_gsiTimestamp = a}) . mapping _Time;
+
+-- | The name of the Amazon Kinesis stream.
gsiStreamName :: Lens' GetShardIterator Text
gsiStreamName = lens _gsiStreamName (\ s a -> s{_gsiStreamName = a});
--- | The shard ID of the shard to get the iterator for.
+-- | The shard ID of the Amazon Kinesis shard to get the iterator for.
gsiShardId :: Lens' GetShardIterator Text
gsiShardId = lens _gsiShardId (\ s a -> s{_gsiShardId = a});
-- | Determines how the shard iterator is used to start reading data records
-- from the shard.
--
--- The following are the valid shard iterator types:
+-- The following are the valid Amazon Kinesis shard iterator types:
--
--- - AT_SEQUENCE_NUMBER - Start reading exactly from the position denoted
--- by a specific sequence number.
+-- - AT_SEQUENCE_NUMBER - Start reading from the position denoted by a
+-- specific sequence number, provided in the value
+-- 'StartingSequenceNumber'.
-- - AFTER_SEQUENCE_NUMBER - Start reading right after the position
--- denoted by a specific sequence number.
+-- denoted by a specific sequence number, provided in the value
+-- 'StartingSequenceNumber'.
+-- - AT_TIMESTAMP - Start reading from the position denoted by a specific
+-- timestamp, provided in the value 'Timestamp'.
-- - TRIM_HORIZON - Start reading at the last untrimmed record in the
-- shard in the system, which is the oldest data record in the shard.
-- - LATEST - Start reading just after the most recent record in the
@@ -159,6 +181,8 @@ instance AWSRequest GetShardIterator where
instance Hashable GetShardIterator
+instance NFData GetShardIterator
+
instance ToHeaders GetShardIterator where
toHeaders
= const
@@ -174,6 +198,7 @@ instance ToJSON GetShardIterator where
(catMaybes
[("StartingSequenceNumber" .=) <$>
_gsiStartingSequenceNumber,
+ ("Timestamp" .=) <$> _gsiTimestamp,
Just ("StreamName" .= _gsiStreamName),
Just ("ShardId" .= _gsiShardId),
Just ("ShardIteratorType" .= _gsiShardIteratorType)])
@@ -217,3 +242,5 @@ gsirsShardIterator = lens _gsirsShardIterator (\ s a -> s{_gsirsShardIterator =
-- | The response status code.
gsirsResponseStatus :: Lens' GetShardIteratorResponse Int
gsirsResponseStatus = lens _gsirsResponseStatus (\ s a -> s{_gsirsResponseStatus = a});
+
+instance NFData GetShardIteratorResponse
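With the new 'gsiTimestamp' lens, an 'AT_TIMESTAMP' iterator request can be sketched as follows ('AtTimestamp' is assumed to be the 'ShardIteratorType' constructor generated for that value; the stream and shard IDs are illustrative):

    {-# LANGUAGE OverloadedStrings #-}
    import Control.Lens        ((&), (?~))
    import Data.Time           (UTCTime)
    import Network.AWS.Kinesis

    -- Ask for an iterator positioned at (or just after) a point in time.
    iteratorAt :: UTCTime -> GetShardIterator
    iteratorAt t =
      getShardIterator "my-stream" "shardId-000000000000" AtTimestamp
        & gsiTimestamp ?~ t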
diff --git a/gen/Network/AWS/Kinesis/IncreaseStreamRetentionPeriod.hs b/gen/Network/AWS/Kinesis/IncreaseStreamRetentionPeriod.hs
index 605ed60..da03073 100644
--- a/gen/Network/AWS/Kinesis/IncreaseStreamRetentionPeriod.hs
+++ b/gen/Network/AWS/Kinesis/IncreaseStreamRetentionPeriod.hs
@@ -18,15 +18,16 @@
-- Stability : auto-generated
-- Portability : non-portable (GHC extensions)
--
--- Increases the stream\'s retention period, which is the length of time
--- data records are accessible after they are added to the stream. The
--- maximum value of a stream’s retention period is 168 hours (7 days).
+-- Increases the Amazon Kinesis stream\'s retention period, which is the
+-- length of time data records are accessible after they are added to the
+-- stream. The maximum value of a stream\'s retention period is 168 hours
+-- (7 days).
--
-- Upon choosing a longer stream retention period, this operation will
-- increase the time period records are accessible that have not yet
-- expired. However, it will not make previous data that has expired (older
--- than the stream’s previous retention period) accessible after the
--- operation has been called. For example, if a stream’s retention period
+-- than the stream\'s previous retention period) accessible after the
+-- operation has been called. For example, if a stream\'s retention period
-- is set to 24 hours and is increased to 168 hours, any data that is older
-- than 24 hours will remain inaccessible to consumer applications.
module Network.AWS.Kinesis.IncreaseStreamRetentionPeriod
@@ -94,6 +95,8 @@ instance AWSRequest IncreaseStreamRetentionPeriod
instance Hashable IncreaseStreamRetentionPeriod
+instance NFData IncreaseStreamRetentionPeriod
+
instance ToHeaders IncreaseStreamRetentionPeriod
where
toHeaders
@@ -130,3 +133,5 @@ data IncreaseStreamRetentionPeriodResponse =
increaseStreamRetentionPeriodResponse
:: IncreaseStreamRetentionPeriodResponse
increaseStreamRetentionPeriodResponse = IncreaseStreamRetentionPeriodResponse'
+
+instance NFData IncreaseStreamRetentionPeriodResponse
diff --git a/gen/Network/AWS/Kinesis/ListStreams.hs b/gen/Network/AWS/Kinesis/ListStreams.hs
index c3fe935..fd78831 100644
--- a/gen/Network/AWS/Kinesis/ListStreams.hs
+++ b/gen/Network/AWS/Kinesis/ListStreams.hs
@@ -18,7 +18,7 @@
-- Stability : auto-generated
-- Portability : non-portable (GHC extensions)
--
--- Lists your streams.
+-- Lists your Amazon Kinesis streams.
--
-- The number of streams may be too large to return from a single call to
-- 'ListStreams'. You can limit the number of returned streams using the
@@ -116,6 +116,8 @@ instance AWSRequest ListStreams where
instance Hashable ListStreams
+instance NFData ListStreams
+
instance ToHeaders ListStreams where
toHeaders
= const
@@ -180,3 +182,5 @@ lsrsStreamNames = lens _lsrsStreamNames (\ s a -> s{_lsrsStreamNames = a}) . _Co
-- | If set to 'true', there are more streams available to list.
lsrsHasMoreStreams :: Lens' ListStreamsResponse Bool
lsrsHasMoreStreams = lens _lsrsHasMoreStreams (\ s a -> s{_lsrsHasMoreStreams = a});
+
+instance NFData ListStreamsResponse
diff --git a/gen/Network/AWS/Kinesis/ListTagsForStream.hs b/gen/Network/AWS/Kinesis/ListTagsForStream.hs
index 60c0c85..122888d 100644
--- a/gen/Network/AWS/Kinesis/ListTagsForStream.hs
+++ b/gen/Network/AWS/Kinesis/ListTagsForStream.hs
@@ -102,6 +102,8 @@ instance AWSRequest ListTagsForStream where
instance Hashable ListTagsForStream
+instance NFData ListTagsForStream
+
instance ToHeaders ListTagsForStream where
toHeaders
= const
@@ -168,3 +170,5 @@ ltfsrsTags = lens _ltfsrsTags (\ s a -> s{_ltfsrsTags = a}) . _Coerce;
-- set 'ExclusiveStartTagKey' to the key of the last tag returned.
ltfsrsHasMoreTags :: Lens' ListTagsForStreamResponse Bool
ltfsrsHasMoreTags = lens _ltfsrsHasMoreTags (\ s a -> s{_ltfsrsHasMoreTags = a});
+
+instance NFData ListTagsForStreamResponse
diff --git a/gen/Network/AWS/Kinesis/MergeShards.hs b/gen/Network/AWS/Kinesis/MergeShards.hs
index d02c0fd..abea0ab 100644
--- a/gen/Network/AWS/Kinesis/MergeShards.hs
+++ b/gen/Network/AWS/Kinesis/MergeShards.hs
@@ -18,22 +18,22 @@
-- Stability : auto-generated
-- Portability : non-portable (GHC extensions)
--
--- Merges two adjacent shards in a stream and combines them into a single
--- shard to reduce the stream\'s capacity to ingest and transport data. Two
--- shards are considered adjacent if the union of the hash key ranges for
--- the two shards form a contiguous set with no gaps. For example, if you
--- have two shards, one with a hash key range of 276...381 and the other
--- with a hash key range of 382...454, then you could merge these two
--- shards into a single shard that would have a hash key range of
--- 276...454. After the merge, the single child shard receives data for all
--- hash key values covered by the two parent shards.
+-- Merges two adjacent shards in an Amazon Kinesis stream and combines them
+-- into a single shard to reduce the stream\'s capacity to ingest and
+-- transport data. Two shards are considered adjacent if the union of the
+-- hash key ranges for the two shards form a contiguous set with no gaps.
+-- For example, if you have two shards, one with a hash key range of
+-- 276...381 and the other with a hash key range of 382...454, then you
+-- could merge these two shards into a single shard that would have a hash
+-- key range of 276...454. After the merge, the single child shard receives
+-- data for all hash key values covered by the two parent shards.
--
-- 'MergeShards' is called when there is a need to reduce the overall
-- capacity of a stream because of excess capacity that is not being used.
-- You must specify the shard to be merged and the adjacent shard for a
-- stream. For more information about merging shards, see
-- <http://docs.aws.amazon.com/kinesis/latest/dev/kinesis-using-sdk-java-resharding-merge.html Merge Two Shards>
--- in the /Amazon Kinesis Developer Guide/.
+-- in the /Amazon Kinesis Streams Developer Guide/.
--
-- If the stream is in the 'ACTIVE' state, you can call 'MergeShards'. If a
-- stream is in the 'CREATING', 'UPDATING', or 'DELETING' state,
@@ -130,6 +130,8 @@ instance AWSRequest MergeShards where
instance Hashable MergeShards
+instance NFData MergeShards
+
instance ToHeaders MergeShards where
toHeaders
= const
@@ -164,3 +166,5 @@ data MergeShardsResponse =
mergeShardsResponse
:: MergeShardsResponse
mergeShardsResponse = MergeShardsResponse'
+
+instance NFData MergeShardsResponse
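The adjacency rule above (the union of the two hash key ranges is contiguous with no gaps) reduces to one range ending immediately before the other begins. As a sketch over the ranges taken as Integers:

    -- 276...381 and 382...454 are adjacent; 276...381 and 390...454 are not.
    adjacent :: (Integer, Integer) -> (Integer, Integer) -> Bool
    adjacent (_, endA) (startB, _) = endA + 1 == startB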
diff --git a/gen/Network/AWS/Kinesis/PutRecord.hs b/gen/Network/AWS/Kinesis/PutRecord.hs
index fc0564e..08108d5 100644
--- a/gen/Network/AWS/Kinesis/PutRecord.hs
+++ b/gen/Network/AWS/Kinesis/PutRecord.hs
@@ -18,11 +18,11 @@
-- Stability : auto-generated
-- Portability : non-portable (GHC extensions)
--
--- Writes a single data record from a producer into an Amazon Kinesis
--- stream. Call 'PutRecord' to send data from the producer into the Amazon
--- Kinesis stream for real-time ingestion and subsequent processing, one
--- record at a time. Each shard can support writes up to 1,000 records per
--- second, up to a maximum data write total of 1 MB per second.
+-- Writes a single data record into an Amazon Kinesis stream. Call
+-- 'PutRecord' to send data into the stream for real-time ingestion and
+-- subsequent processing, one record at a time. Each shard can support
+-- writes up to 1,000 records per second, up to a maximum data write total
+-- of 1 MB per second.
--
-- You must specify the name of the stream that captures, stores, and
-- transports the data; a partition key; and the data blob itself.
@@ -31,7 +31,7 @@
-- file, geographic\/location data, website clickstream data, and so on.
--
-- The partition key is used by Amazon Kinesis to distribute data across
--- shards. Amazon Kinesis segregates the data records that belong to a data
+-- shards. Amazon Kinesis segregates the data records that belong to a
-- stream into multiple shards, using the partition key associated with
-- each data record to determine which shard a given data record belongs
-- to.
@@ -43,25 +43,24 @@
-- the partition key to determine the shard by explicitly specifying a hash
-- value using the 'ExplicitHashKey' parameter. For more information, see
-- <http://docs.aws.amazon.com/kinesis/latest/dev/developing-producers-with-sdk.html#kinesis-using-sdk-java-add-data-to-stream Adding Data to a Stream>
--- in the /Amazon Kinesis Developer Guide/.
+-- in the /Amazon Kinesis Streams Developer Guide/.
--
-- 'PutRecord' returns the shard ID of where the data record was placed and
-- the sequence number that was assigned to the data record.
--
--- Sequence numbers generally increase over time. To guarantee strictly
--- increasing ordering, use the 'SequenceNumberForOrdering' parameter. For
--- more information, see
+-- Sequence numbers increase over time and are specific to a shard within a
+-- stream, not across all shards within a stream. To guarantee strictly
+-- increasing ordering, write serially to a shard and use the
+-- 'SequenceNumberForOrdering' parameter. For more information, see
-- <http://docs.aws.amazon.com/kinesis/latest/dev/developing-producers-with-sdk.html#kinesis-using-sdk-java-add-data-to-stream Adding Data to a Stream>
--- in the /Amazon Kinesis Developer Guide/.
+-- in the /Amazon Kinesis Streams Developer Guide/.
--
-- If a 'PutRecord' request cannot be processed because of insufficient
-- provisioned throughput on the shard involved in the request, 'PutRecord'
-- throws 'ProvisionedThroughputExceededException'.
--
--- By default, data records are accessible for only 24 hours from the time
--- that they are added to an Amazon Kinesis stream. This retention period
--- can be modified using the < DecreaseStreamRetentionPeriod> and
--- < IncreaseStreamRetentionPeriod> operations.
+-- Data records are accessible for only 24 hours from the time that they
+-- are added to a stream.
module Network.AWS.Kinesis.PutRecord
(
-- * Creating a Request
@@ -166,8 +165,8 @@ prData = lens _prData (\ s a -> s{_prData = a}) . _Base64;
-- specific shard. Specifically, an MD5 hash function is used to map
-- partition keys to 128-bit integer values and to map associated data
-- records to shards. As a result of this hashing mechanism, all data
--- records with the same partition key will map to the same shard within
--- the stream.
+-- records with the same partition key map to the same shard within the
+-- stream.
prPartitionKey :: Lens' PutRecord Text
prPartitionKey = lens _prPartitionKey (\ s a -> s{_prPartitionKey = a});
@@ -183,6 +182,8 @@ instance AWSRequest PutRecord where
instance Hashable PutRecord
+instance NFData PutRecord
+
instance ToHeaders PutRecord where
toHeaders
= const
@@ -253,3 +254,5 @@ prrsShardId = lens _prrsShardId (\ s a -> s{_prrsShardId = a});
-- put into the stream.
prrsSequenceNumber :: Lens' PutRecordResponse Text
prrsSequenceNumber = lens _prrsSequenceNumber (\ s a -> s{_prrsSequenceNumber = a});
+
+instance NFData PutRecordResponse
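The routing rule described above (MD5 maps the partition key to a 128-bit integer, which selects the shard whose hash key range contains it) can be sketched with the cryptonite package; the helper name is illustrative:

    import           Crypto.Hash     (Digest, MD5, hash)
    import qualified Data.ByteArray  as BA
    import           Data.ByteString (ByteString)

    -- MD5 the partition key and read the 16 digest bytes as one
    -- big-endian 128-bit integer; the record lands on the shard whose
    -- HashKeyRange contains this value.
    partitionKeyHash :: ByteString -> Integer
    partitionKeyHash key =
      foldl (\acc w -> acc * 256 + fromIntegral w) 0
            (BA.unpack (hash key :: Digest MD5))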
diff --git a/gen/Network/AWS/Kinesis/PutRecords.hs b/gen/Network/AWS/Kinesis/PutRecords.hs
index 3616154..3c22093 100644
--- a/gen/Network/AWS/Kinesis/PutRecords.hs
+++ b/gen/Network/AWS/Kinesis/PutRecords.hs
@@ -18,10 +18,9 @@
-- Stability : auto-generated
-- Portability : non-portable (GHC extensions)
--
--- Writes multiple data records from a producer into an Amazon Kinesis
--- stream in a single call (also referred to as a 'PutRecords' request).
--- Use this operation to send data from a data producer into the Amazon
--- Kinesis stream for data ingestion and processing.
+-- Writes multiple data records into an Amazon Kinesis stream in a single
+-- call (also referred to as a 'PutRecords' request). Use this operation to
+-- send data into the stream for data ingestion and processing.
--
-- Each 'PutRecords' request can support up to 500 records. Each record in
-- the request can be as large as 1 MB, up to a limit of 5 MB for the
@@ -44,14 +43,14 @@
-- hashing mechanism, all data records with the same partition key map to
-- the same shard within the stream. For more information, see
-- <http://docs.aws.amazon.com/kinesis/latest/dev/developing-producers-with-sdk.html#kinesis-using-sdk-java-add-data-to-stream Adding Data to a Stream>
--- in the /Amazon Kinesis Developer Guide/.
+-- in the /Amazon Kinesis Streams Developer Guide/.
--
-- Each record in the 'Records' array may include an optional parameter,
-- 'ExplicitHashKey', which overrides the partition key to shard mapping.
-- This parameter allows a data producer to determine explicitly the shard
-- where the record is stored. For more information, see
-- <http://docs.aws.amazon.com/kinesis/latest/dev/developing-producers-with-sdk.html#kinesis-using-sdk-java-putrecords Adding Multiple Records with PutRecords>
--- in the /Amazon Kinesis Developer Guide/.
+-- in the /Amazon Kinesis Streams Developer Guide/.
--
-- The 'PutRecords' response includes an array of response 'Records'. Each
-- record in the response array directly correlates with a record in the
@@ -78,7 +77,7 @@
-- throttled. For more information about partially successful responses,
-- see
-- <http://docs.aws.amazon.com/kinesis/latest/dev/kinesis-using-sdk-java-add-data-to-stream.html#kinesis-using-sdk-java-putrecords Adding Multiple Records with PutRecords>
--- in the /Amazon Kinesis Developer Guide/.
+-- in the /Amazon Kinesis Streams Developer Guide/.
--
-- By default, data records are accessible for only 24 hours from the time
-- that they are added to an Amazon Kinesis stream. This retention period
@@ -154,6 +153,8 @@ instance AWSRequest PutRecords where
instance Hashable PutRecords
+instance NFData PutRecords
+
instance ToHeaders PutRecords where
toHeaders
= const
@@ -216,9 +217,10 @@ prsResponseStatus = lens _prsResponseStatus (\ s a -> s{_prsResponseStatus = a})
-- | An array of successfully and unsuccessfully processed record results,
-- correlated with the request by natural ordering. A record that is
--- successfully added to your Amazon Kinesis stream includes
--- 'SequenceNumber' and 'ShardId' in the result. A record that fails to be
--- added to your Amazon Kinesis stream includes 'ErrorCode' and
--- 'ErrorMessage' in the result.
+-- successfully added to a stream includes 'SequenceNumber' and 'ShardId'
+-- in the result. A record that fails to be added to a stream includes
+-- 'ErrorCode' and 'ErrorMessage' in the result.
prsRecords :: Lens' PutRecordsResponse (NonEmpty PutRecordsResultEntry)
prsRecords = lens _prsRecords (\ s a -> s{_prsRecords = a}) . _List1;
+
+instance NFData PutRecordsResponse
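Since the response entries correlate with the request entries by position, handling a partially successful call means collecting the positions that carry an 'ErrorCode' and resending just those records. A sketch ('prreErrorCode' is an assumed lens name on 'PutRecordsResultEntry'):

    import           Control.Lens        ((^.))
    import           Data.List.NonEmpty  (NonEmpty)
    import qualified Data.List.NonEmpty  as NE
    import           Data.Maybe          (isJust)
    import           Network.AWS.Kinesis

    -- Positions of entries that failed (throttled or errored) and
    -- should be retried with backoff.
    failedIndexes :: NonEmpty PutRecordsResultEntry -> [Int]
    failedIndexes rs =
      [ i | (i, r) <- zip [0 ..] (NE.toList rs)
          , isJust (r ^. prreErrorCode) ]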
diff --git a/gen/Network/AWS/Kinesis/RemoveTagsFromStream.hs b/gen/Network/AWS/Kinesis/RemoveTagsFromStream.hs
index 7cb2c39..f77654a 100644
--- a/gen/Network/AWS/Kinesis/RemoveTagsFromStream.hs
+++ b/gen/Network/AWS/Kinesis/RemoveTagsFromStream.hs
@@ -18,7 +18,9 @@
-- Stability : auto-generated
-- Portability : non-portable (GHC extensions)
--
--- Deletes tags from the specified Amazon Kinesis stream.
+-- Removes tags from the specified Amazon Kinesis stream. Removed tags are
+-- deleted and cannot be recovered after this operation successfully
+-- completes.
--
-- If you specify a tag that does not exist, it is ignored.
module Network.AWS.Kinesis.RemoveTagsFromStream
@@ -83,6 +85,8 @@ instance AWSRequest RemoveTagsFromStream where
instance Hashable RemoveTagsFromStream
+instance NFData RemoveTagsFromStream
+
instance ToHeaders RemoveTagsFromStream where
toHeaders
= const
@@ -116,3 +120,5 @@ data RemoveTagsFromStreamResponse =
removeTagsFromStreamResponse
:: RemoveTagsFromStreamResponse
removeTagsFromStreamResponse = RemoveTagsFromStreamResponse'
+
+instance NFData RemoveTagsFromStreamResponse
diff --git a/gen/Network/AWS/Kinesis/SplitShard.hs b/gen/Network/AWS/Kinesis/SplitShard.hs
index 86140b4..aa3e29b 100644
--- a/gen/Network/AWS/Kinesis/SplitShard.hs
+++ b/gen/Network/AWS/Kinesis/SplitShard.hs
@@ -18,17 +18,18 @@
-- Stability : auto-generated
-- Portability : non-portable (GHC extensions)
--
--- Splits a shard into two new shards in the stream, to increase the
--- stream\'s capacity to ingest and transport data. 'SplitShard' is called
--- when there is a need to increase the overall capacity of stream because
--- of an expected increase in the volume of data records being ingested.
+-- Splits a shard into two new shards in the Amazon Kinesis stream to
+-- increase the stream\'s capacity to ingest and transport data.
+-- 'SplitShard' is called when there is a need to increase the overall
+-- capacity of a stream because of an expected increase in the volume of
+-- data records being ingested.
--
-- You can also use 'SplitShard' when a shard appears to be approaching its
--- maximum utilization, for example, when the set of producers sending data
--- into the specific shard are suddenly sending more than previously
--- anticipated. You can also call 'SplitShard' to increase stream capacity,
--- so that more Amazon Kinesis applications can simultaneously read data
--- from the stream for real-time processing.
+-- maximum utilization; for example, the producers sending data into the
+-- specific shard are suddenly sending more than previously anticipated.
+-- You can also call 'SplitShard' to increase stream capacity, so that more
+-- Amazon Kinesis applications can simultaneously read data from the stream
+-- for real-time processing.
--
-- You must specify the shard to be split and the new hash key, which is
-- the position in the shard where the shard gets split in two. In many
@@ -36,7 +37,7 @@
-- ending hash key, but it can be any hash key value in the range being
-- mapped into the shard. For more information about splitting shards, see
-- <http://docs.aws.amazon.com/kinesis/latest/dev/kinesis-using-sdk-java-resharding-split.html Split a Shard>
--- in the /Amazon Kinesis Developer Guide/.
+-- in the /Amazon Kinesis Streams Developer Guide/.
--
-- You can use < DescribeStream> to determine the shard ID and hash key
-- values for the 'ShardToSplit' and 'NewStartingHashKey' parameters that
@@ -58,13 +59,14 @@
-- authorized for your account, you receive a 'LimitExceededException'.
--
-- For the default shard limit for an AWS account, see
--- <http://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html Amazon Kinesis Limits>.
--- If you need to increase this limit,
+-- <http://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html Streams Limits>
+-- in the /Amazon Kinesis Streams Developer Guide/. If you need to increase
+-- this limit,
-- <http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html contact AWS Support>.
--
--- If you try to operate on too many streams in parallel using
--- < CreateStream>, < DeleteStream>, < MergeShards> or < SplitShard>, you
--- receive a 'LimitExceededException'.
+-- If you try to operate on too many streams simultaneously using
+-- < CreateStream>, < DeleteStream>, < MergeShards>, and\/or < SplitShard>,
+-- you receive a 'LimitExceededException'.
--
-- 'SplitShard' has a limit of 5 transactions per second per account.
module Network.AWS.Kinesis.SplitShard
@@ -145,6 +147,8 @@ instance AWSRequest SplitShard where
instance Hashable SplitShard
+instance NFData SplitShard
+
instance ToHeaders SplitShard where
toHeaders
= const
@@ -179,3 +183,5 @@ data SplitShardResponse =
splitShardResponse
:: SplitShardResponse
splitShardResponse = SplitShardResponse'
+
+instance NFData SplitShardResponse
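
The docs above note that the new starting hash key is often the midpoint of the shard's hash key range. A hedged sketch of computing that midpoint and issuing the request; the three-argument splitShard constructor (stream name, shard to split, new starting hash key) mirrors the API's required fields but is not shown in this diff:

{-# LANGUAGE OverloadedStrings #-}

import Data.Text (Text, pack)
import Network.AWS            -- Env, runAWS, runResourceT, send
import Network.AWS.Kinesis

-- Midpoint of a shard's hash key range; the full Kinesis hash key
-- range is [0, 2^128 - 1].
midpointHashKey :: Integer -> Integer -> Text
midpointHashKey startKey endKey = pack (show ((startKey + endKey) `div` 2))

splitAtMidpoint :: Env -> IO SplitShardResponse
splitAtMidpoint env =
  runResourceT . runAWS env $
    send (splitShard "my-stream" "shardId-000000000000"   -- assumed arity
            (midpointHashKey 0 (2 ^ 128 - 1)))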
diff --git a/gen/Network/AWS/Kinesis/Types.hs b/gen/Network/AWS/Kinesis/Types.hs
index e296c65..9d95f45 100644
--- a/gen/Network/AWS/Kinesis/Types.hs
+++ b/gen/Network/AWS/Kinesis/Types.hs
@@ -23,12 +23,27 @@ module Network.AWS.Kinesis.Types
, _LimitExceededException
, _ResourceInUseException
+ -- * MetricsName
+ , MetricsName (..)
+
-- * ShardIteratorType
, ShardIteratorType (..)
-- * StreamStatus
, StreamStatus (..)
+ -- * EnhancedMetrics
+ , EnhancedMetrics
+ , enhancedMetrics
+ , emShardLevelMetrics
+
+ -- * EnhancedMonitoringOutput
+ , EnhancedMonitoringOutput
+ , enhancedMonitoringOutput
+ , emoDesiredShardLevelMetrics
+ , emoCurrentShardLevelMetrics
+ , emoStreamName
+
-- * HashKeyRange
, HashKeyRange
, hashKeyRange
@@ -82,6 +97,7 @@ module Network.AWS.Kinesis.Types
, sdShards
, sdHasMoreShards
, sdRetentionPeriodHours
+ , sdEnhancedMonitoring
-- * Tag
, Tag
@@ -123,6 +139,8 @@ kinesis =
| has (hasCode "ThrottlingException" . hasStatus 400) e =
Just "throttling_exception"
| has (hasCode "Throttling" . hasStatus 400) e = Just "throttling"
+ | has (hasStatus 504) e = Just "gateway_timeout"
+ | has (hasStatus 502) e = Just "bad_gateway"
| has (hasStatus 503) e = Just "service_unavailable"
| has (hasStatus 500) e = Just "general_server_error"
| has (hasStatus 509) e = Just "limit_exceeded"
@@ -137,9 +155,11 @@ _ExpiredIteratorException = _ServiceError . hasCode "ExpiredIteratorException"
_InvalidArgumentException :: AsError a => Getting (First ServiceError) a ServiceError
_InvalidArgumentException = _ServiceError . hasCode "InvalidArgumentException"
--- | The request rate is too high, or the requested data is too large for the
--- available throughput. Reduce the frequency or size of your requests. For
--- more information, see
+-- | The request rate for the stream is too high, or the requested data is
+-- too large for the available throughput. Reduce the frequency or size of
+-- your requests. For more information, see
+-- <http://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html Streams Limits>
+-- in the /Amazon Kinesis Streams Developer Guide/, and
-- <http://docs.aws.amazon.com/general/latest/gr/api-retries.html Error Retries and Exponential Backoff in AWS>
-- in the /AWS General Reference/.
_ProvisionedThroughputExceededException :: AsError a => Getting (First ServiceError) a ServiceError
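
Since amazonka's error prisms match on SomeException, the prism above composes with lens-style exception handling, letting a caller detect throughput errors and back off as the docs advise. A hedged sketch (putRecord's three-argument form is an assumption here; its module lives elsewhere in this diff):

{-# LANGUAGE OverloadedStrings #-}

import Control.Exception.Lens (trying)
import Network.AWS            -- Env, runAWS, runResourceT, send
import Network.AWS.Kinesis

-- Left means the stream's throughput was exceeded; reduce the frequency
-- or size of requests before retrying, per the guidance above.
putWithBackoffHint :: Env -> IO (Either ServiceError PutRecordResponse)
putWithBackoffHint env =
  trying _ProvisionedThroughputExceededException $
    runResourceT . runAWS env $
      send (putRecord "my-stream" "payload" "partition-key")  -- assumed arity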
diff --git a/gen/Network/AWS/Kinesis/Types/Product.hs b/gen/Network/AWS/Kinesis/Types/Product.hs
index 9e5be9c..89575ce 100644
--- a/gen/Network/AWS/Kinesis/Types/Product.hs
+++ b/gen/Network/AWS/Kinesis/Types/Product.hs
@@ -21,6 +21,110 @@ import Network.AWS.Kinesis.Types.Sum
import Network.AWS.Lens
import Network.AWS.Prelude
+-- | Represents enhanced metrics types.
+--
+-- /See:/ 'enhancedMetrics' smart constructor.
+newtype EnhancedMetrics = EnhancedMetrics'
+ { _emShardLevelMetrics :: Maybe (List1 MetricsName)
+ } deriving (Eq,Read,Show,Data,Typeable,Generic)
+
+-- | Creates a value of 'EnhancedMetrics' with the minimum fields required to make a request.
+--
+-- Use one of the following lenses to modify other fields as desired:
+--
+-- * 'emShardLevelMetrics'
+enhancedMetrics
+ :: EnhancedMetrics
+enhancedMetrics =
+ EnhancedMetrics'
+ { _emShardLevelMetrics = Nothing
+ }
+
+-- | List of shard-level metrics.
+--
+-- The following are the valid shard-level metrics. The value \"'ALL'\"
+-- enhances every metric.
+--
+-- - 'IncomingBytes'
+-- - 'IncomingRecords'
+-- - 'OutgoingBytes'
+-- - 'OutgoingRecords'
+-- - 'WriteProvisionedThroughputExceeded'
+-- - 'ReadProvisionedThroughputExceeded'
+-- - 'IteratorAgeMilliseconds'
+-- - 'ALL'
+--
+-- For more information, see
+-- <http://docs.aws.amazon.com/kinesis/latest/dev/monitoring-with-cloudwatch.html Monitoring the Amazon Kinesis Streams Service with Amazon CloudWatch>
+-- in the /Amazon Kinesis Streams Developer Guide/.
+emShardLevelMetrics :: Lens' EnhancedMetrics (Maybe (NonEmpty MetricsName))
+emShardLevelMetrics = lens _emShardLevelMetrics (\ s a -> s{_emShardLevelMetrics = a}) . mapping _List1;
+
+instance FromJSON EnhancedMetrics where
+ parseJSON
+ = withObject "EnhancedMetrics"
+ (\ x ->
+ EnhancedMetrics' <$> (x .:? "ShardLevelMetrics"))
+
+instance Hashable EnhancedMetrics
+
+instance NFData EnhancedMetrics
+
+-- | Represents the output for < EnableEnhancedMonitoring> and
+-- < DisableEnhancedMonitoring>.
+--
+-- /See:/ 'enhancedMonitoringOutput' smart constructor.
+data EnhancedMonitoringOutput = EnhancedMonitoringOutput'
+ { _emoDesiredShardLevelMetrics :: !(Maybe (List1 MetricsName))
+ , _emoCurrentShardLevelMetrics :: !(Maybe (List1 MetricsName))
+ , _emoStreamName :: !(Maybe Text)
+ } deriving (Eq,Read,Show,Data,Typeable,Generic)
+
+-- | Creates a value of 'EnhancedMonitoringOutput' with the minimum fields required to make a request.
+--
+-- Use one of the following lenses to modify other fields as desired:
+--
+-- * 'emoDesiredShardLevelMetrics'
+--
+-- * 'emoCurrentShardLevelMetrics'
+--
+-- * 'emoStreamName'
+enhancedMonitoringOutput
+ :: EnhancedMonitoringOutput
+enhancedMonitoringOutput =
+ EnhancedMonitoringOutput'
+ { _emoDesiredShardLevelMetrics = Nothing
+ , _emoCurrentShardLevelMetrics = Nothing
+ , _emoStreamName = Nothing
+ }
+
+-- | Represents the list of all the metrics that would be in the enhanced
+-- state after the operation.
+emoDesiredShardLevelMetrics :: Lens' EnhancedMonitoringOutput (Maybe (NonEmpty MetricsName))
+emoDesiredShardLevelMetrics = lens _emoDesiredShardLevelMetrics (\ s a -> s{_emoDesiredShardLevelMetrics = a}) . mapping _List1;
+
+-- | Represents the current state of the metrics that are in the enhanced
+-- state before the operation.
+emoCurrentShardLevelMetrics :: Lens' EnhancedMonitoringOutput (Maybe (NonEmpty MetricsName))
+emoCurrentShardLevelMetrics = lens _emoCurrentShardLevelMetrics (\ s a -> s{_emoCurrentShardLevelMetrics = a}) . mapping _List1;
+
+-- | The name of the Amazon Kinesis stream.
+emoStreamName :: Lens' EnhancedMonitoringOutput (Maybe Text)
+emoStreamName = lens _emoStreamName (\ s a -> s{_emoStreamName = a});
+
+instance FromJSON EnhancedMonitoringOutput where
+ parseJSON
+ = withObject "EnhancedMonitoringOutput"
+ (\ x ->
+ EnhancedMonitoringOutput' <$>
+ (x .:? "DesiredShardLevelMetrics") <*>
+ (x .:? "CurrentShardLevelMetrics")
+ <*> (x .:? "StreamName"))
+
+instance Hashable EnhancedMonitoringOutput
+
+instance NFData EnhancedMonitoringOutput
+
-- | The range of possible hash key values for the shard, which is a set of
-- ordered contiguous positive integers.
--
@@ -64,6 +168,8 @@ instance FromJSON HashKeyRange where
instance Hashable HashKeyRange
+instance NFData HashKeyRange
+
-- | Represents the output for 'PutRecords'.
--
-- /See:/ 'putRecordsRequestEntry' smart constructor.
@@ -125,6 +231,8 @@ prrePartitionKey = lens _prrePartitionKey (\ s a -> s{_prrePartitionKey = a});
instance Hashable PutRecordsRequestEntry
+instance NFData PutRecordsRequestEntry
+
instance ToJSON PutRecordsRequestEntry where
toJSON PutRecordsRequestEntry'{..}
= object
@@ -134,10 +242,10 @@ instance ToJSON PutRecordsRequestEntry where
Just ("PartitionKey" .= _prrePartitionKey)])
-- | Represents the result of an individual record from a 'PutRecords'
--- request. A record that is successfully added to your Amazon Kinesis
--- stream includes SequenceNumber and ShardId in the result. A record that
--- fails to be added to your Amazon Kinesis stream includes ErrorCode and
--- ErrorMessage in the result.
+-- request. A record that is successfully added to a stream includes
+-- 'SequenceNumber' and 'ShardId' in the result. A record that fails to be
+-- added to the stream includes 'ErrorCode' and 'ErrorMessage' in the
+-- result.
--
-- /See:/ 'putRecordsResultEntry' smart constructor.
data PutRecordsResultEntry = PutRecordsResultEntry'
@@ -200,6 +308,8 @@ instance FromJSON PutRecordsResultEntry where
instance Hashable PutRecordsResultEntry
+instance NFData PutRecordsResultEntry
+
-- | The unit of data of the Amazon Kinesis stream, which is composed of a
-- sequence number, a partition key, and a data blob.
--
@@ -273,6 +383,8 @@ instance FromJSON Record where
instance Hashable Record
+instance NFData Record
+
-- | The range of possible sequence numbers for the shard.
--
-- /See:/ 'sequenceNumberRange' smart constructor.
@@ -316,6 +428,8 @@ instance FromJSON SequenceNumberRange where
instance Hashable SequenceNumberRange
+instance NFData SequenceNumberRange
+
-- | A uniquely identified group of data records in an Amazon Kinesis stream.
--
-- /See:/ 'shard' smart constructor.
@@ -354,15 +468,15 @@ shard pShardId_ pHashKeyRange_ pSequenceNumberRange_ =
, _sSequenceNumberRange = pSequenceNumberRange_
}
--- | The shard Id of the shard adjacent to the shard\'s parent.
+-- | The shard ID of the shard adjacent to the shard\'s parent.
sAdjacentParentShardId :: Lens' Shard (Maybe Text)
sAdjacentParentShardId = lens _sAdjacentParentShardId (\ s a -> s{_sAdjacentParentShardId = a});
--- | The shard Id of the shard\'s parent.
+-- | The shard ID of the shard\'s parent.
sParentShardId :: Lens' Shard (Maybe Text)
sParentShardId = lens _sParentShardId (\ s a -> s{_sParentShardId = a});
--- | The unique identifier of the shard within the Amazon Kinesis stream.
+-- | The unique identifier of the shard within the stream.
sShardId :: Lens' Shard Text
sShardId = lens _sShardId (\ s a -> s{_sShardId = a});
@@ -388,6 +502,8 @@ instance FromJSON Shard where
instance Hashable Shard
+instance NFData Shard
+
-- | Represents the output for < DescribeStream>.
--
-- /See:/ 'streamDescription' smart constructor.
@@ -398,6 +514,7 @@ data StreamDescription = StreamDescription'
, _sdShards :: ![Shard]
, _sdHasMoreShards :: !Bool
, _sdRetentionPeriodHours :: !Nat
+ , _sdEnhancedMonitoring :: ![EnhancedMetrics]
} deriving (Eq,Read,Show,Data,Typeable,Generic)
-- | Creates a value of 'StreamDescription' with the minimum fields required to make a request.
@@ -415,6 +532,8 @@ data StreamDescription = StreamDescription'
-- * 'sdHasMoreShards'
--
-- * 'sdRetentionPeriodHours'
+--
+-- * 'sdEnhancedMonitoring'
streamDescription
:: Text -- ^ 'sdStreamName'
-> Text -- ^ 'sdStreamARN'
@@ -430,6 +549,7 @@ streamDescription pStreamName_ pStreamARN_ pStreamStatus_ pHasMoreShards_ pReten
, _sdShards = mempty
, _sdHasMoreShards = pHasMoreShards_
, _sdRetentionPeriodHours = _Nat # pRetentionPeriodHours_
+ , _sdEnhancedMonitoring = mempty
}
-- | The name of the stream being described.
@@ -440,9 +560,8 @@ sdStreamName = lens _sdStreamName (\ s a -> s{_sdStreamName = a});
sdStreamARN :: Lens' StreamDescription Text
sdStreamARN = lens _sdStreamARN (\ s a -> s{_sdStreamARN = a});
--- | The current status of the stream being described.
---
--- The stream status is one of the following states:
+-- | The current status of the stream being described. The stream status is
+-- one of the following states:
--
-- - 'CREATING' - The stream is being created. Amazon Kinesis immediately
-- returns and sets 'StreamStatus' to 'CREATING'.
@@ -469,6 +588,10 @@ sdHasMoreShards = lens _sdHasMoreShards (\ s a -> s{_sdHasMoreShards = a});
sdRetentionPeriodHours :: Lens' StreamDescription Natural
sdRetentionPeriodHours = lens _sdRetentionPeriodHours (\ s a -> s{_sdRetentionPeriodHours = a}) . _Nat;
+-- | Represents the current enhanced monitoring settings of the stream.
+sdEnhancedMonitoring :: Lens' StreamDescription [EnhancedMetrics]
+sdEnhancedMonitoring = lens _sdEnhancedMonitoring (\ s a -> s{_sdEnhancedMonitoring = a}) . _Coerce;
+
instance FromJSON StreamDescription where
parseJSON
= withObject "StreamDescription"
@@ -478,10 +601,13 @@ instance FromJSON StreamDescription where
(x .: "StreamStatus")
<*> (x .:? "Shards" .!= mempty)
<*> (x .: "HasMoreShards")
- <*> (x .: "RetentionPeriodHours"))
+ <*> (x .: "RetentionPeriodHours")
+ <*> (x .:? "EnhancedMonitoring" .!= mempty))
instance Hashable StreamDescription
+instance NFData StreamDescription
+
-- | Metadata assigned to the stream, consisting of a key-value pair.
--
-- /See:/ 'tag' smart constructor.
@@ -523,3 +649,5 @@ instance FromJSON Tag where
(\ x -> Tag' <$> (x .:? "Value") <*> (x .: "Key"))
instance Hashable Tag
+
+instance NFData Tag
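
The new lenses compose in the ordinary lens style. A short sketch, grounded in the lens types shown above: build an EnhancedMetrics value, then read the monitoring settings back off a StreamDescription (the desc argument stands in for a real < DescribeStream> result):

import Control.Lens ((^.), (&), (?~))
import Data.List.NonEmpty (NonEmpty (..))
import Network.AWS.Kinesis

-- Ask for shard-level metrics on incoming and outgoing bytes.
wanted :: EnhancedMetrics
wanted = enhancedMetrics
  & emShardLevelMetrics ?~ (IncomingBytes :| [OutgoingBytes])

-- Collect whatever shard-level metrics are currently enabled.
currentMetrics :: StreamDescription -> [Maybe (NonEmpty MetricsName)]
currentMetrics desc =
  map (^. emShardLevelMetrics) (desc ^. sdEnhancedMonitoring)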
diff --git a/gen/Network/AWS/Kinesis/Types/Sum.hs b/gen/Network/AWS/Kinesis/Types/Sum.hs
index 19b408a..953dff5 100644
--- a/gen/Network/AWS/Kinesis/Types/Sum.hs
+++ b/gen/Network/AWS/Kinesis/Types/Sum.hs
@@ -19,9 +19,57 @@ module Network.AWS.Kinesis.Types.Sum where
import Network.AWS.Prelude
+data MetricsName
+ = All
+ | IncomingBytes
+ | IncomingRecords
+ | IteratorAgeMilliseconds
+ | OutgoingBytes
+ | OutgoingRecords
+ | ReadProvisionedThroughputExceeded
+ | WriteProvisionedThroughputExceeded
+ deriving (Eq,Ord,Read,Show,Enum,Bounded,Data,Typeable,Generic)
+
+instance FromText MetricsName where
+ parser = takeLowerText >>= \case
+ "all" -> pure All
+ "incomingbytes" -> pure IncomingBytes
+ "incomingrecords" -> pure IncomingRecords
+ "iteratoragemilliseconds" -> pure IteratorAgeMilliseconds
+ "outgoingbytes" -> pure OutgoingBytes
+ "outgoingrecords" -> pure OutgoingRecords
+ "readprovisionedthroughputexceeded" -> pure ReadProvisionedThroughputExceeded
+ "writeprovisionedthroughputexceeded" -> pure WriteProvisionedThroughputExceeded
+ e -> fromTextError $ "Failure parsing MetricsName from value: '" <> e
+ <> "'. Accepted values: ALL, IncomingBytes, IncomingRecords, IteratorAgeMilliseconds, OutgoingBytes, OutgoingRecords, ReadProvisionedThroughputExceeded, WriteProvisionedThroughputExceeded"
+
+instance ToText MetricsName where
+ toText = \case
+ All -> "ALL"
+ IncomingBytes -> "IncomingBytes"
+ IncomingRecords -> "IncomingRecords"
+ IteratorAgeMilliseconds -> "IteratorAgeMilliseconds"
+ OutgoingBytes -> "OutgoingBytes"
+ OutgoingRecords -> "OutgoingRecords"
+ ReadProvisionedThroughputExceeded -> "ReadProvisionedThroughputExceeded"
+ WriteProvisionedThroughputExceeded -> "WriteProvisionedThroughputExceeded"
+
+instance Hashable MetricsName
+instance NFData MetricsName
+instance ToByteString MetricsName
+instance ToQuery MetricsName
+instance ToHeader MetricsName
+
+instance ToJSON MetricsName where
+ toJSON = toJSONText
+
+instance FromJSON MetricsName where
+ parseJSON = parseJSONText "MetricsName"
+
data ShardIteratorType
= AfterSequenceNumber
| AtSequenceNumber
+ | AtTimestamp
| Latest
| TrimHorizon
deriving (Eq,Ord,Read,Show,Enum,Bounded,Data,Typeable,Generic)
@@ -30,19 +78,22 @@ instance FromText ShardIteratorType where
parser = takeLowerText >>= \case
"after_sequence_number" -> pure AfterSequenceNumber
"at_sequence_number" -> pure AtSequenceNumber
+ "at_timestamp" -> pure AtTimestamp
"latest" -> pure Latest
"trim_horizon" -> pure TrimHorizon
e -> fromTextError $ "Failure parsing ShardIteratorType from value: '" <> e
- <> "'. Accepted values: AFTER_SEQUENCE_NUMBER, AT_SEQUENCE_NUMBER, LATEST, TRIM_HORIZON"
+ <> "'. Accepted values: AFTER_SEQUENCE_NUMBER, AT_SEQUENCE_NUMBER, AT_TIMESTAMP, LATEST, TRIM_HORIZON"
instance ToText ShardIteratorType where
toText = \case
AfterSequenceNumber -> "AFTER_SEQUENCE_NUMBER"
AtSequenceNumber -> "AT_SEQUENCE_NUMBER"
+ AtTimestamp -> "AT_TIMESTAMP"
Latest -> "LATEST"
TrimHorizon -> "TRIM_HORIZON"
instance Hashable ShardIteratorType
+instance NFData ShardIteratorType
instance ToByteString ShardIteratorType
instance ToQuery ShardIteratorType
instance ToHeader ShardIteratorType
@@ -74,6 +125,7 @@ instance ToText StreamStatus where
Updating -> "UPDATING"
instance Hashable StreamStatus
+instance NFData StreamStatus
instance ToByteString StreamStatus
instance ToQuery StreamStatus
instance ToHeader StreamStatus
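
Both enums round-trip through the text forms shown above, and parsing is case-insensitive because the parser lower-cases its input first (takeLowerText). A few GHCi-style checks; fromText is the FromText entry point re-exported via Network.AWS.Prelude:

>>> toText WriteProvisionedThroughputExceeded
"WriteProvisionedThroughputExceeded"
>>> fromText "ALL" :: Either String MetricsName
Right All
>>> toText AtTimestamp
"AT_TIMESTAMP"
>>> fromText "at_timestamp" :: Either String ShardIteratorType
Right AtTimestamp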
diff --git a/test/Test/AWS/Gen/Kinesis.hs b/test/Test/AWS/Gen/Kinesis.hs
index 98e844a..a73e20b 100644
--- a/test/Test/AWS/Gen/Kinesis.hs
+++ b/test/Test/AWS/Gen/Kinesis.hs
@@ -43,6 +43,12 @@ import Test.AWS.Kinesis.Internal
-- , testGetRecords $
-- getRecords
--
+-- , testEnableEnhancedMonitoring $
+-- enableEnhancedMonitoring
+--
+-- , testDisableEnhancedMonitoring $
+-- disableEnhancedMonitoring
+--
-- , testListTagsForStream $
-- listTagsForStream
--
@@ -91,6 +97,12 @@ import Test.AWS.Kinesis.Internal
-- , testGetRecordsResponse $
-- getRecordsResponse
--
+-- , testEnableEnhancedMonitoringResponse $
+-- enhancedMonitoringOutput
+--
+-- , testDisableEnhancedMonitoringResponse $
+-- enhancedMonitoringOutput
+--
-- , testListTagsForStreamResponse $
-- listTagsForStreamResponse
--
@@ -151,6 +163,16 @@ testGetRecords = req
"GetRecords"
"fixture/GetRecords.yaml"
+testEnableEnhancedMonitoring :: EnableEnhancedMonitoring -> TestTree
+testEnableEnhancedMonitoring = req
+ "EnableEnhancedMonitoring"
+ "fixture/EnableEnhancedMonitoring.yaml"
+
+testDisableEnhancedMonitoring :: DisableEnhancedMonitoring -> TestTree
+testDisableEnhancedMonitoring = req
+ "DisableEnhancedMonitoring"
+ "fixture/DisableEnhancedMonitoring.yaml"
+
testListTagsForStream :: ListTagsForStream -> TestTree
testListTagsForStream = req
"ListTagsForStream"
@@ -238,6 +260,20 @@ testGetRecordsResponse = res
kinesis
(Proxy :: Proxy GetRecords)
+testEnableEnhancedMonitoringResponse :: EnhancedMonitoringOutput -> TestTree
+testEnableEnhancedMonitoringResponse = res
+ "EnableEnhancedMonitoringResponse"
+ "fixture/EnableEnhancedMonitoringResponse.proto"
+ kinesis
+ (Proxy :: Proxy EnableEnhancedMonitoring)
+
+testDisableEnhancedMonitoringResponse :: EnhancedMonitoringOutput -> TestTree
+testDisableEnhancedMonitoringResponse = res
+ "DisableEnhancedMonitoringResponse"
+ "fixture/DisableEnhancedMonitoringResponse.proto"
+ kinesis
+ (Proxy :: Proxy DisableEnhancedMonitoring)
+
testListTagsForStreamResponse :: ListTagsForStreamResponse -> TestTree
testListTagsForStreamResponse = res
"ListTagsForStreamResponse"