diff --git a/.github/workflows/integ-test.yml b/.github/workflows/integ-test.yml
index 2137f521864..8222a8b92be 100644
--- a/.github/workflows/integ-test.yml
+++ b/.github/workflows/integ-test.yml
@@ -6,7 +6,7 @@ on:
identifier:
required: true
type: string
- pull_request:
+ pull_request_target:
branches:
- main
@@ -67,6 +67,7 @@ jobs:
- name: Checkout code
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
with:
+ ref: ${{ github.event.pull_request.head.sha || github.ref_name }}
persist-credentials: false
- name: Configure AWS Credentials
diff --git a/AWSAPIGateway.podspec b/AWSAPIGateway.podspec
index 24cf2e981ff..72033da8aaa 100644
--- a/AWSAPIGateway.podspec
+++ b/AWSAPIGateway.podspec
@@ -1,7 +1,7 @@
Pod::Spec.new do |s|
s.name = 'AWSAPIGateway'
- s.version = '2.33.10'
+ s.version = '2.34.0'
s.summary = 'Amazon Web Services SDK for iOS.'
s.description = 'The AWS SDK for iOS provides a library, code samples, and documentation for developers to build connected mobile applications using AWS.'
@@ -13,7 +13,7 @@ Pod::Spec.new do |s|
s.source = { :git => 'https://github.com/aws-amplify/aws-sdk-ios.git',
:tag => s.version}
s.requires_arc = true
- s.dependency 'AWSCore', '2.33.10'
+ s.dependency 'AWSCore', '2.34.0'
s.source_files = 'AWSAPIGateway/*.{h,m}'
end
diff --git a/AWSAPIGateway/AWSAPIGatewayClient.m b/AWSAPIGateway/AWSAPIGatewayClient.m
index 9799ea97324..8e49daea9d2 100644
--- a/AWSAPIGateway/AWSAPIGatewayClient.m
+++ b/AWSAPIGateway/AWSAPIGatewayClient.m
@@ -23,7 +23,7 @@
static NSString *const AWSAPIGatewayAPIKeyHeader = @"x-api-key";
-NSString *const AWSAPIGatewaySDKVersion = @"2.33.10";
+NSString *const AWSAPIGatewaySDKVersion = @"2.34.0";
static int defaultChunkSize = 1024;
diff --git a/AWSAPIGateway/Info.plist b/AWSAPIGateway/Info.plist
index 00a733ed3c8..f13059556e3 100644
--- a/AWSAPIGateway/Info.plist
+++ b/AWSAPIGateway/Info.plist
@@ -15,7 +15,7 @@
+/**
+ An instance maintenance policy.
+ */
+@property (nonatomic, strong) AWSAutoScalingInstanceMaintenancePolicy * _Nullable instanceMaintenancePolicy;
+
 /**
 The EC2 instances associated with the group.
 */

@@ -1266,6 +1272,11 @@ typedef NS_ENUM(NSInteger, AWSAutoScalingWarmPoolStatus) {
 */
 @property (nonatomic, strong) NSString * _Nullable instanceId;

+/**
+ An instance maintenance policy. For more information, see Set instance maintenance policy in the Amazon EC2 Auto Scaling User Guide.
+ */
+@property (nonatomic, strong) AWSAutoScalingInstanceMaintenancePolicy * _Nullable instanceMaintenancePolicy;
+
 /**
 The name of the launch configuration to use to launch instances.
 Conditional: You must specify either a launch template (LaunchTemplate or MixedInstancesPolicy) or a launch configuration (LaunchConfigurationName or InstanceId).
+/**
+ Describes an instance maintenance policy.
+ For more information, see Set instance maintenance policy in the Amazon EC2 Auto Scaling User Guide.
+ */
+@interface AWSAutoScalingInstanceMaintenancePolicy : AWSModel
+
+/**
+ Specifies the upper threshold as a percentage of the desired capacity of the Auto Scaling group. It represents the maximum percentage of the group that can be in service and healthy, or pending, to support your workload when replacing instances. Value range is 100 to 200. To clear a previously set value, specify a value of -1.
+ Both MinHealthyPercentage and MaxHealthyPercentage must be specified, and the difference between them cannot be greater than 100. A large range increases the number of instances that can be replaced at the same time.
+ */
+@property (nonatomic, strong) NSNumber * _Nullable maxHealthyPercentage;
+
+/**
+ Specifies the lower threshold as a percentage of the desired capacity of the Auto Scaling group. It represents the minimum percentage of the group to keep in service, healthy, and ready to use to support your workload when replacing instances. Value range is 0 to 100. To clear a previously set value, specify a value of -1.
+ */
+@property (nonatomic, strong) NSNumber * _Nullable minHealthyPercentage;
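For orientation, a minimal usage sketch of the new policy type follows. The class and member names come from this diff; the lower-camel-case property spellings are assumed from the SDK's code-generation convention, and the values are illustrative.

```objc
#import <AWSAutoScaling/AWSAutoScaling.h>

// Minimal sketch: keep between 90% and 120% of desired capacity in service
// while instances are being replaced. Both values must be set together, and
// the difference between them cannot exceed 100.
AWSAutoScalingInstanceMaintenancePolicy *policy = [AWSAutoScalingInstanceMaintenancePolicy new];
policy.minHealthyPercentage = @90;    // lower threshold, range 0-100
policy.maxHealthyPercentage = @120;   // upper threshold, range 100-200
// To clear a previously set policy, specify @(-1) for both values.
```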
The metadata options for the instances. For more information, see Configuring the Instance Metadata Options in the Amazon EC2 Auto Scaling User Guide.
 */

@@ -2895,6 +2924,11 @@ typedef NS_ENUM(NSInteger, AWSAutoScalingWarmPoolStatus) {
 */
 @property (nonatomic, strong) NSArray

+/**
+ [Price protection] The price protection threshold for Spot Instances, as a percentage of an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified price is from either the lowest priced current generation instance types or, failing that, the lowest priced previous generation instance types that match your attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price exceeds your specified threshold.
+ The parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage.
+ To indicate no price protection threshold, specify a high value, such as 999999.
+ If you set DesiredCapacityType to vcpu or memory-mib, the price protection threshold is based on the per-vCPU or per-memory price instead of the per instance price.
+ Only one of SpotMaxPricePercentageOverLowestPrice or MaxSpotPriceAsPercentageOfOptimalOnDemandPrice can be specified. If you don't specify either, then SpotMaxPricePercentageOverLowestPrice is used and the value for that parameter defaults to 100.
The minimum and maximum amount of memory per vCPU for an instance type, in GiB.
Default: No minimum or maximum limits
 */

@@ -2916,7 +2950,7 @@ typedef NS_ENUM(NSInteger, AWSAutoScalingWarmPoolStatus) {
 @property (nonatomic, strong) AWSAutoScalingNetworkInterfaceCountRequest * _Nullable networkInterfaceCount;

 /**
-The price protection threshold for On-Demand Instances. This is the maximum you’ll pay for an On-Demand Instance, expressed as a percentage higher than the least expensive current generation M, C, or R instance type with your specified attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price is higher than your threshold. The parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage. To turn off price protection, specify a high value, such as 999999.
-If you set DesiredCapacityType to vcpu or memory-mib, the price protection threshold is applied based on the per vCPU or per memory price instead of the per instance price.
-Default: 20
+[Price protection] The price protection threshold for On-Demand Instances, as a percentage higher than an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified price is from either the lowest priced current generation instance types or, failing that, the lowest priced previous generation instance types that match your attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price exceeds your specified threshold.
+The parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage.
+To turn off price protection, specify a high value, such as 999999.
+If you set DesiredCapacityType to vcpu or memory-mib, the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per instance price.
+Default: 20

 /**
-The price protection threshold for Spot Instances. This is the maximum you’ll pay for a Spot Instance, expressed as a percentage higher than the least expensive current generation M, C, or R instance type with your specified attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price is higher than your threshold. The parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage. To turn off price protection, specify a high value, such as 999999.
-If you set DesiredCapacityType to vcpu or memory-mib, the price protection threshold is applied based on the per vCPU or per memory price instead of the per instance price.
-Default: 100
+[Price protection] The price protection threshold for Spot Instances, as a percentage higher than an identified Spot price. The identified Spot price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified price is from either the lowest priced current generation instance types or, failing that, the lowest priced previous generation instance types that match your attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price exceeds your specified threshold.
+The parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage.
+To turn off price protection, specify a high value, such as 999999.
+If you set DesiredCapacityType to vcpu or memory-mib, the price protection threshold is based on the per-vCPU or per-memory price instead of the per instance price.
+Only one of SpotMaxPricePercentageOverLowestPrice or MaxSpotPriceAsPercentageOfOptimalOnDemandPrice can be specified.
+Default: 100
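A sketch of how these thresholds might be set on an attribute-based instance selection request follows. The AWSAutoScalingInstanceRequirements class and the lower-camel-cased property names are assumptions derived from the shape and member names in this diff, not verified signatures.

```objc
// Hypothetical sketch: price-protection thresholds, interpreted as percentages.
AWSAutoScalingInstanceRequirements *requirements = [AWSAutoScalingInstanceRequirements new];
requirements.onDemandMaxPricePercentageOverLowestPrice = @20;  // documented default
// Only one of the two Spot thresholds may be specified. The new
// MaxSpotPriceAsPercentageOfOptimalOnDemandPrice is anchored to an identified
// On-Demand price rather than a percentage over the lowest Spot price.
requirements.maxSpotPriceAsPercentageOfOptimalOnDemandPrice = @100;
```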
 /**
-This structure defines the CloudWatch metric to return, along with the statistic, period, and unit.
-For more information about the CloudWatch terminology below, see Amazon CloudWatch concepts in the Amazon CloudWatch User Guide.
+This structure defines the CloudWatch metric to return, along with the statistic and unit.
+For more information about the CloudWatch terminology below, see Amazon CloudWatch concepts in the Amazon CloudWatch User Guide.
 Required parameters: [Metric, Stat]
 */
@interface AWSAutoScalingMetricStat : AWSModel

@@ -4206,7 +4240,12 @@ typedef NS_ENUM(NSInteger, AWSAutoScalingWarmPoolStatus) {
 @property (nonatomic, strong) NSNumber * _Nullable instanceWarmup;

 /**
-The amount of capacity in the Auto Scaling group that must pass your group's health checks to allow the operation to continue. The value is expressed as a percentage of the desired capacity of the Auto Scaling group (rounded up to the nearest integer). The default is 90.
-Setting the minimum healthy percentage to 100 percent limits the rate of replacement to one instance at a time. In contrast, setting it to 0 percent has the effect of replacing all instances at the same time.
+Specifies the maximum percentage of the group that can be in service and healthy, or pending, to support your workload when replacing instances. The value is expressed as a percentage of the desired capacity of the Auto Scaling group. Value range is 100 to 200.
+If you specify MaxHealthyPercentage, you must also specify MinHealthyPercentage, and the difference between them cannot be greater than 100. A larger range increases the number of instances that can be replaced at the same time.
+If you do not specify this property, the default is 100 percent, or the percentage set in the instance maintenance policy for the Auto Scaling group, if defined.
+ */
+@property (nonatomic, strong) NSNumber * _Nullable maxHealthyPercentage;
+
+/**
+Specifies the minimum percentage of the group to keep in service, healthy, and ready to use to support your workload to allow the operation to continue. The value is expressed as a percentage of the desired capacity of the Auto Scaling group. Value range is 0 to 100.
+If you do not specify this property, the default is 90 percent, or the percentage set in the instance maintenance policy for the Auto Scaling group, if defined.
 */
 @property (nonatomic, strong) NSNumber * _Nullable minHealthyPercentage;

@@ -4629,7 +4668,7 @@ typedef NS_ENUM(NSInteger, AWSAutoScalingWarmPoolStatus) {
 @property (nonatomic, strong) AWSAutoScalingDesiredConfiguration * _Nullable desiredConfiguration;

 /**
-Sets your preferences for the instance refresh so that it performs as expected when you start it. Includes the instance warmup time, the minimum healthy percentage, and the behaviors that you want Amazon EC2 Auto Scaling to use if instances that are in Standby state or protected from scale in are found. You can also choose to enable additional features, such as the following: Auto rollback, Checkpoints, CloudWatch alarms, Skip matching
+Sets your preferences for the instance refresh so that it performs as expected when you start it. Includes the instance warmup time, the minimum and maximum healthy percentages, and the behaviors that you want Amazon EC2 Auto Scaling to use if instances that are in Standby state or protected from scale in are found. You can also choose to enable additional features, such as the following: Auto rollback, Checkpoints, CloudWatch alarms, Skip matching

+/**
+ An instance maintenance policy. For more information, see Set instance maintenance policy in the Amazon EC2 Auto Scaling User Guide.
+ */
+@property (nonatomic, strong) AWSAutoScalingInstanceMaintenancePolicy * _Nullable instanceMaintenancePolicy;
+
 /**
 The name of the launch configuration. If you specify LaunchConfigurationName in your update request, you can't specify LaunchTemplate or MixedInstancesPolicy.
 */
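Taken together, the new update-type property could be used roughly as follows. This is a sketch assuming the standard [AWSAutoScaling defaultAutoScaling] client from this SDK; the group name is a placeholder.

```objc
// Sketch: attach an instance maintenance policy to an existing group.
AWSAutoScalingInstanceMaintenancePolicy *policy = [AWSAutoScalingInstanceMaintenancePolicy new];
policy.minHealthyPercentage = @90;
policy.maxHealthyPercentage = @120;

AWSAutoScalingUpdateAutoScalingGroupType *update = [AWSAutoScalingUpdateAutoScalingGroupType new];
update.autoScalingGroupName = @"my-asg";    // placeholder group name
update.instanceMaintenancePolicy = policy;  // property added in this change

[[[AWSAutoScaling defaultAutoScaling] updateAutoScalingGroup:update]
    continueWithBlock:^id _Nullable(AWSTask * _Nonnull task) {
    if (task.error) {
        NSLog(@"UpdateAutoScalingGroup failed: %@", task.error);
    }
    return nil;
}];
```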
-      \"documentation\":\"Gets information about the instance refreshes for the specified Auto Scaling group. This operation is part of the instance refresh feature in Amazon EC2 Auto Scaling, which helps you update instances in your Auto Scaling group after you make configuration changes. To help you determine the status of an instance refresh, Amazon EC2 Auto Scaling returns information about the instance refreshes you previously initiated, including their status, start time, end time, the percentage of the instance refresh that is complete, and the number of instances remaining to update before the instance refresh is complete. If a rollback is initiated while an instance refresh is in progress, Amazon EC2 Auto Scaling also returns information about the rollback of the instance refresh.\"\
+      \"documentation\":\"Gets information about the instance refreshes for the specified Auto Scaling group from the previous six weeks. This operation is part of the instance refresh feature in Amazon EC2 Auto Scaling, which helps you update instances in your Auto Scaling group after you make configuration changes. To help you determine the status of an instance refresh, Amazon EC2 Auto Scaling returns information about the instance refreshes you previously initiated, including their status, start time, end time, the percentage of the instance refresh that is complete, and the number of instances remaining to update before the instance refresh is complete. If a rollback is initiated while an instance refresh is in progress, Amazon EC2 Auto Scaling also returns information about the rollback of the instance refresh.\"\
     },\
     \"DescribeLaunchConfigurations\":{\
       \"name\":\"DescribeLaunchConfigurations\",\
@@ -1036,7 +1036,7 @@ - (NSString *)definitionString {
         {\"shape\":\"ResourceContentionFault\"},\
         {\"shape\":\"InstanceRefreshInProgressFault\"}\
       ],\
-      \"documentation\":\"Starts an instance refresh. During an instance refresh, Amazon EC2 Auto Scaling performs a rolling update of instances in an Auto Scaling group. Instances are terminated first and then replaced, which temporarily reduces the capacity available within your Auto Scaling group. This operation is part of the instance refresh feature in Amazon EC2 Auto Scaling, which helps you update instances in your Auto Scaling group. This feature is helpful, for example, when you have a new AMI or a new user data script. You just need to create a new launch template that specifies the new AMI or user data script. Then start an instance refresh to immediately begin the process of updating instances in the group. If successful, the request's response contains a unique ID that you can use to track the progress of the instance refresh. To query its status, call the DescribeInstanceRefreshes API. To describe the instance refreshes that have already run, call the DescribeInstanceRefreshes API. To cancel an instance refresh that is in progress, use the CancelInstanceRefresh API. An instance refresh might fail for several reasons, such as EC2 launch failures, misconfigured health checks, or not ignoring or allowing the termination of instances that are in Standby state or protected from scale in. You can monitor for failed EC2 launches using the scaling activities. To find the scaling activities, call the DescribeScalingActivities API. If you enable auto rollback, your Auto Scaling group will be rolled back automatically when the instance refresh fails. You can enable this feature before starting an instance refresh by specifying the AutoRollback property in the instance refresh preferences. Otherwise, to roll back an instance refresh before it finishes, use the RollbackInstanceRefresh API.\"\
+      \"documentation\":\"Starts an instance refresh. This operation is part of the instance refresh feature in Amazon EC2 Auto Scaling, which helps you update instances in your Auto Scaling group. This feature is helpful, for example, when you have a new AMI or a new user data script. You just need to create a new launch template that specifies the new AMI or user data script. Then start an instance refresh to immediately begin the process of updating instances in the group. If successful, the request's response contains a unique ID that you can use to track the progress of the instance refresh. To query its status, call the DescribeInstanceRefreshes API. To describe the instance refreshes that have already run, call the DescribeInstanceRefreshes API. To cancel an instance refresh that is in progress, use the CancelInstanceRefresh API. An instance refresh might fail for several reasons, such as EC2 launch failures, misconfigured health checks, or not ignoring or allowing the termination of instances that are in Standby state or protected from scale in. You can monitor for failed EC2 launches using the scaling activities. To find the scaling activities, call the DescribeScalingActivities API. If you enable auto rollback, your Auto Scaling group will be rolled back automatically when the instance refresh fails. You can enable this feature before starting an instance refresh by specifying the AutoRollback property in the instance refresh preferences. Otherwise, to roll back an instance refresh before it finishes, use the RollbackInstanceRefresh API.\"\
          \"documentation\":\"The traffic sources associated with this Auto Scaling group.\"\
+        },\
+        \"InstanceMaintenancePolicy\":{\
+          \"shape\":\"InstanceMaintenancePolicy\",\
+          \"documentation\":\"An instance maintenance policy.\"\
         }\
       },\
       \"documentation\":\"Describes an Auto Scaling group.\"\
@@ -2054,6 +2058,10 @@ - (NSString *)definitionString {
         \"TrafficSources\":{\
           \"shape\":\"TrafficSources\",\
           \"documentation\":\"The list of traffic sources to attach to this Auto Scaling group. You can use any of the following as traffic sources for an Auto Scaling group: Classic Load Balancer, Application Load Balancer, Gateway Load Balancer, Network Load Balancer, and VPC Lattice.\"\
+        },\
+        \"InstanceMaintenancePolicy\":{\
+          \"shape\":\"InstanceMaintenancePolicy\",\
+          \"documentation\":\"An instance maintenance policy. For more information, see Set instance maintenance policy in the Amazon EC2 Auto Scaling User Guide.\"\
         }\
       }\
     },\
@@ -3147,6 +3155,20 @@ - (NSString *)definitionString {
       \"type\":\"list\",\
       \"member\":{\"shape\":\"XmlStringMaxLen19\"}\
     },\
+    \"InstanceMaintenancePolicy\":{\
+      \"type\":\"structure\",\
+      \"members\":{\
+        \"MinHealthyPercentage\":{\
+          \"shape\":\"IntPercentResettable\",\
+          \"documentation\":\"Specifies the lower threshold as a percentage of the desired capacity of the Auto Scaling group. It represents the minimum percentage of the group to keep in service, healthy, and ready to use to support your workload when replacing instances. Value range is 0 to 100. To clear a previously set value, specify a value of -1.\"\
+        },\
+        \"MaxHealthyPercentage\":{\
+          \"shape\":\"IntPercent100To200Resettable\",\
+          \"documentation\":\"Specifies the upper threshold as a percentage of the desired capacity of the Auto Scaling group. It represents the maximum percentage of the group that can be in service and healthy, or pending, to support your workload when replacing instances. Value range is 100 to 200. To clear a previously set value, specify a value of -1. Both MinHealthyPercentage and MaxHealthyPercentage must be specified, and the difference between them cannot be greater than 100. A large range increases the number of instances that can be replaced at the same time.\"\
+        }\
+      },\
+      \"documentation\":\"Describes an instance maintenance policy. For more information, see Set instance maintenance policy in the Amazon EC2 Auto Scaling User Guide.\"\
\"\ + },\ \"InstanceMetadataEndpointState\":{\ \"type\":\"string\",\ \"enum\":[\ @@ -3359,11 +3381,15 @@ - (NSString *)definitionString { },\ \"SpotMaxPricePercentageOverLowestPrice\":{\ \"shape\":\"NullablePositiveInteger\",\ - \"documentation\":\"The price protection threshold for Spot Instances. This is the maximum youâll pay for a Spot Instance, expressed as a percentage higher than the least expensive current generation M, C, or R instance type with your specified attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price is higher than your threshold. The parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage. To turn off price protection, specify a high value, such as 999999
.
If you set DesiredCapacityType
to vcpu
or memory-mib
, the price protection threshold is applied based on the per vCPU or per memory price instead of the per instance price.
Default: 100
[Price protection] The price protection threshold for Spot Instances, as a percentage higher than an identified Spot price. The identified Spot price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified price is from either the lowest priced current generation instance types or, failing that, the lowest priced previous generation instance types that match your attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price exceeds your specified threshold.
The parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage.
To turn off price protection, specify a high value, such as 999999
.
If you set DesiredCapacityType
to vcpu
or memory-mib
, the price protection threshold is based on the per-vCPU or per-memory price instead of the per instance price.
Only one of SpotMaxPricePercentageOverLowestPrice
or MaxSpotPriceAsPercentageOfOptimalOnDemandPrice
can be specified.
Default: 100
[Price protection] The price protection threshold for Spot Instances, as a percentage of an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified price is from either the lowest priced current generation instance types or, failing that, the lowest priced previous generation instance types that match your attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price exceeds your specified threshold.
The parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage.
To indicate no price protection threshold, specify a high value, such as 999999
.
If you set DesiredCapacityType
to vcpu
or memory-mib
, the price protection threshold is based on the per-vCPU or per-memory price instead of the per instance price.
Only one of SpotMaxPricePercentageOverLowestPrice
or MaxSpotPriceAsPercentageOfOptimalOnDemandPrice
can be specified. If you don't specify either, then SpotMaxPricePercentageOverLowestPrice
is used and the value for that parameter defaults to 100
.
-          \"documentation\":\"The price protection threshold for On-Demand Instances. This is the maximum you’ll pay for an On-Demand Instance, expressed as a percentage higher than the least expensive current generation M, C, or R instance type with your specified attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price is higher than your threshold. The parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage. To turn off price protection, specify a high value, such as 999999. If you set DesiredCapacityType to vcpu or memory-mib, the price protection threshold is applied based on the per vCPU or per memory price instead of the per instance price. Default: 20\"\
+          \"documentation\":\"[Price protection] The price protection threshold for On-Demand Instances, as a percentage higher than an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified price is from either the lowest priced current generation instance types or, failing that, the lowest priced previous generation instance types that match your attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price exceeds your specified threshold. The parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage. To turn off price protection, specify a high value, such as 999999. If you set DesiredCapacityType to vcpu or memory-mib, the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per instance price. Default: 20\"\
          \"documentation\":\"The unit to use for the returned data points. For a complete list of the units that CloudWatch supports, see the MetricDatum data type in the Amazon CloudWatch API Reference.\"\
         }\
       },\
-      \"documentation\":\"This structure defines the CloudWatch metric to return, along with the statistic, period, and unit. For more information about the CloudWatch terminology below, see Amazon CloudWatch concepts in the Amazon CloudWatch User Guide.\"\
+      \"documentation\":\"This structure defines the CloudWatch metric to return, along with the statistic and unit. For more information about the CloudWatch terminology below, see Amazon CloudWatch concepts in the Amazon CloudWatch User Guide.\"\
     },\
     \"MetricStatistic\":{\
       \"type\":\"string\",\
@@ -4772,7 +4813,7 @@ - (NSString *)definitionString {
       \"members\":{\
         \"MinHealthyPercentage\":{\
           \"shape\":\"IntPercent\",\
-          \"documentation\":\"The amount of capacity in the Auto Scaling group that must pass your group's health checks to allow the operation to continue. The value is expressed as a percentage of the desired capacity of the Auto Scaling group (rounded up to the nearest integer). The default is 90. Setting the minimum healthy percentage to 100 percent limits the rate of replacement to one instance at a time. In contrast, setting it to 0 percent has the effect of replacing all instances at the same time.\"\
+          \"documentation\":\"Specifies the minimum percentage of the group to keep in service, healthy, and ready to use to support your workload to allow the operation to continue. The value is expressed as a percentage of the desired capacity of the Auto Scaling group. Value range is 0 to 100. If you do not specify this property, the default is 90 percent, or the percentage set in the instance maintenance policy for the Auto Scaling group, if defined.\"\
         },\
         \"InstanceWarmup\":{\
           \"shape\":\"RefreshInstanceWarmup\",\
@@ -4805,6 +4846,10 @@ - (NSString *)definitionString {
         \"AlarmSpecification\":{\
           \"shape\":\"AlarmSpecification\",\
           \"documentation\":\"(Optional) The CloudWatch alarm specification. CloudWatch alarms can be used to identify any issues and fail the operation if an alarm threshold is met.\"\
+        },\
+        \"MaxHealthyPercentage\":{\
+          \"shape\":\"IntPercent100To200\",\
+          \"documentation\":\"Specifies the maximum percentage of the group that can be in service and healthy, or pending, to support your workload when replacing instances. The value is expressed as a percentage of the desired capacity of the Auto Scaling group. Value range is 100 to 200. If you specify MaxHealthyPercentage, you must also specify MinHealthyPercentage, and the difference between them cannot be greater than 100. A larger range increases the number of instances that can be replaced at the same time. If you do not specify this property, the default is 100 percent, or the percentage set in the instance maintenance policy for the Auto Scaling group, if defined.\"\
\"\ @@ -5273,7 +5318,7 @@ - (NSString *)definitionString { },\ \"Preferences\":{\ \"shape\":\"RefreshPreferences\",\ - \"documentation\":\"Sets your preferences for the instance refresh so that it performs as expected when you start it. Includes the instance warmup time, the minimum healthy percentage, and the behaviors that you want Amazon EC2 Auto Scaling to use if instances that are in Standby
state or protected from scale in are found. You can also choose to enable additional features, such as the following:
Auto rollback
Checkpoints
CloudWatch alarms
Skip matching
Sets your preferences for the instance refresh so that it performs as expected when you start it. Includes the instance warmup time, the minimum and maximum healthy percentages, and the behaviors that you want Amazon EC2 Auto Scaling to use if instances that are in Standby
state or protected from scale in are found. You can also choose to enable additional features, such as the following:
Auto rollback
Checkpoints
CloudWatch alarms
Skip matching
 The amount of time, in seconds, until a new instance is considered to have finished initializing and resource consumption to become stable after it enters the InService state.
 During an instance refresh, Amazon EC2 Auto Scaling waits for the warm-up period after it replaces an instance before it moves on to replacing the next instance. Amazon EC2 Auto Scaling also waits for the warm-up period before aggregating the metrics for new instances with existing instances in the Amazon CloudWatch metrics that are used for scaling, resulting in more reliable usage data. For more information, see Set the default instance warmup for an Auto Scaling group in the Amazon EC2 Auto Scaling User Guide.
 To manage various warm-up settings at the group level, we recommend that you set the default instance warmup, even if it is set to 0 seconds. To remove a value that you previously set, include the property but specify -1 for the value. However, we strongly recommend keeping the default instance warmup enabled by specifying a value of 0 or other nominal value.
An instance maintenance policy. For more information, see Set instance maintenance policy in the Amazon EC2 Auto Scaling User Guide.
\"\ }\ }\ },\ diff --git a/AWSAutoScaling/AWSAutoScalingService.h b/AWSAutoScaling/AWSAutoScalingService.h index 83496028a16..a97acd8d350 100644 --- a/AWSAutoScaling/AWSAutoScalingService.h +++ b/AWSAutoScaling/AWSAutoScalingService.h @@ -1,5 +1,5 @@ // -// Copyright 2010-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. +// Copyright 2010-2024 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"). // You may not use this file except in compliance with the License. @@ -745,7 +745,7 @@ FOUNDATION_EXPORT NSString *const AWSAutoScalingSDKVersion; - (void)describeAutoScalingNotificationTypes:(AWSRequest *)request completionHandler:(void (^ _Nullable)(AWSAutoScalingDescribeAutoScalingNotificationTypesAnswer * _Nullable response, NSError * _Nullable error))completionHandler; /** -Gets information about the instance refreshes for the specified Auto Scaling group.
This operation is part of the instance refresh feature in Amazon EC2 Auto Scaling, which helps you update instances in your Auto Scaling group after you make configuration changes.
To help you determine the status of an instance refresh, Amazon EC2 Auto Scaling returns information about the instance refreshes you previously initiated, including their status, start time, end time, the percentage of the instance refresh that is complete, and the number of instances remaining to update before the instance refresh is complete. If a rollback is initiated while an instance refresh is in progress, Amazon EC2 Auto Scaling also returns information about the rollback of the instance refresh.
+Gets information about the instance refreshes for the specified Auto Scaling group from the previous six weeks.
This operation is part of the instance refresh feature in Amazon EC2 Auto Scaling, which helps you update instances in your Auto Scaling group after you make configuration changes.
To help you determine the status of an instance refresh, Amazon EC2 Auto Scaling returns information about the instance refreshes you previously initiated, including their status, start time, end time, the percentage of the instance refresh that is complete, and the number of instances remaining to update before the instance refresh is complete. If a rollback is initiated while an instance refresh is in progress, Amazon EC2 Auto Scaling also returns information about the rollback of the instance refresh.
@param request A container for the necessary parameters to execute the DescribeInstanceRefreshes service method. @@ -757,7 +757,7 @@ FOUNDATION_EXPORT NSString *const AWSAutoScalingSDKVersion; - (AWSTaskGets information about the instance refreshes for the specified Auto Scaling group.
This operation is part of the instance refresh feature in Amazon EC2 Auto Scaling, which helps you update instances in your Auto Scaling group after you make configuration changes.
To help you determine the status of an instance refresh, Amazon EC2 Auto Scaling returns information about the instance refreshes you previously initiated, including their status, start time, end time, the percentage of the instance refresh that is complete, and the number of instances remaining to update before the instance refresh is complete. If a rollback is initiated while an instance refresh is in progress, Amazon EC2 Auto Scaling also returns information about the rollback of the instance refresh.
+Gets information about the instance refreshes for the specified Auto Scaling group from the previous six weeks.
This operation is part of the instance refresh feature in Amazon EC2 Auto Scaling, which helps you update instances in your Auto Scaling group after you make configuration changes.
To help you determine the status of an instance refresh, Amazon EC2 Auto Scaling returns information about the instance refreshes you previously initiated, including their status, start time, end time, the percentage of the instance refresh that is complete, and the number of instances remaining to update before the instance refresh is complete. If a rollback is initiated while an instance refresh is in progress, Amazon EC2 Auto Scaling also returns information about the rollback of the instance refresh.
 @param request A container for the necessary parameters to execute the DescribeInstanceRefreshes service method.
 @param completionHandler The completion handler to call when the load request is complete.
@@ -1646,7 +1646,7 @@ FOUNDATION_EXPORT NSString *const AWSAutoScalingSDKVersion;
 - (void)setInstanceProtection:(AWSAutoScalingSetInstanceProtectionQuery *)request completionHandler:(void (^ _Nullable)(AWSAutoScalingSetInstanceProtectionAnswer * _Nullable response, NSError * _Nullable error))completionHandler;

 /**
-Starts an instance refresh. During an instance refresh, Amazon EC2 Auto Scaling performs a rolling update of instances in an Auto Scaling group. Instances are terminated first and then replaced, which temporarily reduces the capacity available within your Auto Scaling group.
-This operation is part of the instance refresh feature in Amazon EC2 Auto Scaling, which helps you update instances in your Auto Scaling group. This feature is helpful, for example, when you have a new AMI or a new user data script. You just need to create a new launch template that specifies the new AMI or user data script. Then start an instance refresh to immediately begin the process of updating instances in the group.
-If successful, the request's response contains a unique ID that you can use to track the progress of the instance refresh. To query its status, call the DescribeInstanceRefreshes API. To describe the instance refreshes that have already run, call the DescribeInstanceRefreshes API. To cancel an instance refresh that is in progress, use the CancelInstanceRefresh API.
-An instance refresh might fail for several reasons, such as EC2 launch failures, misconfigured health checks, or not ignoring or allowing the termination of instances that are in Standby state or protected from scale in. You can monitor for failed EC2 launches using the scaling activities. To find the scaling activities, call the DescribeScalingActivities API.
-If you enable auto rollback, your Auto Scaling group will be rolled back automatically when the instance refresh fails. You can enable this feature before starting an instance refresh by specifying the AutoRollback property in the instance refresh preferences. Otherwise, to roll back an instance refresh before it finishes, use the RollbackInstanceRefresh API.
+Starts an instance refresh.
+This operation is part of the instance refresh feature in Amazon EC2 Auto Scaling, which helps you update instances in your Auto Scaling group. This feature is helpful, for example, when you have a new AMI or a new user data script. You just need to create a new launch template that specifies the new AMI or user data script. Then start an instance refresh to immediately begin the process of updating instances in the group.
+If successful, the request's response contains a unique ID that you can use to track the progress of the instance refresh. To query its status, call the DescribeInstanceRefreshes API. To describe the instance refreshes that have already run, call the DescribeInstanceRefreshes API. To cancel an instance refresh that is in progress, use the CancelInstanceRefresh API.
+An instance refresh might fail for several reasons, such as EC2 launch failures, misconfigured health checks, or not ignoring or allowing the termination of instances that are in Standby state or protected from scale in. You can monitor for failed EC2 launches using the scaling activities. To find the scaling activities, call the DescribeScalingActivities API.
+If you enable auto rollback, your Auto Scaling group will be rolled back automatically when the instance refresh fails. You can enable this feature before starting an instance refresh by specifying the AutoRollback property in the instance refresh preferences. Otherwise, to roll back an instance refresh before it finishes, use the RollbackInstanceRefresh API.
 /**
-Starts an instance refresh. During an instance refresh, Amazon EC2 Auto Scaling performs a rolling update of instances in an Auto Scaling group. Instances are terminated first and then replaced, which temporarily reduces the capacity available within your Auto Scaling group.
-This operation is part of the instance refresh feature in Amazon EC2 Auto Scaling, which helps you update instances in your Auto Scaling group. This feature is helpful, for example, when you have a new AMI or a new user data script. You just need to create a new launch template that specifies the new AMI or user data script. Then start an instance refresh to immediately begin the process of updating instances in the group.
-If successful, the request's response contains a unique ID that you can use to track the progress of the instance refresh. To query its status, call the DescribeInstanceRefreshes API. To describe the instance refreshes that have already run, call the DescribeInstanceRefreshes API. To cancel an instance refresh that is in progress, use the CancelInstanceRefresh API.
-An instance refresh might fail for several reasons, such as EC2 launch failures, misconfigured health checks, or not ignoring or allowing the termination of instances that are in Standby state or protected from scale in. You can monitor for failed EC2 launches using the scaling activities. To find the scaling activities, call the DescribeScalingActivities API.
-If you enable auto rollback, your Auto Scaling group will be rolled back automatically when the instance refresh fails. You can enable this feature before starting an instance refresh by specifying the AutoRollback property in the instance refresh preferences. Otherwise, to roll back an instance refresh before it finishes, use the RollbackInstanceRefresh API.
+Starts an instance refresh.
+This operation is part of the instance refresh feature in Amazon EC2 Auto Scaling, which helps you update instances in your Auto Scaling group. This feature is helpful, for example, when you have a new AMI or a new user data script. You just need to create a new launch template that specifies the new AMI or user data script. Then start an instance refresh to immediately begin the process of updating instances in the group.
+If successful, the request's response contains a unique ID that you can use to track the progress of the instance refresh. To query its status, call the DescribeInstanceRefreshes API. To describe the instance refreshes that have already run, call the DescribeInstanceRefreshes API. To cancel an instance refresh that is in progress, use the CancelInstanceRefresh API.
+An instance refresh might fail for several reasons, such as EC2 launch failures, misconfigured health checks, or not ignoring or allowing the termination of instances that are in Standby state or protected from scale in. You can monitor for failed EC2 launches using the scaling activities. To find the scaling activities, call the DescribeScalingActivities API.
+If you enable auto rollback, your Auto Scaling group will be rolled back automatically when the instance refresh fails. You can enable this feature before starting an instance refresh by specifying the AutoRollback property in the instance refresh preferences. Otherwise, to roll back an instance refresh before it finishes, use the RollbackInstanceRefresh API.
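As a usage illustration of the documentation above, a start-then-track flow could look roughly like this. It is a sketch: the request/answer type names follow the <Operation>Type/<Operation>Answer convention visible elsewhere in this header, and the group name is a placeholder.

```objc
// Sketch: start an instance refresh with the new min/max healthy percentages,
// then poll its status with DescribeInstanceRefreshes.
AWSAutoScalingRefreshPreferences *prefs = [AWSAutoScalingRefreshPreferences new];
prefs.instanceWarmup = @60;          // seconds to wait after each replacement
prefs.minHealthyPercentage = @90;    // lower bound during replacement
prefs.maxHealthyPercentage = @110;   // added in this change; requires the minimum too
prefs.autoRollback = @YES;           // roll back automatically if the refresh fails

AWSAutoScalingStartInstanceRefreshType *start = [AWSAutoScalingStartInstanceRefreshType new];
start.autoScalingGroupName = @"my-asg";  // placeholder group name
start.preferences = prefs;

AWSAutoScaling *autoScaling = [AWSAutoScaling defaultAutoScaling];
[autoScaling startInstanceRefresh:start completionHandler:^(AWSAutoScalingStartInstanceRefreshAnswer * _Nullable response, NSError * _Nullable error) {
    if (error) {
        NSLog(@"StartInstanceRefresh failed: %@", error);
        return;
    }
    // Per the documentation change above, DescribeInstanceRefreshes only
    // reports refreshes from the previous six weeks.
    AWSAutoScalingDescribeInstanceRefreshesType *describe = [AWSAutoScalingDescribeInstanceRefreshesType new];
    describe.autoScalingGroupName = @"my-asg";
    describe.instanceRefreshIds = @[response.instanceRefreshId];
    [autoScaling describeInstanceRefreshes:describe completionHandler:^(AWSAutoScalingDescribeInstanceRefreshesAnswer * _Nullable answer, NSError * _Nullable describeError) {
        if (answer.instanceRefreshes.firstObject) {
            NSLog(@"Instance refresh: %@", answer.instanceRefreshes.firstObject);
        }
    }];
}];
```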
The IdP details. The following list describes the provider detail keys for each IdP type.
For Google and Login with Amazon:
client_id
client_secret
authorize_scopes
For Facebook:
client_id
client_secret
authorize_scopes
api_version
For Sign in with Apple:
client_id
team_id
key_id
private_key
authorize_scopes
For OpenID Connect (OIDC) providers:
client_id
client_secret
attributes_request_method
oidc_issuer
authorize_scopes
The following keys are only present if Amazon Cognito didn't discover them at the oidc_issuer URL.
authorize_url
token_url
attributes_url
jwks_uri
Amazon Cognito sets the value of the following keys automatically. They are read-only.
attributes_url_add_attributes
For SAML providers:
MetadataFile or MetadataURL
IDPSignout optional
The scopes, URLs, and identifiers for your external identity provider. The following examples describe the provider detail keys for each IdP type. These values and their schema are subject to change. Social IdP authorize_scopes values must match the values listed here.
Amazon Cognito accepts the following elements when it can't discover endpoint URLs from oidc_issuer: attributes_url, authorize_url, jwks_uri, token_url.
Create or update request: "ProviderDetails": { "attributes_request_method": "GET", "attributes_url": "https://auth.example.com/userInfo", "authorize_scopes": "openid profile email", "authorize_url": "https://auth.example.com/authorize", "client_id": "1example23456789", "client_secret": "provider-app-client-secret", "jwks_uri": "https://auth.example.com/.well-known/jwks.json", "oidc_issuer": "https://auth.example.com", "token_url": "https://example.com/token" }
Describe response: "ProviderDetails": { "attributes_request_method": "GET", "attributes_url": "https://auth.example.com/userInfo", "attributes_url_add_attributes": "false", "authorize_scopes": "openid profile email", "authorize_url": "https://auth.example.com/authorize", "client_id": "1example23456789", "client_secret": "provider-app-client-secret", "jwks_uri": "https://auth.example.com/.well-known/jwks.json", "oidc_issuer": "https://auth.example.com", "token_url": "https://example.com/token" }
Create or update request with Metadata URL: "ProviderDetails": { "IDPInit": "true", "IDPSignout": "true", "EncryptedResponses" : "true", "MetadataURL": "https://auth.example.com/sso/saml/metadata", "RequestSigningAlgorithm": "rsa-sha256" }
Create or update request with Metadata file: "ProviderDetails": { "IDPInit": "true", "IDPSignout": "true", "EncryptedResponses" : "true", "MetadataFile": "[metadata XML]", "RequestSigningAlgorithm": "rsa-sha256" }
The value of MetadataFile must be the plaintext metadata document with all quote (") characters escaped by backslashes.
Describe response: "ProviderDetails": { "IDPInit": "true", "IDPSignout": "true", "EncryptedResponses" : "true", "ActiveEncryptionCertificate": "[certificate]", "MetadataURL": "https://auth.example.com/sso/saml/metadata", "RequestSigningAlgorithm": "rsa-sha256", "SLORedirectBindingURI": "https://auth.example.com/slo/saml", "SSORedirectBindingURI": "https://auth.example.com/sso/saml" }
Create or update request: "ProviderDetails": { "authorize_scopes": "profile postal_code", "client_id": "amzn1.application-oa2-client.1example23456789", "client_secret": "provider-app-client-secret"
Describe response: "ProviderDetails": { "attributes_url": "https://api.amazon.com/user/profile", "attributes_url_add_attributes": "false", "authorize_scopes": "profile postal_code", "authorize_url": "https://www.amazon.com/ap/oa", "client_id": "amzn1.application-oa2-client.1example23456789", "client_secret": "provider-app-client-secret", "token_request_method": "POST", "token_url": "https://api.amazon.com/auth/o2/token" }
Create or update request: "ProviderDetails": { "authorize_scopes": "email profile openid", "client_id": "1example23456789.apps.googleusercontent.com", "client_secret": "provider-app-client-secret" }
Describe response: "ProviderDetails": { "attributes_url": "https://people.googleapis.com/v1/people/me?personFields=", "attributes_url_add_attributes": "true", "authorize_scopes": "email profile openid", "authorize_url": "https://accounts.google.com/o/oauth2/v2/auth", "client_id": "1example23456789.apps.googleusercontent.com", "client_secret": "provider-app-client-secret", "oidc_issuer": "https://accounts.google.com", "token_request_method": "POST", "token_url": "https://www.googleapis.com/oauth2/v4/token" }
Create or update request: "ProviderDetails": { "authorize_scopes": "email name", "client_id": "com.example.cognito", "private_key": "1EXAMPLE", "key_id": "2EXAMPLE", "team_id": "3EXAMPLE" }
Describe response: "ProviderDetails": { "attributes_url_add_attributes": "false", "authorize_scopes": "email name", "authorize_url": "https://appleid.apple.com/auth/authorize", "client_id": "com.example.cognito", "key_id": "1EXAMPLE", "oidc_issuer": "https://appleid.apple.com", "team_id": "2EXAMPLE", "token_request_method": "POST", "token_url": "https://appleid.apple.com/auth/token" }
Create or update request: "ProviderDetails": { "api_version": "v17.0", "authorize_scopes": "public_profile, email", "client_id": "1example23456789", "client_secret": "provider-app-client-secret" }
Describe response: "ProviderDetails": { "api_version": "v17.0", "attributes_url": "https://graph.facebook.com/v17.0/me?fields=", "attributes_url_add_attributes": "true", "authorize_scopes": "public_profile, email", "authorize_url": "https://www.facebook.com/v17.0/dialog/oauth", "client_id": "1example23456789", "client_secret": "provider-app-client-secret", "token_request_method": "GET", "token_url": "https://graph.facebook.com/v17.0/oauth/access_token" }
The allowed OAuth flows.
code: Use a code grant flow, which provides an authorization code as the response. This code can be exchanged for access tokens with the /oauth2/token endpoint.
implicit: Issue the access token (and, optionally, ID token, based on scopes) directly to your user.
client_credentials: Issue the access token from the /oauth2/token endpoint directly to a non-person user using a combination of the client ID and client secret.
The OAuth grant types that you want your app client to generate. To create an app client that generates client credentials grants, you must add client_credentials as the only allowed OAuth flow.
code: Use a code grant flow, which provides an authorization code as the response. This code can be exchanged for access tokens with the /oauth2/token endpoint.
implicit: Issue the access token (and, optionally, ID token, based on scopes) directly to your user.
client_credentials: Issue the access token from the /oauth2/token endpoint directly to a non-person user using a combination of the client ID and client secret.
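A sketch of configuring the grant types on an app client follows; the allowedOAuthFlows property and its raw string values ("code", "client_credentials") are assumptions based on the generated model, and the IDs are placeholders.

```objc
// Hypothetical sketch: an app client that issues only client-credentials grants.
AWSCognitoIdentityProviderCreateUserPoolClientRequest *clientRequest =
    [AWSCognitoIdentityProviderCreateUserPoolClientRequest new];
clientRequest.userPoolId = @"us-east-1_EXAMPLE";             // placeholder
clientRequest.clientName = @"machine-client";
clientRequest.generateSecret = @YES;                         // client credentials require a secret
clientRequest.allowedOAuthFlows = @[@"client_credentials"];  // must be the only allowed flow
clientRequest.allowedOAuthFlowsUserPoolClient = @YES;
[[[AWSCognitoIdentityProvider defaultCognitoIdentityProvider] createUserPoolClient:clientRequest]
    continueWithBlock:^id _Nullable(AWSTask<AWSCognitoIdentityProviderCreateUserPoolClientResponse *> * _Nonnull task) {
    if (task.error) {
        NSLog(@"CreateUserPoolClient failed: %@", task.error);
    }
    return nil;
}];
```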
-The Amazon CloudFront endpoint that you use as the target of the alias that you set up with your Domain Name Service (DNS) provider.
+The Amazon CloudFront endpoint that you use as the target of the alias that you set up with your Domain Name Service (DNS) provider. Amazon Cognito returns this value if you set a custom domain with CustomDomainConfig. If you set an Amazon Cognito prefix domain, this operation returns a blank response.
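For reference, reading this value after describing a custom domain might look like the sketch below; the type and property names assume the generated DescribeUserPoolDomain model, and the domain is a placeholder.

```objc
// Sketch: read the CloudFront target for a custom domain.
AWSCognitoIdentityProviderDescribeUserPoolDomainRequest *domainRequest =
    [AWSCognitoIdentityProviderDescribeUserPoolDomainRequest new];
domainRequest.domain = @"auth.example.com";  // placeholder custom domain
[[[AWSCognitoIdentityProvider defaultCognitoIdentityProvider] describeUserPoolDomain:domainRequest]
    continueWithSuccessBlock:^id _Nullable(AWSTask<AWSCognitoIdentityProviderDescribeUserPoolDomainResponse *> * _Nonnull task) {
    // Blank for Amazon Cognito prefix domains; set only for custom domains.
    NSLog(@"CloudFront target: %@", task.result.domainDescription.cloudFrontDistribution);
    return nil;
}];
```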
The IdP details. The following list describes the provider detail keys for each IdP type.
For Google and Login with Amazon:
client_id
client_secret
authorize_scopes
For Facebook:
client_id
client_secret
authorize_scopes
api_version
For Sign in with Apple:
client_id
team_id
key_id
private_key
You can submit a private_key when you add or update an IdP. Describe operations don't return the private key.
authorize_scopes
For OIDC providers:
client_id
client_secret
attributes_request_method
oidc_issuer
authorize_scopes
The following keys are only present if Amazon Cognito didn't discover them at the oidc_issuer URL.
authorize_url
token_url
attributes_url
jwks_uri
Amazon Cognito sets the value of the following keys automatically. They are read-only.
attributes_url_add_attributes
For SAML providers:
MetadataFile or MetadataURL
IDPSignout optional
The scopes, URLs, and identifiers for your external identity provider. The following examples describe the provider detail keys for each IdP type. These values and their schema are subject to change. Social IdP authorize_scopes values must match the values listed here.
Amazon Cognito accepts the following elements when it can't discover endpoint URLs from oidc_issuer: attributes_url, authorize_url, jwks_uri, token_url.
Create or update request: "ProviderDetails": { "attributes_request_method": "GET", "attributes_url": "https://auth.example.com/userInfo", "authorize_scopes": "openid profile email", "authorize_url": "https://auth.example.com/authorize", "client_id": "1example23456789", "client_secret": "provider-app-client-secret", "jwks_uri": "https://auth.example.com/.well-known/jwks.json", "oidc_issuer": "https://auth.example.com", "token_url": "https://example.com/token" }
Describe response: "ProviderDetails": { "attributes_request_method": "GET", "attributes_url": "https://auth.example.com/userInfo", "attributes_url_add_attributes": "false", "authorize_scopes": "openid profile email", "authorize_url": "https://auth.example.com/authorize", "client_id": "1example23456789", "client_secret": "provider-app-client-secret", "jwks_uri": "https://auth.example.com/.well-known/jwks.json", "oidc_issuer": "https://auth.example.com", "token_url": "https://example.com/token" }
Create or update request with Metadata URL: "ProviderDetails": { "IDPInit": "true", "IDPSignout": "true", "EncryptedResponses" : "true", "MetadataURL": "https://auth.example.com/sso/saml/metadata", "RequestSigningAlgorithm": "rsa-sha256" }
Create or update request with Metadata file: "ProviderDetails": { "IDPInit": "true", "IDPSignout": "true", "EncryptedResponses" : "true", "MetadataFile": "[metadata XML]", "RequestSigningAlgorithm": "rsa-sha256" }
The value of MetadataFile must be the plaintext metadata document with all quote (") characters escaped by backslashes.
Describe response: "ProviderDetails": { "IDPInit": "true", "IDPSignout": "true", "EncryptedResponses" : "true", "ActiveEncryptionCertificate": "[certificate]", "MetadataURL": "https://auth.example.com/sso/saml/metadata", "RequestSigningAlgorithm": "rsa-sha256", "SLORedirectBindingURI": "https://auth.example.com/slo/saml", "SSORedirectBindingURI": "https://auth.example.com/sso/saml" }
Create or update request: "ProviderDetails": { "authorize_scopes": "profile postal_code", "client_id": "amzn1.application-oa2-client.1example23456789", "client_secret": "provider-app-client-secret"
Describe response: "ProviderDetails": { "attributes_url": "https://api.amazon.com/user/profile", "attributes_url_add_attributes": "false", "authorize_scopes": "profile postal_code", "authorize_url": "https://www.amazon.com/ap/oa", "client_id": "amzn1.application-oa2-client.1example23456789", "client_secret": "provider-app-client-secret", "token_request_method": "POST", "token_url": "https://api.amazon.com/auth/o2/token" }
Create or update request: "ProviderDetails": { "authorize_scopes": "email profile openid", "client_id": "1example23456789.apps.googleusercontent.com", "client_secret": "provider-app-client-secret" }
Describe response: "ProviderDetails": { "attributes_url": "https://people.googleapis.com/v1/people/me?personFields=", "attributes_url_add_attributes": "true", "authorize_scopes": "email profile openid", "authorize_url": "https://accounts.google.com/o/oauth2/v2/auth", "client_id": "1example23456789.apps.googleusercontent.com", "client_secret": "provider-app-client-secret", "oidc_issuer": "https://accounts.google.com", "token_request_method": "POST", "token_url": "https://www.googleapis.com/oauth2/v4/token" }
Create or update request: "ProviderDetails": { "authorize_scopes": "email name", "client_id": "com.example.cognito", "private_key": "1EXAMPLE", "key_id": "2EXAMPLE", "team_id": "3EXAMPLE" }
Describe response: "ProviderDetails": { "attributes_url_add_attributes": "false", "authorize_scopes": "email name", "authorize_url": "https://appleid.apple.com/auth/authorize", "client_id": "com.example.cognito", "key_id": "1EXAMPLE", "oidc_issuer": "https://appleid.apple.com", "team_id": "2EXAMPLE", "token_request_method": "POST", "token_url": "https://appleid.apple.com/auth/token" }
Create or update request: "ProviderDetails": { "api_version": "v17.0", "authorize_scopes": "public_profile, email", "client_id": "1example23456789", "client_secret": "provider-app-client-secret" }
Describe response: "ProviderDetails": { "api_version": "v17.0", "attributes_url": "https://graph.facebook.com/v17.0/me?fields=", "attributes_url_add_attributes": "true", "authorize_scopes": "public_profile, email", "authorize_url": "https://www.facebook.com/v17.0/dialog/oauth", "client_id": "1example23456789", "client_secret": "provider-app-client-secret", "token_request_method": "GET", "token_url": "https://graph.facebook.com/v17.0/oauth/access_token" }
The IdP details to be updated, such as MetadataURL and MetadataFile.
The scopes, URLs, and identifiers for your external identity provider. The following examples describe the provider detail keys for each IdP type. These values and their schema are subject to change. Social IdP authorize_scopes values must match the values listed here.
Amazon Cognito accepts the following elements when it can't discover endpoint URLs from oidc_issuer: attributes_url, authorize_url, jwks_uri, token_url.
Create or update request: "ProviderDetails": { "attributes_request_method": "GET", "attributes_url": "https://auth.example.com/userInfo", "authorize_scopes": "openid profile email", "authorize_url": "https://auth.example.com/authorize", "client_id": "1example23456789", "client_secret": "provider-app-client-secret", "jwks_uri": "https://auth.example.com/.well-known/jwks.json", "oidc_issuer": "https://auth.example.com", "token_url": "https://example.com/token" }
Describe response: "ProviderDetails": { "attributes_request_method": "GET", "attributes_url": "https://auth.example.com/userInfo", "attributes_url_add_attributes": "false", "authorize_scopes": "openid profile email", "authorize_url": "https://auth.example.com/authorize", "client_id": "1example23456789", "client_secret": "provider-app-client-secret", "jwks_uri": "https://auth.example.com/.well-known/jwks.json", "oidc_issuer": "https://auth.example.com", "token_url": "https://example.com/token" }
Create or update request with Metadata URL: "ProviderDetails": { "IDPInit": "true", "IDPSignout": "true", "EncryptedResponses" : "true", "MetadataURL": "https://auth.example.com/sso/saml/metadata", "RequestSigningAlgorithm": "rsa-sha256" }
Create or update request with Metadata file: "ProviderDetails": { "IDPInit": "true", "IDPSignout": "true", "EncryptedResponses" : "true", "MetadataFile": "[metadata XML]", "RequestSigningAlgorithm": "rsa-sha256" }
The value of MetadataFile must be the plaintext metadata document with all quote (") characters escaped by backslashes.
Describe response: "ProviderDetails": { "IDPInit": "true", "IDPSignout": "true", "EncryptedResponses" : "true", "ActiveEncryptionCertificate": "[certificate]", "MetadataURL": "https://auth.example.com/sso/saml/metadata", "RequestSigningAlgorithm": "rsa-sha256", "SLORedirectBindingURI": "https://auth.example.com/slo/saml", "SSORedirectBindingURI": "https://auth.example.com/sso/saml" }
Create or update request: "ProviderDetails": { "authorize_scopes": "profile postal_code", "client_id": "amzn1.application-oa2-client.1example23456789", "client_secret": "provider-app-client-secret"
Describe response: "ProviderDetails": { "attributes_url": "https://api.amazon.com/user/profile", "attributes_url_add_attributes": "false", "authorize_scopes": "profile postal_code", "authorize_url": "https://www.amazon.com/ap/oa", "client_id": "amzn1.application-oa2-client.1example23456789", "client_secret": "provider-app-client-secret", "token_request_method": "POST", "token_url": "https://api.amazon.com/auth/o2/token" }
Create or update request: "ProviderDetails": { "authorize_scopes": "email profile openid", "client_id": "1example23456789.apps.googleusercontent.com", "client_secret": "provider-app-client-secret" }
Describe response: "ProviderDetails": { "attributes_url": "https://people.googleapis.com/v1/people/me?personFields=", "attributes_url_add_attributes": "true", "authorize_scopes": "email profile openid", "authorize_url": "https://accounts.google.com/o/oauth2/v2/auth", "client_id": "1example23456789.apps.googleusercontent.com", "client_secret": "provider-app-client-secret", "oidc_issuer": "https://accounts.google.com", "token_request_method": "POST", "token_url": "https://www.googleapis.com/oauth2/v4/token" }
Create or update request: "ProviderDetails": { "authorize_scopes": "email name", "client_id": "com.example.cognito", "private_key": "1EXAMPLE", "key_id": "2EXAMPLE", "team_id": "3EXAMPLE" }
Describe response: "ProviderDetails": { "attributes_url_add_attributes": "false", "authorize_scopes": "email name", "authorize_url": "https://appleid.apple.com/auth/authorize", "client_id": "com.example.cognito", "key_id": "1EXAMPLE", "oidc_issuer": "https://appleid.apple.com", "team_id": "2EXAMPLE", "token_request_method": "POST", "token_url": "https://appleid.apple.com/auth/token" }
Create or update request: "ProviderDetails": { "api_version": "v17.0", "authorize_scopes": "public_profile, email", "client_id": "1example23456789", "client_secret": "provider-app-client-secret" }
Describe response: "ProviderDetails": { "api_version": "v17.0", "attributes_url": "https://graph.facebook.com/v17.0/me?fields=", "attributes_url_add_attributes": "true", "authorize_scopes": "public_profile, email", "authorize_url": "https://www.facebook.com/v17.0/dialog/oauth", "client_id": "1example23456789", "client_secret": "provider-app-client-secret", "token_request_method": "GET", "token_url": "https://graph.facebook.com/v17.0/oauth/access_token" }
Begins setup of time-based one-time password (TOTP) multi-factor authentication (MFA) for a user, with a unique private key that Amazon Cognito generates and returns in the API response. You can authorize an AssociateSoftwareToken request with either the user's access token, or a session string from a challenge response that you received from Amazon Cognito.
Amazon Cognito disassociates an existing software token when you verify the new token in a VerifySoftwareToken API request. If you don't verify the software token and your user pool doesn't require MFA, the user can then authenticate with user name and password credentials alone. If your user pool requires TOTP MFA, Amazon Cognito generates an MFA_SETUP or SOFTWARE_TOKEN_SETUP challenge each time your user signs in. Complete setup with AssociateSoftwareToken and VerifySoftwareToken.
After you set up software token MFA for your user, Amazon Cognito generates a SOFTWARE_TOKEN_MFA challenge when they authenticate. Respond to this challenge with your user's TOTP.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
Begins setup of time-based one-time password (TOTP) multi-factor authentication (MFA) for a user, with a unique private key that Amazon Cognito generates and returns in the API response. You can authorize an AssociateSoftwareToken request with either the user's access token, or a session string from a challenge response that you received from Amazon Cognito.
Amazon Cognito disassociates an existing software token when you verify the new token in a VerifySoftwareToken API request. If you don't verify the software token and your user pool doesn't require MFA, the user can then authenticate with user name and password credentials alone. If your user pool requires TOTP MFA, Amazon Cognito generates an MFA_SETUP or SOFTWARE_TOKEN_SETUP challenge each time your user signs in. Complete setup with AssociateSoftwareToken and VerifySoftwareToken.
After you set up software token MFA for your user, Amazon Cognito generates a SOFTWARE_TOKEN_MFA challenge when they authenticate. Respond to this challenge with your user's TOTP.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.
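A minimal sketch of that two-step TOTP setup with the SDK's low-level client; the access token and the six-digit code are placeholders, and a default service configuration is assumed:

#import <AWSCognitoIdentityProvider/AWSCognitoIdentityProvider.h>

AWSCognitoIdentityProvider *client = [AWSCognitoIdentityProvider defaultCognitoIdentityProvider];

AWSCognitoIdentityProviderAssociateSoftwareTokenRequest *associate = [AWSCognitoIdentityProviderAssociateSoftwareTokenRequest new];
associate.accessToken = @"<signed-in user's access token>"; // or set `session` from a challenge response instead

[client associateSoftwareToken:associate completionHandler:^(AWSCognitoIdentityProviderAssociateSoftwareTokenResponse * _Nullable response, NSError * _Nullable error) {
    if (error) { NSLog(@"AssociateSoftwareToken failed: %@", error); return; }
    // response.secretCode seeds the user's authenticator app; show it as text or a QR code.
    NSLog(@"TOTP secret: %@", response.secretCode);

    // Once the user enters the six-digit code from their authenticator, complete setup.
    AWSCognitoIdentityProviderVerifySoftwareTokenRequest *verify = [AWSCognitoIdentityProviderVerifySoftwareTokenRequest new];
    verify.accessToken = associate.accessToken;
    verify.userCode = @"123456"; // placeholder TOTP entered by the user
    [client verifySoftwareToken:verify completionHandler:^(AWSCognitoIdentityProviderVerifySoftwareTokenResponse * _Nullable verifyResponse, NSError * _Nullable verifyError) {
        if (verifyError) { NSLog(@"VerifySoftwareToken failed: %@", verifyError); }
    }];
}];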
Changes the password for a specified user in a user pool.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
Changes the password for a specified user in a user pool.
Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.
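For illustration, a short sketch of the call; the token and password values are placeholders and a default service configuration is assumed:

#import <AWSCognitoIdentityProvider/AWSCognitoIdentityProvider.h>

AWSCognitoIdentityProvider *client = [AWSCognitoIdentityProvider defaultCognitoIdentityProvider];

AWSCognitoIdentityProviderChangePasswordRequest *request = [AWSCognitoIdentityProviderChangePasswordRequest new];
request.accessToken = @"<access token with aws.cognito.signin.user.admin>";
request.previousPassword = @"<current password>";
request.proposedPassword = @"<new password>";

[client changePassword:request completionHandler:^(AWSCognitoIdentityProviderChangePasswordResponse * _Nullable response, NSError * _Nullable error) {
    if (error) { NSLog(@"ChangePassword failed: %@", error); }
}];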
Confirms tracking of the device. This API call begins device tracking.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
Confirms tracking of the device. This API call begins device tracking. For more information about device authentication, see Working with user devices in your user pool.
Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.
Allows a user to enter a confirmation code to reset a forgotten password.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
Allows a user to enter a confirmation code to reset a forgotten password.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.
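A sketch of the confirmation step, assuming the user already received a code from a ForgotPassword request; the client ID, username, code, and password are placeholders:

#import <AWSCognitoIdentityProvider/AWSCognitoIdentityProvider.h>

AWSCognitoIdentityProvider *client = [AWSCognitoIdentityProvider defaultCognitoIdentityProvider];

AWSCognitoIdentityProviderConfirmForgotPasswordRequest *request = [AWSCognitoIdentityProviderConfirmForgotPasswordRequest new];
request.clientId = @"<app client ID>";
request.username = @"<username or alias>";
request.confirmationCode = @"<code from the recovery message>";
request.password = @"<new password>";
// request.secretHash is required only when the app client has a client secret.

[client confirmForgotPassword:request completionHandler:^(AWSCognitoIdentityProviderConfirmForgotPasswordResponse * _Nullable response, NSError * _Nullable error) {
    if (error) { NSLog(@"ConfirmForgotPassword failed: %@", error); }
}];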
This public API operation accepts the confirmation code that Amazon Cognito sent to your user when they signed up in your user pool via the SignUp API operation. After your user enters their code, they confirm ownership of the email address or phone number that they provided, and their user account becomes active. Depending on your user pool configuration, your users will receive their confirmation code in an email or SMS message.
Local users who signed up in your user pool are the only type of user who can confirm sign-up with a code. Users who federate through an external identity provider (IdP) have already been confirmed by their IdP. Administrator-created users (users created with the AdminCreateUser API operation) confirm their accounts when they respond to their invitation email message and choose a password. They do not receive a confirmation code. Instead, they receive a temporary password.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
This public API operation accepts the confirmation code that Amazon Cognito sent to your user when they signed up in your user pool via the SignUp API operation. After your user enters their code, they confirm ownership of the email address or phone number that they provided, and their user account becomes active. Depending on your user pool configuration, your users will receive their confirmation code in an email or SMS message.
Local users who signed up in your user pool are the only type of user who can confirm sign-up with a code. Users who federate through an external identity provider (IdP) have already been confirmed by their IdP. Administrator-created users (users created with the AdminCreateUser API operation) confirm their accounts when they respond to their invitation email message and choose a password. They do not receive a confirmation code. Instead, they receive a temporary password.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.
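A short sketch of the confirmation call; the client ID, username, and code are placeholders (add secretHash if the app client has a secret):

#import <AWSCognitoIdentityProvider/AWSCognitoIdentityProvider.h>

AWSCognitoIdentityProvider *client = [AWSCognitoIdentityProvider defaultCognitoIdentityProvider];

AWSCognitoIdentityProviderConfirmSignUpRequest *request = [AWSCognitoIdentityProviderConfirmSignUpRequest new];
request.clientId = @"<app client ID>";
request.username = @"jdoe";           // placeholder
request.confirmationCode = @"123456"; // the code from the email or SMS message

[client confirmSignUp:request completionHandler:^(AWSCognitoIdentityProviderConfirmSignUpResponse * _Nullable response, NSError * _Nullable error) {
    if (error) { NSLog(@"ConfirmSignUp failed: %@", error); }
}];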
Creates an IdP for a user pool.
Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.
Adds a configuration and trust relationship between a third-party identity provider (IdP) and a user pool.
Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.
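As a hedged sketch, creating a SAML provider from a metadata URL; the pool ID, provider name, URL, and attribute mapping are placeholder values:

#import <AWSCognitoIdentityProvider/AWSCognitoIdentityProvider.h>

AWSCognitoIdentityProvider *client = [AWSCognitoIdentityProvider defaultCognitoIdentityProvider];

AWSCognitoIdentityProviderCreateIdentityProviderRequest *request = [AWSCognitoIdentityProviderCreateIdentityProviderRequest new];
request.userPoolId = @"us-east-1_EXAMPLE";  // placeholder
request.providerName = @"MySAMLProvider";   // placeholder
request.providerType = AWSCognitoIdentityProviderIdentityProviderTypeTypeSaml;
request.providerDetails = @{@"MetadataURL": @"https://auth.example.com/sso/saml/metadata"};
request.attributeMapping = @{@"email": @"emailAddress"}; // hypothetical SAML assertion attribute

[client createIdentityProvider:request completionHandler:^(AWSCognitoIdentityProviderCreateIdentityProviderResponse * _Nullable response, NSError * _Nullable error) {
    if (error) { NSLog(@"CreateIdentityProvider failed: %@", error); }
}];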
Allows a user to delete their own user profile.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
Allows a user to delete their own user profile.
Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.
Deletes the attributes for a user.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
Deletes the attributes for a user.
Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.
Forgets the specified device.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
Forgets the specified device. For more information about device authentication, see Working with user devices in your user pool.
Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.
Calling this API sends a message to the end user with a confirmation code that is required to change the user's password. For the Username parameter, you can use the username or user alias. The delivery method for the confirmation code is determined by the specified AccountRecoverySetting. For more information, see Recovering User Accounts in the Amazon Cognito Developer Guide. To use the confirmation code for resetting the password, call ConfirmForgotPassword.
If neither a verified phone number nor a verified email exists, this API returns InvalidParameterException. If your app client has a client secret and you don't provide a SECRET_HASH parameter, this API returns NotAuthorizedException.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.
If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode, you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.
Calling this API sends a message to the end user with a confirmation code that is required to change the user's password. For the Username parameter, you can use the username or user alias. The delivery method for the confirmation code is determined by the specified AccountRecoverySetting. For more information, see Recovering User Accounts in the Amazon Cognito Developer Guide. To use the confirmation code for resetting the password, call ConfirmForgotPassword.
If neither a verified phone number nor a verified email exists, this API returns InvalidParameterException. If your app client has a client secret and you don't provide a SECRET_HASH parameter, this API returns NotAuthorizedException.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.
This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.
If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode, you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.
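A minimal sketch that starts the recovery flow; the client ID and username are placeholders (secretHash is also required when the app client has a secret):

#import <AWSCognitoIdentityProvider/AWSCognitoIdentityProvider.h>

AWSCognitoIdentityProvider *client = [AWSCognitoIdentityProvider defaultCognitoIdentityProvider];

AWSCognitoIdentityProviderForgotPasswordRequest *request = [AWSCognitoIdentityProviderForgotPasswordRequest new];
request.clientId = @"<app client ID>";
request.username = @"jdoe"; // username or alias

[client forgotPassword:request completionHandler:^(AWSCognitoIdentityProviderForgotPasswordResponse * _Nullable response, NSError * _Nullable error) {
    if (error) { NSLog(@"ForgotPassword failed: %@", error); return; }
    // The masked destination shows where the code went; finish with ConfirmForgotPassword.
    NSLog(@"Code sent to %@", response.codeDeliveryDetails.destination);
}];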
Gets the device.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
Gets the device. For more information about device authentication, see Working with user devices in your user pool.
Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.
Gets the user attributes and metadata for a user.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
Gets the user attributes and metadata for a user.
Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.
Generates a user attribute verification code for the specified attribute name. Sends a message to a user with a code that they must return in a VerifyUserAttribute request.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.
If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode, you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.
Generates a user attribute verification code for the specified attribute name. Sends a message to a user with a code that they must return in a VerifyUserAttribute request.
Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.
This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.
If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode, you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.
Invalidates the identity, access, and refresh tokens that Amazon Cognito issued to a user. Call this operation when your user signs out of your app. This results in the following behavior.
Amazon Cognito no longer accepts token-authorized user operations that you authorize with a signed-out user's access tokens. For more information, see Using the Amazon Cognito user pools API and user pool endpoints.
Amazon Cognito returns an Access Token has been revoked error when your app attempts to authorize a user pools API request with a revoked access token that contains the scope aws.cognito.signin.user.admin.
Amazon Cognito no longer accepts a signed-out user's ID token in a GetId request to an identity pool with ServerSideTokenCheck enabled for its user pool IdP configuration in CognitoIdentityProvider.
Amazon Cognito no longer accepts a signed-out user's refresh tokens in refresh requests.
Other requests might be valid until your user's token expires.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
Invalidates the identity, access, and refresh tokens that Amazon Cognito issued to a user. Call this operation when your user signs out of your app. This results in the following behavior.
Amazon Cognito no longer accepts token-authorized user operations that you authorize with a signed-out user's access tokens. For more information, see Using the Amazon Cognito user pools API and user pool endpoints.
Amazon Cognito returns an Access Token has been revoked error when your app attempts to authorize a user pools API request with a revoked access token that contains the scope aws.cognito.signin.user.admin.
Amazon Cognito no longer accepts a signed-out user's ID token in a GetId request to an identity pool with ServerSideTokenCheck enabled for its user pool IdP configuration in CognitoIdentityProvider.
Amazon Cognito no longer accepts a signed-out user's refresh tokens in refresh requests.
Other requests might be valid until your user's token expires.
Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.
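A short sketch of the sign-out call; the access token is a placeholder:

#import <AWSCognitoIdentityProvider/AWSCognitoIdentityProvider.h>

AWSCognitoIdentityProvider *client = [AWSCognitoIdentityProvider defaultCognitoIdentityProvider];

AWSCognitoIdentityProviderGlobalSignOutRequest *request = [AWSCognitoIdentityProviderGlobalSignOutRequest new];
request.accessToken = @"<signed-in user's access token>";

[client globalSignOut:request completionHandler:^(AWSCognitoIdentityProviderGlobalSignOutResponse * _Nullable response, NSError * _Nullable error) {
    if (error) { NSLog(@"GlobalSignOut failed: %@", error); }
}];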
Initiates sign-in for a user in the Amazon Cognito user directory. You can't sign in a user with a federated IdP with InitiateAuth. For more information, see Adding user pool sign-in through a third party.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.
If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode, you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.
Initiates sign-in for a user in the Amazon Cognito user directory. You can't sign in a user with a federated IdP with InitiateAuth. For more information, see Adding user pool sign-in through a third party.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.
This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.
If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode, you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.
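As an illustration, a minimal USER_PASSWORD_AUTH sketch; the client ID and credentials are placeholders, and the flow must be enabled on the app client:

#import <AWSCognitoIdentityProvider/AWSCognitoIdentityProvider.h>

AWSCognitoIdentityProvider *client = [AWSCognitoIdentityProvider defaultCognitoIdentityProvider];

AWSCognitoIdentityProviderInitiateAuthRequest *request = [AWSCognitoIdentityProviderInitiateAuthRequest new];
request.authFlow = AWSCognitoIdentityProviderAuthFlowTypeUserPasswordAuth;
request.clientId = @"<app client ID>";
request.authParameters = @{@"USERNAME": @"jdoe",
                           @"PASSWORD": @"<password>"}; // placeholders

[client initiateAuth:request completionHandler:^(AWSCognitoIdentityProviderInitiateAuthResponse * _Nullable response, NSError * _Nullable error) {
    if (error) { NSLog(@"InitiateAuth failed: %@", error); return; }
    if (response.authenticationResult) {
        // Tokens issued directly; no challenge was required.
        NSLog(@"Access token: %@", response.authenticationResult.accessToken);
    } else {
        // A challenge (for example SMS_MFA) must be answered with RespondToAuthChallenge.
        NSLog(@"Challenge required; session: %@", response.session);
    }
}];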
Lists the sign-in devices that Amazon Cognito has registered to the current user.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
Lists the sign-in devices that Amazon Cognito has registered to the current user. For more information about device authentication, see Working with user devices in your user pool.
Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.
Resends the confirmation (for confirmation of registration) to a specific user in the user pool.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.
If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode, you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.
Resends the confirmation (for confirmation of registration) to a specific user in the user pool.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.
This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.
If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode, you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.
Some API operations in a user pool generate a challenge, like a prompt for an MFA code, for device authentication that bypasses MFA, or for a custom authentication challenge. A RespondToAuthChallenge API request provides the answer to that challenge, like a code or a secure remote password (SRP). The parameters of a response to an authentication challenge vary with the type of challenge.
For more information about custom authentication challenges, see Custom authentication challenge Lambda triggers.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.
If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode, you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.
Some API operations in a user pool generate a challenge, like a prompt for an MFA code, for device authentication that bypasses MFA, or for a custom authentication challenge. A RespondToAuthChallenge API request provides the answer to that challenge, like a code or a secure remote password (SRP). The parameters of a response to an authentication challenge vary with the type of challenge.
For more information about custom authentication challenges, see Custom authentication challenge Lambda triggers.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.
This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.
If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode, you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.
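A sketch that answers an SMS_MFA challenge from a prior InitiateAuth call; the client ID, session, username, and code are placeholders:

#import <AWSCognitoIdentityProvider/AWSCognitoIdentityProvider.h>

AWSCognitoIdentityProvider *client = [AWSCognitoIdentityProvider defaultCognitoIdentityProvider];

AWSCognitoIdentityProviderRespondToAuthChallengeRequest *request = [AWSCognitoIdentityProviderRespondToAuthChallengeRequest new];
request.clientId = @"<app client ID>";
request.challengeName = AWSCognitoIdentityProviderChallengeNameTypeSmsMfa;
request.session = @"<session string from the InitiateAuth response>";
request.challengeResponses = @{@"USERNAME": @"jdoe",
                               @"SMS_MFA_CODE": @"123456"}; // add SECRET_HASH if the client has a secret

[client respondToAuthChallenge:request completionHandler:^(AWSCognitoIdentityProviderRespondToAuthChallengeResponse * _Nullable response, NSError * _Nullable error) {
    if (error) { NSLog(@"RespondToAuthChallenge failed: %@", error); return; }
    NSLog(@"Access token: %@", response.authenticationResult.accessToken);
}];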
Revokes all of the access tokens generated by, and at the same time as, the specified refresh token. After a token is revoked, you can't use the revoked token to access Amazon Cognito user APIs, or to authorize access to your resource server.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
Revokes all of the access tokens generated by, and at the same time as, the specified refresh token. After a token is revoked, you can't use the revoked token to access Amazon Cognito user APIs, or to authorize access to your resource server.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.
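A short sketch of the revocation call; the client ID and refresh token are placeholders:

#import <AWSCognitoIdentityProvider/AWSCognitoIdentityProvider.h>

AWSCognitoIdentityProvider *client = [AWSCognitoIdentityProvider defaultCognitoIdentityProvider];

AWSCognitoIdentityProviderRevokeTokenRequest *request = [AWSCognitoIdentityProviderRevokeTokenRequest new];
request.clientId = @"<app client ID>";
request.token = @"<refresh token to revoke>";
// request.clientSecret is required only when the app client has a secret.

[client revokeToken:request completionHandler:^(AWSCognitoIdentityProviderRevokeTokenResponse * _Nullable response, NSError * _Nullable error) {
    if (error) { NSLog(@"RevokeToken failed: %@", error); }
}];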
Set the user's multi-factor authentication (MFA) method preference, including which MFA factors are activated and if any are preferred. Only one factor can be set as preferred. The preferred MFA factor will be used to authenticate a user if multiple factors are activated. If multiple options are activated and no preference is set, a challenge to choose an MFA option will be returned during sign-in. If an MFA type is activated for a user, the user will be prompted for MFA during all sign-in attempts unless device tracking is turned on and the device has been trusted. If you want MFA to be applied selectively based on the assessed risk level of sign-in attempts, deactivate MFA for users and turn on Adaptive Authentication for the user pool.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
Set the user's multi-factor authentication (MFA) method preference, including which MFA factors are activated and if any are preferred. Only one factor can be set as preferred. The preferred MFA factor will be used to authenticate a user if multiple factors are activated. If multiple options are activated and no preference is set, a challenge to choose an MFA option will be returned during sign-in. If an MFA type is activated for a user, the user will be prompted for MFA during all sign-in attempts unless device tracking is turned on and the device has been trusted. If you want MFA to be applied selectively based on the assessed risk level of sign-in attempts, deactivate MFA for users and turn on Adaptive Authentication for the user pool.
Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.
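A sketch that marks TOTP as the enabled and preferred factor; the access token is a placeholder, and the property names assume the SDK's usual model naming, so verify them against AWSCognitoIdentityProviderModel.h:

#import <AWSCognitoIdentityProvider/AWSCognitoIdentityProvider.h>

AWSCognitoIdentityProvider *client = [AWSCognitoIdentityProvider defaultCognitoIdentityProvider];

AWSCognitoIdentityProviderSoftwareTokenMfaSettingsType *totp = [AWSCognitoIdentityProviderSoftwareTokenMfaSettingsType new];
totp.enabled = @YES;      // activate TOTP MFA for this user
totp.preferredMfa = @YES; // make it the preferred factor

AWSCognitoIdentityProviderSetUserMFAPreferenceRequest *request = [AWSCognitoIdentityProviderSetUserMFAPreferenceRequest new];
request.accessToken = @"<access token>";
request.softwareTokenMfaSettings = totp; // assumed property name per model naming

[client setUserMFAPreference:request completionHandler:^(AWSCognitoIdentityProviderSetUserMFAPreferenceResponse * _Nullable response, NSError * _Nullable error) {
    if (error) { NSLog(@"SetUserMFAPreference failed: %@", error); }
}];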
This action is no longer supported. You can use it to configure only SMS MFA. You can't use it to configure time-based one-time password (TOTP) software token MFA. To configure either type of MFA, use SetUserMFAPreference instead.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
This action is no longer supported. You can use it to configure only SMS MFA. You can't use it to configure time-based one-time password (TOTP) software token MFA. To configure either type of MFA, use SetUserMFAPreference instead.
Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.
Registers the user in the specified user pool and creates a user name, password, and user attributes.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.
If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode, you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.
Registers the user in the specified user pool and creates a user name, password, and user attributes.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.
This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.
If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode, you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.
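A minimal registration sketch; the client ID, username, password, and email are placeholders:

#import <AWSCognitoIdentityProvider/AWSCognitoIdentityProvider.h>

AWSCognitoIdentityProvider *client = [AWSCognitoIdentityProvider defaultCognitoIdentityProvider];

AWSCognitoIdentityProviderAttributeType *emailAttr = [AWSCognitoIdentityProviderAttributeType new];
emailAttr.name = @"email";
emailAttr.value = @"jdoe@example.com"; // placeholder

AWSCognitoIdentityProviderSignUpRequest *request = [AWSCognitoIdentityProviderSignUpRequest new];
request.clientId = @"<app client ID>";
request.username = @"jdoe";
request.password = @"<password>";
request.userAttributes = @[emailAttr];

[client signUp:request completionHandler:^(AWSCognitoIdentityProviderSignUpResponse * _Nullable response, NSError * _Nullable error) {
    if (error) { NSLog(@"SignUp failed: %@", error); return; }
    // userConfirmed stays false until the user confirms with the emailed or texted code.
    NSLog(@"Confirmed: %@, sub: %@", response.userConfirmed, response.userSub);
}];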
Provides the feedback for an authentication event, whether it was from a valid user or not. This feedback is used for improving the risk evaluation decision for the user pool as part of Amazon Cognito advanced security.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
Provides the feedback for an authentication event, whether it was from a valid user or not. This feedback is used for improving the risk evaluation decision for the user pool as part of Amazon Cognito advanced security.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.
Updates the device status.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
Updates the device status. For more information about device authentication, see Working with user devices in your user pool.
Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.
With this operation, your users can update one or more of their attributes with their own credentials. You authorize this API request with the user's access token. To delete an attribute from your user, submit the attribute in your API request with a blank value. Custom attribute values in this request must include the custom: prefix.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.
If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode, you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.
With this operation, your users can update one or more of their attributes with their own credentials. You authorize this API request with the user's access token. To delete an attribute from your user, submit the attribute in your API request with a blank value. Custom attribute values in this request must include the custom: prefix.
Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.
This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.
If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode, you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.
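A sketch that updates one hypothetical custom attribute; the access token and attribute values are placeholders:

#import <AWSCognitoIdentityProvider/AWSCognitoIdentityProvider.h>

AWSCognitoIdentityProvider *client = [AWSCognitoIdentityProvider defaultCognitoIdentityProvider];

AWSCognitoIdentityProviderAttributeType *teamAttr = [AWSCognitoIdentityProviderAttributeType new];
teamAttr.name = @"custom:team"; // hypothetical custom attribute; the custom: prefix is required
teamAttr.value = @"mobile";     // a blank value would delete the attribute from the user

AWSCognitoIdentityProviderUpdateUserAttributesRequest *request = [AWSCognitoIdentityProviderUpdateUserAttributesRequest new];
request.accessToken = @"<access token>";
request.userAttributes = @[teamAttr];

[client updateUserAttributes:request completionHandler:^(AWSCognitoIdentityProviderUpdateUserAttributesResponse * _Nullable response, NSError * _Nullable error) {
    if (error) { NSLog(@"UpdateUserAttributes failed: %@", error); }
}];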
Use this API to register a user's entered time-based one-time password (TOTP) code and mark the user's software token MFA status as "verified" if successful. The request takes an access token or a session string, but not both.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
Use this API to register a user's entered time-based one-time password (TOTP) code and mark the user's software token MFA status as "verified" if successful. The request takes an access token or a session string, but not both.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.
Verifies the specified user attributes in the user pool.
If your user pool requires verification before Amazon Cognito updates the attribute value, VerifyUserAttribute updates the affected attribute to its pending value. For more information, see UserAttributeUpdateSettingsType.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
Verifies the specified user attributes in the user pool.
If your user pool requires verification before Amazon Cognito updates the attribute value, VerifyUserAttribute updates the affected attribute to its pending value. For more information, see UserAttributeUpdateSettingsType.
Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.
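A short sketch of the verification call; the access token and code are placeholders:

#import <AWSCognitoIdentityProvider/AWSCognitoIdentityProvider.h>

AWSCognitoIdentityProvider *client = [AWSCognitoIdentityProvider defaultCognitoIdentityProvider];

AWSCognitoIdentityProviderVerifyUserAttributeRequest *request = [AWSCognitoIdentityProviderVerifyUserAttributeRequest new];
request.accessToken = @"<access token>";
request.attributeName = @"email";
request.code = @"123456"; // code from a prior GetUserAttributeVerificationCode request

[client verifyUserAttribute:request completionHandler:^(AWSCognitoIdentityProviderVerifyUserAttributeResponse * _Nullable response, NSError * _Nullable error) {
    if (error) { NSLog(@"VerifyUserAttribute failed: %@", error); }
}];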
The IdP details. The following list describes the provider detail keys for each IdP type.
For Google and Login with Amazon:
client_id
client_secret
authorize_scopes
For Facebook:
client_id
client_secret
authorize_scopes
api_version
For Sign in with Apple:
client_id
team_id
key_id
private_key
authorize_scopes
For OpenID Connect (OIDC) providers:
client_id
client_secret
attributes_request_method
oidc_issuer
authorize_scopes
The following keys are only present if Amazon Cognito didn't discover them at the oidc_issuer URL.
authorize_url
token_url
attributes_url
jwks_uri
Amazon Cognito sets the value of the following keys automatically. They are read-only.
attributes_url_add_attributes
For SAML providers:
MetadataFile or MetadataURL
IDPSignout (optional)
The scopes, URLs, and identifiers for your external identity provider. The following examples describe the provider detail keys for each IdP type. These values and their schema are subject to change. Social IdP authorize_scopes values must match the values listed here.
Amazon Cognito accepts the following elements when it can't discover endpoint URLs from oidc_issuer: attributes_url, authorize_url, jwks_uri, token_url.
Create or update request: \\\"ProviderDetails\\\": { \\\"attributes_request_method\\\": \\\"GET\\\", \\\"attributes_url\\\": \\\"https://auth.example.com/userInfo\\\", \\\"authorize_scopes\\\": \\\"openid profile email\\\", \\\"authorize_url\\\": \\\"https://auth.example.com/authorize\\\", \\\"client_id\\\": \\\"1example23456789\\\", \\\"client_secret\\\": \\\"provider-app-client-secret\\\", \\\"jwks_uri\\\": \\\"https://auth.example.com/.well-known/jwks.json\\\", \\\"oidc_issuer\\\": \\\"https://auth.example.com\\\", \\\"token_url\\\": \\\"https://example.com/token\\\" }
Describe response: \\\"ProviderDetails\\\": { \\\"attributes_request_method\\\": \\\"GET\\\", \\\"attributes_url\\\": \\\"https://auth.example.com/userInfo\\\", \\\"attributes_url_add_attributes\\\": \\\"false\\\", \\\"authorize_scopes\\\": \\\"openid profile email\\\", \\\"authorize_url\\\": \\\"https://auth.example.com/authorize\\\", \\\"client_id\\\": \\\"1example23456789\\\", \\\"client_secret\\\": \\\"provider-app-client-secret\\\", \\\"jwks_uri\\\": \\\"https://auth.example.com/.well-known/jwks.json\\\", \\\"oidc_issuer\\\": \\\"https://auth.example.com\\\", \\\"token_url\\\": \\\"https://example.com/token\\\" }
Create or update request with Metadata URL: \\\"ProviderDetails\\\": { \\\"IDPInit\\\": \\\"true\\\", \\\"IDPSignout\\\": \\\"true\\\", \\\"EncryptedResponses\\\" : \\\"true\\\", \\\"MetadataURL\\\": \\\"https://auth.example.com/sso/saml/metadata\\\", \\\"RequestSigningAlgorithm\\\": \\\"rsa-sha256\\\" }
Create or update request with Metadata file: \\\"ProviderDetails\\\": { \\\"IDPInit\\\": \\\"true\\\", \\\"IDPSignout\\\": \\\"true\\\", \\\"EncryptedResponses\\\" : \\\"true\\\", \\\"MetadataFile\\\": \\\"[metadata XML]\\\", \\\"RequestSigningAlgorithm\\\": \\\"rsa-sha256\\\" }
The value of MetadataFile
must be the plaintext metadata document with all quote (\\\") characters escaped by backslashes.
Describe response: \\\"ProviderDetails\\\": { \\\"IDPInit\\\": \\\"true\\\", \\\"IDPSignout\\\": \\\"true\\\", \\\"EncryptedResponses\\\" : \\\"true\\\", \\\"ActiveEncryptionCertificate\\\": \\\"[certificate]\\\", \\\"MetadataURL\\\": \\\"https://auth.example.com/sso/saml/metadata\\\", \\\"RequestSigningAlgorithm\\\": \\\"rsa-sha256\\\", \\\"SLORedirectBindingURI\\\": \\\"https://auth.example.com/slo/saml\\\", \\\"SSORedirectBindingURI\\\": \\\"https://auth.example.com/sso/saml\\\" }
Create or update request: \\\"ProviderDetails\\\": { \\\"authorize_scopes\\\": \\\"profile postal_code\\\", \\\"client_id\\\": \\\"amzn1.application-oa2-client.1example23456789\\\", \\\"client_secret\\\": \\\"provider-app-client-secret\\\"
Describe response: \\\"ProviderDetails\\\": { \\\"attributes_url\\\": \\\"https://api.amazon.com/user/profile\\\", \\\"attributes_url_add_attributes\\\": \\\"false\\\", \\\"authorize_scopes\\\": \\\"profile postal_code\\\", \\\"authorize_url\\\": \\\"https://www.amazon.com/ap/oa\\\", \\\"client_id\\\": \\\"amzn1.application-oa2-client.1example23456789\\\", \\\"client_secret\\\": \\\"provider-app-client-secret\\\", \\\"token_request_method\\\": \\\"POST\\\", \\\"token_url\\\": \\\"https://api.amazon.com/auth/o2/token\\\" }
Create or update request: \\\"ProviderDetails\\\": { \\\"authorize_scopes\\\": \\\"email profile openid\\\", \\\"client_id\\\": \\\"1example23456789.apps.googleusercontent.com\\\", \\\"client_secret\\\": \\\"provider-app-client-secret\\\" }
Describe response: \\\"ProviderDetails\\\": { \\\"attributes_url\\\": \\\"https://people.googleapis.com/v1/people/me?personFields=\\\", \\\"attributes_url_add_attributes\\\": \\\"true\\\", \\\"authorize_scopes\\\": \\\"email profile openid\\\", \\\"authorize_url\\\": \\\"https://accounts.google.com/o/oauth2/v2/auth\\\", \\\"client_id\\\": \\\"1example23456789.apps.googleusercontent.com\\\", \\\"client_secret\\\": \\\"provider-app-client-secret\\\", \\\"oidc_issuer\\\": \\\"https://accounts.google.com\\\", \\\"token_request_method\\\": \\\"POST\\\", \\\"token_url\\\": \\\"https://www.googleapis.com/oauth2/v4/token\\\" }
Create or update request: \\\"ProviderDetails\\\": { \\\"authorize_scopes\\\": \\\"email name\\\", \\\"client_id\\\": \\\"com.example.cognito\\\", \\\"private_key\\\": \\\"1EXAMPLE\\\", \\\"key_id\\\": \\\"2EXAMPLE\\\", \\\"team_id\\\": \\\"3EXAMPLE\\\" }
Describe response: \\\"ProviderDetails\\\": { \\\"attributes_url_add_attributes\\\": \\\"false\\\", \\\"authorize_scopes\\\": \\\"email name\\\", \\\"authorize_url\\\": \\\"https://appleid.apple.com/auth/authorize\\\", \\\"client_id\\\": \\\"com.example.cognito\\\", \\\"key_id\\\": \\\"1EXAMPLE\\\", \\\"oidc_issuer\\\": \\\"https://appleid.apple.com\\\", \\\"team_id\\\": \\\"2EXAMPLE\\\", \\\"token_request_method\\\": \\\"POST\\\", \\\"token_url\\\": \\\"https://appleid.apple.com/auth/token\\\" }
Create or update request: \\\"ProviderDetails\\\": { \\\"api_version\\\": \\\"v17.0\\\", \\\"authorize_scopes\\\": \\\"public_profile, email\\\", \\\"client_id\\\": \\\"1example23456789\\\", \\\"client_secret\\\": \\\"provider-app-client-secret\\\" }
Describe response: \\\"ProviderDetails\\\": { \\\"api_version\\\": \\\"v17.0\\\", \\\"attributes_url\\\": \\\"https://graph.facebook.com/v17.0/me?fields=\\\", \\\"attributes_url_add_attributes\\\": \\\"true\\\", \\\"authorize_scopes\\\": \\\"public_profile, email\\\", \\\"authorize_url\\\": \\\"https://www.facebook.com/v17.0/dialog/oauth\\\", \\\"client_id\\\": \\\"1example23456789\\\", \\\"client_secret\\\": \\\"provider-app-client-secret\\\", \\\"token_request_method\\\": \\\"GET\\\", \\\"token_url\\\": \\\"https://graph.facebook.com/v17.0/oauth/access_token\\\" }
The allowed OAuth flows.
Use a code grant flow, which provides an authorization code as the response. This code can be exchanged for access tokens with the /oauth2/token
endpoint.
Issue the access token (and, optionally, ID token, based on scopes) directly to your user.
Issue the access token from the /oauth2/token
endpoint directly to a non-person user using a combination of the client ID and client secret.
The OAuth grant types that you want your app client to generate. To create an app client that generates client credentials grants, you must add client_credentials
as the only allowed OAuth flow.
Use a code grant flow, which provides an authorization code as the response. This code can be exchanged for access tokens with the /oauth2/token
endpoint.
Issue the access token (and, optionally, ID token, based on scopes) directly to your user.
Issue the access token from the /oauth2/token
endpoint directly to a non-person user using a combination of the client ID and client secret.
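As a sketch of how these grant types map onto an app client through this SDK, assuming (per the SDK's convention for enum lists) that the flow list is modeled as raw string values; the IDs are placeholders:

#import <AWSCognitoIdentityProvider/AWSCognitoIdentityProvider.h>

AWSCognitoIdentityProvider *client = [AWSCognitoIdentityProvider defaultCognitoIdentityProvider];

AWSCognitoIdentityProviderUpdateUserPoolClientRequest *request =
    [AWSCognitoIdentityProviderUpdateUserPoolClientRequest new];
request.userPoolId = @"us-east-1_EXAMPLE";  // placeholder
request.clientId = @"1example23456789";     // placeholder
// "code" is the authorization code grant; "implicit" and "client_credentials"
// are the other values. client_credentials must be the only flow when used.
request.allowedOAuthFlows = @[@"code"];
request.allowedOAuthScopes = @[@"openid", @"email"];
request.allowedOAuthFlowsUserPoolClient = @YES;

[[client updateUserPoolClient:request] continueWithBlock:^id _Nullable(AWSTask *task) {
    if (task.error) { NSLog(@"UpdateUserPoolClient failed: %@", task.error); }
    return nil;
}];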
The Amazon CloudFront endpoint that you use as the target of the alias that you set up with your Domain Name Service (DNS) provider.
\"\ + \"documentation\":\"The Amazon CloudFront endpoint that you use as the target of the alias that you set up with your Domain Name Service (DNS) provider. Amazon Cognito returns this value if you set a custom domain with CustomDomainConfig
. If you set an Amazon Cognito prefix domain, this operation returns a blank response.
The IdP details. The following list describes the provider detail keys for each IdP type.
For Google and Login with Amazon:
client_id
client_secret
authorize_scopes
For Facebook:
client_id
client_secret
authorize_scopes
api_version
For Sign in with Apple:
client_id
team_id
key_id
private_key
You can submit a private_key when you add or update an IdP. Describe operations don't return the private key.
authorize_scopes
For OIDC providers:
client_id
client_secret
attributes_request_method
oidc_issuer
authorize_scopes
The following keys are only present if Amazon Cognito didn't discover them at the oidc_issuer
URL.
authorize_url
token_url
attributes_url
jwks_uri
Amazon Cognito sets the value of the following keys automatically. They are read-only.
attributes_url_add_attributes
For SAML providers:
MetadataFile or MetadataURL
IDPSignout (optional)
The scopes, URLs, and identifiers for your external identity provider. The following examples describe the provider detail keys for each IdP type. These values and their schema are subject to change. Social IdP authorize_scopes
values must match the values listed here.
Amazon Cognito accepts the following elements when it can't discover endpoint URLs from oidc_issuer
: attributes_url
, authorize_url
, jwks_uri
, token_url
.
Create or update request: \\\"ProviderDetails\\\": { \\\"attributes_request_method\\\": \\\"GET\\\", \\\"attributes_url\\\": \\\"https://auth.example.com/userInfo\\\", \\\"authorize_scopes\\\": \\\"openid profile email\\\", \\\"authorize_url\\\": \\\"https://auth.example.com/authorize\\\", \\\"client_id\\\": \\\"1example23456789\\\", \\\"client_secret\\\": \\\"provider-app-client-secret\\\", \\\"jwks_uri\\\": \\\"https://auth.example.com/.well-known/jwks.json\\\", \\\"oidc_issuer\\\": \\\"https://auth.example.com\\\", \\\"token_url\\\": \\\"https://example.com/token\\\" }
Describe response: \\\"ProviderDetails\\\": { \\\"attributes_request_method\\\": \\\"GET\\\", \\\"attributes_url\\\": \\\"https://auth.example.com/userInfo\\\", \\\"attributes_url_add_attributes\\\": \\\"false\\\", \\\"authorize_scopes\\\": \\\"openid profile email\\\", \\\"authorize_url\\\": \\\"https://auth.example.com/authorize\\\", \\\"client_id\\\": \\\"1example23456789\\\", \\\"client_secret\\\": \\\"provider-app-client-secret\\\", \\\"jwks_uri\\\": \\\"https://auth.example.com/.well-known/jwks.json\\\", \\\"oidc_issuer\\\": \\\"https://auth.example.com\\\", \\\"token_url\\\": \\\"https://example.com/token\\\" }
Create or update request with Metadata URL: \\\"ProviderDetails\\\": { \\\"IDPInit\\\": \\\"true\\\", \\\"IDPSignout\\\": \\\"true\\\", \\\"EncryptedResponses\\\" : \\\"true\\\", \\\"MetadataURL\\\": \\\"https://auth.example.com/sso/saml/metadata\\\", \\\"RequestSigningAlgorithm\\\": \\\"rsa-sha256\\\" }
Create or update request with Metadata file: \\\"ProviderDetails\\\": { \\\"IDPInit\\\": \\\"true\\\", \\\"IDPSignout\\\": \\\"true\\\", \\\"EncryptedResponses\\\" : \\\"true\\\", \\\"MetadataFile\\\": \\\"[metadata XML]\\\", \\\"RequestSigningAlgorithm\\\": \\\"rsa-sha256\\\" }
The value of MetadataFile
must be the plaintext metadata document with all quote (\\\") characters escaped by backslashes.
Describe response: \\\"ProviderDetails\\\": { \\\"IDPInit\\\": \\\"true\\\", \\\"IDPSignout\\\": \\\"true\\\", \\\"EncryptedResponses\\\" : \\\"true\\\", \\\"ActiveEncryptionCertificate\\\": \\\"[certificate]\\\", \\\"MetadataURL\\\": \\\"https://auth.example.com/sso/saml/metadata\\\", \\\"RequestSigningAlgorithm\\\": \\\"rsa-sha256\\\", \\\"SLORedirectBindingURI\\\": \\\"https://auth.example.com/slo/saml\\\", \\\"SSORedirectBindingURI\\\": \\\"https://auth.example.com/sso/saml\\\" }
Create or update request: \\\"ProviderDetails\\\": { \\\"authorize_scopes\\\": \\\"profile postal_code\\\", \\\"client_id\\\": \\\"amzn1.application-oa2-client.1example23456789\\\", \\\"client_secret\\\": \\\"provider-app-client-secret\\\"
Describe response: \\\"ProviderDetails\\\": { \\\"attributes_url\\\": \\\"https://api.amazon.com/user/profile\\\", \\\"attributes_url_add_attributes\\\": \\\"false\\\", \\\"authorize_scopes\\\": \\\"profile postal_code\\\", \\\"authorize_url\\\": \\\"https://www.amazon.com/ap/oa\\\", \\\"client_id\\\": \\\"amzn1.application-oa2-client.1example23456789\\\", \\\"client_secret\\\": \\\"provider-app-client-secret\\\", \\\"token_request_method\\\": \\\"POST\\\", \\\"token_url\\\": \\\"https://api.amazon.com/auth/o2/token\\\" }
Create or update request: \\\"ProviderDetails\\\": { \\\"authorize_scopes\\\": \\\"email profile openid\\\", \\\"client_id\\\": \\\"1example23456789.apps.googleusercontent.com\\\", \\\"client_secret\\\": \\\"provider-app-client-secret\\\" }
Describe response: \\\"ProviderDetails\\\": { \\\"attributes_url\\\": \\\"https://people.googleapis.com/v1/people/me?personFields=\\\", \\\"attributes_url_add_attributes\\\": \\\"true\\\", \\\"authorize_scopes\\\": \\\"email profile openid\\\", \\\"authorize_url\\\": \\\"https://accounts.google.com/o/oauth2/v2/auth\\\", \\\"client_id\\\": \\\"1example23456789.apps.googleusercontent.com\\\", \\\"client_secret\\\": \\\"provider-app-client-secret\\\", \\\"oidc_issuer\\\": \\\"https://accounts.google.com\\\", \\\"token_request_method\\\": \\\"POST\\\", \\\"token_url\\\": \\\"https://www.googleapis.com/oauth2/v4/token\\\" }
Create or update request: \\\"ProviderDetails\\\": { \\\"authorize_scopes\\\": \\\"email name\\\", \\\"client_id\\\": \\\"com.example.cognito\\\", \\\"private_key\\\": \\\"1EXAMPLE\\\", \\\"key_id\\\": \\\"2EXAMPLE\\\", \\\"team_id\\\": \\\"3EXAMPLE\\\" }
Describe response: \\\"ProviderDetails\\\": { \\\"attributes_url_add_attributes\\\": \\\"false\\\", \\\"authorize_scopes\\\": \\\"email name\\\", \\\"authorize_url\\\": \\\"https://appleid.apple.com/auth/authorize\\\", \\\"client_id\\\": \\\"com.example.cognito\\\", \\\"key_id\\\": \\\"1EXAMPLE\\\", \\\"oidc_issuer\\\": \\\"https://appleid.apple.com\\\", \\\"team_id\\\": \\\"2EXAMPLE\\\", \\\"token_request_method\\\": \\\"POST\\\", \\\"token_url\\\": \\\"https://appleid.apple.com/auth/token\\\" }
Create or update request: \\\"ProviderDetails\\\": { \\\"api_version\\\": \\\"v17.0\\\", \\\"authorize_scopes\\\": \\\"public_profile, email\\\", \\\"client_id\\\": \\\"1example23456789\\\", \\\"client_secret\\\": \\\"provider-app-client-secret\\\" }
Describe response: \\\"ProviderDetails\\\": { \\\"api_version\\\": \\\"v17.0\\\", \\\"attributes_url\\\": \\\"https://graph.facebook.com/v17.0/me?fields=\\\", \\\"attributes_url_add_attributes\\\": \\\"true\\\", \\\"authorize_scopes\\\": \\\"public_profile, email\\\", \\\"authorize_url\\\": \\\"https://www.facebook.com/v17.0/dialog/oauth\\\", \\\"client_id\\\": \\\"1example23456789\\\", \\\"client_secret\\\": \\\"provider-app-client-secret\\\", \\\"token_request_method\\\": \\\"GET\\\", \\\"token_url\\\": \\\"https://graph.facebook.com/v17.0/oauth/access_token\\\" }
The Amazon Resource Name (ARN) of the function that you want to assign to your Lambda trigger.
Set this parameter for legacy purposes. If you also set an ARN in PreTokenGenerationConfig
, its value must be identical to PreTokenGeneration
. For new instances of pre token generation triggers, set the LambdaArn
of PreTokenGenerationConfig
.
The detailed configuration of a pre token generation trigger. If you also set an ARN in PreTokenGeneration
, its value must be identical to PreTokenGenerationConfig
.
The user migration Lambda config type.
\"\ },\ + \"PreTokenGenerationConfig\":{\ + \"shape\":\"PreTokenGenerationVersionConfigType\",\ + \"documentation\":\"The detailed configuration of a pre token generation trigger. If you also set an ARN in PreTokenGeneration
, its value must be identical to PreTokenGenerationConfig
.
A custom SMS sender Lambda trigger.
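To make the legacy/new relationship concrete, here is a short sketch that mirrors the shapes above; the ARN is a placeholder, and the exact lambdaVersion enum case is left as a comment rather than guessed:

#import <AWSCognitoIdentityProvider/AWSCognitoIdentityProvider.h>

AWSCognitoIdentityProviderLambdaConfigType *lambdaConfig =
    [AWSCognitoIdentityProviderLambdaConfigType new];
// Legacy field, kept for backward compatibility.
lambdaConfig.preTokenGeneration =
    @"arn:aws:lambda:us-east-1:123456789012:function:preTokenGen";  // placeholder ARN

// New-style detailed configuration; its ARN must match the legacy field exactly.
AWSCognitoIdentityProviderPreTokenGenerationVersionConfigType *detailed =
    [AWSCognitoIdentityProviderPreTokenGenerationVersionConfigType new];
detailed.lambdaArn = lambdaConfig.preTokenGeneration;
// detailed.lambdaVersion selects the trigger event version (V1_0 or V2_0).
lambdaConfig.preTokenGenerationConfig = detailed;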
\"\ @@ -7635,7 +7635,7 @@ - (NSString *)definitionString { },\ \"ProviderDetails\":{\ \"shape\":\"ProviderDetailsType\",\ - \"documentation\":\"The IdP details to be updated, such as MetadataURL
and MetadataFile
.
The scopes, URLs, and identifiers for your external identity provider. The following examples describe the provider detail keys for each IdP type. These values and their schema are subject to change. Social IdP authorize_scopes
values must match the values listed here.
Amazon Cognito accepts the following elements when it can't discover endpoint URLs from oidc_issuer
: attributes_url
, authorize_url
, jwks_uri
, token_url
.
Create or update request: \\\"ProviderDetails\\\": { \\\"attributes_request_method\\\": \\\"GET\\\", \\\"attributes_url\\\": \\\"https://auth.example.com/userInfo\\\", \\\"authorize_scopes\\\": \\\"openid profile email\\\", \\\"authorize_url\\\": \\\"https://auth.example.com/authorize\\\", \\\"client_id\\\": \\\"1example23456789\\\", \\\"client_secret\\\": \\\"provider-app-client-secret\\\", \\\"jwks_uri\\\": \\\"https://auth.example.com/.well-known/jwks.json\\\", \\\"oidc_issuer\\\": \\\"https://auth.example.com\\\", \\\"token_url\\\": \\\"https://example.com/token\\\" }
Describe response: \\\"ProviderDetails\\\": { \\\"attributes_request_method\\\": \\\"GET\\\", \\\"attributes_url\\\": \\\"https://auth.example.com/userInfo\\\", \\\"attributes_url_add_attributes\\\": \\\"false\\\", \\\"authorize_scopes\\\": \\\"openid profile email\\\", \\\"authorize_url\\\": \\\"https://auth.example.com/authorize\\\", \\\"client_id\\\": \\\"1example23456789\\\", \\\"client_secret\\\": \\\"provider-app-client-secret\\\", \\\"jwks_uri\\\": \\\"https://auth.example.com/.well-known/jwks.json\\\", \\\"oidc_issuer\\\": \\\"https://auth.example.com\\\", \\\"token_url\\\": \\\"https://example.com/token\\\" }
Create or update request with Metadata URL: \\\"ProviderDetails\\\": { \\\"IDPInit\\\": \\\"true\\\", \\\"IDPSignout\\\": \\\"true\\\", \\\"EncryptedResponses\\\" : \\\"true\\\", \\\"MetadataURL\\\": \\\"https://auth.example.com/sso/saml/metadata\\\", \\\"RequestSigningAlgorithm\\\": \\\"rsa-sha256\\\" }
Create or update request with Metadata file: \\\"ProviderDetails\\\": { \\\"IDPInit\\\": \\\"true\\\", \\\"IDPSignout\\\": \\\"true\\\", \\\"EncryptedResponses\\\" : \\\"true\\\", \\\"MetadataFile\\\": \\\"[metadata XML]\\\", \\\"RequestSigningAlgorithm\\\": \\\"rsa-sha256\\\" }
The value of MetadataFile
must be the plaintext metadata document with all quote (\\\") characters escaped by backslashes.
Describe response: \\\"ProviderDetails\\\": { \\\"IDPInit\\\": \\\"true\\\", \\\"IDPSignout\\\": \\\"true\\\", \\\"EncryptedResponses\\\" : \\\"true\\\", \\\"ActiveEncryptionCertificate\\\": \\\"[certificate]\\\", \\\"MetadataURL\\\": \\\"https://auth.example.com/sso/saml/metadata\\\", \\\"RequestSigningAlgorithm\\\": \\\"rsa-sha256\\\", \\\"SLORedirectBindingURI\\\": \\\"https://auth.example.com/slo/saml\\\", \\\"SSORedirectBindingURI\\\": \\\"https://auth.example.com/sso/saml\\\" }
Create or update request: \\\"ProviderDetails\\\": { \\\"authorize_scopes\\\": \\\"profile postal_code\\\", \\\"client_id\\\": \\\"amzn1.application-oa2-client.1example23456789\\\", \\\"client_secret\\\": \\\"provider-app-client-secret\\\"
Describe response: \\\"ProviderDetails\\\": { \\\"attributes_url\\\": \\\"https://api.amazon.com/user/profile\\\", \\\"attributes_url_add_attributes\\\": \\\"false\\\", \\\"authorize_scopes\\\": \\\"profile postal_code\\\", \\\"authorize_url\\\": \\\"https://www.amazon.com/ap/oa\\\", \\\"client_id\\\": \\\"amzn1.application-oa2-client.1example23456789\\\", \\\"client_secret\\\": \\\"provider-app-client-secret\\\", \\\"token_request_method\\\": \\\"POST\\\", \\\"token_url\\\": \\\"https://api.amazon.com/auth/o2/token\\\" }
Create or update request: \\\"ProviderDetails\\\": { \\\"authorize_scopes\\\": \\\"email profile openid\\\", \\\"client_id\\\": \\\"1example23456789.apps.googleusercontent.com\\\", \\\"client_secret\\\": \\\"provider-app-client-secret\\\" }
Describe response: \\\"ProviderDetails\\\": { \\\"attributes_url\\\": \\\"https://people.googleapis.com/v1/people/me?personFields=\\\", \\\"attributes_url_add_attributes\\\": \\\"true\\\", \\\"authorize_scopes\\\": \\\"email profile openid\\\", \\\"authorize_url\\\": \\\"https://accounts.google.com/o/oauth2/v2/auth\\\", \\\"client_id\\\": \\\"1example23456789.apps.googleusercontent.com\\\", \\\"client_secret\\\": \\\"provider-app-client-secret\\\", \\\"oidc_issuer\\\": \\\"https://accounts.google.com\\\", \\\"token_request_method\\\": \\\"POST\\\", \\\"token_url\\\": \\\"https://www.googleapis.com/oauth2/v4/token\\\" }
Create or update request: \\\"ProviderDetails\\\": { \\\"authorize_scopes\\\": \\\"email name\\\", \\\"client_id\\\": \\\"com.example.cognito\\\", \\\"private_key\\\": \\\"1EXAMPLE\\\", \\\"key_id\\\": \\\"2EXAMPLE\\\", \\\"team_id\\\": \\\"3EXAMPLE\\\" }
Describe response: \\\"ProviderDetails\\\": { \\\"attributes_url_add_attributes\\\": \\\"false\\\", \\\"authorize_scopes\\\": \\\"email name\\\", \\\"authorize_url\\\": \\\"https://appleid.apple.com/auth/authorize\\\", \\\"client_id\\\": \\\"com.example.cognito\\\", \\\"key_id\\\": \\\"1EXAMPLE\\\", \\\"oidc_issuer\\\": \\\"https://appleid.apple.com\\\", \\\"team_id\\\": \\\"2EXAMPLE\\\", \\\"token_request_method\\\": \\\"POST\\\", \\\"token_url\\\": \\\"https://appleid.apple.com/auth/token\\\" }
Create or update request: \\\"ProviderDetails\\\": { \\\"api_version\\\": \\\"v17.0\\\", \\\"authorize_scopes\\\": \\\"public_profile, email\\\", \\\"client_id\\\": \\\"1example23456789\\\", \\\"client_secret\\\": \\\"provider-app-client-secret\\\" }
Describe response: \\\"ProviderDetails\\\": { \\\"api_version\\\": \\\"v17.0\\\", \\\"attributes_url\\\": \\\"https://graph.facebook.com/v17.0/me?fields=\\\", \\\"attributes_url_add_attributes\\\": \\\"true\\\", \\\"authorize_scopes\\\": \\\"public_profile, email\\\", \\\"authorize_url\\\": \\\"https://www.facebook.com/v17.0/dialog/oauth\\\", \\\"client_id\\\": \\\"1example23456789\\\", \\\"client_secret\\\": \\\"provider-app-client-secret\\\", \\\"token_request_method\\\": \\\"GET\\\", \\\"token_url\\\": \\\"https://graph.facebook.com/v17.0/oauth/access_token\\\" }
Begins setup of time-based one-time password (TOTP) multi-factor authentication (MFA) for a user, with a unique private key that Amazon Cognito generates and returns in the API response. You can authorize an AssociateSoftwareToken
request with either the user's access token, or a session string from a challenge response that you received from Amazon Cognito.
Amazon Cognito disassociates an existing software token when you verify the new token in a VerifySoftwareToken API request. If you don't verify the software token and your user pool doesn't require MFA, the user can then authenticate with user name and password credentials alone. If your user pool requires TOTP MFA, Amazon Cognito generates an MFA_SETUP
or SOFTWARE_TOKEN_SETUP
challenge each time your user signs in. Complete setup with AssociateSoftwareToken
and VerifySoftwareToken
.
After you set up software token MFA for your user, Amazon Cognito generates a SOFTWARE_TOKEN_MFA
challenge when they authenticate. Respond to this challenge with your user's TOTP.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
Begins setup of time-based one-time password (TOTP) multi-factor authentication (MFA) for a user, with a unique private key that Amazon Cognito generates and returns in the API response. You can authorize an AssociateSoftwareToken
request with either the user's access token, or a session string from a challenge response that you received from Amazon Cognito.
Amazon Cognito disassociates an existing software token when you verify the new token in a VerifySoftwareToken API request. If you don't verify the software token and your user pool doesn't require MFA, the user can then authenticate with user name and password credentials alone. If your user pool requires TOTP MFA, Amazon Cognito generates an MFA_SETUP
or SOFTWARE_TOKEN_SETUP
challenge each time your user signs in. Complete setup with AssociateSoftwareToken
and VerifySoftwareToken
.
After you set up software token MFA for your user, Amazon Cognito generates a SOFTWARE_TOKEN_MFA
challenge when they authenticate. Respond to this challenge with your user's TOTP.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.
Begins setup of time-based one-time password (TOTP) multi-factor authentication (MFA) for a user, with a unique private key that Amazon Cognito generates and returns in the API response. You can authorize an AssociateSoftwareToken
request with either the user's access token, or a session string from a challenge response that you received from Amazon Cognito.
Amazon Cognito disassociates an existing software token when you verify the new token in a VerifySoftwareToken API request. If you don't verify the software token and your user pool doesn't require MFA, the user can then authenticate with user name and password credentials alone. If your user pool requires TOTP MFA, Amazon Cognito generates an MFA_SETUP
or SOFTWARE_TOKEN_SETUP
challenge each time your user signs in. Complete setup with AssociateSoftwareToken
and VerifySoftwareToken
.
After you set up software token MFA for your user, Amazon Cognito generates a SOFTWARE_TOKEN_MFA
challenge when they authenticate. Respond to this challenge with your user's TOTP.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
Begins setup of time-based one-time password (TOTP) multi-factor authentication (MFA) for a user, with a unique private key that Amazon Cognito generates and returns in the API response. You can authorize an AssociateSoftwareToken
request with either the user's access token, or a session string from a challenge response that you received from Amazon Cognito.
Amazon Cognito disassociates an existing software token when you verify the new token in a VerifySoftwareToken API request. If you don't verify the software token and your user pool doesn't require MFA, the user can then authenticate with user name and password credentials alone. If your user pool requires TOTP MFA, Amazon Cognito generates an MFA_SETUP
or SOFTWARE_TOKEN_SETUP
challenge each time your user signs in. Complete setup with AssociateSoftwareToken
and VerifySoftwareToken
.
After you set up software token MFA for your user, Amazon Cognito generates a SOFTWARE_TOKEN_MFA
challenge when they authenticate. Respond to this challenge with your user's TOTP.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.
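A minimal sketch of the TOTP setup call through this SDK's generated client; the access token is a placeholder, and the method and model names follow the SDK's code-generation conventions:

#import <AWSCognitoIdentityProvider/AWSCognitoIdentityProvider.h>

AWSCognitoIdentityProvider *client = [AWSCognitoIdentityProvider defaultCognitoIdentityProvider];

AWSCognitoIdentityProviderAssociateSoftwareTokenRequest *request =
    [AWSCognitoIdentityProviderAssociateSoftwareTokenRequest new];
request.accessToken = @"eyJraWQiExample";  // signed-in user's access token (placeholder)

[[client associateSoftwareToken:request] continueWithBlock:^id _Nullable(AWSTask *task) {
    AWSCognitoIdentityProviderAssociateSoftwareTokenResponse *response = task.result;
    if (task.error) {
        NSLog(@"AssociateSoftwareToken failed: %@", task.error);
    } else {
        // Show response.secretCode in an authenticator app, then call
        // VerifySoftwareToken with the user's first TOTP to finish setup.
        NSLog(@"TOTP secret: %@", response.secretCode);
    }
    return nil;
}];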
Changes the password for a specified user in a user pool.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
Changes the password for a specified user in a user pool.
Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin
.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.
Changes the password for a specified user in a user pool.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
Changes the password for a specified user in a user pool.
Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin
.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.
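A minimal sketch of the password change call; the token and passwords are placeholders:

#import <AWSCognitoIdentityProvider/AWSCognitoIdentityProvider.h>

AWSCognitoIdentityProvider *client = [AWSCognitoIdentityProvider defaultCognitoIdentityProvider];

AWSCognitoIdentityProviderChangePasswordRequest *request =
    [AWSCognitoIdentityProviderChangePasswordRequest new];
request.accessToken = @"eyJraWQiExample";   // signed-in user's access token (placeholder)
request.previousPassword = @"OldP@ssw0rd";  // placeholder
request.proposedPassword = @"NewP@ssw0rd";  // placeholder

[[client changePassword:request] continueWithBlock:^id _Nullable(AWSTask *task) {
    if (task.error) { NSLog(@"ChangePassword failed: %@", task.error); }
    return nil;
}];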
Confirms tracking of the device. This is the API call that begins device tracking.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
Confirms tracking of the device. This is the API call that begins device tracking. For more information about device authentication, see Working with user devices in your user pool.
Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin
.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.
Confirms tracking of the device. This is the API call that begins device tracking.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
Confirms tracking of the device. This is the API call that begins device tracking. For more information about device authentication, see Working with user devices in your user pool.
Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin
.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.
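A hedged sketch of the device confirmation call. Computing the device SRP password verifier and salt is out of scope here, and all values, including the device key from the sign-in response, are placeholders:

#import <AWSCognitoIdentityProvider/AWSCognitoIdentityProvider.h>

AWSCognitoIdentityProvider *client = [AWSCognitoIdentityProvider defaultCognitoIdentityProvider];

AWSCognitoIdentityProviderDeviceSecretVerifierConfigType *verifier =
    [AWSCognitoIdentityProviderDeviceSecretVerifierConfigType new];
verifier.passwordVerifier = @"BASE64_VERIFIER";  // placeholder SRP verifier
verifier.salt = @"BASE64_SALT";                  // placeholder SRP salt

AWSCognitoIdentityProviderConfirmDeviceRequest *request =
    [AWSCognitoIdentityProviderConfirmDeviceRequest new];
request.accessToken = @"eyJraWQiExample";              // placeholder
request.deviceKey = @"us-east-1_example-device-key";   // from the sign-in response (placeholder)
request.deviceName = @"my-iphone";                     // placeholder
request.deviceSecretVerifierConfig = verifier;

[[client confirmDevice:request] continueWithBlock:^id _Nullable(AWSTask *task) {
    if (task.error) { NSLog(@"ConfirmDevice failed: %@", task.error); }
    return nil;
}];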
Allows a user to enter a confirmation code to reset a forgotten password.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
Allows a user to enter a confirmation code to reset a forgotten password.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.
Allows a user to enter a confirmation code to reset a forgotten password.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
Allows a user to enter a confirmation code to reset a forgotten password.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.
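A minimal sketch of confirming a password reset with the code from ForgotPassword; the client ID, username, code, and password are placeholders:

#import <AWSCognitoIdentityProvider/AWSCognitoIdentityProvider.h>

AWSCognitoIdentityProvider *client = [AWSCognitoIdentityProvider defaultCognitoIdentityProvider];

AWSCognitoIdentityProviderConfirmForgotPasswordRequest *request =
    [AWSCognitoIdentityProviderConfirmForgotPasswordRequest new];
request.clientId = @"1example23456789";  // placeholder app client ID
request.username = @"jane";              // placeholder
request.confirmationCode = @"123456";    // code the user received
request.password = @"NewP@ssw0rd";       // the new password (placeholder)
// request.secretHash is also required if the app client has a secret.

[[client confirmForgotPassword:request] continueWithBlock:^id _Nullable(AWSTask *task) {
    if (task.error) { NSLog(@"ConfirmForgotPassword failed: %@", task.error); }
    return nil;
}];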
This public API operation submits a code that Amazon Cognito sent to your user when they signed up in your user pool via the SignUp API operation. After your user enters their code, they confirm ownership of the email address or phone number that they provided, and their user account becomes active. Depending on your user pool configuration, your users will receive their confirmation code in an email or SMS message.
Local users who signed up in your user pool are the only type of user who can confirm sign-up with a code. Users who federate through an external identity provider (IdP) have already been confirmed by their IdP. Administrator-created users, users created with the AdminCreateUser API operation, confirm their accounts when they respond to their invitation email message and choose a password. They do not receive a confirmation code. Instead, they receive a temporary password.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
This public API operation submits a code that Amazon Cognito sent to your user when they signed up in your user pool via the SignUp API operation. After your user enters their code, they confirm ownership of the email address or phone number that they provided, and their user account becomes active. Depending on your user pool configuration, your users will receive their confirmation code in an email or SMS message.
Local users who signed up in your user pool are the only type of user who can confirm sign-up with a code. Users who federate through an external identity provider (IdP) have already been confirmed by their IdP. Administrator-created users, users created with the AdminCreateUser API operation, confirm their accounts when they respond to their invitation email message and choose a password. They do not receive a confirmation code. Instead, they receive a temporary password.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.
This public API operation submits a code that Amazon Cognito sent to your user when they signed up in your user pool via the SignUp API operation. After your user enters their code, they confirm ownership of the email address or phone number that they provided, and their user account becomes active. Depending on your user pool configuration, your users will receive their confirmation code in an email or SMS message.
Local users who signed up in your user pool are the only type of user who can confirm sign-up with a code. Users who federate through an external identity provider (IdP) have already been confirmed by their IdP. Administrator-created users, users created with the AdminCreateUser API operation, confirm their accounts when they respond to their invitation email message and choose a password. They do not receive a confirmation code. Instead, they receive a temporary password.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
This public API operation submits a code that Amazon Cognito sent to your user when they signed up in your user pool via the SignUp API operation. After your user enters their code, they confirm ownership of the email address or phone number that they provided, and their user account becomes active. Depending on your user pool configuration, your users will receive their confirmation code in an email or SMS message.
Local users who signed up in your user pool are the only type of user who can confirm sign-up with a code. Users who federate through an external identity provider (IdP) have already been confirmed by their IdP. Administrator-created users, users created with the AdminCreateUser API operation, confirm their accounts when they respond to their invitation email message and choose a password. They do not receive a confirmation code. Instead, they receive a temporary password.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.
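A minimal sketch of confirming sign-up with this SDK's generated client; the client ID, username, and code are placeholders:

#import <AWSCognitoIdentityProvider/AWSCognitoIdentityProvider.h>

AWSCognitoIdentityProvider *client = [AWSCognitoIdentityProvider defaultCognitoIdentityProvider];

AWSCognitoIdentityProviderConfirmSignUpRequest *request =
    [AWSCognitoIdentityProviderConfirmSignUpRequest new];
request.clientId = @"1example23456789";  // placeholder app client ID
request.username = @"jane";              // placeholder
request.confirmationCode = @"123456";    // code from the email or SMS message
// request.secretHash is also required if the app client has a secret.

[[client confirmSignUp:request] continueWithBlock:^id _Nullable(AWSTask *task) {
    if (task.error) { NSLog(@"ConfirmSignUp failed: %@", task.error); }
    return nil;
}];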
Creates an IdP for a user pool.
Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.
Learn more
Adds a configuration and trust relationship between a third-party identity provider (IdP) and a user pool.
Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.
Learn more
Creates an IdP for a user pool.
Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.
Learn more
Adds a configuration and trust relationship between a third-party identity provider (IdP) and a user pool.
Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.
Learn more
Allows a user to delete their own user profile.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
Allows a user to delete their own user profile.
Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin
.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.
Allows a user to delete their own user profile.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
Allows a user to delete their own user profile.
Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin
.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.
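A minimal sketch of the self-service delete call; the access token is a placeholder:

#import <AWSCognitoIdentityProvider/AWSCognitoIdentityProvider.h>

AWSCognitoIdentityProvider *client = [AWSCognitoIdentityProvider defaultCognitoIdentityProvider];

AWSCognitoIdentityProviderDeleteUserRequest *request =
    [AWSCognitoIdentityProviderDeleteUserRequest new];
request.accessToken = @"eyJraWQiExample";  // signed-in user's access token (placeholder)

[[client deleteUser:request] continueWithBlock:^id _Nullable(AWSTask *task) {
    if (task.error) { NSLog(@"DeleteUser failed: %@", task.error); }
    return nil;
}];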
Deletes the attributes for a user.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
Deletes the attributes for a user.
Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin
.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.
Deletes the attributes for a user.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
Deletes the attributes for a user.
Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin
.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.
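A minimal sketch that deletes one standard and one custom attribute; the token and attribute names are placeholders:

#import <AWSCognitoIdentityProvider/AWSCognitoIdentityProvider.h>

AWSCognitoIdentityProvider *client = [AWSCognitoIdentityProvider defaultCognitoIdentityProvider];

AWSCognitoIdentityProviderDeleteUserAttributesRequest *request =
    [AWSCognitoIdentityProviderDeleteUserAttributesRequest new];
request.accessToken = @"eyJraWQiExample";  // signed-in user's access token (placeholder)
// Custom attributes use the custom: prefix.
request.userAttributeNames = @[@"nickname", @"custom:team"];

[[client deleteUserAttributes:request] continueWithBlock:^id _Nullable(AWSTask *task) {
    if (task.error) { NSLog(@"DeleteUserAttributes failed: %@", task.error); }
    return nil;
}];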
Forgets the specified device.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
Forgets the specified device. For more information about device authentication, see Working with user devices in your user pool.
Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin
.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.
Forgets the specified device.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
Forgets the specified device. For more information about device authentication, see Working with user devices in your user pool.
Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin
.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.
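A minimal sketch of forgetting a tracked device; the token and device key are placeholders:

#import <AWSCognitoIdentityProvider/AWSCognitoIdentityProvider.h>

AWSCognitoIdentityProvider *client = [AWSCognitoIdentityProvider defaultCognitoIdentityProvider];

AWSCognitoIdentityProviderForgetDeviceRequest *request =
    [AWSCognitoIdentityProviderForgetDeviceRequest new];
request.accessToken = @"eyJraWQiExample";             // placeholder
request.deviceKey = @"us-east-1_example-device-key";  // from the sign-in response (placeholder)

[[client forgetDevice:request] continueWithBlock:^id _Nullable(AWSTask *task) {
    if (task.error) { NSLog(@"ForgetDevice failed: %@", task.error); }
    return nil;
}];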
Calling this API causes a message to be sent to the end user with a confirmation code that is required to change the user's password. For the Username
parameter, you can use the username or user alias. Amazon Cognito sends the confirmation code by the method that you specify in AccountRecoverySetting. For more information, see Recovering User Accounts in the Amazon Cognito Developer Guide. To use the confirmation code for resetting the password, call ConfirmForgotPassword.
If neither a verified phone number nor a verified email exists, this API returns InvalidParameterException
. If your app client has a client secret and you don't provide a SECRET_HASH
parameter, this API returns NotAuthorizedException
.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.
If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode, you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.
Calling this API causes a message to be sent to the end user with a confirmation code that is required to change the user's password. For the Username
parameter, you can use the username or user alias. Amazon Cognito sends the confirmation code by the method that you specify in AccountRecoverySetting. For more information, see Recovering User Accounts in the Amazon Cognito Developer Guide. To use the confirmation code for resetting the password, call ConfirmForgotPassword.
If neither a verified phone number nor a verified email exists, this API returns InvalidParameterException
. If your app client has a client secret and you don't provide a SECRET_HASH
parameter, this API returns NotAuthorizedException
.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.
This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.
If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode, you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.
Calling this API causes a message to be sent to the end user with a confirmation code that is required to change the user's password. For the Username
parameter, you can use the username or user alias. Amazon Cognito sends the confirmation code by the method that you specify in AccountRecoverySetting. For more information, see Recovering User Accounts in the Amazon Cognito Developer Guide. To use the confirmation code for resetting the password, call ConfirmForgotPassword.
If neither a verified phone number nor a verified email exists, this API returns InvalidParameterException
. If your app client has a client secret and you don't provide a SECRET_HASH
parameter, this API returns NotAuthorizedException
.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.
If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode, you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.
Calling this API causes a message to be sent to the end user with a confirmation code that is required to change the user's password. For the Username
parameter, you can use the username or user alias. Amazon Cognito sends the confirmation code by the method that you specify in AccountRecoverySetting. For more information, see Recovering User Accounts in the Amazon Cognito Developer Guide. To use the confirmation code for resetting the password, call ConfirmForgotPassword.
If neither a verified phone number nor a verified email exists, this API returns InvalidParameterException
. If your app client has a client secret and you don't provide a SECRET_HASH
parameter, this API returns NotAuthorizedException
.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.
This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.
If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode, you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.
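A sketch of the ForgotPassword call, including the SECRET_HASH that app clients with a secret must send. SECRET_HASH is the Base64 encoding of HMAC-SHA256 over the username concatenated with the client ID, keyed by the client secret; all identifiers below are placeholders:

#import <AWSCognitoIdentityProvider/AWSCognitoIdentityProvider.h>
#import <CommonCrypto/CommonHMAC.h>

// SECRET_HASH = Base64(HMAC-SHA256(key: clientSecret, msg: username + clientId)).
static NSString *SecretHash(NSString *username, NSString *clientId, NSString *clientSecret) {
    NSData *key = [clientSecret dataUsingEncoding:NSUTF8StringEncoding];
    NSData *msg = [[username stringByAppendingString:clientId]
                      dataUsingEncoding:NSUTF8StringEncoding];
    unsigned char mac[CC_SHA256_DIGEST_LENGTH];
    CCHmac(kCCHmacAlgSHA256, key.bytes, key.length, msg.bytes, msg.length, mac);
    return [[NSData dataWithBytes:mac length:sizeof(mac)] base64EncodedStringWithOptions:0];
}

AWSCognitoIdentityProvider *client = [AWSCognitoIdentityProvider defaultCognitoIdentityProvider];

AWSCognitoIdentityProviderForgotPasswordRequest *request =
    [AWSCognitoIdentityProviderForgotPasswordRequest new];
request.clientId = @"1example23456789";  // placeholder
request.username = @"jane";              // placeholder
request.secretHash = SecretHash(@"jane", @"1example23456789", @"app-client-secret");

[[client forgotPassword:request] continueWithBlock:^id _Nullable(AWSTask *task) {
    if (task.error) { NSLog(@"ForgotPassword failed: %@", task.error); }
    return nil;
}];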
Gets the device.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
Gets the device. For more information about device authentication, see Working with user devices in your user pool.
Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin
.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.
Gets the device.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
Gets the device. For more information about device authentication, see Working with user devices in your user pool.
Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin
.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.
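A minimal sketch of fetching one tracked device; the token and device key are placeholders:

#import <AWSCognitoIdentityProvider/AWSCognitoIdentityProvider.h>

AWSCognitoIdentityProvider *client = [AWSCognitoIdentityProvider defaultCognitoIdentityProvider];

AWSCognitoIdentityProviderGetDeviceRequest *request =
    [AWSCognitoIdentityProviderGetDeviceRequest new];
request.accessToken = @"eyJraWQiExample";             // placeholder
request.deviceKey = @"us-east-1_example-device-key";  // placeholder

[[client getDevice:request] continueWithBlock:^id _Nullable(AWSTask *task) {
    AWSCognitoIdentityProviderGetDeviceResponse *response = task.result;
    if (task.error) {
        NSLog(@"GetDevice failed: %@", task.error);
    } else {
        NSLog(@"Device key: %@", response.device.deviceKey);
    }
    return nil;
}];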
Gets the user attributes and metadata for a user.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
Gets the user attributes and metadata for a user.
Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin
.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.
Gets the user attributes and metadata for a user.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
Gets the user attributes and metadata for a user.
Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin
.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.
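A minimal sketch that reads the user's attributes back from the response; the access token is a placeholder:

#import <AWSCognitoIdentityProvider/AWSCognitoIdentityProvider.h>

AWSCognitoIdentityProvider *client = [AWSCognitoIdentityProvider defaultCognitoIdentityProvider];

AWSCognitoIdentityProviderGetUserRequest *request =
    [AWSCognitoIdentityProviderGetUserRequest new];
request.accessToken = @"eyJraWQiExample";  // signed-in user's access token (placeholder)

[[client getUser:request] continueWithBlock:^id _Nullable(AWSTask *task) {
    AWSCognitoIdentityProviderGetUserResponse *response = task.result;
    if (task.error) {
        NSLog(@"GetUser failed: %@", task.error);
    } else {
        for (AWSCognitoIdentityProviderAttributeType *attribute in response.userAttributes) {
            NSLog(@"%@ = %@", attribute.name, attribute.value);
        }
    }
    return nil;
}];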
Generates a user attribute verification code for the specified attribute name. Sends a message to a user with a code that they must return in a VerifyUserAttribute request.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.
If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode, you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.
Generates a user attribute verification code for the specified attribute name. Sends a message to a user with a code that they must return in a VerifyUserAttribute request.
Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin
.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.
This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.
If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode, you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.
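A minimal sketch of requesting a code for the email attribute follows; the token and attribute name are placeholders.

#import <AWSCognitoIdentityProvider/AWSCognitoIdentityProvider.h>

AWSCognitoIdentityProvider *client = [AWSCognitoIdentityProvider defaultCognitoIdentityProvider];

AWSCognitoIdentityProviderGetUserAttributeVerificationCodeRequest *request =
    [AWSCognitoIdentityProviderGetUserAttributeVerificationCodeRequest new];
request.accessToken = @"<access token>"; // placeholder
request.attributeName = @"email";        // the attribute to verify

[[client getUserAttributeVerificationCode:request] continueWithBlock:^id(AWSTask<AWSCognitoIdentityProviderGetUserAttributeVerificationCodeResponse *> *task) {
    if (task.error) {
        NSLog(@"GetUserAttributeVerificationCode failed: %@", task.error);
    } else {
        // codeDeliveryDetails describes where the code was sent.
        NSLog(@"Code sent: %@", task.result.codeDeliveryDetails);
    }
    return nil;
}];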
Invalidates the identity, access, and refresh tokens that Amazon Cognito issued to a user. Call this operation when your user signs out of your app. This results in the following behavior.
Amazon Cognito no longer accepts token-authorized user operations that you authorize with a signed-out user's access tokens. For more information, see Using the Amazon Cognito user pools API and user pool endpoints.
Amazon Cognito returns an Access Token has been revoked error when your app attempts to authorize a user pools API request with a revoked access token that contains the scope aws.cognito.signin.user.admin.
Amazon Cognito no longer accepts a signed-out user's ID token in a GetId request to an identity pool with ServerSideTokenCheck enabled for its user pool IdP configuration in CognitoIdentityProvider.
Amazon Cognito no longer accepts a signed-out user's refresh tokens in refresh requests.
Other requests might be valid until your user's token expires.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
Invalidates the identity, access, and refresh tokens that Amazon Cognito issued to a user. Call this operation when your user signs out of your app. This results in the following behavior.
Amazon Cognito no longer accepts token-authorized user operations that you authorize with a signed-out user's access tokens. For more information, see Using the Amazon Cognito user pools API and user pool endpoints.
Amazon Cognito returns an Access Token has been revoked error when your app attempts to authorize a user pools API request with a revoked access token that contains the scope aws.cognito.signin.user.admin.
Amazon Cognito no longer accepts a signed-out user's ID token in a GetId request to an identity pool with ServerSideTokenCheck enabled for its user pool IdP configuration in CognitoIdentityProvider.
Amazon Cognito no longer accepts a signed-out user's refresh tokens in refresh requests.
Other requests might be valid until your user's token expires.
Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.
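A hedged sketch of a global sign-out call, assuming the same placeholder client setup:

#import <AWSCognitoIdentityProvider/AWSCognitoIdentityProvider.h>

AWSCognitoIdentityProvider *client = [AWSCognitoIdentityProvider defaultCognitoIdentityProvider];

AWSCognitoIdentityProviderGlobalSignOutRequest *request = [AWSCognitoIdentityProviderGlobalSignOutRequest new];
request.accessToken = @"<access token>"; // placeholder

[[client globalSignOut:request] continueWithBlock:^id(AWSTask<AWSCognitoIdentityProviderGlobalSignOutResponse *> *task) {
    if (task.error) {
        NSLog(@"GlobalSignOut failed: %@", task.error);
    } else {
        // After this succeeds, discard any cached tokens in the app as well.
        NSLog(@"User signed out everywhere");
    }
    return nil;
}];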
Initiates sign-in for a user in the Amazon Cognito user directory. You can't sign in a user with a federated IdP with InitiateAuth. For more information, see Adding user pool sign-in through a third party.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.
If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode, you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.
Initiates sign-in for a user in the Amazon Cognito user directory. You can't sign in a user with a federated IdP with InitiateAuth. For more information, see Adding user pool sign-in through a third party.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.
This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.
If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode, you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.
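The sketch below shows one plausible USER_PASSWORD_AUTH sign-in; it assumes that flow is enabled on the app client, and the client ID and credentials are placeholders.

#import <AWSCognitoIdentityProvider/AWSCognitoIdentityProvider.h>

AWSCognitoIdentityProvider *client = [AWSCognitoIdentityProvider defaultCognitoIdentityProvider];

AWSCognitoIdentityProviderInitiateAuthRequest *request = [AWSCognitoIdentityProviderInitiateAuthRequest new];
request.authFlow = AWSCognitoIdentityProviderAuthFlowTypeUserPasswordAuth; // must be enabled on the app client
request.clientId = @"<app client id>"; // placeholder
request.authParameters = @{@"USERNAME": @"<username>", @"PASSWORD": @"<password>"}; // placeholders

[[client initiateAuth:request] continueWithBlock:^id(AWSTask<AWSCognitoIdentityProviderInitiateAuthResponse *> *task) {
    if (task.error) {
        NSLog(@"InitiateAuth failed: %@", task.error);
    } else if (task.result.authenticationResult) {
        // Sign-in completed without a challenge; tokens are returned directly.
        NSLog(@"Access token: %@", task.result.authenticationResult.accessToken);
    } else {
        // A challenge (for example MFA) must be answered via RespondToAuthChallenge.
        NSLog(@"Challenge %@ with session %@", @(task.result.challengeName), task.result.session);
    }
    return nil;
}];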
Lists the sign-in devices that Amazon Cognito has registered to the current user.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
Lists the sign-in devices that Amazon Cognito has registered to the current user. For more information about device authentication, see Working with user devices in your user pool.
Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.
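A minimal sketch, assuming a placeholder access token:

#import <AWSCognitoIdentityProvider/AWSCognitoIdentityProvider.h>

AWSCognitoIdentityProvider *client = [AWSCognitoIdentityProvider defaultCognitoIdentityProvider];

AWSCognitoIdentityProviderListDevicesRequest *request = [AWSCognitoIdentityProviderListDevicesRequest new];
request.accessToken = @"<access token>"; // placeholder
request.limit = @10;                     // page size; pass paginationToken for later pages

[[client listDevices:request] continueWithBlock:^id(AWSTask<AWSCognitoIdentityProviderListDevicesResponse *> *task) {
    if (task.error) {
        NSLog(@"ListDevices failed: %@", task.error);
    } else {
        // Each entry is an AWSCognitoIdentityProviderDeviceType.
        NSLog(@"User has %lu tracked devices", (unsigned long)task.result.devices.count);
    }
    return nil;
}];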
Resends the confirmation code (for confirmation of registration) to a specific user in the user pool.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.
If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode, you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.
Resends the confirmation code (for confirmation of registration) to a specific user in the user pool.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.
This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.
If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode, you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.
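A hedged sketch; the app client ID and username are placeholders:

#import <AWSCognitoIdentityProvider/AWSCognitoIdentityProvider.h>

AWSCognitoIdentityProvider *client = [AWSCognitoIdentityProvider defaultCognitoIdentityProvider];

AWSCognitoIdentityProviderResendConfirmationCodeRequest *request =
    [AWSCognitoIdentityProviderResendConfirmationCodeRequest new];
request.clientId = @"<app client id>"; // placeholder
request.username = @"<username>";      // placeholder

[[client resendConfirmationCode:request] continueWithBlock:^id(AWSTask<AWSCognitoIdentityProviderResendConfirmationCodeResponse *> *task) {
    if (task.error) {
        NSLog(@"ResendConfirmationCode failed: %@", task.error);
    } else {
        NSLog(@"Confirmation code re-sent: %@", task.result.codeDeliveryDetails);
    }
    return nil;
}];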
Some API operations in a user pool generate a challenge, like a prompt for an MFA code, for device authentication that bypasses MFA, or for a custom authentication challenge. A RespondToAuthChallenge API request provides the answer to that challenge, like a code or a secure remote password (SRP). The parameters of a response to an authentication challenge vary with the type of challenge.
For more information about custom authentication challenges, see Custom authentication challenge Lambda triggers.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.
If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode, you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.
Some API operations in a user pool generate a challenge, like a prompt for an MFA code, for device authentication that bypasses MFA, or for a custom authentication challenge. A RespondToAuthChallenge API request provides the answer to that challenge, like a code or a secure remote password (SRP). The parameters of a response to an authentication challenge vary with the type of challenge.
For more information about custom authentication challenges, see Custom authentication challenge Lambda triggers.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.
This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.
If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode, you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.
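One plausible way to answer an SMS_MFA challenge returned by InitiateAuth; the session string, code, and client ID are placeholders:

#import <AWSCognitoIdentityProvider/AWSCognitoIdentityProvider.h>

AWSCognitoIdentityProvider *client = [AWSCognitoIdentityProvider defaultCognitoIdentityProvider];

AWSCognitoIdentityProviderRespondToAuthChallengeRequest *request =
    [AWSCognitoIdentityProviderRespondToAuthChallengeRequest new];
request.clientId = @"<app client id>"; // placeholder
request.challengeName = AWSCognitoIdentityProviderChallengeNameTypeSmsMfa; // the SMS_MFA challenge
request.session = @"<session from InitiateAuth>"; // placeholder
// Response keys vary by challenge type; these are the SMS_MFA keys.
request.challengeResponses = @{@"USERNAME": @"<username>", @"SMS_MFA_CODE": @"<code>"};

[[client respondToAuthChallenge:request] continueWithBlock:^id(AWSTask<AWSCognitoIdentityProviderRespondToAuthChallengeResponse *> *task) {
    if (task.error) {
        NSLog(@"RespondToAuthChallenge failed: %@", task.error);
    } else if (task.result.authenticationResult) {
        NSLog(@"Signed in; access token: %@", task.result.authenticationResult.accessToken);
    }
    return nil;
}];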
Revokes all of the access tokens generated by, and at the same time as, the specified refresh token. After a token is revoked, you can't use the revoked token to access Amazon Cognito user APIs, or to authorize access to your resource server.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
Revokes all of the access tokens generated by, and at the same time as, the specified refresh token. After a token is revoked, you can't use the revoked token to access Amazon Cognito user APIs, or to authorize access to your resource server.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.
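A minimal sketch of revoking a refresh token; the client ID and token are placeholders:

#import <AWSCognitoIdentityProvider/AWSCognitoIdentityProvider.h>

AWSCognitoIdentityProvider *client = [AWSCognitoIdentityProvider defaultCognitoIdentityProvider];

AWSCognitoIdentityProviderRevokeTokenRequest *request = [AWSCognitoIdentityProviderRevokeTokenRequest new];
request.clientId = @"<app client id>";   // placeholder
request.token = @"<refresh token>";      // the refresh token to revoke
// request.clientSecret is also required if the app client has a secret.

[[client revokeToken:request] continueWithBlock:^id(AWSTask<AWSCognitoIdentityProviderRevokeTokenResponse *> *task) {
    if (task.error) {
        NSLog(@"RevokeToken failed: %@", task.error);
    } else {
        NSLog(@"Refresh token and its derived access tokens revoked");
    }
    return nil;
}];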
Set the user's multi-factor authentication (MFA) method preference, including which MFA factors are activated and if any are preferred. Only one factor can be set as preferred. The preferred MFA factor will be used to authenticate a user if multiple factors are activated. If multiple options are activated and no preference is set, a challenge to choose an MFA option will be returned during sign-in. If an MFA type is activated for a user, the user will be prompted for MFA during all sign-in attempts unless device tracking is turned on and the device has been trusted. If you want MFA to be applied selectively based on the assessed risk level of sign-in attempts, deactivate MFA for users and turn on Adaptive Authentication for the user pool.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
Set the user's multi-factor authentication (MFA) method preference, including which MFA factors are activated and if any are preferred. Only one factor can be set as preferred. The preferred MFA factor will be used to authenticate a user if multiple factors are activated. If multiple options are activated and no preference is set, a challenge to choose an MFA option will be returned during sign-in. If an MFA type is activated for a user, the user will be prompted for MFA during all sign-in attempts unless device tracking is turned on and the device has been trusted. If you want MFA to be applied selectively based on the assessed risk level of sign-in attempts, deactivate MFA for users and turn on Adaptive Authentication for the user pool.
Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.
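A sketch that activates TOTP MFA and marks it preferred; property and type names follow the SDK's generated model, and the token is a placeholder:

#import <AWSCognitoIdentityProvider/AWSCognitoIdentityProvider.h>

AWSCognitoIdentityProvider *client = [AWSCognitoIdentityProvider defaultCognitoIdentityProvider];

AWSCognitoIdentityProviderSoftwareTokenMfaSettingsType *totp =
    [AWSCognitoIdentityProviderSoftwareTokenMfaSettingsType new];
totp.enabled = @YES;      // activate TOTP MFA for this user
totp.preferredMfa = @YES; // make it the preferred factor

AWSCognitoIdentityProviderSetUserMFAPreferenceRequest *request =
    [AWSCognitoIdentityProviderSetUserMFAPreferenceRequest new];
request.accessToken = @"<access token>"; // placeholder
request.softwareTokenMfaSettings = totp;

[[client setUserMFAPreference:request] continueWithBlock:^id(AWSTask<AWSCognitoIdentityProviderSetUserMFAPreferenceResponse *> *task) {
    if (task.error) {
        NSLog(@"SetUserMFAPreference failed: %@", task.error);
    }
    return nil;
}];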
This action is no longer supported. You can use it to configure only SMS MFA. You can't use it to configure time-based one-time password (TOTP) software token MFA. To configure either type of MFA, use SetUserMFAPreference instead.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
This action is no longer supported. You can use it to configure only SMS MFA. You can't use it to configure time-based one-time password (TOTP) software token MFA. To configure either type of MFA, use SetUserMFAPreference instead.
Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.
Registers the user in the specified user pool and creates a user name, password, and user attributes.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.
If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode, you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.
Registers the user in the specified user pool and creates a user name, password, and user attributes.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.
This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.
If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode, you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.
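A minimal sign-up sketch; the client ID, credentials, and email address are placeholders:

#import <AWSCognitoIdentityProvider/AWSCognitoIdentityProvider.h>

AWSCognitoIdentityProvider *client = [AWSCognitoIdentityProvider defaultCognitoIdentityProvider];

AWSCognitoIdentityProviderAttributeType *emailAttr = [AWSCognitoIdentityProviderAttributeType new];
emailAttr.name = @"email";
emailAttr.value = @"user@example.com"; // placeholder

AWSCognitoIdentityProviderSignUpRequest *request = [AWSCognitoIdentityProviderSignUpRequest new];
request.clientId = @"<app client id>"; // placeholder
request.username = @"<username>";      // placeholder
request.password = @"<password>";      // placeholder
request.userAttributes = @[emailAttr];

[[client signUp:request] continueWithBlock:^id(AWSTask<AWSCognitoIdentityProviderSignUpResponse *> *task) {
    if (task.error) {
        NSLog(@"SignUp failed: %@", task.error);
    } else {
        // userConfirmed is NO until the user confirms with the delivered code.
        NSLog(@"Created user %@ (confirmed: %@)", task.result.userSub, task.result.userConfirmed);
    }
    return nil;
}];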
Provides the feedback for an authentication event, whether it was from a valid user or not. This feedback is used for improving the risk evaluation decision for the user pool as part of Amazon Cognito advanced security.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
Provides the feedback for an authentication event, whether it was from a valid user or not. This feedback is used for improving the risk evaluation decision for the user pool as part of Amazon Cognito advanced security.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.
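A hedged sketch; the pool ID, event ID, and feedback token are placeholders, and the property names are assumed from the generated model:

#import <AWSCognitoIdentityProvider/AWSCognitoIdentityProvider.h>

AWSCognitoIdentityProvider *client = [AWSCognitoIdentityProvider defaultCognitoIdentityProvider];

AWSCognitoIdentityProviderUpdateAuthEventFeedbackRequest *request =
    [AWSCognitoIdentityProviderUpdateAuthEventFeedbackRequest new];
request.userPoolId = @"<user pool id>";       // placeholder
request.username = @"<username>";             // placeholder
request.eventId = @"<auth event id>";         // placeholder: from the user's event history
request.feedbackToken = @"<feedback token>";  // placeholder: from the event notification
request.feedbackValue = AWSCognitoIdentityProviderFeedbackValueTypeValid; // the event was legitimate

[[client updateAuthEventFeedback:request] continueWithBlock:^id(AWSTask<AWSCognitoIdentityProviderUpdateAuthEventFeedbackResponse *> *task) {
    if (task.error) {
        NSLog(@"UpdateAuthEventFeedback failed: %@", task.error);
    }
    return nil;
}];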
Updates the device status.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
Updates the device status. For more information about device authentication, see Working with user devices in your user pool.
Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.
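A minimal sketch that marks a tracked device as remembered; the device key and token are placeholders:

#import <AWSCognitoIdentityProvider/AWSCognitoIdentityProvider.h>

AWSCognitoIdentityProvider *client = [AWSCognitoIdentityProvider defaultCognitoIdentityProvider];

AWSCognitoIdentityProviderUpdateDeviceStatusRequest *request =
    [AWSCognitoIdentityProviderUpdateDeviceStatusRequest new];
request.accessToken = @"<access token>"; // placeholder
request.deviceKey = @"<device key>";     // placeholder: from ConfirmDevice or ListDevices
request.deviceRememberedStatus = AWSCognitoIdentityProviderDeviceRememberedStatusTypeRemembered;

[[client updateDeviceStatus:request] continueWithBlock:^id(AWSTask<AWSCognitoIdentityProviderUpdateDeviceStatusResponse *> *task) {
    if (task.error) {
        NSLog(@"UpdateDeviceStatus failed: %@", task.error);
    }
    return nil;
}];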
With this operation, your users can update one or more of their attributes with their own credentials. You authorize this API request with the user's access token. To delete an attribute from your user, submit the attribute in your API request with a blank value. Custom attribute values in this request must include the custom: prefix.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.
If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode, you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.
With this operation, your users can update one or more of their attributes with their own credentials. You authorize this API request with the user's access token. To delete an attribute from your user, submit the attribute in your API request with a blank value. Custom attribute values in this request must include the custom: prefix.
Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.
This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.
If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode, you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.
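A sketch that updates one attribute and deletes another by sending a blank value, as described above; names and values are placeholders:

#import <AWSCognitoIdentityProvider/AWSCognitoIdentityProvider.h>

AWSCognitoIdentityProvider *client = [AWSCognitoIdentityProvider defaultCognitoIdentityProvider];

AWSCognitoIdentityProviderAttributeType *plan = [AWSCognitoIdentityProviderAttributeType new];
plan.name = @"custom:plan"; // custom attributes carry the custom: prefix
plan.value = @"pro";        // placeholder

AWSCognitoIdentityProviderAttributeType *nickname = [AWSCognitoIdentityProviderAttributeType new];
nickname.name = @"nickname";
nickname.value = @""; // a blank value deletes the attribute

AWSCognitoIdentityProviderUpdateUserAttributesRequest *request =
    [AWSCognitoIdentityProviderUpdateUserAttributesRequest new];
request.accessToken = @"<access token>"; // placeholder
request.userAttributes = @[plan, nickname];

[[client updateUserAttributes:request] continueWithBlock:^id(AWSTask<AWSCognitoIdentityProviderUpdateUserAttributesResponse *> *task) {
    if (task.error) {
        NSLog(@"UpdateUserAttributes failed: %@", task.error);
    }
    return nil;
}];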
Use this API to register a user's entered time-based one-time password (TOTP) code and mark the user's software token MFA status as "verified" if successful. The request takes an access token or a session string, but not both.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
Use this API to register a user's entered time-based one-time password (TOTP) code and mark the user's software token MFA status as "verified" if successful. The request takes an access token or a session string, but not both.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.
Use this API to register a user's entered time-based one-time password (TOTP) code and mark the user's software token MFA status as "verified" if successful. The request takes an access token or a session string, but not both.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
Use this API to register a user's entered time-based one-time password (TOTP) code and mark the user's software token MFA status as "verified" if successful. The request takes an access token or a session string, but not both.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.
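A minimal sketch of verifying a TOTP code with an access token; pass session instead of accessToken when responding to an MFA-setup challenge, never both. The enum case name follows the SDK's generation conventions:

#import <AWSCognitoIdentityProvider/AWSCognitoIdentityProvider.h>

AWSCognitoIdentityProviderVerifySoftwareTokenRequest *request = [AWSCognitoIdentityProviderVerifySoftwareTokenRequest new];
request.accessToken = @"eyJra...";      // placeholder access token (or set request.session instead)
request.userCode = @"123456";           // 6-digit code from the authenticator app
request.friendlyDeviceName = @"MyPhone";

[[[AWSCognitoIdentityProvider defaultCognitoIdentityProvider] verifySoftwareToken:request]
    continueWithBlock:^id _Nullable(AWSTask<AWSCognitoIdentityProviderVerifySoftwareTokenResponse *> *task) {
        if (!task.error && task.result.status == AWSCognitoIdentityProviderVerifySoftwareTokenResponseTypeSuccess) {
            NSLog(@"Software token MFA is now verified");
        }
        return nil;
    }];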
Verifies the specified user attributes in the user pool.
If your user pool requires verification before Amazon Cognito updates the attribute value, VerifyUserAttribute updates the affected attribute to its pending value. For more information, see UserAttributeUpdateSettingsType.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
Verifies the specified user attributes in the user pool.
If your user pool requires verification before Amazon Cognito updates the attribute value, VerifyUserAttribute updates the affected attribute to its pending value. For more information, see UserAttributeUpdateSettingsType.
Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.
Verifies the specified user attributes in the user pool.
If your user pool requires verification before Amazon Cognito updates the attribute value, VerifyUserAttribute updates the affected attribute to its pending value. For more information, see UserAttributeUpdateSettingsType.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
Verifies the specified user attributes in the user pool.
If your user pool requires verification before Amazon Cognito updates the attribute value, VerifyUserAttribute updates the affected attribute to its pending value. For more information, see UserAttributeUpdateSettingsType.
Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.
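A minimal sketch of confirming a pending attribute value with the delivered code (names follow the generated model; values are placeholders):

#import <AWSCognitoIdentityProvider/AWSCognitoIdentityProvider.h>

// Confirm a pending email change with the code the user received at the new address.
AWSCognitoIdentityProviderVerifyUserAttributeRequest *request = [AWSCognitoIdentityProviderVerifyUserAttributeRequest new];
request.accessToken = @"eyJra...";  // placeholder access token
request.attributeName = @"email";
request.code = @"123456";           // verification code delivered to the pending value

[[[AWSCognitoIdentityProvider defaultCognitoIdentityProvider] verifyUserAttribute:request]
    continueWithBlock:^id _Nullable(AWSTask<AWSCognitoIdentityProviderVerifyUserAttributeResponse *> *task) {
        NSLog(@"%@", task.error ?: @"Attribute verified");
        return nil;
    }];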
-The language of the input documents. Currently, English is the only valid language.
+The language of the input documents.
 */
@property (nonatomic, assign) AWSComprehendLanguageCode languageCode;
@@ -2624,7 +2624,7 @@ typedef NS_ENUM(NSInteger, AWSComprehendToxicContentType) {
 /**
-The language of the input documents. Currently, English is the only valid language.
+The language of the input text. Enter the language code for English (en) or Spanish (es).
 */
@property (nonatomic, assign) AWSComprehendLanguageCode languageCode;
@@ -5510,7 +5510,7 @@ typedef NS_ENUM(NSInteger, AWSComprehendToxicContentType) {
@property (nonatomic, assign) AWSComprehendJobStatus jobStatus;
 /**
-The language code of the input documents
+The language code of the input documents.
 */
@property (nonatomic, assign) AWSComprehendLanguageCode languageCode;
@@ -6270,7 +6270,7 @@ typedef NS_ENUM(NSInteger, AWSComprehendToxicContentType) {
@property (nonatomic, strong) NSString * _Nullable jobName;
 /**
-The language of the input documents. Currently, English is the only valid language.
+The language of the input documents. Enter the language code for English (en) or Spanish (es).
 */
@property (nonatomic, assign) AWSComprehendLanguageCode languageCode;
diff --git a/AWSComprehend/AWSComprehendModel.m b/AWSComprehend/AWSComprehendModel.m
index a5a3a749438..63c5beb1e7f 100644
--- a/AWSComprehend/AWSComprehendModel.m
+++ b/AWSComprehend/AWSComprehendModel.m
@@ -1,5 +1,5 @@
 //
-// Copyright 2010-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+// Copyright 2010-2024 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License").
 // You may not use this file except in compliance with the License.
diff --git a/AWSComprehend/AWSComprehendResources.h b/AWSComprehend/AWSComprehendResources.h
index 97aff1c75fc..4c032bad383 100644
--- a/AWSComprehend/AWSComprehendResources.h
+++ b/AWSComprehend/AWSComprehendResources.h
@@ -1,5 +1,5 @@
 //
-// Copyright 2010-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+// Copyright 2010-2024 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License").
 // You may not use this file except in compliance with the License.
diff --git a/AWSComprehend/AWSComprehendResources.m b/AWSComprehend/AWSComprehendResources.m
index 5ed7ed945ab..6ac2e477043 100644
--- a/AWSComprehend/AWSComprehendResources.m
+++ b/AWSComprehend/AWSComprehendResources.m
@@ -1,5 +1,5 @@
 //
-// Copyright 2010-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+// Copyright 2010-2024 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License").
 // You may not use this file except in compliance with the License.
@@ -2135,7 +2135,7 @@ - (NSString *)definitionString {
        },\
        \"LanguageCode\":{\
          \"shape\":\"LanguageCode\",\
-          \"documentation\":\"The language of the input documents. Currently, English is the only valid language.\"\
+          \"documentation\":\"The language of the input documents.\"\
        }\
      }\
    },\
@@ -3211,7 +3211,7 @@ - (NSString *)definitionString {
        },\
        \"LanguageCode\":{\
          \"shape\":\"LanguageCode\",\
-          \"documentation\":\"The language of the input documents. Currently, English is the only valid language.\"\
+          \"documentation\":\"The language of the input text. Enter the language code for English (en) or Spanish (es).\"\
        }\
      }\
    },\
@@ -6004,7 +6004,7 @@ - (NSString *)definitionString {
        },\
        \"LanguageCode\":{\
          \"shape\":\"LanguageCode\",\
-          \"documentation\":\"The language code of the input documents\"\
+          \"documentation\":\"The language code of the input documents.\"\
        },\
        \"DataAccessRoleArn\":{\
          \"shape\":\"IamRoleArn\",\
@@ -6783,7 +6783,7 @@ - (NSString *)definitionString {
        },\
        \"LanguageCode\":{\
          \"shape\":\"LanguageCode\",\
-          \"documentation\":\"The language of the input documents. Currently, English is the only valid language.\"\
+          \"documentation\":\"The language of the input documents. Enter the language code for English (en) or Spanish (es).\"\
        },\
        \"ClientRequestToken\":{\
          \"shape\":\"ClientRequestTokenString\",\
diff --git a/AWSComprehend/AWSComprehendService.h b/AWSComprehend/AWSComprehendService.h
index 09283d5ce84..4f1bba4f847 100644
--- a/AWSComprehend/AWSComprehendService.h
+++ b/AWSComprehend/AWSComprehendService.h
@@ -1,5 +1,5 @@
 //
-// Copyright 2010-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+// Copyright 2010-2024 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License").
 // You may not use this file except in compliance with the License.
diff --git a/AWSComprehend/AWSComprehendService.m b/AWSComprehend/AWSComprehendService.m
index e70f84de95d..aa6d4033d7f 100644
--- a/AWSComprehend/AWSComprehendService.m
+++ b/AWSComprehend/AWSComprehendService.m
@@ -1,5 +1,5 @@
 //
-// Copyright 2010-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+// Copyright 2010-2024 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License").
 // You may not use this file except in compliance with the License.
@@ -25,7 +25,7 @@
 #import "AWSComprehendResources.h"
 static NSString *const AWSInfoComprehend = @"Comprehend";
-NSString *const AWSComprehendSDKVersion = @"2.33.10";
+NSString *const AWSComprehendSDKVersion = @"2.34.0";
 @interface AWSComprehendResponseSerializer : AWSJSONResponseSerializer
diff --git a/AWSComprehend/Info.plist b/AWSComprehend/Info.plist
index 00a733ed3c8..f13059556e3 100644
@@ -15,7 +15,7 @@
Queues: 100
Routing profiles: 100
Channels: 3 (VOICE, CHAT, and TASK channels are supported.)
RoutingStepExpressions: 50
Metric data is retrieved only for the resources associated with the queues or routing profiles, and by any channels included in the filter. (You cannot filter by both queue AND routing profile.) You can include both resource IDs and resource ARNs in the same request.
When using RoutingStepExpression, you need to pass exactly one QueueId.
Currently tagging is only supported on the resources that are passed in the filter.
+The filters to apply to returned metrics. You can filter up to the following limits:
Queues: 100
Routing profiles: 100
Channels: 3 (VOICE, CHAT, and TASK channels are supported.)
RoutingStepExpressions: 50
Metric data is retrieved only for the resources associated with the queues or routing profiles, and by any channels included in the filter. (You cannot filter by both queue AND routing profile.) You can include both resource IDs and resource ARNs in the same request.
When using the RoutingStepExpression filter, you need to pass exactly one QueueId. The filter is also case sensitive, so when using the RoutingStepExpression filter, grouping by ROUTING_STEP_EXPRESSION is required.
Currently tagging is only supported on the resources that are passed in the filter.
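A minimal Objective-C sketch of a GetMetricData request honoring these filter limits; class, property, and enum names follow the generated AWSConnect model, IDs are placeholders, channel values are assumed to be passed as their wire strings, and a default service configuration is assumed:

#import <AWSConnect/AWSConnect.h>

// One queue plus two channels, within the documented limits (100 queues, 3 channels).
AWSConnectFilters *filters = [AWSConnectFilters new];
filters.queues = @[ @"12345678-1234-1234-1234-123456789012" ];  // placeholder queue ID
filters.channels = @[ @"VOICE", @"CHAT" ];                      // channel wire values

AWSConnectHistoricalMetric *handled = [AWSConnectHistoricalMetric new];
handled.name = AWSConnectHistoricalMetricNameContactsHandled;
handled.statistic = AWSConnectStatisticSum;
handled.unit = AWSConnectUnitCount;

AWSConnectGetMetricDataRequest *request = [AWSConnectGetMetricDataRequest new];
request.instanceId = @"instance-id-placeholder";
request.startTime = [NSDate dateWithTimeIntervalSinceNow:-3600];
request.endTime = [NSDate date];
request.filters = filters;
request.historicalMetrics = @[ handled ];

[[[AWSConnect defaultConnect] getMetricData:request]
    continueWithBlock:^id _Nullable(AWSTask<AWSConnectGetMetricDataResponse *> *task) {
        NSLog(@"%@", task.error ?: task.result.metricResults);
        return nil;
    }];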
 */
@property (nonatomic, strong) AWSConnectFilters * _Nullable filters;
@@ -7871,7 +7871,7 @@ typedef NS_ENUM(NSInteger, AWSConnectVoiceRecordingTrack) {
@property (nonatomic, strong) NSDate * _Nullable endTime;
 /**
-The filters to apply to returned metrics. You can filter on the following resources:
Queues
Routing profiles
Agents
Channels
User hierarchy groups
Feature
Routing step expression
At least one filter must be passed from queues, routing profiles, agents, or user hierarchy groups.
To filter by phone number, see Create a historical metrics report in the Amazon Connect Administrator's Guide.
Note the following limits:
Filter keys: A maximum of 5 filter keys are supported in a single request. Valid filter keys: QUEUE | ROUTING_PROFILE | AGENT | CHANNEL | AGENT_HIERARCHY_LEVEL_ONE | AGENT_HIERARCHY_LEVEL_TWO | AGENT_HIERARCHY_LEVEL_THREE | AGENT_HIERARCHY_LEVEL_FOUR | AGENT_HIERARCHY_LEVEL_FIVE | FEATURE | contact/segmentAttributes/connect:Subtype | ROUTING_STEP_EXPRESSION
Filter values: A maximum of 100 filter values are supported in a single request. VOICE, CHAT, and TASK are valid filterValues for the CHANNEL filter key. They do not count towards the limit of 100 filter values. For example, a GetMetricDataV2 request can filter by 50 queues, 35 agents, and 15 routing profiles for a total of 100 filter values, along with 3 channel filters.
contact_lens_conversational_analytics is a valid filterValue for the FEATURE filter key. It is available only to contacts analyzed by Contact Lens conversational analytics.
connect:Chat, connect:SMS, connect:Telephony, and connect:WebRTC are valid filterValue examples (not exhaustive) for the contact/segmentAttributes/connect:Subtype filter key.
ROUTING_STEP_EXPRESSION is a valid filter key with a filter value up to 3000 characters in length.
The filters to apply to returned metrics. You can filter on the following resources:
Queues
Routing profiles
Agents
Channels
User hierarchy groups
Feature
Routing step expression
At least one filter must be passed from queues, routing profiles, agents, or user hierarchy groups.
To filter by phone number, see Create a historical metrics report in the Amazon Connect Administrator's Guide.
Note the following limits:
Filter keys: A maximum of 5 filter keys are supported in a single request. Valid filter keys: QUEUE | ROUTING_PROFILE | AGENT | CHANNEL | AGENT_HIERARCHY_LEVEL_ONE | AGENT_HIERARCHY_LEVEL_TWO | AGENT_HIERARCHY_LEVEL_THREE | AGENT_HIERARCHY_LEVEL_FOUR | AGENT_HIERARCHY_LEVEL_FIVE | FEATURE | contact/segmentAttributes/connect:Subtype | ROUTING_STEP_EXPRESSION
Filter values: A maximum of 100 filter values are supported in a single request. VOICE, CHAT, and TASK are valid filterValues for the CHANNEL filter key. They do not count towards the limit of 100 filter values. For example, a GetMetricDataV2 request can filter by 50 queues, 35 agents, and 15 routing profiles for a total of 100 filter values, along with 3 channel filters.
contact_lens_conversational_analytics is a valid filterValue for the FEATURE filter key. It is available only to contacts analyzed by Contact Lens conversational analytics.
connect:Chat, connect:SMS, connect:Telephony, and connect:WebRTC are valid filterValue examples (not exhaustive) for the contact/segmentAttributes/connect:Subtype filter key.
ROUTING_STEP_EXPRESSION is a valid filter key with a filter value up to 3000 characters in length. This filter is case and order sensitive. JSON string fields must be sorted in ascending order, and JSON array order should be kept as is.
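A minimal sketch of the V2 filter shape described above; filterKey and filterValues are the generated AWSConnectFilterV2 properties, and the ARN and IDs are placeholders:

#import <AWSConnect/AWSConnect.h>

// A QUEUE filter plus a CHANNEL filter; channel values do not count toward
// the 100-filter-value limit.
AWSConnectFilterV2 *queueFilter = [AWSConnectFilterV2 new];
queueFilter.filterKey = @"QUEUE";
queueFilter.filterValues = @[ @"12345678-1234-1234-1234-123456789012" ];

AWSConnectFilterV2 *channelFilter = [AWSConnectFilterV2 new];
channelFilter.filterKey = @"CHANNEL";
channelFilter.filterValues = @[ @"VOICE", @"CHAT" ];

AWSConnectGetMetricDataV2Request *request = [AWSConnectGetMetricDataV2Request new];
request.resourceArn = @"arn:aws:connect:us-west-2:123456789012:instance/placeholder";
request.startTime = [NSDate dateWithTimeIntervalSinceNow:-3600];
request.endTime = [NSDate date];
request.filters = @[ queueFilter, channelFilter ];
// request.metrics must also be set; see the MetricV2 sketch after the metric list below.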
The metrics to retrieve. Specify the name, groupings, and filters for each metric. The following historical metrics are available. For a description of each metric, see Historical metrics definitions in the Amazon Connect Administrator's Guide.
Unit: Percent
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype
This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Percent
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Data for this metric is available starting from October 1, 2023 0:00:00 GMT.
Unit: Percentage
Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy
This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available.
Unit: Percent
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Seconds
Valid metric filter key: INITIATION_METHOD
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype
Feature is a valid filter but not a valid grouping.
Unit: Seconds
Valid metric filter key: INITIATION_METHOD. For now, this metric only supports the following as INITIATION_METHOD: INBOUND | OUTBOUND | CALLBACK | API
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
The Negate key in Metric Level Filters is not applicable for this metric.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype
Feature is a valid filter but not a valid grouping.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype
This metric is available only for contacts analyzed by Contact Lens conversational analytics.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, RoutingStepExpression
Feature is a valid filter but not a valid grouping.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype
Feature is a valid filter but not a valid grouping.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype
Feature is a valid filter but not a valid grouping.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
Unit: Seconds
Valid metric filter key: INITIATION_METHOD
Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype
Feature is a valid filter but not a valid grouping.
This metric is available only for contacts analyzed by Contact Lens conversational analytics.
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
This metric is available only for contacts analyzed by Contact Lens conversational analytics.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
This metric is available only for contacts analyzed by Contact Lens conversational analytics.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype
Feature is a valid filter but not a valid grouping.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype
This metric is available only for contacts analyzed by Contact Lens conversational analytics.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
This metric is available only for contacts analyzed by Contact Lens conversational analytics.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
This metric is available only for contacts analyzed by Contact Lens conversational analytics.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, RoutingStepExpression
Unit: Count
Valid metric filter key: INITIATION_METHOD
Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype
Feature is a valid filter but not a valid grouping.
Unit: Count
Valid metric filter key: INITIATION_METHOD, DISCONNECT_REASON
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, RoutingStepExpression
Feature is a valid filter but not a valid grouping.
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Percent
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype
Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for "Less than").
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype
Feature is a valid filter but not a valid grouping.
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
Unit: Percent
Valid groupings and filters: Queue, RoutingStepExpression
Unit: Percent
Valid groupings and filters: Queue, RoutingStepExpression
This metric is available only for contacts analyzed by Contact Lens conversational analytics.
Unit: Percentage
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
This metric is available only for contacts analyzed by Contact Lens conversational analytics.
Unit: Percentage
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
This metric is available only for contacts analyzed by Contact Lens conversational analytics.
Unit: Percentage
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
This metric is available only for contacts analyzed by Contact Lens conversational analytics.
Unit: Percentage
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
You can include up to 20 SERVICE_LEVEL metrics in a request.
Unit: Percent
Valid groupings and filters: Queue, Channel, Routing Profile
Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for "Less than").
Unit: Count
Valid groupings and filters: Queue, RoutingStepExpression
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Seconds
Valid metric filter key: INITIATION_METHOD. This metric only supports the following filter keys as INITIATION_METHOD: INBOUND | OUTBOUND | CALLBACK | API
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
The Negate key in Metric Level Filters is not applicable for this metric.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype
Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for "Less than").
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype
Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for "Less than").
Valid metric filter key: DISCONNECT_REASON
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Seconds
Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Seconds
Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy
Unit: Seconds
Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype
The metrics to retrieve. Specify the name, groupings, and filters for each metric. The following historical metrics are available. For a description of each metric, see Historical metrics definitions in the Amazon Connect Administrator's Guide.
Unit: Percent
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype
This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Percent
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Data for this metric is available starting from October 1, 2023 0:00:00 GMT.
Unit: Percentage
Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy
This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available.
Unit: Percent
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Seconds
Valid metric filter key: INITIATION_METHOD
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype
Feature is a valid filter but not a valid grouping.
Unit: Seconds
Valid metric filter key: INITIATION_METHOD. For now, this metric only supports the following as INITIATION_METHOD: INBOUND | OUTBOUND | CALLBACK | API
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
The Negate key in Metric Level Filters is not applicable for this metric.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype
Feature is a valid filter but not a valid grouping.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype
This metric is available only for contacts analyzed by Contact Lens conversational analytics.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, RoutingStepExpression
Feature is a valid filter but not a valid grouping.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype
Feature is a valid filter but not a valid grouping.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype
Feature is a valid filter but not a valid grouping.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
Unit: Seconds
Valid metric filter key: INITIATION_METHOD
Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype
Feature is a valid filter but not a valid grouping.
This metric is available only for contacts analyzed by Contact Lens conversational analytics.
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
This metric is available only for contacts analyzed by Contact Lens conversational analytics.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
This metric is available only for contacts analyzed by Contact Lens conversational analytics.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype
Feature is a valid filter but not a valid grouping.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype
This metric is available only for contacts analyzed by Contact Lens conversational analytics.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
This metric is available only for contacts analyzed by Contact Lens conversational analytics.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
This metric is available only for contacts analyzed by Contact Lens conversational analytics.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, RoutingStepExpression
Unit: Count
Valid metric filter key: INITIATION_METHOD
Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype
Feature is a valid filter but not a valid grouping.
Unit: Count
Valid metric filter key: INITIATION_METHOD, DISCONNECT_REASON
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, RoutingStepExpression
Feature is a valid filter but not a valid grouping.
Unit: Count
Valid metric filter key: INITIATION_METHOD
Valid groupings and filters: Queue, Channel, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Percent
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
Unit: Count
Valid groupings and filters: Queue, Channel, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype
Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for "Less than").
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype
Feature is a valid filter but not a valid grouping.
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
Unit: Percent
Valid groupings and filters: Queue, RoutingStepExpression
Unit: Percent
Valid groupings and filters: Queue, RoutingStepExpression
This metric is available only for contacts analyzed by Contact Lens conversational analytics.
Unit: Percentage
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
This metric is available only for contacts analyzed by Contact Lens conversational analytics.
Unit: Percentage
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
This metric is available only for contacts analyzed by Contact Lens conversational analytics.
Unit: Percentage
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
This metric is available only for contacts analyzed by Contact Lens conversational analytics.
Unit: Percentage
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
You can include up to 20 SERVICE_LEVEL metrics in a request.
Unit: Percent
Valid groupings and filters: Queue, Channel, Routing Profile
Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for "Less than").
Unit: Count
Valid groupings and filters: Queue, RoutingStepExpression
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Seconds
Valid metric filter key: INITIATION_METHOD. This metric only supports the following filter keys as INITIATION_METHOD: INBOUND | OUTBOUND | CALLBACK | API
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
The Negate key in Metric Level Filters is not applicable for this metric.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype
Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for "Less than").
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype
Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for "Less than").
Valid metric filter key: DISCONNECT_REASON
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Seconds
Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Seconds
Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy
Unit: Seconds
Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype
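A minimal sketch of one metrics entry with the threshold semantics described in the list above; names follow the generated AWSConnectMetricV2 and AWSConnectThresholdV2 model:

#import <AWSConnect/AWSConnect.h>

// SERVICE_LEVEL with a 60-second threshold; LT ("Less than") is the required comparison.
AWSConnectThresholdV2 *threshold = [AWSConnectThresholdV2 new];
threshold.comparison = @"LT";
threshold.thresholdValue = @60;   // whole number of seconds, 1 to 604800 inclusive

AWSConnectMetricV2 *serviceLevel = [AWSConnectMetricV2 new];
serviceLevel.name = @"SERVICE_LEVEL";
serviceLevel.threshold = @[ threshold ];

// Attach to the GetMetricDataV2 request from the earlier filter sketch:
// request.metrics = @[ serviceLevel ];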
The identifier of the traffic distribution group.
+The identifier of the traffic distribution group. This can be the ID or the ARN if the API is being called in the Region where the traffic distribution group was created. The ARN must be provided if the call is from the replicated Region.
 */
@property (nonatomic, strong) NSString * _Nullable identifier;
@@ -17032,7 +17032,7 @@ typedef NS_ENUM(NSInteger, AWSConnectVoiceRecordingTrack) {
@property (nonatomic, strong) AWSConnectAgentConfig * _Nullable agentConfig;
 /**
-The identifier of the traffic distribution group. This can be the ID or the ARN if the API is being called in the Region where the traffic distribution group was created. The ARN must be provided if the call is from the replicated Region.
+The identifier of the traffic distribution group. This can be the ID or the ARN if the API is being called in the Region where the traffic distribution group was created. The ARN must be provided if the call is from the replicated Region.
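A minimal sketch of calling GetTrafficDistribution from a replicated Region, where the updated documentation requires the ARN form; the ARN is a placeholder:

#import <AWSConnect/AWSConnect.h>

AWSConnectGetTrafficDistributionRequest *request = [AWSConnectGetTrafficDistributionRequest new];
// From the replicated Region, pass the ARN; in the group's home Region the ID also works.
request.identifier = @"arn:aws:connect:us-west-2:123456789012:traffic-distribution-group/placeholder";

[[[AWSConnect defaultConnect] getTrafficDistribution:request]
    continueWithBlock:^id _Nullable(AWSTask<AWSConnectGetTrafficDistributionResponse *> *task) {
        NSLog(@"%@", task.error ?: task.result.telephonyConfig);
        return nil;
    }];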
 */
@property (nonatomic, strong) NSString * _Nullable identifier;
diff --git a/AWSConnect/AWSConnectResources.m b/AWSConnect/AWSConnectResources.m
index 71c6f703c5c..3340f9d46ee 100644
--- a/AWSConnect/AWSConnectResources.m
+++ b/AWSConnect/AWSConnectResources.m
@@ -10548,7 +10548,7 @@ - (NSString *)definitionString {
        },\
        \"Filters\":{\
          \"shape\":\"Filters\",\
-          \"documentation\":\"The filters to apply to returned metrics. You can filter up to the following limits:
Queues: 100
Routing profiles: 100
Channels: 3 (VOICE, CHAT, and TASK channels are supported.)
RoutingStepExpressions: 50
Metric data is retrieved only for the resources associated with the queues or routing profiles, and by any channels included in the filter. (You cannot filter by both queue AND routing profile.) You can include both resource IDs and resource ARNs in the same request.
When using RoutingStepExpression, you need to pass exactly one QueueId.
Currently tagging is only supported on the resources that are passed in the filter.
\"\ + \"documentation\":\"The filters to apply to returned metrics. You can filter up to the following limits:
Queues: 100
Routing profiles: 100
Channels: 3 (VOICE, CHAT, and TASK channels are supported.)
RoutingStepExpressions: 50
Metric data is retrieved only for the resources associated with the queues or routing profiles, and by any channels included in the filter. (You cannot filter by both queue AND routing profile.) You can include both resource IDs and resource ARNs in the same request.
When using the RoutingStepExpression filter, you need to pass exactly one QueueId. The filter is also case sensitive, so when using the RoutingStepExpression filter, grouping by ROUTING_STEP_EXPRESSION is required.
Currently tagging is only supported on the resources that are passed in the filter.
\"\ },\ \"Groupings\":{\ \"shape\":\"Groupings\",\ @@ -10805,7 +10805,7 @@ - (NSString *)definitionString { },\ \"Filters\":{\ \"shape\":\"FiltersV2List\",\ - \"documentation\":\"The filters to apply to returned metrics. You can filter on the following resources:
Queues
Routing profiles
Agents
Channels
User hierarchy groups
Feature
Routing step expression
At least one filter must be passed from queues, routing profiles, agents, or user hierarchy groups.
To filter by phone number, see Create a historical metrics report in the Amazon Connect Administrator's Guide.
Note the following limits:
Filter keys: A maximum of 5 filter keys are supported in a single request. Valid filter keys: QUEUE | ROUTING_PROFILE | AGENT | CHANNEL | AGENT_HIERARCHY_LEVEL_ONE | AGENT_HIERARCHY_LEVEL_TWO | AGENT_HIERARCHY_LEVEL_THREE | AGENT_HIERARCHY_LEVEL_FOUR | AGENT_HIERARCHY_LEVEL_FIVE | FEATURE | contact/segmentAttributes/connect:Subtype | ROUTING_STEP_EXPRESSION
Filter values: A maximum of 100 filter values are supported in a single request. VOICE, CHAT, and TASK are valid filterValues for the CHANNEL filter key. They do not count towards the limit of 100 filter values. For example, a GetMetricDataV2 request can filter by 50 queues, 35 agents, and 15 routing profiles for a total of 100 filter values, along with 3 channel filters.
contact_lens_conversational_analytics is a valid filterValue for the FEATURE filter key. It is available only to contacts analyzed by Contact Lens conversational analytics.
connect:Chat, connect:SMS, connect:Telephony, and connect:WebRTC are valid filterValue examples (not exhaustive) for the contact/segmentAttributes/connect:Subtype filter key.
ROUTING_STEP_EXPRESSION is a valid filter key with a filter value up to 3000 characters in length.
The filters to apply to returned metrics. You can filter on the following resources:
Queues
Routing profiles
Agents
Channels
User hierarchy groups
Feature
Routing step expression
At least one filter must be passed from queues, routing profiles, agents, or user hierarchy groups.
To filter by phone number, see Create a historical metrics report in the Amazon Connect Administrator's Guide.
Note the following limits:
Filter keys: A maximum of 5 filter keys are supported in a single request. Valid filter keys: QUEUE | ROUTING_PROFILE | AGENT | CHANNEL | AGENT_HIERARCHY_LEVEL_ONE | AGENT_HIERARCHY_LEVEL_TWO | AGENT_HIERARCHY_LEVEL_THREE | AGENT_HIERARCHY_LEVEL_FOUR | AGENT_HIERARCHY_LEVEL_FIVE | FEATURE | contact/segmentAttributes/connect:Subtype | ROUTING_STEP_EXPRESSION
Filter values: A maximum of 100 filter values are supported in a single request. VOICE, CHAT, and TASK are valid filterValues for the CHANNEL filter key. They do not count towards the limit of 100 filter values. For example, a GetMetricDataV2 request can filter by 50 queues, 35 agents, and 15 routing profiles for a total of 100 filter values, along with 3 channel filters.
contact_lens_conversational_analytics is a valid filterValue for the FEATURE filter key. It is available only to contacts analyzed by Contact Lens conversational analytics.
connect:Chat, connect:SMS, connect:Telephony, and connect:WebRTC are valid filterValue examples (not exhaustive) for the contact/segmentAttributes/connect:Subtype filter key.
ROUTING_STEP_EXPRESSION is a valid filter key with a filter value up to 3000 characters in length. This filter is case and order sensitive. JSON string fields must be sorted in ascending order, and JSON array order should be kept as is.
The metrics to retrieve. Specify the name, groupings, and filters for each metric. The following historical metrics are available. For a description of each metric, see Historical metrics definitions in the Amazon Connect Administrator's Guide.
Unit: Percent
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype
This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Percent
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Data for this metric is available starting from October 1, 2023 0:00:00 GMT.
Unit: Percentage
Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy
This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available.
Unit: Percent
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Seconds
Valid metric filter key: INITIATION_METHOD
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype
Feature is a valid filter but not a valid grouping.
Unit: Seconds
Valid metric filter key: INITIATION_METHOD. For now, this metric only supports the following as INITIATION_METHOD: INBOUND | OUTBOUND | CALLBACK | API
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
The Negate key in Metric Level Filters is not applicable for this metric.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype
Feature is a valid filter but not a valid grouping.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype
This metric is available only for contacts analyzed by Contact Lens conversational analytics.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, RoutingStepExpression
Feature is a valid filter but not a valid grouping.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype
Feature is a valid filter but not a valid grouping.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype
Feature is a valid filter but not a valid grouping.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
Unit: Seconds
Valid metric filter key: INITIATION_METHOD
Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype
Feature is a valid filter but not a valid grouping.
This metric is available only for contacts analyzed by Contact Lens conversational analytics.
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
This metric is available only for contacts analyzed by Contact Lens conversational analytics.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
This metric is available only for contacts analyzed by Contact Lens conversational analytics.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype
Feature is a valid filter but not a valid grouping.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype
This metric is available only for contacts analyzed by Contact Lens conversational analytics.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
This metric is available only for contacts analyzed by Contact Lens conversational analytics.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
This metric is available only for contacts analyzed by Contact Lens conversational analytics.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, RoutingStepExpression
Unit: Count
Valid metric filter key: INITIATION_METHOD
Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype
Feature is a valid filter but not a valid grouping.
Unit: Count
Valid metric filter key: INITIATION_METHOD, DISCONNECT_REASON
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, RoutingStepExpression
Feature is a valid filter but not a valid grouping.
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Percent
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype
Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for \\\"Less than\\\").
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype
Feature is a valid filter but not a valid grouping.
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
Unit: Percent
Valid groupings and filters: Queue, RoutingStepExpression
Unit: Percent
Valid groupings and filters: Queue, RoutingStepExpression
This metric is available only for contacts analyzed by Contact Lens conversational analytics.
Unit: Percentage
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
This metric is available only for contacts analyzed by Contact Lens conversational analytics.
Unit: Percentage
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
This metric is available only for contacts analyzed by Contact Lens conversational analytics.
Unit: Percentage
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
This metric is available only for contacts analyzed by Contact Lens conversational analytics.
Unit: Percentage
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
You can include up to 20 SERVICE_LEVEL metrics in a request.
Unit: Percent
Valid groupings and filters: Queue, Channel, Routing Profile
Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for \\\"Less than\\\").
Unit: Count
Valid groupings and filters: Queue, RoutingStepExpression
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Seconds
Valid metric filter key: INITIATION_METHOD. This metric only supports the following filter keys as INITIATION_METHOD: INBOUND | OUTBOUND | CALLBACK | API
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
The Negate key in Metric Level Filters is not applicable for this metric.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype
Threshold: For ThresholdValue
, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison
, you must enter LT
(for \\\"Less than\\\").
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype
Threshold: For ThresholdValue
, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison
, you must enter LT
(for \\\"Less than\\\").
Valid metric filter key: DISCONNECT_REASON
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Seconds
Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Seconds
Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy
Unit: Seconds
Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype
The metrics to retrieve. Specify the name, groupings, and filters for each metric. The following historical metrics are available. For a description of each metric, see Historical metrics definitions in the Amazon Connect Administrator's Guide.
Unit: Percent
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype
This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Percent
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Data for this metric is available starting from October 1, 2023 0:00:00 GMT.
Unit: Percentage
Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy
This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available.
Unit: Percent
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Seconds
Valid metric filter key: INITIATION_METHOD
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype
Feature is a valid filter but not a valid grouping.
Unit: Seconds
Valid metric filter key: INITIATION_METHOD
. This metric only supports the following filter keys as INITIATION_METHOD
: INBOUND
| OUTBOUND
| CALLBACK
| API
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
The Negate
key in Metric Level Filters is not applicable for this metric.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype
Feature is a valid filter but not a valid grouping.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype
This metric is available only for contacts analyzed by Contact Lens conversational analytics.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, RoutingStepExpression
Feature is a valid filter but not a valid grouping.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype
Feature is a valid filter but not a valid grouping.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype
Feature is a valid filter but not a valid grouping.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
Unit: Seconds
Valid metric filter key: INITIATION_METHOD
Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype
Feature is a valid filter but not a valid grouping.
This metric is available only for contacts analyzed by Contact Lens conversational analytics.
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
This metric is available only for contacts analyzed by Contact Lens conversational analytics.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
This metric is available only for contacts analyzed by Contact Lens conversational analytics.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype
Feature is a valid filter but not a valid grouping.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype
This metric is available only for contacts analyzed by Contact Lens conversational analytics.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
This metric is available only for contacts analyzed by Contact Lens conversational analytics.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
This metric is available only for contacts analyzed by Contact Lens conversational analytics.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, RoutingStepExpression
Unit: Count
Valid metric filter key: INITIATION_METHOD
Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype
Feature is a valid filter but not a valid grouping.
Unit: Count
Valid metric filter key: INITIATION_METHOD
, DISCONNECT_REASON
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, RoutingStepExpression
Feature is a valid filter but not a valid grouping.
Unit: Count
Valid metric filter key: INITIATION_METHOD
Valid groupings and filters: Queue, Channel, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Percent
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
Unit: Count
Valid groupings and filters: Queue, Channel, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype
Threshold: For ThresholdValue
, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison
, you must enter LT
(for \\\"Less than\\\").
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype
Feature is a valid filter but not a valid grouping.
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
Unit: Percent
Valid groupings and filters: Queue, RoutingStepExpression
Unit: Percent
Valid groupings and filters: Queue, RoutingStepExpression
This metric is available only for contacts analyzed by Contact Lens conversational analytics.
Unit: Percentage
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
This metric is available only for contacts analyzed by Contact Lens conversational analytics.
Unit: Percentage
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
This metric is available only for contacts analyzed by Contact Lens conversational analytics.
Unit: Percentage
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
This metric is available only for contacts analyzed by Contact Lens conversational analytics.
Unit: Percentage
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
You can include up to 20 SERVICE_LEVEL metrics in a request.
Unit: Percent
Valid groupings and filters: Queue, Channel, Routing Profile
Threshold: For ThresholdValue
, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison
, you must enter LT
(for \\\"Less than\\\").
Unit: Count
Valid groupings and filters: Queue, RoutingStepExpression
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Seconds
Valid metric filter key: INITIATION_METHOD
. This metric only supports the following filter keys as INITIATION_METHOD
: INBOUND
| OUTBOUND
| CALLBACK
| API
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
The Negate
key in Metric Level Filters is not applicable for this metric.
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype
Threshold: For ThresholdValue
, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison
, you must enter LT
(for \\\"Less than\\\").
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype
Threshold: For ThresholdValue
, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison
, you must enter LT
(for \\\"Less than\\\").
Valid metric filter key: DISCONNECT_REASON
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Seconds
Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Seconds
Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy
Unit: Seconds
Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy
Unit: Seconds
Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy
Unit: Count
Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype
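As a rough illustration of how these metric definitions translate into a request from this SDK, here is a minimal sketch of a GetMetricDataV2 call that applies the LT threshold and grouping rules described above; the ARN, queue ID, and values are placeholders, and the property names should be checked against AWSConnectModel.h:

// Hypothetical identifiers; a SERVICE_LEVEL metric with the LT threshold described above.
AWSConnectThresholdV2 *threshold = [AWSConnectThresholdV2 new];
threshold.comparison = @"LT";                 // "Less than"
threshold.thresholdValue = @60;               // whole number of seconds, 1-604800

AWSConnectMetricV2 *serviceLevel = [AWSConnectMetricV2 new];
serviceLevel.name = @"SERVICE_LEVEL";         // up to 20 SERVICE_LEVEL metrics per request
serviceLevel.threshold = @[threshold];

AWSConnectFilterV2 *queueFilter = [AWSConnectFilterV2 new];
queueFilter.filterKey = @"QUEUE";
queueFilter.filterValues = @[@"your-queue-id"];

AWSConnectGetMetricDataV2Request *request = [AWSConnectGetMetricDataV2Request new];
request.resourceArn = @"arn:aws:connect:us-west-2:123456789012:instance/your-instance-id";
request.startTime = [NSDate dateWithTimeIntervalSinceNow:-3600.0];
request.endTime = [NSDate date];
request.filters = @[queueFilter];
request.groupings = @[@"QUEUE", @"CHANNEL", @"ROUTING_PROFILE"]; // GroupingsV2 now allows up to 3
request.metrics = @[serviceLevel];

[[[AWSConnect defaultConnect] getMetricDataV2:request] continueWithBlock:^id _Nullable(AWSTask<AWSConnectGetMetricDataV2Response *> * _Nonnull task) {
    if (task.error) {
        NSLog(@"GetMetricDataV2 failed: %@", task.error);
    }
    return nil;
}];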
The identifier of the traffic distribution group.
\",\ + \"documentation\":\"The identifier of the traffic distribution group. This can be the ID or the ARN if the API is being called in the Region where the traffic distribution group was created. The ARN must be provided if the call is from the replicated Region.
\",\ \"location\":\"uri\",\ \"locationName\":\"Id\"\ }\ @@ -11021,7 +11021,7 @@ - (NSString *)definitionString { \"GroupingsV2\":{\ \"type\":\"list\",\ \"member\":{\"shape\":\"GroupingV2\"},\ - \"max\":2\ + \"max\":3\ },\ \"HierarchyGroup\":{\ \"type\":\"structure\",\ @@ -15117,7 +15117,7 @@ - (NSString *)definitionString { },\ \"PredefinedAttributeName\":{\ \"type\":\"string\",\ - \"max\":128,\ + \"max\":64,\ \"min\":1\ },\ \"PredefinedAttributeSearchConditionList\":{\ @@ -15145,13 +15145,13 @@ - (NSString *)definitionString { },\ \"PredefinedAttributeStringValue\":{\ \"type\":\"string\",\ - \"max\":128,\ + \"max\":64,\ \"min\":1\ },\ \"PredefinedAttributeStringValuesList\":{\ \"type\":\"list\",\ \"member\":{\"shape\":\"PredefinedAttributeStringValue\"},\ - \"max\":75,\ + \"max\":128,\ \"min\":1\ },\ \"PredefinedAttributeSummary\":{\ @@ -20773,7 +20773,7 @@ - (NSString *)definitionString { \"members\":{\ \"Id\":{\ \"shape\":\"TrafficDistributionGroupIdOrArn\",\ - \"documentation\":\"The identifier of the traffic distribution group. This can be the ID or the ARN if the API is being called in the Region where the traffic distribution group was created. The ARN must be provided if the call is from the replicated Region.
\",\ + \"documentation\":\"The identifier of the traffic distribution group. This can be the ID or the ARN if the API is being called in the Region where the traffic distribution group was created. The ARN must be provided if the call is from the replicated Region.
\",\ \"location\":\"uri\",\ \"locationName\":\"Id\"\ },\ diff --git a/AWSConnect/AWSConnectService.m b/AWSConnect/AWSConnectService.m index c16ff3429ad..85c3692b8bd 100644 --- a/AWSConnect/AWSConnectService.m +++ b/AWSConnect/AWSConnectService.m @@ -25,7 +25,7 @@ #import "AWSConnectResources.h" static NSString *const AWSInfoConnect = @"Connect"; -NSString *const AWSConnectSDKVersion = @"2.33.10"; +NSString *const AWSConnectSDKVersion = @"2.34.0"; @interface AWSConnectResponseSerializer : AWSJSONResponseSerializer diff --git a/AWSConnect/Info.plist b/AWSConnect/Info.plist index 00a733ed3c8..f13059556e3 100644 --- a/AWSConnect/Info.plist +++ b/AWSConnect/Info.plist @@ -15,7 +15,7 @@The content type of the request. Supported types are:
application/vnd.amazonaws.connect.event.typing
application/vnd.amazonaws.connect.event.connection.acknowledged
application/vnd.amazonaws.connect.event.message.delivered
application/vnd.amazonaws.connect.event.message.read
The content type of the request. Supported types are:
application/vnd.amazonaws.connect.event.typing
application/vnd.amazonaws.connect.event.connection.acknowledged (will be deprecated on December 31, 2024)
application/vnd.amazonaws.connect.event.message.delivered
application/vnd.amazonaws.connect.event.message.read
Allows you to confirm that the attachment has been uploaded using the pre-signed URL provided in StartAttachmentUpload API.
ConnectionToken
is used for invoking this API instead of ParticipantToken
.
The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication.
\"\ + \"documentation\":\"Allows you to confirm that the attachment has been uploaded using the pre-signed URL provided in StartAttachmentUpload API. A conflict exception is thrown when an attachment with that identifier is already being uploaded.
ConnectionToken
is used for invoking this API instead of ParticipantToken
.
The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication.
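A minimal sketch of the call described above, with placeholder tokens and IDs; resubmitting an attachment ID that is still uploading is what now surfaces as the conflict exception:

// Hypothetical tokens and IDs; confirms an upload made via the StartAttachmentUpload pre-signed URL.
AWSConnectParticipantCompleteAttachmentUploadRequest *complete = [AWSConnectParticipantCompleteAttachmentUploadRequest new];
complete.connectionToken = @"your-connection-token";            // from CreateParticipantConnection
complete.attachmentIds = @[@"attachment-id-from-StartAttachmentUpload"];
complete.clientToken = [[NSUUID UUID] UUIDString];              // idempotency token

[[[AWSConnectParticipant defaultConnectParticipant] completeAttachmentUpload:complete] continueWithBlock:^id _Nullable(AWSTask<AWSConnectParticipantCompleteAttachmentUploadResponse *> * _Nonnull task) {
    if (task.error) {
        NSLog(@"CompleteAttachmentUpload failed: %@", task.error); // e.g. the 409 conflict described above
    }
    return nil;
}];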
\"\ },\ \"CreateParticipantConnection\":{\ \"name\":\"CreateParticipantConnection\",\ @@ -168,7 +168,7 @@ - (NSString *)definitionString { {\"shape\":\"ThrottlingException\"},\ {\"shape\":\"ValidationException\"}\ ],\ - \"documentation\":\"Retrieves a transcript of the session, including details about any attachments. For information about accessing past chat contact transcripts for a persistent chat, see Enable persistent chat.
ConnectionToken
is used for invoking this API instead of ParticipantToken
.
The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication.
\"\ + \"documentation\":\"Retrieves a transcript of the session, including details about any attachments. For information about accessing past chat contact transcripts for a persistent chat, see Enable persistent chat.
If you have a process that consumes events in the transcript of a chat that has ended, note that chat transcripts contain the following event content types if the event has occurred during the chat session:
application/vnd.amazonaws.connect.event.participant.left
application/vnd.amazonaws.connect.event.participant.joined
application/vnd.amazonaws.connect.event.chat.ended
application/vnd.amazonaws.connect.event.transfer.succeeded
application/vnd.amazonaws.connect.event.transfer.failed
ConnectionToken
is used for invoking this API instead of ParticipantToken
.
The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication.
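For a process that consumes transcript events as described above, a sketch along these lines (the connection token is a placeholder) would surface the event content types:

AWSConnectParticipantGetTranscriptRequest *transcriptRequest = [AWSConnectParticipantGetTranscriptRequest new];
transcriptRequest.connectionToken = @"your-connection-token";
transcriptRequest.maxResults = @100;

[[[AWSConnectParticipant defaultConnectParticipant] getTranscript:transcriptRequest] continueWithBlock:^id _Nullable(AWSTask<AWSConnectParticipantGetTranscriptResponse *> * _Nonnull task) {
    for (AWSConnectParticipantItem *item in task.result.transcript) {
        // Events such as participant.joined or chat.ended arrive as items whose
        // contentType matches the list above.
        NSLog(@"transcript item contentType: %@", item.contentType);
    }
    return nil;
}];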
\"\ },\ \"SendEvent\":{\ \"name\":\"SendEvent\",\ @@ -185,7 +185,7 @@ - (NSString *)definitionString { {\"shape\":\"ValidationException\"},\ {\"shape\":\"ConflictException\"}\ ],\ - \"documentation\":\"Sends an event.
ConnectionToken
is used for invoking this API instead of ParticipantToken
.
The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication.
\"\ + \"documentation\":\"The application/vnd.amazonaws.connect.event.connection.acknowledged
ContentType will no longer be supported starting December 31, 2024. This event has been migrated to the CreateParticipantConnection API using the ConnectParticipant
field.
Sends an event. Message receipts are not supported when there are more than two active participants in the chat. Using the SendEvent API for message receipts when a supervisor has barged in will result in a conflict exception.
ConnectionToken
is used for invoking this API instead of ParticipantToken
.
The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication.
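A minimal sketch of sending a supported event, assuming a valid connection token; it uses the typing event rather than the connection.acknowledged event slated for deprecation:

AWSConnectParticipantSendEventRequest *event = [AWSConnectParticipantSendEventRequest new];
event.connectionToken = @"your-connection-token"; // placeholder
// The connection.acknowledged content type will no longer be supported after
// December 31, 2024; the typing event remains supported.
event.contentType = @"application/vnd.amazonaws.connect.event.typing";

[[[AWSConnectParticipant defaultConnectParticipant] sendEvent:event] continueWithBlock:^id _Nullable(AWSTask<AWSConnectParticipantSendEventResponse *> * _Nonnull task) {
    if (task.error) {
        NSLog(@"SendEvent failed: %@", task.error); // e.g. the conflict exception noted above for message receipts
    }
    return nil;
}];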
\"\ },\ \"SendMessage\":{\ \"name\":\"SendMessage\",\ @@ -360,7 +360,7 @@ - (NSString *)definitionString { \"members\":{\ \"Message\":{\"shape\":\"Reason\"}\ },\ - \"documentation\":\"An attachment with that identifier is already being uploaded.
\",\ + \"documentation\":\"The requested operation conflicts with the current state of a service resource associated with the request.
\",\ \"error\":{\"httpStatusCode\":409},\ \"exception\":true\ },\ @@ -786,7 +786,7 @@ - (NSString *)definitionString { \"members\":{\ \"ContentType\":{\ \"shape\":\"ChatContentType\",\ - \"documentation\":\"The content type of the request. Supported types are:
application/vnd.amazonaws.connect.event.typing
application/vnd.amazonaws.connect.event.connection.acknowledged
application/vnd.amazonaws.connect.event.message.delivered
application/vnd.amazonaws.connect.event.message.read
The content type of the request. Supported types are:
application/vnd.amazonaws.connect.event.typing
application/vnd.amazonaws.connect.event.connection.acknowledged (will be deprecated on December 31, 2024)
application/vnd.amazonaws.connect.event.message.delivered
application/vnd.amazonaws.connect.event.message.read
Allows you to confirm that the attachment has been uploaded using the pre-signed URL provided in StartAttachmentUpload API.
ConnectionToken
is used for invoking this API instead of ParticipantToken
.
The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication.
+Allows you to confirm that the attachment has been uploaded using the pre-signed URL provided in StartAttachmentUpload API. A conflict exception is thrown when an attachment with that identifier is already being uploaded.
ConnectionToken
is used for invoking this API instead of ParticipantToken
.
The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication.
@param request A container for the necessary parameters to execute the CompleteAttachmentUpload service method. @@ -187,7 +187,7 @@ FOUNDATION_EXPORT NSString *const AWSConnectParticipantSDKVersion; - (AWSTaskAllows you to confirm that the attachment has been uploaded using the pre-signed URL provided in StartAttachmentUpload API.
ConnectionToken
is used for invoking this API instead of ParticipantToken
.
The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication.
+Allows you to confirm that the attachment has been uploaded using the pre-signed URL provided in StartAttachmentUpload API. A conflict exception is thrown when an attachment with that identifier is already being uploaded.
ConnectionToken
is used for invoking this API instead of ParticipantToken
.
The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication.
@param request A container for the necessary parameters to execute the CompleteAttachmentUpload service method. @param completionHandler The completion handler to call when the load request is complete. @@ -300,7 +300,7 @@ FOUNDATION_EXPORT NSString *const AWSConnectParticipantSDKVersion; - (void)getAttachment:(AWSConnectParticipantGetAttachmentRequest *)request completionHandler:(void (^ _Nullable)(AWSConnectParticipantGetAttachmentResponse * _Nullable response, NSError * _Nullable error))completionHandler; /** -Retrieves a transcript of the session, including details about any attachments. For information about accessing past chat contact transcripts for a persistent chat, see Enable persistent chat.
ConnectionToken
is used for invoking this API instead of ParticipantToken
.
The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication.
+Retrieves a transcript of the session, including details about any attachments. For information about accessing past chat contact transcripts for a persistent chat, see Enable persistent chat.
If you have a process that consumes events in the transcript of a chat that has ended, note that chat transcripts contain the following event content types if the event has occurred during the chat session:
application/vnd.amazonaws.connect.event.participant.left
application/vnd.amazonaws.connect.event.participant.joined
application/vnd.amazonaws.connect.event.chat.ended
application/vnd.amazonaws.connect.event.transfer.succeeded
application/vnd.amazonaws.connect.event.transfer.failed
ConnectionToken
is used for invoking this API instead of ParticipantToken
.
The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication.
@param request A container for the necessary parameters to execute the GetTranscript service method. @@ -312,7 +312,7 @@ FOUNDATION_EXPORT NSString *const AWSConnectParticipantSDKVersion; - (AWSTaskRetrieves a transcript of the session, including details about any attachments. For information about accessing past chat contact transcripts for a persistent chat, see Enable persistent chat.
ConnectionToken
is used for invoking this API instead of ParticipantToken
.
The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication.
+Retrieves a transcript of the session, including details about any attachments. For information about accessing past chat contact transcripts for a persistent chat, see Enable persistent chat.
If you have a process that consumes events in the transcript of a chat that has ended, note that chat transcripts contain the following event content types if the event has occurred during the chat session:
application/vnd.amazonaws.connect.event.participant.left
application/vnd.amazonaws.connect.event.participant.joined
application/vnd.amazonaws.connect.event.chat.ended
application/vnd.amazonaws.connect.event.transfer.succeeded
application/vnd.amazonaws.connect.event.transfer.failed
ConnectionToken
is used for invoking this API instead of ParticipantToken
.
The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication.
@param request A container for the necessary parameters to execute the GetTranscript service method. @param completionHandler The completion handler to call when the load request is complete. @@ -325,7 +325,7 @@ FOUNDATION_EXPORT NSString *const AWSConnectParticipantSDKVersion; - (void)getTranscript:(AWSConnectParticipantGetTranscriptRequest *)request completionHandler:(void (^ _Nullable)(AWSConnectParticipantGetTranscriptResponse * _Nullable response, NSError * _Nullable error))completionHandler; /** -Sends an event.
ConnectionToken
is used for invoking this API instead of ParticipantToken
.
The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication.
+The application/vnd.amazonaws.connect.event.connection.acknowledged
ContentType will no longer be supported starting December 31, 2024. This event has been migrated to the CreateParticipantConnection API using the ConnectParticipant
field.
Sends an event. Message receipts are not supported when there are more than two active participants in the chat. Using the SendEvent API for message receipts when a supervisor has barged in will result in a conflict exception.
ConnectionToken
is used for invoking this API instead of ParticipantToken
.
The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication.
@param request A container for the necessary parameters to execute the SendEvent service method. @@ -337,7 +337,7 @@ FOUNDATION_EXPORT NSString *const AWSConnectParticipantSDKVersion; - (AWSTaskSends an event.
ConnectionToken
is used for invoking this API instead of ParticipantToken
.
The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication.
+The application/vnd.amazonaws.connect.event.connection.acknowledged
ContentType will no longer be supported starting December 31, 2024. This event has been migrated to the CreateParticipantConnection API using the ConnectParticipant
field.
Sends an event. Message receipts are not supported when there are more than two active participants in the chat. Using the SendEvent API for message receipts when a supervisor has barged in will result in a conflict exception.
ConnectionToken
is used for invoking this API instead of ParticipantToken
.
The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication.
@param request A container for the necessary parameters to execute the SendEvent service method. @param completionHandler The completion handler to call when the load request is complete. diff --git a/AWSConnectParticipant/AWSConnectParticipantService.m b/AWSConnectParticipant/AWSConnectParticipantService.m index c47da0e5d38..e93c708a466 100644 --- a/AWSConnectParticipant/AWSConnectParticipantService.m +++ b/AWSConnectParticipant/AWSConnectParticipantService.m @@ -25,7 +25,7 @@ #import "AWSConnectParticipantResources.h" static NSString *const AWSInfoConnectParticipant = @"ConnectParticipant"; -NSString *const AWSConnectParticipantSDKVersion = @"2.33.10"; +NSString *const AWSConnectParticipantSDKVersion = @"2.34.0"; @interface AWSConnectParticipantResponseSerializer : AWSJSONResponseSerializer diff --git a/AWSConnectParticipant/Info.plist b/AWSConnectParticipant/Info.plist index 00a733ed3c8..f13059556e3 100644 --- a/AWSConnectParticipant/Info.plist +++ b/AWSConnectParticipant/Info.plist @@ -15,7 +15,7 @@Represents the DynamoDB Streams configuration for the table.
You receive a ResourceInUseException
if you try to enable a stream on a table that already has a stream, or if you try to disable a stream on a table that doesn't have a stream.
Represents the DynamoDB Streams configuration for the table.
You receive a ValidationException
if you try to enable a stream on a table that already has a stream, or if you try to disable a stream on a table that doesn't have a stream.
Restores the specified table to the specified point in time within EarliestRestorableDateTime
and LatestRestorableDateTime
. You can restore your table to any point in time during the last 35 days. Any number of users can execute up to 4 concurrent restores (any type of restore) in a given account.
When you restore using point in time recovery, DynamoDB restores your table data to the state based on the selected date and time (day:hour:minute:second) to a new table.
Along with data, the following are also included on the new restored table using point in time recovery:
Global secondary indexes (GSIs)
Local secondary indexes (LSIs)
Provisioned read and write capacity
Encryption settings
All these settings come from the current settings of the source table at the time of restore.
You must manually set up the following on the restored table:
Auto scaling policies
IAM policies
Amazon CloudWatch metrics and alarms
Tags
Stream settings
Time to Live (TTL) settings
Point in time recovery settings
Restores the specified table to the specified point in time within EarliestRestorableDateTime
and LatestRestorableDateTime
. You can restore your table to any point in time during the last 35 days. Any number of users can execute up to 50 concurrent restores (any type of restore) in a given account.
When you restore using point in time recovery, DynamoDB restores your table data to the state based on the selected date and time (day:hour:minute:second) to a new table.
Along with data, the following are also included on the new restored table using point in time recovery:
Global secondary indexes (GSIs)
Local secondary indexes (LSIs)
Provisioned read and write capacity
Encryption settings
All these settings come from the current settings of the source table at the time of restore.
You must manually set up the following on the restored table:
Auto scaling policies
IAM policies
Amazon CloudWatch metrics and alarms
Tags
Stream settings
Time to Live (TTL) settings
Point in time recovery settings
Modifies the provisioned throughput settings, global secondary indexes, or DynamoDB Streams settings for a given table.
This operation only applies to Version 2019.11.21 (Current) of global tables.
You can only perform one of the following operations at once:
Modify the provisioned throughput settings of the table.
Remove a global secondary index from the table.
Create a new global secondary index on the table. After the index begins backfilling, you can use UpdateTable
to perform other operations.
UpdateTable
is an asynchronous operation; while it is executing, the table status changes from ACTIVE
to UPDATING
. While it is UPDATING
, you cannot issue another UpdateTable
request. When the table returns to the ACTIVE
state, the UpdateTable
operation is complete.
Modifies the provisioned throughput settings, global secondary indexes, or DynamoDB Streams settings for a given table.
This operation only applies to Version 2019.11.21 (Current) of global tables.
You can only perform one of the following operations at once:
Modify the provisioned throughput settings of the table.
Remove a global secondary index from the table.
Create a new global secondary index on the table. After the index begins backfilling, you can use UpdateTable
to perform other operations.
UpdateTable
is an asynchronous operation; while it's executing, the table status changes from ACTIVE
to UPDATING
. While it's UPDATING
, you can't issue another UpdateTable
request on the base table nor any replicas. When the table returns to the ACTIVE
state, the UpdateTable
operation is complete.
Represents the DynamoDB Streams configuration for the table.
You receive a ResourceInUseException
if you try to enable a stream on a table that already has a stream, or if you try to disable a stream on a table that doesn't have a stream.
Represents the DynamoDB Streams configuration for the table.
You receive a ValidationException
if you try to enable a stream on a table that already has a stream, or if you try to disable a stream on a table that doesn't have a stream.
Restores the specified table to the specified point in time within EarliestRestorableDateTime
and LatestRestorableDateTime
. You can restore your table to any point in time during the last 35 days. Any number of users can execute up to 4 concurrent restores (any type of restore) in a given account.
When you restore using point in time recovery, DynamoDB restores your table data to the state based on the selected date and time (day:hour:minute:second) to a new table.
Along with data, the following are also included on the new restored table using point in time recovery:
Global secondary indexes (GSIs)
Local secondary indexes (LSIs)
Provisioned read and write capacity
Encryption settings
All these settings come from the current settings of the source table at the time of restore.
You must manually set up the following on the restored table:
Auto scaling policies
IAM policies
Amazon CloudWatch metrics and alarms
Tags
Stream settings
Time to Live (TTL) settings
Point in time recovery settings
Restores the specified table to the specified point in time within EarliestRestorableDateTime
and LatestRestorableDateTime
. You can restore your table to any point in time during the last 35 days. Any number of users can execute up to 50 concurrent restores (any type of restore) in a given account.
When you restore using point in time recovery, DynamoDB restores your table data to the state based on the selected date and time (day:hour:minute:second) to a new table.
Along with data, the following are also included on the new restored table using point in time recovery:
Global secondary indexes (GSIs)
Local secondary indexes (LSIs)
Provisioned read and write capacity
Encryption settings
All these settings come from the current settings of the source table at the time of restore.
You must manually set up the following on the restored table:
Auto scaling policies
IAM policies
Amazon CloudWatch metrics and alarms
Tags
Stream settings
Time to Live (TTL) settings
Point in time recovery settings
Restores the specified table to the specified point in time within EarliestRestorableDateTime
and LatestRestorableDateTime
. You can restore your table to any point in time during the last 35 days. Any number of users can execute up to 4 concurrent restores (any type of restore) in a given account.
When you restore using point in time recovery, DynamoDB restores your table data to the state based on the selected date and time (day:hour:minute:second) to a new table.
Along with data, the following are also included on the new restored table using point in time recovery:
Global secondary indexes (GSIs)
Local secondary indexes (LSIs)
Provisioned read and write capacity
Encryption settings
All these settings come from the current settings of the source table at the time of restore.
You must manually set up the following on the restored table:
Auto scaling policies
IAM policies
Amazon CloudWatch metrics and alarms
Tags
Stream settings
Time to Live (TTL) settings
Point in time recovery settings
Restores the specified table to the specified point in time within EarliestRestorableDateTime
and LatestRestorableDateTime
. You can restore your table to any point in time during the last 35 days. Any number of users can execute up to 50 concurrent restores (any type of restore) in a given account.
When you restore using point in time recovery, DynamoDB restores your table data to the state based on the selected date and time (day:hour:minute:second) to a new table.
Along with data, the following are also included on the new restored table using point in time recovery:
Global secondary indexes (GSIs)
Local secondary indexes (LSIs)
Provisioned read and write capacity
Encryption settings
All these settings come from the current settings of the source table at the time of restore.
You must manually set up the following on the restored table:
Auto scaling policies
IAM policies
Amazon CloudWatch metrics and alarms
Tags
Stream settings
Time to Live (TTL) settings
Point in time recovery settings
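A short sketch of the point-in-time restore described above, with placeholder table names:

AWSDynamoDBRestoreTableToPointInTimeInput *restore = [AWSDynamoDBRestoreTableToPointInTimeInput new];
restore.sourceTableName = @"Orders";          // placeholder
restore.targetTableName = @"Orders-restored"; // placeholder
restore.useLatestRestorableTime = @YES;       // or set restoreDateTime to a specific NSDate

[[[AWSDynamoDB defaultDynamoDB] restoreTableToPointInTime:restore] continueWithBlock:^id _Nullable(AWSTask<AWSDynamoDBRestoreTableToPointInTimeOutput *> * _Nonnull task) {
    if (task.error) {
        NSLog(@"RestoreTableToPointInTime failed: %@", task.error);
    }
    // Per the list above, stream, TTL, auto scaling, and point in time recovery
    // settings must be reconfigured manually on the restored table.
    return nil;
}];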
Modifies the provisioned throughput settings, global secondary indexes, or DynamoDB Streams settings for a given table.
This operation only applies to Version 2019.11.21 (Current) of global tables.
You can only perform one of the following operations at once:
Modify the provisioned throughput settings of the table.
Remove a global secondary index from the table.
Create a new global secondary index on the table. After the index begins backfilling, you can use UpdateTable
to perform other operations.
UpdateTable
is an asynchronous operation; while it is executing, the table status changes from ACTIVE
to UPDATING
. While it is UPDATING
, you cannot issue another UpdateTable
request. When the table returns to the ACTIVE
state, the UpdateTable
operation is complete.
Modifies the provisioned throughput settings, global secondary indexes, or DynamoDB Streams settings for a given table.
This operation only applies to Version 2019.11.21 (Current) of global tables.
You can only perform one of the following operations at once:
Modify the provisioned throughput settings of the table.
Remove a global secondary index from the table.
Create a new global secondary index on the table. After the index begins backfilling, you can use UpdateTable
to perform other operations.
UpdateTable
is an asynchronous operation; while it's executing, the table status changes from ACTIVE
to UPDATING
. While it's UPDATING
, you can't issue another UpdateTable
request on the base table nor any replicas. When the table returns to the ACTIVE
state, the UpdateTable
operation is complete.
Modifies the provisioned throughput settings, global secondary indexes, or DynamoDB Streams settings for a given table.
This operation only applies to Version 2019.11.21 (Current) of global tables.
You can only perform one of the following operations at once:
Modify the provisioned throughput settings of the table.
Remove a global secondary index from the table.
Create a new global secondary index on the table. After the index begins backfilling, you can use UpdateTable
to perform other operations.
UpdateTable
is an asynchronous operation; while it is executing, the table status changes from ACTIVE
to UPDATING
. While it is UPDATING
, you cannot issue another UpdateTable
request. When the table returns to the ACTIVE
state, the UpdateTable
operation is complete.
Modifies the provisioned throughput settings, global secondary indexes, or DynamoDB Streams settings for a given table.
This operation only applies to Version 2019.11.21 (Current) of global tables.
You can only perform one of the following operations at once:
Modify the provisioned throughput settings of the table.
Remove a global secondary index from the table.
Create a new global secondary index on the table. After the index begins backfilling, you can use UpdateTable
to perform other operations.
UpdateTable
is an asynchronous operation; while it's executing, the table status changes from ACTIVE
to UPDATING
. While it's UPDATING
, you can't issue another UpdateTable
request on the base table nor any replicas. When the table returns to the ACTIVE
state, the UpdateTable
operation is complete.
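A minimal sketch of toggling the stream settings this documentation covers, with a placeholder table name; per the updated wording, enabling a stream on a table that already has one now surfaces as a ValidationException:

AWSDynamoDBStreamSpecification *streamSpec = [AWSDynamoDBStreamSpecification new];
streamSpec.streamEnabled = @YES;
streamSpec.streamViewType = AWSDynamoDBStreamViewTypeNewAndOldImages;

AWSDynamoDBUpdateTableInput *updateTable = [AWSDynamoDBUpdateTableInput new];
updateTable.tableName = @"Orders"; // placeholder
updateTable.streamSpecification = streamSpec;

[[[AWSDynamoDB defaultDynamoDB] updateTable:updateTable] continueWithBlock:^id _Nullable(AWSTask<AWSDynamoDBUpdateTableOutput *> * _Nonnull task) {
    if (task.error) {
        // ValidationException if the table already has a stream; also recall that
        // UpdateTable is asynchronous and the table stays UPDATING until it completes.
        NSLog(@"UpdateTable failed: %@", task.error);
    }
    return nil;
}];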
[Price protection] The price protection threshold for Spot Instances, as a percentage of an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose price exceeds your specified threshold.
The parameter accepts an integer, which Amazon EC2 interprets as a percentage.
To indicate no price protection threshold, specify a high value, such as 999999
.
If you set DesiredCapacityType
to vcpu
or memory-mib
, the price protection threshold is based on the per-vCPU or per-memory price instead of the per-instance price.
Only one of SpotMaxPricePercentageOverLowestPrice
or MaxSpotPriceAsPercentageOfOptimalOnDemandPrice
can be specified. If you don't specify either, then SpotMaxPricePercentageOverLowestPrice
is used and the value for that parameter defaults to 100
.
The minimum and maximum amount of memory per vCPU, in GiB.
Default: No minimum or maximum limits
*/ @@ -35750,7 +35755,7 @@ typedef NS_ENUM(NSInteger, AWSEC2scope) { @property (nonatomic, strong) AWSEC2NetworkInterfaceCount * _Nullable networkInterfaceCount; /** -The price protection threshold for On-Demand Instances. This is the maximum you’ll pay for an On-Demand Instance, expressed as a percentage above the least expensive current generation M, C, or R instance type with your specified attributes. When Amazon EC2 selects instance types with your attributes, it excludes instance types priced above your threshold.
The parameter accepts an integer, which Amazon EC2 interprets as a percentage.
To turn off price protection, specify a high value, such as 999999
.
This parameter is not supported for GetSpotPlacementScores and GetInstanceTypesFromInstanceRequirements.
If you set TargetCapacityUnitType
to vcpu
or memory-mib
, the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price.
Default: 20
[Price protection] The price protection threshold for On-Demand Instances, as a percentage higher than an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose price exceeds your specified threshold.
The parameter accepts an integer, which Amazon EC2 interprets as a percentage.
To turn off price protection, specify a high value, such as 999999
.
This parameter is not supported for GetSpotPlacementScores and GetInstanceTypesFromInstanceRequirements.
If you set TargetCapacityUnitType
to vcpu
or memory-mib
, the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price.
Default: 20
The price protection threshold for Spot Instances. This is the maximum you’ll pay for a Spot Instance, expressed as a percentage above the least expensive current generation M, C, or R instance type with your specified attributes. When Amazon EC2 selects instance types with your attributes, it excludes instance types priced above your threshold.
The parameter accepts an integer, which Amazon EC2 interprets as a percentage.
To turn off price protection, specify a high value, such as 999999
.
This parameter is not supported for GetSpotPlacementScores and GetInstanceTypesFromInstanceRequirements.
If you set TargetCapacityUnitType
to vcpu
or memory-mib
, the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price.
Default: 100
[Price protection] The price protection threshold for Spot Instances, as a percentage higher than an identified Spot price. The identified Spot price is the Spot price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified Spot price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose Spot price exceeds your specified threshold.
The parameter accepts an integer, which Amazon EC2 interprets as a percentage.
To indicate no price protection threshold, specify a high value, such as 999999
.
If you set TargetCapacityUnitType
to vcpu
or memory-mib
, the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price.
This parameter is not supported for GetSpotPlacementScores and GetInstanceTypesFromInstanceRequirements.
Only one of SpotMaxPricePercentageOverLowestPrice
or MaxSpotPriceAsPercentageOfOptimalOnDemandPrice
can be specified. If you don't specify either, then SpotMaxPricePercentageOverLowestPrice
is used and the value for that parameter defaults to 100
.
Default: 100
[Price protection] The price protection threshold for Spot Instances, as a percentage of an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose price exceeds your specified threshold.
The parameter accepts an integer, which Amazon EC2 interprets as a percentage.
To indicate no price protection threshold, specify a high value, such as 999999
.
If you set DesiredCapacityType
to vcpu
or memory-mib
, the price protection threshold is based on the per-vCPU or per-memory price instead of the per-instance price.
Only one of SpotMaxPricePercentageOverLowestPrice
or MaxSpotPriceAsPercentageOfOptimalOnDemandPrice
can be specified. If you don't specify either, then SpotMaxPricePercentageOverLowestPrice
is used and the value for that parameter defaults to 100
.
The minimum and maximum amount of memory per vCPU, in GiB.
Default: No minimum or maximum limits
*/ @@ -35874,7 +35884,7 @@ typedef NS_ENUM(NSInteger, AWSEC2scope) { @property (nonatomic, strong) AWSEC2NetworkInterfaceCountRequest * _Nullable networkInterfaceCount; /** -The price protection threshold for On-Demand Instances. This is the maximum you’ll pay for an On-Demand Instance, expressed as a percentage above the least expensive current generation M, C, or R instance type with your specified attributes. When Amazon EC2 selects instance types with your attributes, it excludes instance types priced above your threshold.
The parameter accepts an integer, which Amazon EC2 interprets as a percentage.
To turn off price protection, specify a high value, such as 999999
.
This parameter is not supported for GetSpotPlacementScores and GetInstanceTypesFromInstanceRequirements.
If you set TargetCapacityUnitType
to vcpu
or memory-mib
, the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price.
Default: 20
[Price protection] The price protection threshold for On-Demand Instances, as a percentage higher than an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose price exceeds your specified threshold.
The parameter accepts an integer, which Amazon EC2 interprets as a percentage.
To indicate no price protection threshold, specify a high value, such as 999999
.
This parameter is not supported for GetSpotPlacementScores and GetInstanceTypesFromInstanceRequirements.
If you set TargetCapacityUnitType
to vcpu
or memory-mib
, the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price.
Default: 20
The price protection threshold for Spot Instances. This is the maximum you’ll pay for a Spot Instance, expressed as a percentage above the least expensive current generation M, C, or R instance type with your specified attributes. When Amazon EC2 selects instance types with your attributes, it excludes instance types priced above your threshold.
The parameter accepts an integer, which Amazon EC2 interprets as a percentage.
To turn off price protection, specify a high value, such as 999999
.
This parameter is not supported for GetSpotPlacementScores and GetInstanceTypesFromInstanceRequirements.
If you set TargetCapacityUnitType
to vcpu
or memory-mib
, the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price.
Default: 100
[Price protection] The price protection threshold for Spot Instances, as a percentage higher than an identified Spot price. The identified Spot price is the Spot price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified Spot price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose Spot price exceeds your specified threshold.
The parameter accepts an integer, which Amazon EC2 interprets as a percentage.
To indicate no price protection threshold, specify a high value, such as 999999
.
If you set TargetCapacityUnitType
to vcpu
or memory-mib
, the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price.
This parameter is not supported for GetSpotPlacementScores and GetInstanceTypesFromInstanceRequirements.
Only one of SpotMaxPricePercentageOverLowestPrice
or MaxSpotPriceAsPercentageOfOptimalOnDemandPrice
can be specified. If you don't specify either, then SpotMaxPricePercentageOverLowestPrice
is used and the value for that parameter defaults to 100
.
Default: 100
The price protection threshold for Spot Instances. This is the maximum you’ll pay for a Spot Instance, expressed as a percentage above the least expensive current generation M, C, or R instance type with your specified attributes. When Amazon EC2 selects instance types with your attributes, it excludes instance types priced above your threshold.
The parameter accepts an integer, which Amazon EC2 interprets as a percentage.
To turn off price protection, specify a high value, such as 999999
.
This parameter is not supported for GetSpotPlacementScores and GetInstanceTypesFromInstanceRequirements.
If you set TargetCapacityUnitType
to vcpu
or memory-mib
, the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price.
Default: 100
[Price protection] The price protection threshold for Spot Instances, as a percentage higher than an identified Spot price. The identified Spot price is the Spot price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified Spot price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose Spot price exceeds your specified threshold.
The parameter accepts an integer, which Amazon EC2 interprets as a percentage.
To indicate no price protection threshold, specify a high value, such as 999999
.
If you set TargetCapacityUnitType
to vcpu
or memory-mib
, the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price.
This parameter is not supported for GetSpotPlacementScores and GetInstanceTypesFromInstanceRequirements.
Only one of SpotMaxPricePercentageOverLowestPrice
or MaxSpotPriceAsPercentageOfOptimalOnDemandPrice
can be specified. If you don't specify either, then SpotMaxPricePercentageOverLowestPrice
is used and the value for that parameter defaults to 100
.
Default: 100
The price protection threshold for On-Demand Instances. This is the maximum you’ll pay for an On-Demand Instance, expressed as a percentage above the least expensive current generation M, C, or R instance type with your specified attributes. When Amazon EC2 selects instance types with your attributes, it excludes instance types priced above your threshold.
The parameter accepts an integer, which Amazon EC2 interprets as a percentage.
To turn off price protection, specify a high value, such as 999999
.
This parameter is not supported for GetSpotPlacementScores and GetInstanceTypesFromInstanceRequirements.
If you set TargetCapacityUnitType
to vcpu
or memory-mib
, the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price.
Default: 20
[Price protection] The price protection threshold for On-Demand Instances, as a percentage higher than an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose price exceeds your specified threshold.
The parameter accepts an integer, which Amazon EC2 interprets as a percentage.
To turn off price protection, specify a high value, such as 999999
.
This parameter is not supported for GetSpotPlacementScores and GetInstanceTypesFromInstanceRequirements.
If you set TargetCapacityUnitType
to vcpu
or memory-mib
, the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price.
Default: 20
The instance types to apply your specified attributes against. All other instance types are ignored, even if they match your specified attributes.
You can use strings with one or more wild cards, represented by an asterisk (*
), to allow an instance type, size, or generation. The following are examples: m5.8xlarge
, c5*.*
, m5a.*
, r*
, *3*
.
For example, if you specify c5*
, Amazon EC2 will allow the entire C5 instance family, which includes all C5a and C5n instance types. If you specify m5a.*
, Amazon EC2 will allow all the M5a instance types, but not the M5n instance types.
If you specify AllowedInstanceTypes
, you can't specify ExcludedInstanceTypes
.
Default: All instance types
\",\ \"locationName\":\"allowedInstanceTypeSet\"\ + },\ + \"MaxSpotPriceAsPercentageOfOptimalOnDemandPrice\":{\ + \"shape\":\"Integer\",\ + \"documentation\":\"[Price protection] The price protection threshold for Spot Instances, as a percentage of an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose price exceeds your specified threshold.
The parameter accepts an integer, which Amazon EC2 interprets as a percentage.
To indicate no price protection threshold, specify a high value, such as 999999
.
If you set DesiredCapacityType
to vcpu
or memory-mib
, the price protection threshold is based on the per vCPU or per memory price instead of the per instance price.
Only one of SpotMaxPricePercentageOverLowestPrice
or MaxSpotPriceAsPercentageOfOptimalOnDemandPrice
can be specified. If you don't specify either, then SpotMaxPricePercentageOverLowestPrice
is used and the value for that parameter defaults to 100
.
The attributes for the instance types. When you specify instance attributes, Amazon EC2 will identify instance types with these attributes.
You must specify VCpuCount
and MemoryMiB
. All other attributes are optional. Any unspecified optional attribute is set to its default.
When you specify multiple attributes, you get instance types that satisfy all of the specified attributes. If you specify multiple values for an attribute, you get instance types that satisfy any of the specified values.
To limit the list of instance types from which Amazon EC2 can identify matching instance types, you can use one of the following parameters, but not both in the same request:
AllowedInstanceTypes
- The instance types to include in the list. All other instance types are ignored, even if they match your specified attributes.
ExcludedInstanceTypes
- The instance types to exclude from the list, even if they match your specified attributes.
If you specify InstanceRequirements
, you can't specify InstanceType
.
Attribute-based instance type selection is only supported when using Auto Scaling groups, EC2 Fleet, and Spot Fleet to launch instances. If you plan to use the launch template in the launch instance wizard or with the RunInstances API, you can't specify InstanceRequirements
.
For more information, see Create a mixed instances group using attribute-based instance type selection in the Amazon EC2 Auto Scaling User Guide, and also Attribute-based instance type selection for EC2 Fleet, Attribute-based instance type selection for Spot Fleet, and Spot placement score in the Amazon EC2 User Guide.
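To make the interaction of these parameters concrete, here is a minimal sketch (not part of this diff) of attribute-based selection using the parameter added in this change; the class and property names assume the SDK's generated lowerCamelCase mapping and should be checked against AWSEC2Model.h.
#import <AWSEC2/AWSEC2.h>
// VCpuCount and MemoryMiB are the two required attributes.
AWSEC2VCpuCountRangeRequest *vcpus = [AWSEC2VCpuCountRangeRequest new];
vcpus.min = @2;
vcpus.max = @8;
AWSEC2MemoryMiBRequest *memory = [AWSEC2MemoryMiBRequest new];
memory.min = @4096;
AWSEC2InstanceRequirementsRequest *requirements = [AWSEC2InstanceRequirementsRequest new];
requirements.vCpuCount = vcpus;
requirements.memoryMiB = memory;
// New in this change: cap the Spot price at 50% of the identified On-Demand price.
// Mutually exclusive with spotMaxPricePercentageOverLowestPrice.
requirements.maxSpotPriceAsPercentageOfOptimalOnDemandPrice = @50;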
\"\ @@ -35650,11 +35655,11 @@ - (NSString *)definitionString { },\ \"SpotMaxPricePercentageOverLowestPrice\":{\ \"shape\":\"Integer\",\ - \"documentation\":\"The price protection threshold for Spot Instance. This is the maximum youâll pay for an Spot Instance, expressed as a percentage above the least expensive current generation M, C, or R instance type with your specified attributes. When Amazon EC2 selects instance types with your attributes, it excludes instance types priced above your threshold.
The parameter accepts an integer, which Amazon EC2 interprets as a percentage.
To turn off price protection, specify a high value, such as 999999
.
This parameter is not supported for GetSpotPlacementScores and GetInstanceTypesFromInstanceRequirements.
If you set TargetCapacityUnitType
to vcpu
or memory-mib
, the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price.
Default: 100
[Price protection] The price protection threshold for Spot Instances, as a percentage higher than an identified Spot price. The identified Spot price is the Spot price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified Spot price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose Spot price exceeds your specified threshold.
The parameter accepts an integer, which Amazon EC2 interprets as a percentage.
To indicate no price protection threshold, specify a high value, such as 999999
.
If you set TargetCapacityUnitType
to vcpu
or memory-mib
, the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price.
This parameter is not supported for GetSpotPlacementScores and GetInstanceTypesFromInstanceRequirements.
Only one of SpotMaxPricePercentageOverLowestPrice
or MaxSpotPriceAsPercentageOfOptimalOnDemandPrice
can be specified. If you don't specify either, then SpotMaxPricePercentageOverLowestPrice
is used and the value for that parameter defaults to 100
.
Default: 100
The price protection threshold for On-Demand Instances. This is the maximum you'll pay for an On-Demand Instance, expressed as a percentage above the least expensive current generation M, C, or R instance type with your specified attributes. When Amazon EC2 selects instance types with your attributes, it excludes instance types priced above your threshold.
The parameter accepts an integer, which Amazon EC2 interprets as a percentage.
To turn off price protection, specify a high value, such as 999999
.
This parameter is not supported for GetSpotPlacementScores and GetInstanceTypesFromInstanceRequirements.
If you set TargetCapacityUnitType
to vcpu
or memory-mib
, the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price.
Default: 20
[Price protection] The price protection threshold for On-Demand Instances, as a percentage higher than an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose price exceeds your specified threshold.
The parameter accepts an integer, which Amazon EC2 interprets as a percentage.
To indicate no price protection threshold, specify a high value, such as 999999
.
This parameter is not supported for GetSpotPlacementScores and GetInstanceTypesFromInstanceRequirements.
If you set TargetCapacityUnitType
to vcpu
or memory-mib
, the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price.
Default: 20
The instance types to apply your specified attributes against. All other instance types are ignored, even if they match your specified attributes.
You can use strings with one or more wild cards, represented by an asterisk (*
), to allow an instance type, size, or generation. The following are examples: m5.8xlarge
, c5*.*
, m5a.*
, r*
, *3*
.
For example, if you specify c5*
, Amazon EC2 will allow the entire C5 instance family, which includes all C5a and C5n instance types. If you specify m5a.*
, Amazon EC2 will allow all the M5a instance types, but not the M5n instance types.
If you specify AllowedInstanceTypes
, you can't specify ExcludedInstanceTypes
.
Default: All instance types
\",\ \"locationName\":\"AllowedInstanceType\"\ + },\ + \"MaxSpotPriceAsPercentageOfOptimalOnDemandPrice\":{\ + \"shape\":\"Integer\",\ + \"documentation\":\"[Price protection] The price protection threshold for Spot Instances, as a percentage of an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose price exceeds your specified threshold.
The parameter accepts an integer, which Amazon EC2 interprets as a percentage.
To indicate no price protection threshold, specify a high value, such as 999999
.
If you set DesiredCapacityType
to vcpu
or memory-mib
, the price protection threshold is based on the per vCPU or per memory price instead of the per instance price.
Only one of SpotMaxPricePercentageOverLowestPrice
or MaxSpotPriceAsPercentageOfOptimalOnDemandPrice
can be specified. If you don't specify either, then SpotMaxPricePercentageOverLowestPrice
is used and the value for that parameter defaults to 100
.
The attributes for the instance types. When you specify instance attributes, Amazon EC2 will identify instance types with these attributes.
You must specify VCpuCount
and MemoryMiB
. All other attributes are optional. Any unspecified optional attribute is set to its default.
When you specify multiple attributes, you get instance types that satisfy all of the specified attributes. If you specify multiple values for an attribute, you get instance types that satisfy any of the specified values.
To limit the list of instance types from which Amazon EC2 can identify matching instance types, you can use one of the following parameters, but not both in the same request:
AllowedInstanceTypes
- The instance types to include in the list. All other instance types are ignored, even if they match your specified attributes.
ExcludedInstanceTypes
- The instance types to exclude from the list, even if they match your specified attributes.
If you specify InstanceRequirements
, you can't specify InstanceType
.
Attribute-based instance type selection is only supported when using Auto Scaling groups, EC2 Fleet, and Spot Fleet to launch instances. If you plan to use the launch template in the launch instance wizard, or with the RunInstances API or AWS::EC2::Instance Amazon Web Services CloudFormation resource, you can't specify InstanceRequirements
.
For more information, see Attribute-based instance type selection for EC2 Fleet, Attribute-based instance type selection for Spot Fleet, and Spot placement score in the Amazon EC2 User Guide.
\"\ diff --git a/AWSEC2/AWSEC2Service.m b/AWSEC2/AWSEC2Service.m index 554377915d8..b27470c37e9 100644 --- a/AWSEC2/AWSEC2Service.m +++ b/AWSEC2/AWSEC2Service.m @@ -26,7 +26,7 @@ #import "AWSEC2Serializer.h" static NSString *const AWSInfoEC2 = @"EC2"; -NSString *const AWSEC2SDKVersion = @"2.33.10"; +NSString *const AWSEC2SDKVersion = @"2.34.0"; @interface AWSEC2ResponseSerializer : AWSXMLResponseSerializer diff --git a/AWSEC2/Info.plist b/AWSEC2/Info.plist index 00a733ed3c8..f13059556e3 100644 --- a/AWSEC2/Info.plist +++ b/AWSEC2/Info.plist @@ -15,7 +15,7 @@The IDs of the public subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings, but not both.
[Application Load Balancers] You must specify subnets from at least two Availability Zones. You cannot specify Elastic IP addresses for your subnets.
[Application Load Balancers on Outposts] You must specify one Outpost subnet.
[Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones.
[Network Load Balancers] You can specify subnets from one or more Availability Zones. You can specify one Elastic IP address per subnet if you need static IP addresses for your internet-facing load balancer. For internal load balancers, you can specify one private IP address per subnet from the IPv4 range of the subnet. For internet-facing load balancers, you can specify one IPv6 address per subnet.
[Gateway Load Balancers] You can specify subnets from one or more Availability Zones. You cannot specify Elastic IP addresses for your subnets.
+The IDs of the subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings, but not both.
[Application Load Balancers] You must specify subnets from at least two Availability Zones. You cannot specify Elastic IP addresses for your subnets.
[Application Load Balancers on Outposts] You must specify one Outpost subnet.
[Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones.
[Network Load Balancers] You can specify subnets from one or more Availability Zones. You can specify one Elastic IP address per subnet if you need static IP addresses for your internet-facing load balancer. For internal load balancers, you can specify one private IP address per subnet from the IPv4 range of the subnet. For internet-facing load balancers, you can specify one IPv6 address per subnet.
[Gateway Load Balancers] You can specify subnets from one or more Availability Zones. You cannot specify Elastic IP addresses for your subnets.
*/ @property (nonatomic, strong) NSArrayThe IDs of the public subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings, but not both. To specify an Elastic IP address, specify subnet mappings instead of subnets.
[Application Load Balancers] You must specify subnets from at least two Availability Zones.
[Application Load Balancers on Outposts] You must specify one Outpost subnet.
[Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones.
[Network Load Balancers] You can specify subnets from one or more Availability Zones.
[Gateway Load Balancers] You can specify subnets from one or more Availability Zones.
+The IDs of the subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings, but not both. To specify an Elastic IP address, specify subnet mappings instead of subnets.
[Application Load Balancers] You must specify subnets from at least two Availability Zones.
[Application Load Balancers on Outposts] You must specify one Outpost subnet.
[Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones.
[Network Load Balancers] You can specify subnets from one or more Availability Zones.
[Gateway Load Balancers] You can specify subnets from one or more Availability Zones.
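For reference, a sketch (not part of this diff) of the subnet-mapping alternative mentioned above; AWSElasticLoadBalancingSubnetMapping and AWSElasticLoadBalancingCreateLoadBalancerInput are assumed to be the generated counterparts of the shapes this documentation describes, and the IDs are placeholders.
#import <AWSElasticLoadBalancing/AWSElasticLoadBalancing.h>
// An Elastic IP address can only be attached through a subnet mapping.
AWSElasticLoadBalancingSubnetMapping *mapping = [AWSElasticLoadBalancingSubnetMapping new];
mapping.subnetId = @"subnet-0123456789abcdef0";        // hypothetical subnet ID
mapping.allocationId = @"eipalloc-0123456789abcdef0";  // hypothetical EIP allocation; internet-facing NLB only
AWSElasticLoadBalancingCreateLoadBalancerInput *input = [AWSElasticLoadBalancingCreateLoadBalancerInput new];
input.name = @"my-network-load-balancer";
input.subnetMappings = @[mapping];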
*/ @property (nonatomic, strong) NSArrayThe source IP addresses, in CIDR format. You can use both IPv4 and IPv6 addresses. Wildcards are not supported.
If you specify multiple addresses, the condition is satisfied if the source IP address of the request matches one of the CIDR blocks. This condition is not satisfied by the addresses in the X-Forwarded-For header. To search for addresses in the X-Forwarded-For header, use HttpHeaderConditionConfig.
+The source IP addresses, in CIDR format. You can use both IPv4 and IPv6 addresses. Wildcards are not supported.
If you specify multiple addresses, the condition is satisfied if the source IP address of the request matches one of the CIDR blocks. This condition is not satisfied by the addresses in the X-Forwarded-For header. To search for addresses in the X-Forwarded-For header, use HttpHeaderConditionConfig.
The total number of values must be less than or equal to five.
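A short sketch (not part of this diff) of the five-value limit in practice, assuming the generated AWSElasticLoadBalancingSourceIpConditionConfig type:
#import <AWSElasticLoadBalancing/AWSElasticLoadBalancing.h>
AWSElasticLoadBalancingSourceIpConditionConfig *sourceIp = [AWSElasticLoadBalancingSourceIpConditionConfig new];
// At most five CIDR blocks in total; IPv4 and IPv6 can be mixed.
sourceIp.values = @[@"203.0.113.0/24", @"2001:db8::/32"];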
*/ @property (nonatomic, strong) NSArrayThe name of the attribute.
The following attributes are supported by all load balancers:
deregistration_delay.timeout_seconds
- The amount of time, in seconds, for Elastic Load Balancing to wait before changing the state of a deregistering target from draining
to unused
. The range is 0-3600 seconds. The default value is 300 seconds. If the target is a Lambda function, this attribute is not supported.
stickiness.enabled
- Indicates whether target stickiness is enabled. The value is true
or false
. The default is false
.
stickiness.type
- Indicates the type of stickiness. The possible values are:
lb_cookie
and app_cookie
for Application Load Balancers.
source_ip
for Network Load Balancers.
source_ip_dest_ip
and source_ip_dest_ip_proto
for Gateway Load Balancers.
The following attributes are supported by Application Load Balancers and Network Load Balancers:
load_balancing.cross_zone.enabled
- Indicates whether cross zone load balancing is enabled. The value is true
, false
or use_load_balancer_configuration
. The default is use_load_balancer_configuration
.
target_group_health.dns_failover.minimum_healthy_targets.count
- The minimum number of targets that must be healthy. If the number of healthy targets is below this value, mark the zone as unhealthy in DNS, so that traffic is routed only to healthy zones. The possible values are off
or an integer from 1 to the maximum number of targets. The default is off
.
target_group_health.dns_failover.minimum_healthy_targets.percentage
- The minimum percentage of targets that must be healthy. If the percentage of healthy targets is below this value, mark the zone as unhealthy in DNS, so that traffic is routed only to healthy zones. The possible values are off
or an integer from 1 to 100. The default is off
.
target_group_health.unhealthy_state_routing.minimum_healthy_targets.count
- The minimum number of targets that must be healthy. If the number of healthy targets is below this value, send traffic to all targets, including unhealthy targets. The possible values are 1 to the maximum number of targets. The default is 1.
target_group_health.unhealthy_state_routing.minimum_healthy_targets.percentage
- The minimum percentage of targets that must be healthy. If the percentage of healthy targets is below this value, send traffic to all targets, including unhealthy targets. The possible values are off
or an integer from 1 to 100. The default is off
.
The following attributes are supported only if the load balancer is an Application Load Balancer and the target is an instance or an IP address:
load_balancing.algorithm.type
- The load balancing algorithm determines how the load balancer selects targets when routing requests. The value is round_robin
, least_outstanding_requests
, or weighted_random
. The default is round_robin
.
load_balancing.algorithm.anomaly_mitigation
- Only available when load_balancing.algorithm.type
is weighted_random
. Indicates whether anomaly mitigation is enabled. The value is on
or off
. The default is off
.
slow_start.duration_seconds
- The time period, in seconds, during which a newly registered target receives an increasing share of the traffic to the target group. After this time period ends, the target receives its full share of traffic. The range is 30-900 seconds (15 minutes). The default is 0 seconds (disabled).
stickiness.app_cookie.cookie_name
- Indicates the name of the application-based cookie. Names that start with the following prefixes are not allowed: AWSALB
, AWSALBAPP
, and AWSALBTG
; they're reserved for use by the load balancer.
stickiness.app_cookie.duration_seconds
- The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the application-based cookie is considered stale. The range is 1 second to 1 week (604800 seconds). The default value is 1 day (86400 seconds).
stickiness.lb_cookie.duration_seconds
- The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the load balancer-generated cookie is considered stale. The range is 1 second to 1 week (604800 seconds). The default value is 1 day (86400 seconds).
The following attribute is supported only if the load balancer is an Application Load Balancer and the target is a Lambda function:
lambda.multi_value_headers.enabled
- Indicates whether the request and response headers that are exchanged between the load balancer and the Lambda function include arrays of values or strings. The value is true
or false
. The default is false
. If the value is false
and the request contains a duplicate header field name or query parameter key, the load balancer uses the last value sent by the client.
The following attributes are supported only by Network Load Balancers:
deregistration_delay.connection_termination.enabled
- Indicates whether the load balancer terminates connections at the end of the deregistration timeout. The value is true
or false
. For new UDP/TCP_UDP target groups the default is true
. Otherwise, the default is false
.
preserve_client_ip.enabled
- Indicates whether client IP preservation is enabled. The value is true
or false
. The default is disabled if the target group type is IP address and the target group protocol is TCP or TLS. Otherwise, the default is enabled. Client IP preservation cannot be disabled for UDP and TCP_UDP target groups.
proxy_protocol_v2.enabled
- Indicates whether Proxy Protocol version 2 is enabled. The value is true
or false
. The default is false
.
target_health_state.unhealthy.connection_termination.enabled
- Indicates whether the load balancer terminates connections to unhealthy targets. The value is true
or false
. The default is true
.
The following attributes are supported only by Gateway Load Balancers:
target_failover.on_deregistration
- Indicates how the Gateway Load Balancer handles existing flows when a target is deregistered. The possible values are rebalance
and no_rebalance
. The default is no_rebalance
. The two attributes (target_failover.on_deregistration
and target_failover.on_unhealthy
) can't be set independently. The value you set for both attributes must be the same.
target_failover.on_unhealthy
- Indicates how the Gateway Load Balancer handles existing flows when a target is unhealthy. The possible values are rebalance
and no_rebalance
. The default is no_rebalance
. The two attributes (target_failover.on_deregistration
and target_failover.on_unhealthy
) cannot be set independently. The value you set for both attributes must be the same.
The name of the attribute.
The following attributes are supported by all load balancers:
deregistration_delay.timeout_seconds
- The amount of time, in seconds, for Elastic Load Balancing to wait before changing the state of a deregistering target from draining
to unused
. The range is 0-3600 seconds. The default value is 300 seconds. If the target is a Lambda function, this attribute is not supported.
stickiness.enabled
- Indicates whether target stickiness is enabled. The value is true
or false
. The default is false
.
stickiness.type
- Indicates the type of stickiness. The possible values are:
lb_cookie
and app_cookie
for Application Load Balancers.
source_ip
for Network Load Balancers.
source_ip_dest_ip
and source_ip_dest_ip_proto
for Gateway Load Balancers.
The following attributes are supported by Application Load Balancers and Network Load Balancers:
load_balancing.cross_zone.enabled
- Indicates whether cross zone load balancing is enabled. The value is true
, false
or use_load_balancer_configuration
. The default is use_load_balancer_configuration
.
target_group_health.dns_failover.minimum_healthy_targets.count
- The minimum number of targets that must be healthy. If the number of healthy targets is below this value, mark the zone as unhealthy in DNS, so that traffic is routed only to healthy zones. The possible values are off
or an integer from 1 to the maximum number of targets. The default is off
.
target_group_health.dns_failover.minimum_healthy_targets.percentage
- The minimum percentage of targets that must be healthy. If the percentage of healthy targets is below this value, mark the zone as unhealthy in DNS, so that traffic is routed only to healthy zones. The possible values are off
or an integer from 1 to 100. The default is off
.
target_group_health.unhealthy_state_routing.minimum_healthy_targets.count
- The minimum number of targets that must be healthy. If the number of healthy targets is below this value, send traffic to all targets, including unhealthy targets. The possible values are 1 to the maximum number of targets. The default is 1.
target_group_health.unhealthy_state_routing.minimum_healthy_targets.percentage
- The minimum percentage of targets that must be healthy. If the percentage of healthy targets is below this value, send traffic to all targets, including unhealthy targets. The possible values are off
or an integer from 1 to 100. The default is off
.
The following attributes are supported only if the load balancer is an Application Load Balancer and the target is an instance or an IP address:
load_balancing.algorithm.type
- The load balancing algorithm determines how the load balancer selects targets when routing requests. The value is round_robin
, least_outstanding_requests
, or weighted_random
. The default is round_robin
.
load_balancing.algorithm.anomaly_mitigation
- Only available when load_balancing.algorithm.type
is weighted_random
. Indicates whether anomaly mitigation is enabled. The value is on
or off
. The default is off
.
slow_start.duration_seconds
- The time period, in seconds, during which a newly registered target receives an increasing share of the traffic to the target group. After this time period ends, the target receives its full share of traffic. The range is 30-900 seconds (15 minutes). The default is 0 seconds (disabled).
stickiness.app_cookie.cookie_name
- Indicates the name of the application-based cookie. Names that start with the following prefixes are not allowed: AWSALB
, AWSALBAPP
, and AWSALBTG
; they're reserved for use by the load balancer.
stickiness.app_cookie.duration_seconds
- The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the application-based cookie is considered stale. The range is 1 second to 1 week (604800 seconds). The default value is 1 day (86400 seconds).
stickiness.lb_cookie.duration_seconds
- The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the load balancer-generated cookie is considered stale. The range is 1 second to 1 week (604800 seconds). The default value is 1 day (86400 seconds).
The following attribute is supported only if the load balancer is an Application Load Balancer and the target is a Lambda function:
lambda.multi_value_headers.enabled
- Indicates whether the request and response headers that are exchanged between the load balancer and the Lambda function include arrays of values or strings. The value is true
or false
. The default is false
. If the value is false
and the request contains a duplicate header field name or query parameter key, the load balancer uses the last value sent by the client.
The following attributes are supported only by Network Load Balancers:
deregistration_delay.connection_termination.enabled
- Indicates whether the load balancer terminates connections at the end of the deregistration timeout. The value is true
or false
. For new UDP/TCP_UDP target groups the default is true
. Otherwise, the default is false
.
preserve_client_ip.enabled
- Indicates whether client IP preservation is enabled. The value is true
or false
. The default is disabled if the target group type is IP address and the target group protocol is TCP or TLS. Otherwise, the default is enabled. Client IP preservation cannot be disabled for UDP and TCP_UDP target groups.
proxy_protocol_v2.enabled
- Indicates whether Proxy Protocol version 2 is enabled. The value is true
or false
. The default is false
.
target_health_state.unhealthy.connection_termination.enabled
- Indicates whether the load balancer terminates connections to unhealthy targets. The value is true
or false
. The default is true
.
target_health_state.unhealthy.draining_interval_seconds
- The amount of time for Elastic Load Balancing to wait before changing the state of an unhealthy target from unhealthy.draining
to unhealthy
. The range is 0-360000 seconds. The default value is 0 seconds.
Note: This attribute can only be configured when target_health_state.unhealthy.connection_termination.enabled
is false
.
The following attributes are supported only by Gateway Load Balancers:
target_failover.on_deregistration
- Indicates how the Gateway Load Balancer handles existing flows when a target is deregistered. The possible values are rebalance
and no_rebalance
. The default is no_rebalance
. The two attributes (target_failover.on_deregistration
and target_failover.on_unhealthy
) can't be set independently. The value you set for both attributes must be the same.
target_failover.on_unhealthy
- Indicates how the Gateway Load Balancer handles existing flows when a target is unhealthy. The possible values are rebalance
and no_rebalance
. The default is no_rebalance
. The two attributes (target_failover.on_deregistration
and target_failover.on_unhealthy
) cannot be set independently. The value you set for both attributes must be the same.
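On the wire, every attribute in this list is a plain key/value pair; a sketch (not part of this diff) of setting the new draining-interval attribute, assuming the generated AWSElasticLoadBalancingTargetGroupAttribute and AWSElasticLoadBalancingModifyTargetGroupAttributesInput types:
#import <AWSElasticLoadBalancing/AWSElasticLoadBalancing.h>
AWSElasticLoadBalancingTargetGroupAttribute *attribute = [AWSElasticLoadBalancingTargetGroupAttribute new];
// Only configurable while target_health_state.unhealthy.connection_termination.enabled is false.
attribute.key = @"target_health_state.unhealthy.draining_interval_seconds";
attribute.value = @"300";
AWSElasticLoadBalancingModifyTargetGroupAttributesInput *input = [AWSElasticLoadBalancingModifyTargetGroupAttributesInput new];
input.targetGroupArn = @"arn:aws:elasticloadbalancing:us-east-1:123456789012:targetgroup/my-targets/73e2d6bc24d8a067"; // hypothetical ARN
input.attributes = @[attribute];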
The IDs of the public subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings, but not both. To specify an Elastic IP address, specify subnet mappings instead of subnets.
[Application Load Balancers] You must specify subnets from at least two Availability Zones.
[Application Load Balancers on Outposts] You must specify one Outpost subnet.
[Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones.
[Network Load Balancers] You can specify subnets from one or more Availability Zones.
[Gateway Load Balancers] You can specify subnets from one or more Availability Zones.
\"\ + \"documentation\":\"The IDs of the subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings, but not both. To specify an Elastic IP address, specify subnet mappings instead of subnets.
[Application Load Balancers] You must specify subnets from at least two Availability Zones.
[Application Load Balancers on Outposts] You must specify one Outpost subnet.
[Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones.
[Network Load Balancers] You can specify subnets from one or more Availability Zones.
[Gateway Load Balancers] You can specify subnets from one or more Availability Zones.
\"\ },\ \"SubnetMappings\":{\ \"shape\":\"SubnetMappings\",\ - \"documentation\":\"The IDs of the public subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings, but not both.
[Application Load Balancers] You must specify subnets from at least two Availability Zones. You cannot specify Elastic IP addresses for your subnets.
[Application Load Balancers on Outposts] You must specify one Outpost subnet.
[Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones.
[Network Load Balancers] You can specify subnets from one or more Availability Zones. You can specify one Elastic IP address per subnet if you need static IP addresses for your internet-facing load balancer. For internal load balancers, you can specify one private IP address per subnet from the IPv4 range of the subnet. For internet-facing load balancers, you can specify one IPv6 address per subnet.
[Gateway Load Balancers] You can specify subnets from one or more Availability Zones. You cannot specify Elastic IP addresses for your subnets.
\"\ + \"documentation\":\"The IDs of the subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings, but not both.
[Application Load Balancers] You must specify subnets from at least two Availability Zones. You cannot specify Elastic IP addresses for your subnets.
[Application Load Balancers on Outposts] You must specify one Outpost subnet.
[Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones.
[Network Load Balancers] You can specify subnets from one or more Availability Zones. You can specify one Elastic IP address per subnet if you need static IP addresses for your internet-facing load balancer. For internal load balancers, you can specify one private IP address per subnet from the IPv4 range of the subnet. For internet-facing load balancers, you can specify one IPv6 address per subnet.
[Gateway Load Balancers] You can specify subnets from one or more Availability Zones. You cannot specify Elastic IP addresses for your subnets.
\"\ },\ \"SecurityGroups\":{\ \"shape\":\"SecurityGroups\",\ @@ -3607,7 +3607,7 @@ - (NSString *)definitionString { \"members\":{\ \"Values\":{\ \"shape\":\"ListOfString\",\ - \"documentation\":\"The source IP addresses, in CIDR format. You can use both IPv4 and IPv6 addresses. Wildcards are not supported.
If you specify multiple addresses, the condition is satisfied if the source IP address of the request matches one of the CIDR blocks. This condition is not satisfied by the addresses in the X-Forwarded-For header. To search for addresses in the X-Forwarded-For header, use HttpHeaderConditionConfig.
\"\ + \"documentation\":\"The source IP addresses, in CIDR format. You can use both IPv4 and IPv6 addresses. Wildcards are not supported.
If you specify multiple addresses, the condition is satisfied if the source IP address of the request matches one of the CIDR blocks. This condition is not satisfied by the addresses in the X-Forwarded-For header. To search for addresses in the X-Forwarded-For header, use HttpHeaderConditionConfig.
The total number of values must be less than or equal to five.
\"\ }\ },\ \"documentation\":\"Information about a source IP condition.
You can use this condition to route based on the IP address of the source that connects to the load balancer. If a client is behind a proxy, this is the IP address of the proxy, not the IP address of the client.
\"\ @@ -3871,7 +3871,7 @@ - (NSString *)definitionString { \"members\":{\ \"Key\":{\ \"shape\":\"TargetGroupAttributeKey\",\ - \"documentation\":\"The name of the attribute.
The following attributes are supported by all load balancers:
deregistration_delay.timeout_seconds
- The amount of time, in seconds, for Elastic Load Balancing to wait before changing the state of a deregistering target from draining
to unused
. The range is 0-3600 seconds. The default value is 300 seconds. If the target is a Lambda function, this attribute is not supported.
stickiness.enabled
- Indicates whether target stickiness is enabled. The value is true
or false
. The default is false
.
stickiness.type
- Indicates the type of stickiness. The possible values are:
lb_cookie
and app_cookie
for Application Load Balancers.
source_ip
for Network Load Balancers.
source_ip_dest_ip
and source_ip_dest_ip_proto
for Gateway Load Balancers.
The following attributes are supported by Application Load Balancers and Network Load Balancers:
load_balancing.cross_zone.enabled
- Indicates whether cross zone load balancing is enabled. The value is true
, false
or use_load_balancer_configuration
. The default is use_load_balancer_configuration
.
target_group_health.dns_failover.minimum_healthy_targets.count
- The minimum number of targets that must be healthy. If the number of healthy targets is below this value, mark the zone as unhealthy in DNS, so that traffic is routed only to healthy zones. The possible values are off
or an integer from 1 to the maximum number of targets. The default is off
.
target_group_health.dns_failover.minimum_healthy_targets.percentage
- The minimum percentage of targets that must be healthy. If the percentage of healthy targets is below this value, mark the zone as unhealthy in DNS, so that traffic is routed only to healthy zones. The possible values are off
or an integer from 1 to 100. The default is off
.
target_group_health.unhealthy_state_routing.minimum_healthy_targets.count
- The minimum number of targets that must be healthy. If the number of healthy targets is below this value, send traffic to all targets, including unhealthy targets. The possible values are 1 to the maximum number of targets. The default is 1.
target_group_health.unhealthy_state_routing.minimum_healthy_targets.percentage
- The minimum percentage of targets that must be healthy. If the percentage of healthy targets is below this value, send traffic to all targets, including unhealthy targets. The possible values are off
or an integer from 1 to 100. The default is off
.
The following attributes are supported only if the load balancer is an Application Load Balancer and the target is an instance or an IP address:
load_balancing.algorithm.type
- The load balancing algorithm determines how the load balancer selects targets when routing requests. The value is round_robin
, least_outstanding_requests
, or weighted_random
. The default is round_robin
.
load_balancing.algorithm.anomaly_mitigation
- Only available when load_balancing.algorithm.type
is weighted_random
. Indicates whether anomaly mitigation is enabled. The value is on
or off
. The default is off
.
slow_start.duration_seconds
- The time period, in seconds, during which a newly registered target receives an increasing share of the traffic to the target group. After this time period ends, the target receives its full share of traffic. The range is 30-900 seconds (15 minutes). The default is 0 seconds (disabled).
stickiness.app_cookie.cookie_name
- Indicates the name of the application-based cookie. Names that start with the following prefixes are not allowed: AWSALB
, AWSALBAPP
, and AWSALBTG
; they're reserved for use by the load balancer.
stickiness.app_cookie.duration_seconds
- The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the application-based cookie is considered stale. The range is 1 second to 1 week (604800 seconds). The default value is 1 day (86400 seconds).
stickiness.lb_cookie.duration_seconds
- The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the load balancer-generated cookie is considered stale. The range is 1 second to 1 week (604800 seconds). The default value is 1 day (86400 seconds).
The following attribute is supported only if the load balancer is an Application Load Balancer and the target is a Lambda function:
lambda.multi_value_headers.enabled
- Indicates whether the request and response headers that are exchanged between the load balancer and the Lambda function include arrays of values or strings. The value is true
or false
. The default is false
. If the value is false
and the request contains a duplicate header field name or query parameter key, the load balancer uses the last value sent by the client.
The following attributes are supported only by Network Load Balancers:
deregistration_delay.connection_termination.enabled
- Indicates whether the load balancer terminates connections at the end of the deregistration timeout. The value is true
or false
. For new UDP/TCP_UDP target groups the default is true
. Otherwise, the default is false
.
preserve_client_ip.enabled
- Indicates whether client IP preservation is enabled. The value is true
or false
. The default is disabled if the target group type is IP address and the target group protocol is TCP or TLS. Otherwise, the default is enabled. Client IP preservation cannot be disabled for UDP and TCP_UDP target groups.
proxy_protocol_v2.enabled
- Indicates whether Proxy Protocol version 2 is enabled. The value is true
or false
. The default is false
.
target_health_state.unhealthy.connection_termination.enabled
- Indicates whether the load balancer terminates connections to unhealthy targets. The value is true
or false
. The default is true
.
The following attributes are supported only by Gateway Load Balancers:
target_failover.on_deregistration
- Indicates how the Gateway Load Balancer handles existing flows when a target is deregistered. The possible values are rebalance
and no_rebalance
. The default is no_rebalance
. The two attributes (target_failover.on_deregistration
and target_failover.on_unhealthy
) can't be set independently. The value you set for both attributes must be the same.
target_failover.on_unhealthy
- Indicates how the Gateway Load Balancer handles existing flows when a target is unhealthy. The possible values are rebalance
and no_rebalance
. The default is no_rebalance
. The two attributes (target_failover.on_deregistration
and target_failover.on_unhealthy
) cannot be set independently. The value you set for both attributes must be the same.
The name of the attribute.
The following attributes are supported by all load balancers:
deregistration_delay.timeout_seconds
- The amount of time, in seconds, for Elastic Load Balancing to wait before changing the state of a deregistering target from draining
to unused
. The range is 0-3600 seconds. The default value is 300 seconds. If the target is a Lambda function, this attribute is not supported.
stickiness.enabled
- Indicates whether target stickiness is enabled. The value is true
or false
. The default is false
.
stickiness.type
- Indicates the type of stickiness. The possible values are:
lb_cookie
and app_cookie
for Application Load Balancers.
source_ip
for Network Load Balancers.
source_ip_dest_ip
and source_ip_dest_ip_proto
for Gateway Load Balancers.
The following attributes are supported by Application Load Balancers and Network Load Balancers:
load_balancing.cross_zone.enabled
- Indicates whether cross zone load balancing is enabled. The value is true
, false
or use_load_balancer_configuration
. The default is use_load_balancer_configuration
.
target_group_health.dns_failover.minimum_healthy_targets.count
- The minimum number of targets that must be healthy. If the number of healthy targets is below this value, mark the zone as unhealthy in DNS, so that traffic is routed only to healthy zones. The possible values are off
or an integer from 1 to the maximum number of targets. The default is off
.
target_group_health.dns_failover.minimum_healthy_targets.percentage
- The minimum percentage of targets that must be healthy. If the percentage of healthy targets is below this value, mark the zone as unhealthy in DNS, so that traffic is routed only to healthy zones. The possible values are off
or an integer from 1 to 100. The default is off
.
target_group_health.unhealthy_state_routing.minimum_healthy_targets.count
- The minimum number of targets that must be healthy. If the number of healthy targets is below this value, send traffic to all targets, including unhealthy targets. The possible values are 1 to the maximum number of targets. The default is 1.
target_group_health.unhealthy_state_routing.minimum_healthy_targets.percentage
- The minimum percentage of targets that must be healthy. If the percentage of healthy targets is below this value, send traffic to all targets, including unhealthy targets. The possible values are off
or an integer from 1 to 100. The default is off
.
The following attributes are supported only if the load balancer is an Application Load Balancer and the target is an instance or an IP address:
load_balancing.algorithm.type
- The load balancing algorithm determines how the load balancer selects targets when routing requests. The value is round_robin
, least_outstanding_requests
, or weighted_random
. The default is round_robin
.
load_balancing.algorithm.anomaly_mitigation
- Only available when load_balancing.algorithm.type
is weighted_random
. Indicates whether anomaly mitigation is enabled. The value is on
or off
. The default is off
.
slow_start.duration_seconds
- The time period, in seconds, during which a newly registered target receives an increasing share of the traffic to the target group. After this time period ends, the target receives its full share of traffic. The range is 30-900 seconds (15 minutes). The default is 0 seconds (disabled).
stickiness.app_cookie.cookie_name
- Indicates the name of the application-based cookie. Names that start with the following prefixes are not allowed: AWSALB
, AWSALBAPP
, and AWSALBTG
; they're reserved for use by the load balancer.
stickiness.app_cookie.duration_seconds
- The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the application-based cookie is considered stale. The range is 1 second to 1 week (604800 seconds). The default value is 1 day (86400 seconds).
stickiness.lb_cookie.duration_seconds
- The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the load balancer-generated cookie is considered stale. The range is 1 second to 1 week (604800 seconds). The default value is 1 day (86400 seconds).
The following attribute is supported only if the load balancer is an Application Load Balancer and the target is a Lambda function:
lambda.multi_value_headers.enabled
- Indicates whether the request and response headers that are exchanged between the load balancer and the Lambda function include arrays of values or strings. The value is true
or false
. The default is false
. If the value is false
and the request contains a duplicate header field name or query parameter key, the load balancer uses the last value sent by the client.
The following attributes are supported only by Network Load Balancers:
deregistration_delay.connection_termination.enabled
- Indicates whether the load balancer terminates connections at the end of the deregistration timeout. The value is true
or false
. For new UDP/TCP_UDP target groups the default is true
. Otherwise, the default is false
.
preserve_client_ip.enabled
- Indicates whether client IP preservation is enabled. The value is true
or false
. The default is disabled if the target group type is IP address and the target group protocol is TCP or TLS. Otherwise, the default is enabled. Client IP preservation cannot be disabled for UDP and TCP_UDP target groups.
proxy_protocol_v2.enabled
- Indicates whether Proxy Protocol version 2 is enabled. The value is true
or false
. The default is false
.
target_health_state.unhealthy.connection_termination.enabled
- Indicates whether the load balancer terminates connections to unhealthy targets. The value is true
or false
. The default is true
.
target_health_state.unhealthy.draining_interval_seconds
- The amount of time for Elastic Load Balancing to wait before changing the state of an unhealthy target from unhealthy.draining
to unhealthy
. The range is 0-360000 seconds. The default value is 0 seconds.
Note: This attribute can only be configured when target_health_state.unhealthy.connection_termination.enabled
is false
.
The following attributes are supported only by Gateway Load Balancers:
target_failover.on_deregistration
- Indicates how the Gateway Load Balancer handles existing flows when a target is deregistered. The possible values are rebalance
and no_rebalance
. The default is no_rebalance
. The two attributes (target_failover.on_deregistration
and target_failover.on_unhealthy
) can't be set independently. The value you set for both attributes must be the same.
target_failover.on_unhealthy
- Indicates how the Gateway Load Balancer handles existing flows when a target is unhealthy. The possible values are rebalance
and no_rebalance
. The default is no_rebalance
. The two attributes (target_failover.on_deregistration
and target_failover.on_unhealthy
) cannot be set independently. The value you set for both attributes must be the same.
The server certificate configuration.
+ */ +@property (nonatomic, strong) AWSIoTServerCertificateConfig * _Nullable serverCertificateConfig; + /**The type of service delivered by the endpoint.
Amazon Web Services IoT Core currently supports only the DATA
service type.
The package version Amazon Resource Names (ARNs) that are installed on the device when the job successfully completes.
Note:The following Length Constraints relates to a single ARN. Up to 25 package version ARNs are allowed.
+The package version Amazon Resource Names (ARNs) that are installed on the device when the job successfully completes. The package version must be in either the Published or Deprecated state when the job deploys. For more information, see Package version lifecycle.
Note: The following Length Constraints relate to a single ARN. Up to 25 package version ARNs are allowed.
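As a usage sketch (not part of this diff): attaching package versions to a job through the generated request type; the job ID and ARNs below are hypothetical.
#import <AWSIoT/AWSIoT.h>
AWSIoTCreateJobRequest *job = [AWSIoTCreateJobRequest new];
job.jobId = @"deploy-firmware-001";                                    // hypothetical job ID
job.targets = @[@"arn:aws:iot:us-east-1:123456789012:thing/myThing"];  // hypothetical thing ARN
// Each ARN must point to a package version in the Published or Deprecated state.
job.destinationPackageVersions = @[@"arn:aws:iot:us-east-1:123456789012:package/myPackage/version/1.0.0"]; // hypothetical ARN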
*/ @property (nonatomic, strong) NSArrayThe package version Amazon Resource Names (ARNs) that are installed on the device when the job successfully completes.
Note:The following Length Constraints relates to a single ARN. Up to 25 package version ARNs are allowed.
+The package version Amazon Resource Names (ARNs) that are installed on the device when the job successfully completes. The package version must be in either the Published or Deprecated state when the job deploys. For more information, see Package version lifecycle.
Note: The following Length Constraints relate to a single ARN. Up to 25 package version ARNs are allowed.
*/ @property (nonatomic, strong) NSArrayThe server certificate configuration.
+ */ +@property (nonatomic, strong) AWSIoTServerCertificateConfig * _Nullable serverCertificateConfig; + /**A list containing summary information about the server certificate included in the domain configuration.
*/ @@ -6808,7 +6817,7 @@ typedef NS_ENUM(NSInteger, AWSIoTViolationEventType) { @property (nonatomic, strong) NSString * _Nullable detail; /** -The package version Amazon Resource Names (ARNs) that are installed on the device when the job successfully completes.
Note:The following Length Constraints relates to a single ARN. Up to 25 package version ARNs are allowed.
+The package version Amazon Resource Names (ARNs) that are installed on the device when the job successfully completes. The package version must be in either the Published or Deprecated state when the job deploys. For more information, see Package version lifecycle.
Note: The following Length Constraints relate to a single ARN. Up to 25 package version ARNs are allowed.
*/ @property (nonatomic, strong) NSArrayThe package version Amazon Resource Names (ARNs) that are installed on the device when the job successfully completes.
Note:The following Length Constraints relates to a single ARN. Up to 25 package version ARNs are allowed.
+The package version Amazon Resource Names (ARNs) that are installed on the device when the job successfully completes. The package version must be in either the Published or Deprecated state when the job deploys. For more information, see Package version lifecycle.The package version must be in either the Published or Deprecated state when the job deploys. For more information, see Package version lifecycle.
Note: The following Length Constraints relate to a single ARN. Up to 25 package version ARNs are allowed.
*/ @property (nonatomic, strong) NSArrayThe server certificate configuration.
+ */ +@interface AWSIoTServerCertificateConfig : AWSModel + + +/** +A Boolean value that indicates whether Online Certificate Status Protocol (OCSP) server certificate check is enabled or not.
For more information, see Configuring OCSP server-certificate stapling in domain configuration from Amazon Web Services IoT Core Developer Guide.
+ */ +@property (nonatomic, strong) NSNumber * _Nullable enableOCSPCheck; + +@end + /**An object that contains information about a server certificate.
*/ @@ -16338,6 +16360,11 @@ typedef NS_ENUM(NSInteger, AWSIoTViolationEventType) { */ @property (nonatomic, strong) NSNumber * _Nullable removeAuthorizerConfig; +/** +The server certificate configuration.
+ */ +@property (nonatomic, strong) AWSIoTServerCertificateConfig * _Nullable serverCertificateConfig; + /**An object that specifies the TLS configuration for a domain.
*/ diff --git a/AWSIoT/AWSIoTModel.m b/AWSIoT/AWSIoTModel.m index fe13b68e368..a74c29159b9 100644 --- a/AWSIoT/AWSIoTModel.m +++ b/AWSIoT/AWSIoTModel.m @@ -1,5 +1,5 @@ // -// Copyright 2010-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. +// Copyright 2010-2024 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"). // You may not use this file except in compliance with the License. @@ -2699,6 +2699,7 @@ + (NSDictionary *)JSONKeyPathsByPropertyKey { @"domainConfigurationName" : @"domainConfigurationName", @"domainName" : @"domainName", @"serverCertificateArns" : @"serverCertificateArns", + @"serverCertificateConfig" : @"serverCertificateConfig", @"serviceType" : @"serviceType", @"tags" : @"tags", @"tlsConfig" : @"tlsConfig", @@ -2710,6 +2711,10 @@ + (NSValueTransformer *)authorizerConfigJSONTransformer { return [NSValueTransformer awsmtl_JSONDictionaryTransformerWithModelClass:[AWSIoTAuthorizerConfig class]]; } ++ (NSValueTransformer *)serverCertificateConfigJSONTransformer { + return [NSValueTransformer awsmtl_JSONDictionaryTransformerWithModelClass:[AWSIoTServerCertificateConfig class]]; +} + + (NSValueTransformer *)serviceTypeJSONTransformer { return [AWSMTLValueTransformer reversibleTransformerWithForwardBlock:^NSNumber *(NSString *value) { if ([value caseInsensitiveCompare:@"DATA"] == NSOrderedSame) { @@ -4841,12 +4846,6 @@ + (NSValueTransformer *)targetTypeJSONTransformer { if ([value caseInsensitiveCompare:@"PRINCIPAL_ID"] == NSOrderedSame) { return @(AWSIoTLogTargetTypePrincipalId); } - if ([value caseInsensitiveCompare:@"EVENT_TYPE"] == NSOrderedSame) { - return @(AWSIoTLogTargetTypeEventType); - } - if ([value caseInsensitiveCompare:@"DEVICE_DEFENDER"] == NSOrderedSame) { - return @(AWSIoTLogTargetTypeDeviceDefender); - } return @(AWSIoTLogTargetTypeUnknown); } reverseBlock:^NSString *(NSNumber *value) { switch ([value integerValue]) { @@ -4860,10 +4859,6 @@ + (NSValueTransformer *)targetTypeJSONTransformer { return @"SOURCE_IP"; case AWSIoTLogTargetTypePrincipalId: return @"PRINCIPAL_ID"; - case AWSIoTLogTargetTypeEventType: - return @"EVENT_TYPE"; - case AWSIoTLogTargetTypeDeviceDefender: - return @"DEVICE_DEFENDER"; default: return nil; } @@ -5664,6 +5659,7 @@ + (NSDictionary *)JSONKeyPathsByPropertyKey { @"domainName" : @"domainName", @"domainType" : @"domainType", @"lastStatusChangeDate" : @"lastStatusChangeDate", + @"serverCertificateConfig" : @"serverCertificateConfig", @"serverCertificates" : @"serverCertificates", @"serviceType" : @"serviceType", @"tlsConfig" : @"tlsConfig", @@ -5729,6 +5725,10 @@ + (NSValueTransformer *)lastStatusChangeDateJSONTransformer { }]; } ++ (NSValueTransformer *)serverCertificateConfigJSONTransformer { + return [NSValueTransformer awsmtl_JSONDictionaryTransformerWithModelClass:[AWSIoTServerCertificateConfig class]]; +} + + (NSValueTransformer *)serverCertificatesJSONTransformer { return [NSValueTransformer awsmtl_JSONArrayTransformerWithModelClass:[AWSIoTServerCertificateSummary class]]; } @@ -12128,12 +12128,6 @@ + (NSValueTransformer *)targetTypeJSONTransformer { if ([value caseInsensitiveCompare:@"PRINCIPAL_ID"] == NSOrderedSame) { return @(AWSIoTLogTargetTypePrincipalId); } - if ([value caseInsensitiveCompare:@"EVENT_TYPE"] == NSOrderedSame) { - return @(AWSIoTLogTargetTypeEventType); - } - if ([value caseInsensitiveCompare:@"DEVICE_DEFENDER"] == NSOrderedSame) { - return @(AWSIoTLogTargetTypeDeviceDefender); - } return @(AWSIoTLogTargetTypeUnknown); } 
reverseBlock:^NSString *(NSNumber *value) { switch ([value integerValue]) { @@ -12147,10 +12141,6 @@ + (NSValueTransformer *)targetTypeJSONTransformer { return @"SOURCE_IP"; case AWSIoTLogTargetTypePrincipalId: return @"PRINCIPAL_ID"; - case AWSIoTLogTargetTypeEventType: - return @"EVENT_TYPE"; - case AWSIoTLogTargetTypeDeviceDefender: - return @"DEVICE_DEFENDER"; default: return nil; } @@ -12360,12 +12350,6 @@ + (NSValueTransformer *)targetTypeJSONTransformer { if ([value caseInsensitiveCompare:@"PRINCIPAL_ID"] == NSOrderedSame) { return @(AWSIoTLogTargetTypePrincipalId); } - if ([value caseInsensitiveCompare:@"EVENT_TYPE"] == NSOrderedSame) { - return @(AWSIoTLogTargetTypeEventType); - } - if ([value caseInsensitiveCompare:@"DEVICE_DEFENDER"] == NSOrderedSame) { - return @(AWSIoTLogTargetTypeDeviceDefender); - } return @(AWSIoTLogTargetTypeUnknown); } reverseBlock:^NSString *(NSNumber *value) { switch ([value integerValue]) { @@ -12379,10 +12363,6 @@ + (NSValueTransformer *)targetTypeJSONTransformer { return @"SOURCE_IP"; case AWSIoTLogTargetTypePrincipalId: return @"PRINCIPAL_ID"; - case AWSIoTLogTargetTypeEventType: - return @"EVENT_TYPE"; - case AWSIoTLogTargetTypeDeviceDefender: - return @"DEVICE_DEFENDER"; default: return nil; } @@ -14447,6 +14427,20 @@ + (NSValueTransformer *)targetJSONTransformer { @end +@implementation AWSIoTServerCertificateConfig + ++ (BOOL)supportsSecureCoding { + return YES; +} + ++ (NSDictionary *)JSONKeyPathsByPropertyKey { + return @{ + @"enableOCSPCheck" : @"enableOCSPCheck", + }; +} + +@end + @implementation AWSIoTServerCertificateSummary + (BOOL)supportsSecureCoding { @@ -16612,6 +16606,7 @@ + (NSDictionary *)JSONKeyPathsByPropertyKey { @"domainConfigurationName" : @"domainConfigurationName", @"domainConfigurationStatus" : @"domainConfigurationStatus", @"removeAuthorizerConfig" : @"removeAuthorizerConfig", + @"serverCertificateConfig" : @"serverCertificateConfig", @"tlsConfig" : @"tlsConfig", }; } @@ -16641,6 +16636,10 @@ + (NSValueTransformer *)domainConfigurationStatusJSONTransformer { }]; } ++ (NSValueTransformer *)serverCertificateConfigJSONTransformer { + return [NSValueTransformer awsmtl_JSONDictionaryTransformerWithModelClass:[AWSIoTServerCertificateConfig class]]; +} + + (NSValueTransformer *)tlsConfigJSONTransformer { return [NSValueTransformer awsmtl_JSONDictionaryTransformerWithModelClass:[AWSIoTTlsConfig class]]; } diff --git a/AWSIoT/AWSIoTResources.h b/AWSIoT/AWSIoTResources.h index 3002c70d3bc..778147a5b9c 100644 --- a/AWSIoT/AWSIoTResources.h +++ b/AWSIoT/AWSIoTResources.h @@ -1,5 +1,5 @@ // -// Copyright 2010-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. +// Copyright 2010-2024 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"). // You may not use this file except in compliance with the License. diff --git a/AWSIoT/AWSIoTResources.m b/AWSIoT/AWSIoTResources.m index caebb242ed0..8927088bb64 100644 --- a/AWSIoT/AWSIoTResources.m +++ b/AWSIoT/AWSIoTResources.m @@ -1,5 +1,5 @@ // -// Copyright 2010-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. +// Copyright 2010-2024 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"). // You may not use this file except in compliance with the License. 
@@ -1772,7 +1772,7 @@ - (NSString *)definitionString { {\"shape\":\"UnauthorizedException\"},\ {\"shape\":\"ThrottlingException\"}\ ],\ - \"documentation\":\"Returns a unique endpoint specific to the Amazon Web Services account making the call.
Requires permission to access the DescribeEndpoint action.
\"\ + \"documentation\":\"Returns or creates a unique endpoint specific to the Amazon Web Services account making the call.
The first time DescribeEndpoint
is called, an endpoint is created. All subsequent calls to DescribeEndpoint
return the same endpoint.
Requires permission to access the DescribeEndpoint action.
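A minimal caller-side sketch of the behavior documented above, assuming a default-configured AWSIoT client (the iot:Data-ATS endpoint type is an illustrative assumption): the first call creates the endpoint, and every later call returns the same address.

AWSIoTDescribeEndpointRequest *endpointRequest = [AWSIoTDescribeEndpointRequest new];
endpointRequest.endpointType = @"iot:Data-ATS"; // assumption: ATS data endpoint
[[[AWSIoT defaultIoT] describeEndpoint:endpointRequest] continueWithBlock:^id _Nullable(AWSTask<AWSIoTDescribeEndpointResponse *> *task) {
    if (task.error) {
        NSLog(@"DescribeEndpoint failed: %@", task.error);
    } else {
        // Repeated calls return this same account-specific endpoint.
        NSLog(@"Endpoint: %@", task.result.endpointAddress);
    }
    return nil;
}];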
\"\ },\ \"DescribeEventConfigurations\":{\ \"name\":\"DescribeEventConfigurations\",\ @@ -2440,7 +2440,7 @@ - (NSString *)definitionString { {\"shape\":\"InternalFailureException\"},\ {\"shape\":\"InvalidRequestException\"}\ ],\ - \"documentation\":\"Gets a registration code used to register a CA certificate with IoT.
Requires permission to access the GetRegistrationCode action.
\"\ + \"documentation\":\"Gets a registration code used to register a CA certificate with IoT.
IoT will create a registration code as part of this API call if the registration code doesn't exist or has been deleted. If you already have a registration code, this API call will return the same registration code.
Requires permission to access the GetRegistrationCode action.
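The same create-or-return semantics can be exercised with a short sketch (default client assumed): the registration code is created on the first call, and the identical code is returned afterwards.

AWSIoTGetRegistrationCodeRequest *codeRequest = [AWSIoTGetRegistrationCodeRequest new];
[[[AWSIoT defaultIoT] getRegistrationCode:codeRequest] continueWithBlock:^id _Nullable(AWSTask<AWSIoTGetRegistrationCodeResponse *> *task) {
    if (!task.error) {
        NSLog(@"Registration code: %@", task.result.registrationCode);
    }
    return nil;
}];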
\"\ },\ \"GetStatistics\":{\ \"name\":\"GetStatistics\",\ @@ -4212,6 +4212,7 @@ - (NSString *)definitionString { \"output\":{\"shape\":\"UpdatePackageResponse\"},\ \"errors\":[\ {\"shape\":\"ThrottlingException\"},\ + {\"shape\":\"ConflictException\"},\ {\"shape\":\"InternalServerException\"},\ {\"shape\":\"ValidationException\"},\ {\"shape\":\"ResourceNotFoundException\"}\ @@ -4230,6 +4231,7 @@ - (NSString *)definitionString { \"output\":{\"shape\":\"UpdatePackageConfigurationResponse\"},\ \"errors\":[\ {\"shape\":\"ThrottlingException\"},\ + {\"shape\":\"ConflictException\"},\ {\"shape\":\"InternalServerException\"},\ {\"shape\":\"ValidationException\"}\ ],\ @@ -4247,6 +4249,7 @@ - (NSString *)definitionString { \"output\":{\"shape\":\"UpdatePackageVersionResponse\"},\ \"errors\":[\ {\"shape\":\"ThrottlingException\"},\ + {\"shape\":\"ConflictException\"},\ {\"shape\":\"InternalServerException\"},\ {\"shape\":\"ValidationException\"},\ {\"shape\":\"ResourceNotFoundException\"}\ @@ -7066,6 +7069,10 @@ - (NSString *)definitionString { \"tlsConfig\":{\ \"shape\":\"TlsConfig\",\ \"documentation\":\"An object that specifies the TLS configuration for a domain.
\"\ + },\ + \"serverCertificateConfig\":{\ + \"shape\":\"ServerCertificateConfig\",\ + \"documentation\":\"The server certificate configuration.
\"\ }\ }\ },\ @@ -7288,7 +7295,7 @@ - (NSString *)definitionString { },\ \"destinationPackageVersions\":{\ \"shape\":\"DestinationPackageVersions\",\ - \"documentation\":\"The package version Amazon Resource Names (ARNs) that are installed on the device when the job successfully completes.
Note:The following Length Constraints relates to a single ARN. Up to 25 package version ARNs are allowed.
\"\ + \"documentation\":\"The package version Amazon Resource Names (ARNs) that are installed on the device when the job successfully completes. The package version must be in either the Published or Deprecated state when the job deploys. For more information, see Package version lifecycle.
Note:The following Length Constraints relate to a single ARN. Up to 25 package version ARNs are allowed.
\"\ }\ }\ },\ @@ -7356,7 +7363,7 @@ - (NSString *)definitionString { },\ \"destinationPackageVersions\":{\ \"shape\":\"DestinationPackageVersions\",\ - \"documentation\":\"The package version Amazon Resource Names (ARNs) that are installed on the device when the job successfully completes.
Note:The following Length Constraints relates to a single ARN. Up to 25 package version ARNs are allowed.
\"\ + \"documentation\":\"The package version Amazon Resource Names (ARNs) that are installed on the device when the job successfully completes. The package version must be in either the Published or Deprecated state when the job deploys. For more information, see Package version lifecycle.
Note:The following Length Constraints relate to a single ARN. Up to 25 package version ARNs are allowed.
\"\ }\ }\ },\ @@ -9541,6 +9548,10 @@ - (NSString *)definitionString { \"tlsConfig\":{\ \"shape\":\"TlsConfig\",\ \"documentation\":\"An object that specifies the TLS configuration for a domain.
\"\ + },\ + \"serverCertificateConfig\":{\ + \"shape\":\"ServerCertificateConfig\",\ + \"documentation\":\"The server certificate configuration.
\"\ }\ }\ },\ @@ -9800,7 +9811,7 @@ - (NSString *)definitionString { },\ \"destinationPackageVersions\":{\ \"shape\":\"DestinationPackageVersions\",\ - \"documentation\":\"The package version Amazon Resource Names (ARNs) that are installed on the device when the job successfully completes.
Note:The following Length Constraints relates to a single ARN. Up to 25 package version ARNs are allowed.
\"\ + \"documentation\":\"The package version Amazon Resource Names (ARNs) that are installed on the device when the job successfully completes. The package version must be in either the Published or Deprecated state when the job deploys. For more information, see Package version lifecycle.
Note:The following Length Constraints relate to a single ARN. Up to 25 package version ARNs are allowed.
\"\ }\ }\ },\ @@ -10965,6 +10976,7 @@ - (NSString *)definitionString { },\ \"documentation\":\"Parameters used when defining a mitigation action that enable Amazon Web Services IoT Core logging.
\"\ },\ + \"EnableOCSPCheck\":{\"type\":\"boolean\"},\ \"EnableTopicRuleRequest\":{\ \"type\":\"structure\",\ \"required\":[\"ruleName\"],\ @@ -12386,7 +12398,7 @@ - (NSString *)definitionString { },\ \"destinationPackageVersions\":{\ \"shape\":\"DestinationPackageVersions\",\ - \"documentation\":\"The package version Amazon Resource Names (ARNs) that are installed on the device when the job successfully completes.
Note:The following Length Constraints relates to a single ARN. Up to 25 package version ARNs are allowed.
\"\ + \"documentation\":\"The package version Amazon Resource Names (ARNs) that are installed on the device when the job successfully completes. The package version must be in either the Published or Deprecated state when the job deploys. For more information, see Package version lifecycle.The package version must be in either the Published or Deprecated state when the job deploys. For more information, see Package version lifecycle.
Note:The following Length Constraints relate to a single ARN. Up to 25 package version ARNs are allowed.
\"\ }\ },\ \"documentation\":\"The Job
object contains details about a job.
A Boolean value that indicates whether the Online Certificate Status Protocol (OCSP) server certificate check is enabled.
For more information, see Configuring OCSP server-certificate stapling in domain configuration from Amazon Web Services IoT Core Developer Guide.
\"\ + }\ + },\ + \"documentation\":\"The server certificate configuration.
\"\ + },\ \"ServerCertificateStatus\":{\ \"type\":\"string\",\ \"enum\":[\ @@ -19876,6 +19896,10 @@ - (NSString *)definitionString { \"tlsConfig\":{\ \"shape\":\"TlsConfig\",\ \"documentation\":\"An object that specifies the TLS configuration for a domain.
\"\ + },\ + \"serverCertificateConfig\":{\ + \"shape\":\"ServerCertificateConfig\",\ + \"documentation\":\"The server certificate configuration.
\"\ }\ }\ },\ diff --git a/AWSIoT/AWSIoTService.h b/AWSIoT/AWSIoTService.h index 91cc8320593..96981179500 100644 --- a/AWSIoT/AWSIoTService.h +++ b/AWSIoT/AWSIoTService.h @@ -1,5 +1,5 @@ // -// Copyright 2010-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. +// Copyright 2010-2024 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"). // You may not use this file except in compliance with the License. @@ -2555,7 +2555,7 @@ FOUNDATION_EXPORT NSString *const AWSIoTSDKVersion; - (void)describeDomainConfiguration:(AWSIoTDescribeDomainConfigurationRequest *)request completionHandler:(void (^ _Nullable)(AWSIoTDescribeDomainConfigurationResponse * _Nullable response, NSError * _Nullable error))completionHandler; /** -Returns a unique endpoint specific to the Amazon Web Services account making the call.
Requires permission to access the DescribeEndpoint action.
+Returns or creates a unique endpoint specific to the Amazon Web Services account making the call.
The first time DescribeEndpoint
is called, an endpoint is created. All subsequent calls to DescribeEndpoint
return the same endpoint.
Requires permission to access the DescribeEndpoint action.
@param request A container for the necessary parameters to execute the DescribeEndpoint service method. @@ -2567,7 +2567,7 @@ FOUNDATION_EXPORT NSString *const AWSIoTSDKVersion; - (AWSTaskReturns a unique endpoint specific to the Amazon Web Services account making the call.
Requires permission to access the DescribeEndpoint action.
+Returns or creates a unique endpoint specific to the Amazon Web Services account making the call.
The first time DescribeEndpoint
is called, an endpoint is created. All subsequent calls to DescribeEndpoint
return the same endpoint.
Requires permission to access the DescribeEndpoint action.
@param request A container for the necessary parameters to execute the DescribeEndpoint service method. @param completionHandler The completion handler to call when the load request is complete. @@ -3518,7 +3518,7 @@ FOUNDATION_EXPORT NSString *const AWSIoTSDKVersion; - (void)getPolicyVersion:(AWSIoTGetPolicyVersionRequest *)request completionHandler:(void (^ _Nullable)(AWSIoTGetPolicyVersionResponse * _Nullable response, NSError * _Nullable error))completionHandler; /** -Gets a registration code used to register a CA certificate with IoT.
Requires permission to access the GetRegistrationCode action.
+Gets a registration code used to register a CA certificate with IoT.
IoT will create a registration code as part of this API call if the registration code doesn't exist or has been deleted. If you already have a registration code, this API call will return the same registration code.
Requires permission to access the GetRegistrationCode action.
@param request A container for the necessary parameters to execute the GetRegistrationCode service method. @@ -3530,7 +3530,7 @@ FOUNDATION_EXPORT NSString *const AWSIoTSDKVersion; - (AWSTaskGets a registration code used to register a CA certificate with IoT.
Requires permission to access the GetRegistrationCode action.
+Gets a registration code used to register a CA certificate with IoT.
IoT will create a registration code as part of this API call if the registration code doesn't exist or has been deleted. If you already have a registration code, this API call will return the same registration code.
Requires permission to access the GetRegistrationCode action.
@param request A container for the necessary parameters to execute the GetRegistrationCode service method. @param completionHandler The completion handler to call when the load request is complete. @@ -6142,7 +6142,7 @@ FOUNDATION_EXPORT NSString *const AWSIoTSDKVersion; @param request A container for the necessary parameters to execute the UpdatePackage service method. - @return An instance of `AWSTask`. On successful execution, `task.result` will contain an instance of `AWSIoTUpdatePackageResponse`. On failed execution, `task.error` may contain an `NSError` with `AWSIoTErrorDomain` domain and the following error code: `AWSIoTErrorThrottling`, `AWSIoTErrorInternalServer`, `AWSIoTErrorValidation`, `AWSIoTErrorResourceNotFound`. + @return An instance of `AWSTask`. On successful execution, `task.result` will contain an instance of `AWSIoTUpdatePackageResponse`. On failed execution, `task.error` may contain an `NSError` with `AWSIoTErrorDomain` domain and the following error code: `AWSIoTErrorThrottling`, `AWSIoTErrorConflict`, `AWSIoTErrorInternalServer`, `AWSIoTErrorValidation`, `AWSIoTErrorResourceNotFound`. @see AWSIoTUpdatePackageRequest @see AWSIoTUpdatePackageResponse @@ -6155,7 +6155,7 @@ FOUNDATION_EXPORT NSString *const AWSIoTSDKVersion; @param request A container for the necessary parameters to execute the UpdatePackage service method. @param completionHandler The completion handler to call when the load request is complete. `response` - A response object, or `nil` if the request failed. - `error` - An error object that indicates why the request failed, or `nil` if the request was successful. On failed execution, `error` may contain an `NSError` with `AWSIoTErrorDomain` domain and the following error code: `AWSIoTErrorThrottling`, `AWSIoTErrorInternalServer`, `AWSIoTErrorValidation`, `AWSIoTErrorResourceNotFound`. + `error` - An error object that indicates why the request failed, or `nil` if the request was successful. On failed execution, `error` may contain an `NSError` with `AWSIoTErrorDomain` domain and the following error code: `AWSIoTErrorThrottling`, `AWSIoTErrorConflict`, `AWSIoTErrorInternalServer`, `AWSIoTErrorValidation`, `AWSIoTErrorResourceNotFound`. @see AWSIoTUpdatePackageRequest @see AWSIoTUpdatePackageResponse @@ -6167,7 +6167,7 @@ FOUNDATION_EXPORT NSString *const AWSIoTSDKVersion; @param request A container for the necessary parameters to execute the UpdatePackageConfiguration service method. - @return An instance of `AWSTask`. On successful execution, `task.result` will contain an instance of `AWSIoTUpdatePackageConfigurationResponse`. On failed execution, `task.error` may contain an `NSError` with `AWSIoTErrorDomain` domain and the following error code: `AWSIoTErrorThrottling`, `AWSIoTErrorInternalServer`, `AWSIoTErrorValidation`. + @return An instance of `AWSTask`. On successful execution, `task.result` will contain an instance of `AWSIoTUpdatePackageConfigurationResponse`. On failed execution, `task.error` may contain an `NSError` with `AWSIoTErrorDomain` domain and the following error code: `AWSIoTErrorThrottling`, `AWSIoTErrorConflict`, `AWSIoTErrorInternalServer`, `AWSIoTErrorValidation`. @see AWSIoTUpdatePackageConfigurationRequest @see AWSIoTUpdatePackageConfigurationResponse @@ -6180,7 +6180,7 @@ FOUNDATION_EXPORT NSString *const AWSIoTSDKVersion; @param request A container for the necessary parameters to execute the UpdatePackageConfiguration service method. @param completionHandler The completion handler to call when the load request is complete. 
`response` - A response object, or `nil` if the request failed. - `error` - An error object that indicates why the request failed, or `nil` if the request was successful. On failed execution, `error` may contain an `NSError` with `AWSIoTErrorDomain` domain and the following error code: `AWSIoTErrorThrottling`, `AWSIoTErrorInternalServer`, `AWSIoTErrorValidation`. + `error` - An error object that indicates why the request failed, or `nil` if the request was successful. On failed execution, `error` may contain an `NSError` with `AWSIoTErrorDomain` domain and the following error code: `AWSIoTErrorThrottling`, `AWSIoTErrorConflict`, `AWSIoTErrorInternalServer`, `AWSIoTErrorValidation`. @see AWSIoTUpdatePackageConfigurationRequest @see AWSIoTUpdatePackageConfigurationResponse @@ -6192,7 +6192,7 @@ FOUNDATION_EXPORT NSString *const AWSIoTSDKVersion; @param request A container for the necessary parameters to execute the UpdatePackageVersion service method. - @return An instance of `AWSTask`. On successful execution, `task.result` will contain an instance of `AWSIoTUpdatePackageVersionResponse`. On failed execution, `task.error` may contain an `NSError` with `AWSIoTErrorDomain` domain and the following error code: `AWSIoTErrorThrottling`, `AWSIoTErrorInternalServer`, `AWSIoTErrorValidation`, `AWSIoTErrorResourceNotFound`. + @return An instance of `AWSTask`. On successful execution, `task.result` will contain an instance of `AWSIoTUpdatePackageVersionResponse`. On failed execution, `task.error` may contain an `NSError` with `AWSIoTErrorDomain` domain and the following error code: `AWSIoTErrorThrottling`, `AWSIoTErrorConflict`, `AWSIoTErrorInternalServer`, `AWSIoTErrorValidation`, `AWSIoTErrorResourceNotFound`. @see AWSIoTUpdatePackageVersionRequest @see AWSIoTUpdatePackageVersionResponse @@ -6205,7 +6205,7 @@ FOUNDATION_EXPORT NSString *const AWSIoTSDKVersion; @param request A container for the necessary parameters to execute the UpdatePackageVersion service method. @param completionHandler The completion handler to call when the load request is complete. `response` - A response object, or `nil` if the request failed. - `error` - An error object that indicates why the request failed, or `nil` if the request was successful. On failed execution, `error` may contain an `NSError` with `AWSIoTErrorDomain` domain and the following error code: `AWSIoTErrorThrottling`, `AWSIoTErrorInternalServer`, `AWSIoTErrorValidation`, `AWSIoTErrorResourceNotFound`. + `error` - An error object that indicates why the request failed, or `nil` if the request was successful. On failed execution, `error` may contain an `NSError` with `AWSIoTErrorDomain` domain and the following error code: `AWSIoTErrorThrottling`, `AWSIoTErrorConflict`, `AWSIoTErrorInternalServer`, `AWSIoTErrorValidation`, `AWSIoTErrorResourceNotFound`. @see AWSIoTUpdatePackageVersionRequest @see AWSIoTUpdatePackageVersionResponse diff --git a/AWSIoT/AWSIoTService.m b/AWSIoT/AWSIoTService.m index b3f35b27e59..8572ecb731b 100644 --- a/AWSIoT/AWSIoTService.m +++ b/AWSIoT/AWSIoTService.m @@ -1,5 +1,5 @@ // -// Copyright 2010-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. +// Copyright 2010-2024 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"). // You may not use this file except in compliance with the License. 
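Since the package APIs can now surface AWSIoTErrorConflict, callers may want to branch on it; a sketch (package and version names are placeholders):

AWSIoTUpdatePackageVersionRequest *versionRequest = [AWSIoTUpdatePackageVersionRequest new];
versionRequest.packageName = @"samplePackage"; // placeholder
versionRequest.versionName = @"1.0.2";         // placeholder
[[[AWSIoT defaultIoT] updatePackageVersion:versionRequest] continueWithBlock:^id _Nullable(AWSTask<AWSIoTUpdatePackageVersionResponse *> *task) {
    if ([task.error.domain isEqualToString:AWSIoTErrorDomain] && task.error.code == AWSIoTErrorConflict) {
        NSLog(@"Conflicting package update in flight; retry later.");
    }
    return nil;
}];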
@@ -25,7 +25,7 @@ #import "AWSIoTResources.h" static NSString *const AWSInfoIoT = @"IoT"; -NSString *const AWSIoTSDKVersion = @"2.33.10"; +NSString *const AWSIoTSDKVersion = @"2.34.0"; @interface AWSIoTResponseSerializer : AWSJSONResponseSerializer diff --git a/AWSIoT/Info.plist b/AWSIoT/Info.plist index 00a733ed3c8..f13059556e3 100644 --- a/AWSIoT/Info.plist +++ b/AWSIoT/Info.plist @@ -15,7 +15,7 @@The retry behavior in case Kinesis Data Firehose is unable to deliver documents to the Serverless offering for Amazon OpenSearch Service. The default value is 300 (5 minutes).
+The retry behavior in case Firehose is unable to deliver documents to the Serverless offering for Amazon OpenSearch Service. The default value is 300 (5 minutes).
*/ @property (nonatomic, strong) AWSFirehoseAmazonOpenSearchServerlessRetryOptions * _Nullable retryOptions; /** -The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data Firehose for calling the Serverless offering for Amazon OpenSearch Service Configuration API and for indexing documents.
+The Amazon Resource Name (ARN) of the IAM role to be assumed by Firehose for calling the Serverless offering for Amazon OpenSearch Service Configuration API and for indexing documents.
*/ @property (nonatomic, strong) NSString * _Nullable roleARN; /** -Defines how documents should be delivered to Amazon S3. When it is set to FailedDocumentsOnly, Kinesis Data Firehose writes any documents that could not be indexed to the configured Amazon S3 destination, with AmazonOpenSearchService-failed/ appended to the key prefix. When set to AllDocuments, Kinesis Data Firehose delivers all incoming records to Amazon S3, and also writes failed documents with AmazonOpenSearchService-failed/ appended to the prefix.
+Defines how documents should be delivered to Amazon S3. When it is set to FailedDocumentsOnly, Firehose writes any documents that could not be indexed to the configured Amazon S3 destination, with AmazonOpenSearchService-failed/ appended to the key prefix. When set to AllDocuments, Firehose delivers all incoming records to Amazon S3, and also writes failed documents with AmazonOpenSearchService-failed/ appended to the prefix.
*/ @property (nonatomic, assign) AWSFirehoseAmazonOpenSearchServerlessS3BackupMode s3BackupMode; @@ -499,12 +520,12 @@ typedef NS_ENUM(NSInteger, AWSFirehoseSplunkS3BackupMode) { @property (nonatomic, strong) AWSFirehoseProcessingConfiguration * _Nullable processingConfiguration; /** -The retry behavior in case Kinesis Data Firehose is unable to deliver documents to the Serverless offering for Amazon OpenSearch Service. The default value is 300 (5 minutes).
+The retry behavior in case Firehose is unable to deliver documents to the Serverless offering for Amazon OpenSearch Service. The default value is 300 (5 minutes).
*/ @property (nonatomic, strong) AWSFirehoseAmazonOpenSearchServerlessRetryOptions * _Nullable retryOptions; /** -The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data Firehose for calling the Serverless offering for Amazon OpenSearch Service Configuration API and for indexing documents.
+The Amazon Resource Name (ARN) of the IAM role to be assumed by Firehose for calling the Serverless offering for Amazon OpenSearch Service Configuration API and for indexing documents.
*/ @property (nonatomic, strong) NSString * _Nullable roleARN; @@ -516,13 +537,13 @@ typedef NS_ENUM(NSInteger, AWSFirehoseSplunkS3BackupMode) { @end /** -Configures retry behavior in case Kinesis Data Firehose is unable to deliver documents to the Serverless offering for Amazon OpenSearch Service.
+Configures retry behavior in case Firehose is unable to deliver documents to the Serverless offering for Amazon OpenSearch Service.
*/ @interface AWSFirehoseAmazonOpenSearchServerlessRetryOptions : AWSModel /** -After an initial failure to deliver to the Serverless offering for Amazon OpenSearch Service, the total amount of time during which Kinesis Data Firehose retries delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. Default value is 300 seconds (5 minutes). A value of 0 (zero) results in no retries.
+After an initial failure to deliver to the Serverless offering for Amazon OpenSearch Service, the total amount of time during which Firehose retries delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. Default value is 300 seconds (5 minutes). A value of 0 (zero) results in no retries.
*/ @property (nonatomic, strong) NSNumber * _Nullable durationInSeconds; @@ -569,7 +590,7 @@ typedef NS_ENUM(NSInteger, AWSFirehoseSplunkS3BackupMode) { @property (nonatomic, strong) NSString * _Nullable clusterEndpoint; /** -Indicates the method for setting up document ID. The supported methods are Kinesis Data Firehose generated document ID and OpenSearch Service generated document ID.
+Indicates the method for setting up document ID. The supported methods are Firehose generated document ID and OpenSearch Service generated document ID.
*/ @property (nonatomic, strong) AWSFirehoseDocumentIdOptions * _Nullable documentIdOptions; @@ -594,17 +615,17 @@ typedef NS_ENUM(NSInteger, AWSFirehoseSplunkS3BackupMode) { @property (nonatomic, strong) AWSFirehoseProcessingConfiguration * _Nullable processingConfiguration; /** -The retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon OpenSearch Service. The default value is 300 (5 minutes).
+The retry behavior in case Firehose is unable to deliver documents to Amazon OpenSearch Service. The default value is 300 (5 minutes).
*/ @property (nonatomic, strong) AWSFirehoseAmazonopensearchserviceRetryOptions * _Nullable retryOptions; /** -The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data Firehose for calling the Amazon OpenSearch Service Configuration API and for indexing documents.
+The Amazon Resource Name (ARN) of the IAM role to be assumed by Firehose for calling the Amazon OpenSearch Service Configuration API and for indexing documents.
*/ @property (nonatomic, strong) NSString * _Nullable roleARN; /** -Defines how documents should be delivered to Amazon S3. When it is set to FailedDocumentsOnly, Kinesis Data Firehose writes any documents that could not be indexed to the configured Amazon S3 destination, with AmazonOpenSearchService-failed/ appended to the key prefix. When set to AllDocuments, Kinesis Data Firehose delivers all incoming records to Amazon S3, and also writes failed documents with AmazonOpenSearchService-failed/ appended to the prefix.
+Defines how documents should be delivered to Amazon S3. When it is set to FailedDocumentsOnly, Firehose writes any documents that could not be indexed to the configured Amazon S3 destination, with AmazonOpenSearchService-failed/ appended to the key prefix. When set to AllDocuments, Firehose delivers all incoming records to Amazon S3, and also writes failed documents with AmazonOpenSearchService-failed/ appended to the prefix.
*/ @property (nonatomic, assign) AWSFirehoseAmazonopensearchserviceS3BackupMode s3BackupMode; @@ -614,7 +635,7 @@ typedef NS_ENUM(NSInteger, AWSFirehoseSplunkS3BackupMode) { @property (nonatomic, strong) AWSFirehoseS3DestinationConfiguration * _Nullable s3Configuration; /** -The Amazon OpenSearch Service type name. For Elasticsearch 6.x, there can be only one type per index. If you try to specify a new type for an existing index that already has another type, Kinesis Data Firehose returns an error during run time.
+The Amazon OpenSearch Service type name. For Elasticsearch 6.x, there can be only one type per index. If you try to specify a new type for an existing index that already has another type, Firehose returns an error during run time.
*/ @property (nonatomic, strong) NSString * _Nullable typeName; @@ -642,12 +663,12 @@ typedef NS_ENUM(NSInteger, AWSFirehoseSplunkS3BackupMode) { @property (nonatomic, strong) AWSFirehoseCloudWatchLoggingOptions * _Nullable cloudWatchLoggingOptions; /** -The endpoint to use when communicating with the cluster. Kinesis Data Firehose uses either this ClusterEndpoint or the DomainARN field to send data to Amazon OpenSearch Service.
+The endpoint to use when communicating with the cluster. Firehose uses either this ClusterEndpoint or the DomainARN field to send data to Amazon OpenSearch Service.
*/ @property (nonatomic, strong) NSString * _Nullable clusterEndpoint; /** -Indicates the method for setting up document ID. The supported methods are Kinesis Data Firehose generated document ID and OpenSearch Service generated document ID.
+Indicates the method for setting up document ID. The supported methods are Firehose generated document ID and OpenSearch Service generated document ID.
*/ @property (nonatomic, strong) AWSFirehoseDocumentIdOptions * _Nullable documentIdOptions; @@ -725,7 +746,7 @@ typedef NS_ENUM(NSInteger, AWSFirehoseSplunkS3BackupMode) { @property (nonatomic, strong) NSString * _Nullable clusterEndpoint; /** -Indicates the method for setting up document ID. The supported methods are Kinesis Data Firehose generated document ID and OpenSearch Service generated document ID.
+Indicates the method for setting up document ID. The supported methods are Firehose generated document ID and OpenSearch Service generated document ID.
*/ @property (nonatomic, strong) AWSFirehoseDocumentIdOptions * _Nullable documentIdOptions; @@ -750,12 +771,12 @@ typedef NS_ENUM(NSInteger, AWSFirehoseSplunkS3BackupMode) { @property (nonatomic, strong) AWSFirehoseProcessingConfiguration * _Nullable processingConfiguration; /** -The retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon OpenSearch Service. The default value is 300 (5 minutes).
+The retry behavior in case Firehose is unable to deliver documents to Amazon OpenSearch Service. The default value is 300 (5 minutes).
*/ @property (nonatomic, strong) AWSFirehoseAmazonopensearchserviceRetryOptions * _Nullable retryOptions; /** -The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data Firehose for calling the Amazon OpenSearch Service Configuration API and for indexing documents.
+The Amazon Resource Name (ARN) of the IAM role to be assumed by Firehose for calling the Amazon OpenSearch Service Configuration API and for indexing documents.
*/ @property (nonatomic, strong) NSString * _Nullable roleARN; @@ -765,20 +786,20 @@ typedef NS_ENUM(NSInteger, AWSFirehoseSplunkS3BackupMode) { @property (nonatomic, strong) AWSFirehoseS3DestinationUpdate * _Nullable s3Update; /** -The Amazon OpenSearch Service type name. For Elasticsearch 6.x, there can be only one type per index. If you try to specify a new type for an existing index that already has another type, Kinesis Data Firehose returns an error during runtime.
If you upgrade Elasticsearch from 6.x to 7.x and don’t update your delivery stream, Kinesis Data Firehose still delivers data to Elasticsearch with the old index name and type name. If you want to update your delivery stream with a new index name, provide an empty string for TypeName.
+The Amazon OpenSearch Service type name. For Elasticsearch 6.x, there can be only one type per index. If you try to specify a new type for an existing index that already has another type, Firehose returns an error during runtime.
If you upgrade Elasticsearch from 6.x to 7.x and don’t update your delivery stream, Firehose still delivers data to Elasticsearch with the old index name and type name. If you want to update your delivery stream with a new index name, provide an empty string for TypeName.
*/ @property (nonatomic, strong) NSString * _Nullable typeName; @end /** -Configures retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon OpenSearch Service.
+Configures retry behavior in case Firehose is unable to deliver documents to Amazon OpenSearch Service.
*/ @interface AWSFirehoseAmazonopensearchserviceRetryOptions : AWSModel /** -After an initial failure to deliver to Amazon OpenSearch Service, the total amount of time during which Kinesis Data Firehose retries delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. Default value is 300 seconds (5 minutes). A value of 0 (zero) results in no retries.
+After an initial failure to deliver to Amazon OpenSearch Service, the total amount of time during which Firehose retries delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. Default value is 300 seconds (5 minutes). A value of 0 (zero) results in no retries.
*/ @property (nonatomic, strong) NSNumber * _Nullable durationInSeconds; @@ -804,7 +825,7 @@ typedef NS_ENUM(NSInteger, AWSFirehoseSplunkS3BackupMode) { @end /** -Describes hints for the buffering to perform before delivering data to the destination. These options are treated as hints, and therefore Kinesis Data Firehose might choose to use different values when it is optimal. The SizeInMBs
and IntervalInSeconds
parameters are optional. However, if specify a value for one of them, you must also provide a value for the other.
Describes hints for the buffering to perform before delivering data to the destination. These options are treated as hints, and therefore Firehose might choose to use different values when it is optimal. The SizeInMBs
and IntervalInSeconds
parameters are optional. However, if specify a value for one of them, you must also provide a value for the other.
Optional parameters to use with the Amazon Redshift COPY
command. For more information, see the "Optional Parameters" section of Amazon Redshift COPY command. Some possible examples that would apply to Kinesis Data Firehose are as follows:
delimiter '\t' lzop;
- fields are delimited with "\t" (TAB character) and compressed using lzop.
delimiter '|'
- fields are delimited with "|" (this is the default delimiter).
delimiter '|' escape
- the delimiter should be escaped.
fixedwidth 'venueid:3,venuename:25,venuecity:12,venuestate:2,venueseats:6'
- fields are fixed width in the source, with each width specified after every column in the table.
JSON 's3://mybucket/jsonpaths.txt'
- data is in JSON format, and the path specified is the format of the data.
For more examples, see Amazon Redshift COPY command examples.
+Optional parameters to use with the Amazon Redshift COPY
command. For more information, see the "Optional Parameters" section of Amazon Redshift COPY command. Some possible examples that would apply to Firehose are as follows:
delimiter '\t' lzop;
- fields are delimited with "\t" (TAB character) and compressed using lzop.
delimiter '|'
- fields are delimited with "|" (this is the default delimiter).
delimiter '|' escape
- the delimiter should be escaped.
fixedwidth 'venueid:3,venuename:25,venuecity:12,venuestate:2,venueseats:6'
- fields are fixed width in the source, with each width specified after every column in the table.
JSON 's3://mybucket/jsonpaths.txt'
- data is in JSON format, and the path specified is the format of the data.
For more examples, see Amazon Redshift COPY command examples.
*/ @property (nonatomic, strong) NSString * _Nullable replicateOptions; @@ -934,6 +955,11 @@ typedef NS_ENUM(NSInteger, AWSFirehoseSplunkS3BackupMode) { */ @property (nonatomic, strong) AWSFirehoseS3DestinationConfiguration * _Nullable s3DestinationConfiguration; +/** +Configure Snowflake destination
+ */ +@property (nonatomic, strong) AWSFirehoseSnowflakeDestinationConfiguration * _Nullable snowflakeDestinationConfiguration; + /**The destination in Splunk. You can specify only one destination.
*/ @@ -960,7 +986,7 @@ typedef NS_ENUM(NSInteger, AWSFirehoseSplunkS3BackupMode) { @end /** -Specifies that you want Kinesis Data Firehose to convert data from the JSON format to the Parquet or ORC format before writing it to Amazon S3. Kinesis Data Firehose uses the serializer and deserializer that you specify, in addition to the column information from the Amazon Web Services Glue table, to deserialize your input data from JSON and then serialize it to the Parquet or ORC format. For more information, see Kinesis Data Firehose Record Format Conversion.
+Specifies that you want Firehose to convert data from the JSON format to the Parquet or ORC format before writing it to Amazon S3. Firehose uses the serializer and deserializer that you specify, in addition to the column information from the Amazon Web Services Glue table, to deserialize your input data from JSON and then serialize it to the Parquet or ORC format. For more information, see Firehose Record Format Conversion.
*/ @interface AWSFirehoseDataFormatConversionConfiguration : AWSModel @@ -971,12 +997,12 @@ typedef NS_ENUM(NSInteger, AWSFirehoseSplunkS3BackupMode) { @property (nonatomic, strong) NSNumber * _Nullable enabled; /** -Specifies the deserializer that you want Kinesis Data Firehose to use to convert the format of your data from JSON. This parameter is required if Enabled
is set to true.
Specifies the deserializer that you want Firehose to use to convert the format of your data from JSON. This parameter is required if Enabled
is set to true.
Specifies the serializer that you want Kinesis Data Firehose to use to convert the format of your data to the Parquet or ORC format. This parameter is required if Enabled
is set to true.
Specifies the serializer that you want Firehose to use to convert the format of your data to the Parquet or ORC format. This parameter is required if Enabled
is set to true.
Set this to true if you want to delete the delivery stream even if Kinesis Data Firehose is unable to retire the grant for the CMK. Kinesis Data Firehose might be unable to retire the grant due to a customer error, such as when the CMK or the grant are in an invalid state. If you force deletion, you can then use the RevokeGrant operation to revoke the grant you gave to Kinesis Data Firehose. If a failure to retire the grant happens due to an Amazon Web Services KMS issue, Kinesis Data Firehose keeps retrying the delete operation.
The default value is false.
+Set this to true if you want to delete the delivery stream even if Firehose is unable to retire the grant for the CMK. Firehose might be unable to retire the grant due to a customer error, such as when the CMK or the grant are in an invalid state. If you force deletion, you can then use the RevokeGrant operation to revoke the grant you gave to Firehose. If a failure to retire the grant happens due to an Amazon Web Services KMS issue, Firehose keeps retrying the delete operation.
The default value is false.
*/ @property (nonatomic, strong) NSNumber * _Nullable allowForceDelete; @@ -1118,12 +1144,12 @@ typedef NS_ENUM(NSInteger, AWSFirehoseSplunkS3BackupMode) { /** -If you set KeyType
to CUSTOMER_MANAGED_CMK
, you must specify the Amazon Resource Name (ARN) of the CMK. If you set KeyType
to Amazon Web Services_OWNED_CMK
, Kinesis Data Firehose uses a service-account CMK.
If you set KeyType
to CUSTOMER_MANAGED_CMK
, you must specify the Amazon Resource Name (ARN) of the CMK. If you set KeyType
to Amazon Web Services_OWNED_CMK
, Firehose uses a service-account CMK.
Indicates the type of customer master key (CMK) to use for encryption. The default setting is Amazon Web Services_OWNED_CMK
. For more information about CMKs, see Customer Master Keys (CMKs). When you invoke CreateDeliveryStream or StartDeliveryStreamEncryption with KeyType
set to CUSTOMER_MANAGED_CMK, Kinesis Data Firehose invokes the Amazon KMS operation CreateGrant to create a grant that allows the Kinesis Data Firehose service to use the customer managed CMK to perform encryption and decryption. Kinesis Data Firehose manages that grant.
When you invoke StartDeliveryStreamEncryption to change the CMK for a delivery stream that is encrypted with a customer managed CMK, Kinesis Data Firehose schedules the grant it had on the old CMK for retirement.
You can use a CMK of type CUSTOMER_MANAGED_CMK to encrypt up to 500 delivery streams. If a CreateDeliveryStream or StartDeliveryStreamEncryption operation exceeds this limit, Kinesis Data Firehose throws a LimitExceededException
.
To encrypt your delivery stream, use symmetric CMKs. Kinesis Data Firehose doesn't support asymmetric CMKs. For information about symmetric and asymmetric CMKs, see About Symmetric and Asymmetric CMKs in the Amazon Web Services Key Management Service developer guide.
Indicates the type of customer master key (CMK) to use for encryption. The default setting is Amazon Web Services_OWNED_CMK
. For more information about CMKs, see Customer Master Keys (CMKs). When you invoke CreateDeliveryStream or StartDeliveryStreamEncryption with KeyType
set to CUSTOMER_MANAGED_CMK, Firehose invokes the Amazon KMS operation CreateGrant to create a grant that allows the Firehose service to use the customer managed CMK to perform encryption and decryption. Firehose manages that grant.
When you invoke StartDeliveryStreamEncryption to change the CMK for a delivery stream that is encrypted with a customer managed CMK, Firehose schedules the grant it had on the old CMK for retirement.
You can use a CMK of type CUSTOMER_MANAGED_CMK to encrypt up to 500 delivery streams. If a CreateDeliveryStream or StartDeliveryStreamEncryption operation exceeds this limit, Firehose throws a LimitExceededException
.
To encrypt your delivery stream, use symmetric CMKs. Firehose doesn't support asymmetric CMKs. For information about symmetric and asymmetric CMKs, see About Symmetric and Asymmetric CMKs in the Amazon Web Services Key Management Service developer guide.
The ID of the destination to start returning the destination information. Kinesis Data Firehose supports one destination per delivery stream.
+The ID of the destination to start returning the destination information. Firehose supports one destination per delivery stream.
*/ @property (nonatomic, strong) NSString * _Nullable exclusiveStartDestinationId; @@ -1166,18 +1192,18 @@ typedef NS_ENUM(NSInteger, AWSFirehoseSplunkS3BackupMode) { @end /** -The deserializer you want Kinesis Data Firehose to use for converting the input data from JSON. Kinesis Data Firehose then serializes the data to its final format using the Serializer. Kinesis Data Firehose supports two types of deserializers: the Apache Hive JSON SerDe and the OpenX JSON SerDe.
+The deserializer you want Firehose to use for converting the input data from JSON. Firehose then serializes the data to its final format using the Serializer. Firehose supports two types of deserializers: the Apache Hive JSON SerDe and the OpenX JSON SerDe.
*/ @interface AWSFirehoseDeserializer : AWSModel /** -The native Hive / HCatalog JsonSerDe. Used by Kinesis Data Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the OpenX SerDe.
+The native Hive / HCatalog JsonSerDe. Used by Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the OpenX SerDe.
*/ @property (nonatomic, strong) AWSFirehoseHiveJsonSerDe * _Nullable hiveJsonSerDe; /** -The OpenX SerDe. Used by Kinesis Data Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the native Hive / HCatalog JsonSerDe.
+The OpenX SerDe. Used by Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the native Hive / HCatalog JsonSerDe.
*/ @property (nonatomic, strong) AWSFirehoseOpenXJsonSerDe * _Nullable openXJsonSerDe; @@ -1230,6 +1256,11 @@ typedef NS_ENUM(NSInteger, AWSFirehoseSplunkS3BackupMode) { */ @property (nonatomic, strong) AWSFirehoseS3DestinationDescription * _Nullable s3DestinationDescription; +/** +Optional description for the destination
+ */ +@property (nonatomic, strong) AWSFirehoseSnowflakeDestinationDescription * _Nullable snowflakeDestinationDescription; + /**The destination in Splunk.
*/ @@ -1238,14 +1269,14 @@ typedef NS_ENUM(NSInteger, AWSFirehoseSplunkS3BackupMode) { @end /** -Indicates the method for setting up document ID. The supported methods are Kinesis Data Firehose generated document ID and OpenSearch Service generated document ID.
+Indicates the method for setting up document ID. The supported methods are Firehose generated document ID and OpenSearch Service generated document ID.
Required parameters: [DefaultDocumentIdFormat] */ @interface AWSFirehoseDocumentIdOptions : AWSModel /** -When the FIREHOSE_DEFAULT
option is chosen, Kinesis Data Firehose generates a unique document ID for each record based on a unique internal identifier. The generated document ID is stable across multiple delivery attempts, which helps prevent the same record from being indexed multiple times with different document IDs.
When the NO_DOCUMENT_ID
option is chosen, Kinesis Data Firehose does not include any document IDs in the requests it sends to the Amazon OpenSearch Service. This causes the Amazon OpenSearch Service domain to generate document IDs. In case of multiple delivery attempts, this may cause the same record to be indexed more than once with different document IDs. This option enables write-heavy operations, such as the ingestion of logs and observability data, to consume less resources in the Amazon OpenSearch Service domain, resulting in improved performance.
When the FIREHOSE_DEFAULT
option is chosen, Firehose generates a unique document ID for each record based on a unique internal identifier. The generated document ID is stable across multiple delivery attempts, which helps prevent the same record from being indexed multiple times with different document IDs.
When the NO_DOCUMENT_ID
option is chosen, Firehose does not include any document IDs in the requests it sends to the Amazon OpenSearch Service. This causes the Amazon OpenSearch Service domain to generate document IDs. In case of multiple delivery attempts, this may cause the same record to be indexed more than once with different document IDs. This option enables write-heavy operations, such as the ingestion of logs and observability data, to consume less resources in the Amazon OpenSearch Service domain, resulting in improved performance.
Specifies that the dynamic partitioning is enabled for this Kinesis Data Firehose delivery stream.
+Specifies that the dynamic partitioning is enabled for this Firehose delivery stream.
*/ @property (nonatomic, strong) NSNumber * _Nullable enabled; /** -The retry behavior in case Kinesis Data Firehose is unable to deliver data to an Amazon S3 prefix.
+The retry behavior in case Firehose is unable to deliver data to an Amazon S3 prefix.
*/ @property (nonatomic, strong) AWSFirehoseRetryOptions * _Nullable retryOptions; @@ -1310,7 +1341,7 @@ typedef NS_ENUM(NSInteger, AWSFirehoseSplunkS3BackupMode) { @property (nonatomic, strong) NSString * _Nullable clusterEndpoint; /** -Indicates the method for setting up document ID. The supported methods are Kinesis Data Firehose generated document ID and OpenSearch Service generated document ID.
+Indicates the method for setting up document ID. The supported methods are Firehose generated document ID and OpenSearch Service generated document ID.
*/ @property (nonatomic, strong) AWSFirehoseDocumentIdOptions * _Nullable documentIdOptions; @@ -1335,17 +1366,17 @@ typedef NS_ENUM(NSInteger, AWSFirehoseSplunkS3BackupMode) { @property (nonatomic, strong) AWSFirehoseProcessingConfiguration * _Nullable processingConfiguration; /** -The retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon ES. The default value is 300 (5 minutes).
+The retry behavior in case Firehose is unable to deliver documents to Amazon ES. The default value is 300 (5 minutes).
*/ @property (nonatomic, strong) AWSFirehoseElasticsearchRetryOptions * _Nullable retryOptions; /** -The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data Firehose for calling the Amazon ES Configuration API and for indexing documents. For more information, see Grant Kinesis Data Firehose Access to an Amazon S3 Destination and Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.
+The Amazon Resource Name (ARN) of the IAM role to be assumed by Firehose for calling the Amazon ES Configuration API and for indexing documents. For more information, see Grant Firehose Access to an Amazon S3 Destination and Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.
*/ @property (nonatomic, strong) NSString * _Nullable roleARN; /** -Defines how documents should be delivered to Amazon S3. When it is set to FailedDocumentsOnly
, Kinesis Data Firehose writes any documents that could not be indexed to the configured Amazon S3 destination, with AmazonOpenSearchService-failed/
appended to the key prefix. When set to AllDocuments
, Kinesis Data Firehose delivers all incoming records to Amazon S3, and also writes failed documents with AmazonOpenSearchService-failed/
appended to the prefix. For more information, see Amazon S3 Backup for the Amazon ES Destination. Default value is FailedDocumentsOnly
.
You can't change this backup mode after you create the delivery stream.
+Defines how documents should be delivered to Amazon S3. When it is set to FailedDocumentsOnly
, Firehose writes any documents that could not be indexed to the configured Amazon S3 destination, with AmazonOpenSearchService-failed/
appended to the key prefix. When set to AllDocuments
, Firehose delivers all incoming records to Amazon S3, and also writes failed documents with AmazonOpenSearchService-failed/
appended to the prefix. For more information, see Amazon S3 Backup for the Amazon ES Destination. Default value is FailedDocumentsOnly
.
You can't change this backup mode after you create the delivery stream.
*/ @property (nonatomic, assign) AWSFirehoseElasticsearchS3BackupMode s3BackupMode; @@ -1355,7 +1386,7 @@ typedef NS_ENUM(NSInteger, AWSFirehoseSplunkS3BackupMode) { @property (nonatomic, strong) AWSFirehoseS3DestinationConfiguration * _Nullable s3Configuration; /** -The Elasticsearch type name. For Elasticsearch 6.x, there can be only one type per index. If you try to specify a new type for an existing index that already has another type, Kinesis Data Firehose returns an error during run time.
For Elasticsearch 7.x, don't specify a TypeName
.
The Elasticsearch type name. For Elasticsearch 6.x, there can be only one type per index. If you try to specify a new type for an existing index that already has another type, Firehose returns an error during run time.
For Elasticsearch 7.x, don't specify a TypeName
.
The endpoint to use when communicating with the cluster. Kinesis Data Firehose uses either this ClusterEndpoint
or the DomainARN
field to send data to Amazon ES.
The endpoint to use when communicating with the cluster. Firehose uses either this ClusterEndpoint
or the DomainARN
field to send data to Amazon ES.
Indicates the method for setting up document ID. The supported methods are Kinesis Data Firehose generated document ID and OpenSearch Service generated document ID.
+Indicates the method for setting up document ID. The supported methods are Firehose generated document ID and OpenSearch Service generated document ID.
*/ @property (nonatomic, strong) AWSFirehoseDocumentIdOptions * _Nullable documentIdOptions; /** -The ARN of the Amazon ES domain. For more information, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.
Kinesis Data Firehose uses either ClusterEndpoint
or DomainARN
to send data to Amazon ES.
The ARN of the Amazon ES domain. For more information, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.
Firehose uses either ClusterEndpoint
or DomainARN
to send data to Amazon ES.
Indicates the method for setting up document ID. The supported methods are Kinesis Data Firehose generated document ID and OpenSearch Service generated document ID.
+Indicates the method for setting up document ID. The supported methods are Firehose generated document ID and OpenSearch Service generated document ID.
*/ @property (nonatomic, strong) AWSFirehoseDocumentIdOptions * _Nullable documentIdOptions; @@ -1491,12 +1522,12 @@ typedef NS_ENUM(NSInteger, AWSFirehoseSplunkS3BackupMode) { @property (nonatomic, strong) AWSFirehoseProcessingConfiguration * _Nullable processingConfiguration; /** -The retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon ES. The default value is 300 (5 minutes).
+The retry behavior in case Firehose is unable to deliver documents to Amazon ES. The default value is 300 (5 minutes).
*/ @property (nonatomic, strong) AWSFirehoseElasticsearchRetryOptions * _Nullable retryOptions; /** -The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data Firehose for calling the Amazon ES Configuration API and for indexing documents. For more information, see Grant Kinesis Data Firehose Access to an Amazon S3 Destination and Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.
+The Amazon Resource Name (ARN) of the IAM role to be assumed by Firehose for calling the Amazon ES Configuration API and for indexing documents. For more information, see Grant Firehose Access to an Amazon S3 Destination and Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.
*/ @property (nonatomic, strong) NSString * _Nullable roleARN; @@ -1506,20 +1537,20 @@ typedef NS_ENUM(NSInteger, AWSFirehoseSplunkS3BackupMode) { @property (nonatomic, strong) AWSFirehoseS3DestinationUpdate * _Nullable s3Update; /** -The Elasticsearch type name. For Elasticsearch 6.x, there can be only one type per index. If you try to specify a new type for an existing index that already has another type, Kinesis Data Firehose returns an error during runtime.
If you upgrade Elasticsearch from 6.x to 7.x and don’t update your delivery stream, Kinesis Data Firehose still delivers data to Elasticsearch with the old index name and type name. If you want to update your delivery stream with a new index name, provide an empty string for TypeName
.
The Elasticsearch type name. For Elasticsearch 6.x, there can be only one type per index. If you try to specify a new type for an existing index that already has another type, Firehose returns an error during runtime.
If you upgrade Elasticsearch from 6.x to 7.x and don’t update your delivery stream, Firehose still delivers data to Elasticsearch with the old index name and type name. If you want to update your delivery stream with a new index name, provide an empty string for TypeName
.
Configures retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon ES.
+Configures retry behavior in case Firehose is unable to deliver documents to Amazon ES.
*/ @interface AWSFirehoseElasticsearchRetryOptions : AWSModel /** -After an initial failure to deliver to Amazon ES, the total amount of time during which Kinesis Data Firehose retries delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. Default value is 300 seconds (5 minutes). A value of 0 (zero) results in no retries.
+After an initial failure to deliver to Amazon ES, the total amount of time during which Firehose retries delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. Default value is 300 seconds (5 minutes). A value of 0 (zero) results in no retries.
*/ @property (nonatomic, strong) NSNumber * _Nullable durationInSeconds; @@ -1570,6 +1601,11 @@ typedef NS_ENUM(NSInteger, AWSFirehoseSplunkS3BackupMode) { */ @property (nonatomic, assign) AWSFirehoseCompressionFormat compressionFormat; +/** +The time zone you prefer. UTC is the default.
+ */ +@property (nonatomic, strong) NSString * _Nullable customTimeZone; + /**The serializer, deserializer, and schema for converting data from the JSON format to the Parquet or ORC format before writing it to Amazon S3.
*/ @@ -1586,10 +1622,15 @@ typedef NS_ENUM(NSInteger, AWSFirehoseSplunkS3BackupMode) { @property (nonatomic, strong) AWSFirehoseEncryptionConfiguration * _Nullable encryptionConfiguration; /** -A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.
+A prefix that Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.
*/ @property (nonatomic, strong) NSString * _Nullable errorOutputPrefix; +/** +Specify a file extension. It will override the default file extension
+ */ +@property (nonatomic, strong) NSString * _Nullable fileExtension; + /**The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered Amazon S3 files. You can also specify a custom prefix, as described in Custom Prefixes for Amazon S3 Objects.
*/ @@ -1644,6 +1685,11 @@ typedef NS_ENUM(NSInteger, AWSFirehoseSplunkS3BackupMode) { */ @property (nonatomic, assign) AWSFirehoseCompressionFormat compressionFormat; +/** +The time zone you prefer. UTC is the default.
+ */ +@property (nonatomic, strong) NSString * _Nullable customTimeZone; + /**The serializer, deserializer, and schema for converting data from the JSON format to the Parquet or ORC format before writing it to Amazon S3.
*/ @@ -1660,10 +1706,15 @@ typedef NS_ENUM(NSInteger, AWSFirehoseSplunkS3BackupMode) { @property (nonatomic, strong) AWSFirehoseEncryptionConfiguration * _Nullable encryptionConfiguration; /** -A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.
+A prefix that Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.
*/ @property (nonatomic, strong) NSString * _Nullable errorOutputPrefix; +/** +Specify a file extension. It will override the default file extension
+ */ +@property (nonatomic, strong) NSString * _Nullable fileExtension; + /**The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered Amazon S3 files. You can also specify a custom prefix, as described in Custom Prefixes for Amazon S3 Objects.
*/ @@ -1717,6 +1768,11 @@ typedef NS_ENUM(NSInteger, AWSFirehoseSplunkS3BackupMode) { */ @property (nonatomic, assign) AWSFirehoseCompressionFormat compressionFormat; +/** +The time zone you prefer. UTC is the default.
+ */ +@property (nonatomic, strong) NSString * _Nullable customTimeZone; + /**The serializer, deserializer, and schema for converting data from the JSON format to the Parquet or ORC format before writing it to Amazon S3.
*/ @@ -1733,10 +1789,15 @@ typedef NS_ENUM(NSInteger, AWSFirehoseSplunkS3BackupMode) { @property (nonatomic, strong) AWSFirehoseEncryptionConfiguration * _Nullable encryptionConfiguration; /** -A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.
+A prefix that Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.
*/ @property (nonatomic, strong) NSString * _Nullable errorOutputPrefix; +/** +Specify a file extension. It will override the default file extension
+ */ +@property (nonatomic, strong) NSString * _Nullable fileExtension; + /**The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered Amazon S3 files. You can also specify a custom prefix, as described in Custom Prefixes for Amazon S3 Objects.
*/ @@ -1784,20 +1845,20 @@ typedef NS_ENUM(NSInteger, AWSFirehoseSplunkS3BackupMode) { @end /** -The native Hive / HCatalog JsonSerDe. Used by Kinesis Data Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the OpenX SerDe.
+The native Hive / HCatalog JsonSerDe. Used by Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the OpenX SerDe.
*/ @interface AWSFirehoseHiveJsonSerDe : AWSModel /** -Indicates how you want Kinesis Data Firehose to parse the date and timestamps that may be present in your input data JSON. To specify these format strings, follow the pattern syntax of JodaTime's DateTimeFormat format strings. For more information, see Class DateTimeFormat. You can also use the special value millis
to parse timestamps in epoch milliseconds. If you don't specify a format, Kinesis Data Firehose uses java.sql.Timestamp::valueOf
by default.
Indicates how you want Firehose to parse the date and timestamps that may be present in your input data JSON. To specify these format strings, follow the pattern syntax of JodaTime's DateTimeFormat format strings. For more information, see Class DateTimeFormat. You can also use the special value millis
to parse timestamps in epoch milliseconds. If you don't specify a format, Firehose uses java.sql.Timestamp::valueOf
by default.
Describes the buffering options that can be applied before data is delivered to the HTTP endpoint destination. Kinesis Data Firehose treats these options as hints, and it might choose to use more optimal values. The SizeInMBs
and IntervalInSeconds
parameters are optional. However, if specify a value for one of them, you must also provide a value for the other.
Describes the buffering options that can be applied before data is delivered to the HTTP endpoint destination. Firehose treats these options as hints, and it might choose to use more optimal values. The SizeInMBs
and IntervalInSeconds
parameters are optional. However, if you specify a value for one of them, you must also provide a value for the other.</p>
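To make the pairing concrete, a hedged sketch; the class and property names follow this SDK's generated pattern for the HTTP endpoint buffering hints and are assumptions here:

// Hedged sketch: if you set one hint you must also set the other, per the description above.
AWSFirehoseHttpEndpointBufferingHints *hints = [AWSFirehoseHttpEndpointBufferingHints new];
hints.sizeInMBs = @5;           // buffer up to 5 MB...
hints.intervalInSeconds = @300; // ...or deliver every 300 seconds, whichever comes first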
The buffering options that can be used before data is delivered to the specified destination. Kinesis Data Firehose treats these options as hints, and it might choose to use more optimal values. The SizeInMBs
and IntervalInSeconds
parameters are optional. However, if you specify a value for one of them, you must also provide a value for the other.
The buffering options that can be used before data is delivered to the specified destination. Firehose treats these options as hints, and it might choose to use more optimal values. The SizeInMBs
and IntervalInSeconds
parameters are optional. However, if you specify a value for one of them, you must also provide a value for the other.
Describes the retry behavior in case Kinesis Data Firehose is unable to deliver data to the specified HTTP endpoint destination, or if it doesn't receive a valid acknowledgment of receipt from the specified HTTP endpoint destination.
+Describes the retry behavior in case Firehose is unable to deliver data to the specified HTTP endpoint destination, or if it doesn't receive a valid acknowledgment of receipt from the specified HTTP endpoint destination.
*/ @property (nonatomic, strong) AWSFirehoseHttpEndpointRetryOptions * _Nullable retryOptions; /** -Kinesis Data Firehose uses this IAM role for all the permissions that the delivery stream needs.
+Firehose uses this IAM role for all the permissions that the delivery stream needs.
*/ @property (nonatomic, strong) NSString * _Nullable roleARN; /** -Describes the S3 bucket backup options for the data that Kinesis Data Firehose delivers to the HTTP endpoint destination. You can back up all documents (AllData
) or only the documents that Kinesis Data Firehose could not deliver to the specified HTTP endpoint destination (FailedDataOnly
).
Describes the S3 bucket backup options for the data that Firehose delivers to the HTTP endpoint destination. You can back up all documents (AllData
) or only the documents that Firehose could not deliver to the specified HTTP endpoint destination (FailedDataOnly
).
Describes buffering options that can be applied to the data before it is delivered to the HTTPS endpoint destination. Kinesis Data Firehose teats these options as hints, and it might choose to use more optimal values. The SizeInMBs
and IntervalInSeconds
parameters are optional. However, if specify a value for one of them, you must also provide a value for the other.
Describes buffering options that can be applied to the data before it is delivered to the HTTPS endpoint destination. Firehose teats these options as hints, and it might choose to use more optimal values. The SizeInMBs
and IntervalInSeconds
parameters are optional. However, if specify a value for one of them, you must also provide a value for the other.
Describes the retry behavior in case Kinesis Data Firehose is unable to deliver data to the specified HTTP endpoint destination, or if it doesn't receive a valid acknowledgment of receipt from the specified HTTP endpoint destination.
+Describes the retry behavior in case Firehose is unable to deliver data to the specified HTTP endpoint destination, or if it doesn't receive a valid acknowledgment of receipt from the specified HTTP endpoint destination.
*/ @property (nonatomic, strong) AWSFirehoseHttpEndpointRetryOptions * _Nullable retryOptions; /** -Kinesis Data Firehose uses this IAM role for all the permissions that the delivery stream needs.
+Firehose uses this IAM role for all the permissions that the delivery stream needs.
*/ @property (nonatomic, strong) NSString * _Nullable roleARN; /** -Describes the S3 bucket backup options for the data that Kinesis Firehose delivers to the HTTP endpoint destination. You can back up all documents (AllData
) or only the documents that Kinesis Data Firehose could not deliver to the specified HTTP endpoint destination (FailedDataOnly
).
Describes the S3 bucket backup options for the data that Kinesis Firehose delivers to the HTTP endpoint destination. You can back up all documents (AllData
) or only the documents that Firehose could not deliver to the specified HTTP endpoint destination (FailedDataOnly
).
Describes buffering options that can be applied to the data before it is delivered to the HTTPS endpoint destination. Kinesis Data Firehose teats these options as hints, and it might choose to use more optimal values. The SizeInMBs
and IntervalInSeconds
parameters are optional. However, if specify a value for one of them, you must also provide a value for the other.
Describes buffering options that can be applied to the data before it is delivered to the HTTPS endpoint destination. Firehose teats these options as hints, and it might choose to use more optimal values. The SizeInMBs
and IntervalInSeconds
parameters are optional. However, if specify a value for one of them, you must also provide a value for the other.
Describes the retry behavior in case Kinesis Data Firehose is unable to deliver data to the specified HTTP endpoint destination, or if it doesn't receive a valid acknowledgment of receipt from the specified HTTP endpoint destination.
+Describes the retry behavior in case Firehose is unable to deliver data to the specified HTTP endpoint destination, or if it doesn't receive a valid acknowledgment of receipt from the specified HTTP endpoint destination.
*/ @property (nonatomic, strong) AWSFirehoseHttpEndpointRetryOptions * _Nullable retryOptions; /** -Kinesis Data Firehose uses this IAM role for all the permissions that the delivery stream needs.
+Firehose uses this IAM role for all the permissions that the delivery stream needs.
*/ @property (nonatomic, strong) NSString * _Nullable roleARN; /** -Describes the S3 bucket backup options for the data that Kinesis Firehose delivers to the HTTP endpoint destination. You can back up all documents (AllData
) or only the documents that Kinesis Data Firehose could not deliver to the specified HTTP endpoint destination (FailedDataOnly
).
Describes the S3 bucket backup options for the data that Kinesis Firehose delivers to the HTTP endpoint destination. You can back up all documents (AllData
) or only the documents that Firehose could not deliver to the specified HTTP endpoint destination (FailedDataOnly
).
Kinesis Data Firehose uses the content encoding to compress the body of a request before sending the request to the destination. For more information, see Content-Encoding in MDN Web Docs, the official Mozilla documentation.
+Firehose uses the content encoding to compress the body of a request before sending the request to the destination. For more information, see Content-Encoding in MDN Web Docs, the official Mozilla documentation.
*/ @property (nonatomic, assign) AWSFirehoseContentEncoding contentEncoding; @end /** -Describes the retry behavior in case Kinesis Data Firehose is unable to deliver data to the specified HTTP endpoint destination, or if it doesn't receive a valid acknowledgment of receipt from the specified HTTP endpoint destination.
+Describes the retry behavior in case Firehose is unable to deliver data to the specified HTTP endpoint destination, or if it doesn't receive a valid acknowledgment of receipt from the specified HTTP endpoint destination.
*/ @interface AWSFirehoseHttpEndpointRetryOptions : AWSModel /** -The total amount of time that Kinesis Data Firehose spends on retries. This duration starts after the initial attempt to send data to the custom destination via HTTPS endpoint fails. It doesn't include the periods during which Kinesis Data Firehose waits for acknowledgment from the specified destination after each attempt.
+The total amount of time that Firehose spends on retries. This duration starts after the initial attempt to send data to the custom destination via HTTPS endpoint fails. It doesn't include the periods during which Firehose waits for acknowledgment from the specified destination after each attempt.
*/ @property (nonatomic, strong) NSNumber * _Nullable durationInSeconds; @@ -2113,13 +2174,13 @@ typedef NS_ENUM(NSInteger, AWSFirehoseSplunkS3BackupMode) { @end /** -Details about a Kinesis data stream used as the source for a Kinesis Data Firehose delivery stream.
+Details about a Kinesis data stream used as the source for a Firehose delivery stream.
*/ @interface AWSFirehoseKinesisStreamSourceDescription : AWSModel /** -Kinesis Data Firehose starts retrieving records from the Kinesis data stream starting with this timestamp.
+Firehose starts retrieving records from the Kinesis data stream starting with this timestamp.
*/ @property (nonatomic, strong) NSDate * _Nullable deliveryStartTimestamp; @@ -2242,7 +2303,7 @@ typedef NS_ENUM(NSInteger, AWSFirehoseSplunkS3BackupMode) { @end /** -Details about the Amazon MSK cluster used as the source for a Kinesis Data Firehose delivery stream.
+Details about the Amazon MSK cluster used as the source for a Firehose delivery stream.
*/ @interface AWSFirehoseMSKSourceDescription : AWSModel @@ -2253,7 +2314,7 @@ typedef NS_ENUM(NSInteger, AWSFirehoseSplunkS3BackupMode) { @property (nonatomic, strong) AWSFirehoseAuthenticationConfiguration * _Nullable authenticationConfiguration; /** -Kinesis Data Firehose starts retrieving records from the topic within the Amazon MSK cluster starting with this timestamp.
+Firehose starts retrieving records from the topic within the Amazon MSK cluster starting with this timestamp.
*/ @property (nonatomic, strong) NSDate * _Nullable deliveryStartTimestamp; @@ -2270,13 +2331,13 @@ typedef NS_ENUM(NSInteger, AWSFirehoseSplunkS3BackupMode) { @end /** -The OpenX SerDe. Used by Kinesis Data Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the native Hive / HCatalog JsonSerDe.
+The OpenX SerDe. Used by Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the native Hive / HCatalog JsonSerDe.
*/ @interface AWSFirehoseOpenXJsonSerDe : AWSModel /** -When set to true
, which is the default, Kinesis Data Firehose converts JSON keys to lowercase before deserializing them.
When set to true
, which is the default, Firehose converts JSON keys to lowercase before deserializing them.
When set to true
, specifies that the names of the keys include dots and that you want Kinesis Data Firehose to replace them with underscores. This is useful because Apache Hive does not allow dots in column names. For example, if the JSON contains a key whose name is "a.b", you can define the column name to be "a_b" when using this option.
The default is false
.
When set to true
, specifies that the names of the keys include dots and that you want Firehose to replace them with underscores. This is useful because Apache Hive does not allow dots in column names. For example, if the JSON contains a key whose name is "a.b", you can define the column name to be "a_b" when using this option.
The default is false
.
The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.
+The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Firehose uses this value for padding calculations.
*/ @property (nonatomic, strong) NSNumber * _Nullable blockSizeBytes; /** -The column names for which you want Kinesis Data Firehose to create bloom filters. The default is null
.
The column names for which you want Firehose to create bloom filters. The default is null
.
A number between 0 and 1 that defines the tolerance for block padding as a decimal fraction of stripe size. The default value is 0.05, which means 5 percent of stripe size.
For the default values of 64 MiB ORC stripes and 256 MiB HDFS blocks, the default block padding tolerance of 5 percent reserves a maximum of 3.2 MiB for padding within the 256 MiB block. In such a case, if the available size within the block is more than 3.2 MiB, a new, smaller stripe is inserted to fit within that space. This ensures that no stripe crosses block boundaries and causes remote reads within a node-local task.
Kinesis Data Firehose ignores this parameter when OrcSerDe$EnablePadding is false
.
A number between 0 and 1 that defines the tolerance for block padding as a decimal fraction of stripe size. The default value is 0.05, which means 5 percent of stripe size.
For the default values of 64 MiB ORC stripes and 256 MiB HDFS blocks, the default block padding tolerance of 5 percent reserves a maximum of 3.2 MiB for padding within the 256 MiB block. In such a case, if the available size within the block is more than 3.2 MiB, a new, smaller stripe is inserted to fit within that space. This ensures that no stripe crosses block boundaries and causes remote reads within a node-local task.
Firehose ignores this parameter when OrcSerDe$EnablePadding is false
.
Specifies the serializer that you want Kinesis Data Firehose to use to convert the format of your data before it writes it to Amazon S3. This parameter is required if Enabled
is set to true.
Specifies the serializer that you want Firehose to use to convert the format of your data before it writes it to Amazon S3. This parameter is required if Enabled
is set to true.
The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.
+The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Firehose uses this value for padding calculations.
*/ @property (nonatomic, strong) NSNumber * _Nullable blockSizeBytes; @@ -2420,7 +2481,7 @@ typedef NS_ENUM(NSInteger, AWSFirehoseSplunkS3BackupMode) { @end /** -Describes a data processor.
+Describes a data processor.
If you want to add a new line delimiter between records in objects that are delivered to Amazon S3, choose AppendDelimiterToRecord
as a processor type. You don’t have to put a processor parameter when you select AppendDelimiterToRecord
.
The retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon Redshift. Default value is 3600 (60 minutes).
+The retry behavior in case Firehose is unable to deliver documents to Amazon Redshift. Default value is 3600 (60 minutes).
*/ @property (nonatomic, strong) AWSFirehoseRedshiftRetryOptions * _Nullable retryOptions; @@ -2663,7 +2724,7 @@ typedef NS_ENUM(NSInteger, AWSFirehoseSplunkS3BackupMode) { @property (nonatomic, strong) AWSFirehoseProcessingConfiguration * _Nullable processingConfiguration; /** -The retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon Redshift. Default value is 3600 (60 minutes).
+The retry behavior in case Firehose is unable to deliver documents to Amazon Redshift. Default value is 3600 (60 minutes).
*/ @property (nonatomic, strong) AWSFirehoseRedshiftRetryOptions * _Nullable retryOptions; @@ -2726,7 +2787,7 @@ typedef NS_ENUM(NSInteger, AWSFirehoseSplunkS3BackupMode) { @property (nonatomic, strong) AWSFirehoseProcessingConfiguration * _Nullable processingConfiguration; /** -The retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon Redshift. Default value is 3600 (60 minutes).
+The retry behavior in case Firehose is unable to deliver documents to Amazon Redshift. Default value is 3600 (60 minutes).
*/ @property (nonatomic, strong) AWSFirehoseRedshiftRetryOptions * _Nullable retryOptions; @@ -2758,26 +2819,26 @@ typedef NS_ENUM(NSInteger, AWSFirehoseSplunkS3BackupMode) { @end /** -Configures retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon Redshift.
+Configures retry behavior in case Firehose is unable to deliver documents to Amazon Redshift.
*/ @interface AWSFirehoseRedshiftRetryOptions : AWSModel /** -The length of time during which Kinesis Data Firehose retries delivery after a failure, starting from the initial request and including the first attempt. The default value is 3600 seconds (60 minutes). Kinesis Data Firehose does not retry if the value of DurationInSeconds
is 0 (zero) or if the first delivery attempt takes longer than the current value.
The length of time during which Firehose retries delivery after a failure, starting from the initial request and including the first attempt. The default value is 3600 seconds (60 minutes). Firehose does not retry if the value of DurationInSeconds
is 0 (zero) or if the first delivery attempt takes longer than the current value.
The retry behavior in case Kinesis Data Firehose is unable to deliver data to an Amazon S3 prefix.
+The retry behavior in case Firehose is unable to deliver data to an Amazon S3 prefix.
*/ @interface AWSFirehoseRetryOptions : AWSModel /** -The period of time during which Kinesis Data Firehose retries to deliver data to the specified Amazon S3 prefix.
+The period of time during which Firehose retries to deliver data to the specified Amazon S3 prefix.
*/ @property (nonatomic, strong) NSNumber * _Nullable durationInSeconds; @@ -2816,7 +2877,7 @@ typedef NS_ENUM(NSInteger, AWSFirehoseSplunkS3BackupMode) { @property (nonatomic, strong) AWSFirehoseEncryptionConfiguration * _Nullable encryptionConfiguration; /** -A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.
+A prefix that Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.
*/ @property (nonatomic, strong) NSString * _Nullable errorOutputPrefix; @@ -2865,7 +2926,7 @@ typedef NS_ENUM(NSInteger, AWSFirehoseSplunkS3BackupMode) { @property (nonatomic, strong) AWSFirehoseEncryptionConfiguration * _Nullable encryptionConfiguration; /** -A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.
+A prefix that Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.
*/ @property (nonatomic, strong) NSString * _Nullable errorOutputPrefix; @@ -2913,7 +2974,7 @@ typedef NS_ENUM(NSInteger, AWSFirehoseSplunkS3BackupMode) { @property (nonatomic, strong) AWSFirehoseEncryptionConfiguration * _Nullable encryptionConfiguration; /** -A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.
+A prefix that Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.
*/ @property (nonatomic, strong) NSString * _Nullable errorOutputPrefix; @@ -2930,7 +2991,7 @@ typedef NS_ENUM(NSInteger, AWSFirehoseSplunkS3BackupMode) { @end /** -Specifies the schema to which you want Kinesis Data Firehose to configure your data before it writes it to Amazon S3. This parameter is required if Enabled
is set to true.
Specifies the schema to which you want Firehose to configure your data before it writes it to Amazon S3. This parameter is required if Enabled
is set to true.
The role that Kinesis Data Firehose can use to access Amazon Web Services Glue. This role must be in the same account you use for Kinesis Data Firehose. Cross-account roles aren't allowed.
If the SchemaConfiguration
request parameter is used as part of invoking the CreateDeliveryStream
API, then the RoleARN
property is required and its value must be specified.
The role that Firehose can use to access Amazon Web Services Glue. This role must be in the same account you use for Firehose. Cross-account roles aren't allowed.
If the SchemaConfiguration
request parameter is used as part of invoking the CreateDeliveryStream
API, then the RoleARN
property is required and its value must be specified.
Specifies the table version for the output data schema. If you don't specify this version ID, or if you set it to LATEST
, Kinesis Data Firehose uses the most recent version. This means that any updates to the table are automatically picked up.
Specifies the table version for the output data schema. If you don't specify this version ID, or if you set it to LATEST
, Firehose uses the most recent version. This means that any updates to the table are automatically picked up.
The serializer that you want Kinesis Data Firehose to use to convert data to the target format before writing it to Amazon S3. Kinesis Data Firehose supports two types of serializers: the ORC SerDe and the Parquet SerDe.
+The serializer that you want Firehose to use to convert data to the target format before writing it to Amazon S3. Firehose supports two types of serializers: the ORC SerDe and the Parquet SerDe.
*/ @interface AWSFirehoseSerializer : AWSModel @@ -2986,7 +3047,332 @@ typedef NS_ENUM(NSInteger, AWSFirehoseSplunkS3BackupMode) { @end /** -Details about a Kinesis data stream used as the source for a Kinesis Data Firehose delivery stream.
+Configure Snowflake destination
+ Required parameters: [AccountUrl, PrivateKey, User, Database, Schema, Table, RoleARN, S3Configuration] + */ +@interface AWSFirehoseSnowflakeDestinationConfiguration : AWSModel + + +/** +URL for accessing your Snowflake account. This URL must include your account identifier. Note that the protocol (https://) and port number are optional.
+ */ +@property (nonatomic, strong) NSString * _Nullable accountUrl; + +/** +Describes the Amazon CloudWatch logging options for your delivery stream.
+ */ +@property (nonatomic, strong) AWSFirehoseCloudWatchLoggingOptions * _Nullable cloudWatchLoggingOptions; + +/** +The name of the record content column
+ */ +@property (nonatomic, strong) NSString * _Nullable contentColumnName; + +/** +Choose to load JSON keys mapped to table column names or choose to split the JSON payload where content is mapped to a record content column and source metadata is mapped to a record metadata column.
+ */ +@property (nonatomic, assign) AWSFirehoseSnowflakeDataLoadingOption dataLoadingOption; + +/** +All data in Snowflake is maintained in databases.
+ */ +@property (nonatomic, strong) NSString * _Nullable database; + +/** +Passphrase to decrypt the private key when the key is encrypted. For information, see Using Key Pair Authentication & Key Rotation.
+ */ +@property (nonatomic, strong) NSString * _Nullable keyPassphrase; + +/** +The name of the record metadata column
+ */ +@property (nonatomic, strong) NSString * _Nullable metaDataColumnName; + +/** +The private key used to encrypt your Snowflake client. For information, see Using Key Pair Authentication & Key Rotation.
+ */ +@property (nonatomic, strong) NSString * _Nullable privateKey; + +/** +Describes a data processing configuration.
+ */ +@property (nonatomic, strong) AWSFirehoseProcessingConfiguration * _Nullable processingConfiguration; + +/** +The time period where Firehose will retry sending data to the chosen HTTP endpoint.
+ */ +@property (nonatomic, strong) AWSFirehoseSnowflakeRetryOptions * _Nullable retryOptions; + +/** +The Amazon Resource Name (ARN) of the Snowflake role
+ */ +@property (nonatomic, strong) NSString * _Nullable roleARN; + +/** +Choose an S3 backup mode
+ */ +@property (nonatomic, assign) AWSFirehoseSnowflakeS3BackupMode s3BackupMode; + +/** +Describes the configuration of a destination in Amazon S3.
+ */ +@property (nonatomic, strong) AWSFirehoseS3DestinationConfiguration * _Nullable s3Configuration; + +/** +Each database consists of one or more schemas, which are logical groupings of database objects, such as tables and views
+ */ +@property (nonatomic, strong) NSString * _Nullable schema; + +/** +Optionally configure a Snowflake role. Otherwise the default user role will be used.
+ */ +@property (nonatomic, strong) AWSFirehoseSnowflakeRoleConfiguration * _Nullable snowflakeRoleConfiguration; + +/** +The VPCE ID for Firehose to privately connect with Snowflake. The ID format is com.amazonaws.vpce.[region].vpce-svc-<[id]>. For more information, see Amazon PrivateLink & Snowflake
+ */ +@property (nonatomic, strong) AWSFirehoseSnowflakeVpcConfiguration * _Nullable snowflakeVpcConfiguration; + +/** +All data in Snowflake is stored in database tables, logically structured as collections of columns and rows.
+ */ +@property (nonatomic, strong) NSString * _Nullable table; + +/** +User login name for the Snowflake account.
+ */ +@property (nonatomic, strong) NSString * _Nullable user; + +@end + +/** +Optional Snowflake destination description
+ */ +@interface AWSFirehoseSnowflakeDestinationDescription : AWSModel + + +/** +URL for accessing your Snowflake account. This URL must include your account identifier. Note that the protocol (https://) and port number are optional.
+ */ +@property (nonatomic, strong) NSString * _Nullable accountUrl; + +/** +Describes the Amazon CloudWatch logging options for your delivery stream.
+ */ +@property (nonatomic, strong) AWSFirehoseCloudWatchLoggingOptions * _Nullable cloudWatchLoggingOptions; + +/** +The name of the record content column
+ */ +@property (nonatomic, strong) NSString * _Nullable contentColumnName; + +/** +Choose to load JSON keys mapped to table column names or choose to split the JSON payload where content is mapped to a record content column and source metadata is mapped to a record metadata column.
+ */ +@property (nonatomic, assign) AWSFirehoseSnowflakeDataLoadingOption dataLoadingOption; + +/** +All data in Snowflake is maintained in databases.
+ */ +@property (nonatomic, strong) NSString * _Nullable database; + +/** +The name of the record metadata column
+ */ +@property (nonatomic, strong) NSString * _Nullable metaDataColumnName; + +/** +Describes a data processing configuration.
+ */ +@property (nonatomic, strong) AWSFirehoseProcessingConfiguration * _Nullable processingConfiguration; + +/** +The time period where Firehose will retry sending data to the chosen HTTP endpoint.
+ */ +@property (nonatomic, strong) AWSFirehoseSnowflakeRetryOptions * _Nullable retryOptions; + +/** +The Amazon Resource Name (ARN) of the Snowflake role
+ */ +@property (nonatomic, strong) NSString * _Nullable roleARN; + +/** +Choose an S3 backup mode
+ */ +@property (nonatomic, assign) AWSFirehoseSnowflakeS3BackupMode s3BackupMode; + +/** +Describes a destination in Amazon S3.
+ */ +@property (nonatomic, strong) AWSFirehoseS3DestinationDescription * _Nullable s3DestinationDescription; + +/** +Each database consists of one or more schemas, which are logical groupings of database objects, such as tables and views
+ */ +@property (nonatomic, strong) NSString * _Nullable schema; + +/** +Optionally configure a Snowflake role. Otherwise the default user role will be used.
+ */ +@property (nonatomic, strong) AWSFirehoseSnowflakeRoleConfiguration * _Nullable snowflakeRoleConfiguration; + +/** +The VPCE ID for Firehose to privately connect with Snowflake. The ID format is com.amazonaws.vpce.[region].vpce-svc-<[id]>. For more information, see Amazon PrivateLink & Snowflake
+ */ +@property (nonatomic, strong) AWSFirehoseSnowflakeVpcConfiguration * _Nullable snowflakeVpcConfiguration; + +/** +All data in Snowflake is stored in database tables, logically structured as collections of columns and rows.
+ */ +@property (nonatomic, strong) NSString * _Nullable table; + +/** +User login name for the Snowflake account.
+ */ +@property (nonatomic, strong) NSString * _Nullable user; + +@end + +/** +Update to configuration settings
+ */ +@interface AWSFirehoseSnowflakeDestinationUpdate : AWSModel + + +/** +URL for accessing your Snowflake account. This URL must include your account identifier. Note that the protocol (https://) and port number are optional.
+ */ +@property (nonatomic, strong) NSString * _Nullable accountUrl; + +/** +Describes the Amazon CloudWatch logging options for your delivery stream.
+ */ +@property (nonatomic, strong) AWSFirehoseCloudWatchLoggingOptions * _Nullable cloudWatchLoggingOptions; + +/** +The name of the content metadata column
+ */ +@property (nonatomic, strong) NSString * _Nullable contentColumnName; + +/** +JSON keys mapped to table column names or choose to split the JSON payload where content is mapped to a record content column and source metadata is mapped to a record metadata column.
+ */ +@property (nonatomic, assign) AWSFirehoseSnowflakeDataLoadingOption dataLoadingOption; + +/** +All data in Snowflake is maintained in databases.
+ */ +@property (nonatomic, strong) NSString * _Nullable database; + +/** +Passphrase to decrypt the private key when the key is encrypted. For information, see Using Key Pair Authentication & Key Rotation.
+ */ +@property (nonatomic, strong) NSString * _Nullable keyPassphrase; + +/** +The name of the record metadata column
+ */ +@property (nonatomic, strong) NSString * _Nullable metaDataColumnName; + +/** +The private key used to encrypt your Snowflake client. For information, see Using Key Pair Authentication & Key Rotation.
+ */ +@property (nonatomic, strong) NSString * _Nullable privateKey; + +/** +Describes a data processing configuration.
+ */ +@property (nonatomic, strong) AWSFirehoseProcessingConfiguration * _Nullable processingConfiguration; + +/** +Specify how long Firehose retries sending data to the New Relic HTTP endpoint. After sending data, Firehose first waits for an acknowledgment from the HTTP endpoint. If an error occurs or the acknowledgment doesn’t arrive within the acknowledgment timeout period, Firehose starts the retry duration counter. It keeps retrying until the retry duration expires. After that, Firehose considers it a data delivery failure and backs up the data to your Amazon S3 bucket. Every time that Firehose sends data to the HTTP endpoint (either the initial attempt or a retry), it restarts the acknowledgement timeout counter and waits for an acknowledgement from the HTTP endpoint. Even if the retry duration expires, Firehose still waits for the acknowledgment until it receives it or the acknowledgement timeout period is reached. If the acknowledgment times out, Firehose determines whether there's time left in the retry counter. If there is time left, it retries again and repeats the logic until it receives an acknowledgment or determines that the retry time has expired. If you don't want Firehose to retry sending data, set this value to 0.
+ */ +@property (nonatomic, strong) AWSFirehoseSnowflakeRetryOptions * _Nullable retryOptions; + +/** +The Amazon Resource Name (ARN) of the Snowflake role
+ */ +@property (nonatomic, strong) NSString * _Nullable roleARN; + +/** +Choose an S3 backup mode
+ */ +@property (nonatomic, assign) AWSFirehoseSnowflakeS3BackupMode s3BackupMode; + +/** +Describes an update for a destination in Amazon S3.
+ */ +@property (nonatomic, strong) AWSFirehoseS3DestinationUpdate * _Nullable s3Update; + +/** +Each database consists of one or more schemas, which are logical groupings of database objects, such as tables and views
+ */ +@property (nonatomic, strong) NSString * _Nullable schema; + +/** +Optionally configure a Snowflake role. Otherwise the default user role will be used.
+ */ +@property (nonatomic, strong) AWSFirehoseSnowflakeRoleConfiguration * _Nullable snowflakeRoleConfiguration; + +/** +All data in Snowflake is stored in database tables, logically structured as collections of columns and rows.
+ */ +@property (nonatomic, strong) NSString * _Nullable table; + +/** +User login name for the Snowflake account.
+ */ +@property (nonatomic, strong) NSString * _Nullable user; + +@end + +/** +Specify how long Firehose retries sending data to the New Relic HTTP endpoint. After sending data, Firehose first waits for an acknowledgment from the HTTP endpoint. If an error occurs or the acknowledgment doesn’t arrive within the acknowledgment timeout period, Firehose starts the retry duration counter. It keeps retrying until the retry duration expires. After that, Firehose considers it a data delivery failure and backs up the data to your Amazon S3 bucket. Every time that Firehose sends data to the HTTP endpoint (either the initial attempt or a retry), it restarts the acknowledgement timeout counter and waits for an acknowledgement from the HTTP endpoint. Even if the retry duration expires, Firehose still waits for the acknowledgment until it receives it or the acknowledgement timeout period is reached. If the acknowledgment times out, Firehose determines whether there's time left in the retry counter. If there is time left, it retries again and repeats the logic until it receives an acknowledgment or determines that the retry time has expired. If you don't want Firehose to retry sending data, set this value to 0.
+ */ +@interface AWSFirehoseSnowflakeRetryOptions : AWSModel + + +/** +the time period where Firehose will retry sending data to the chosen HTTP endpoint.
+ */ +@property (nonatomic, strong) NSNumber * _Nullable durationInSeconds; + +@end + +/** +Optionally configure a Snowflake role. Otherwise the default user role will be used.
+ */ +@interface AWSFirehoseSnowflakeRoleConfiguration : AWSModel + + +/** +Enable Snowflake role
+ */ +@property (nonatomic, strong) NSNumber * _Nullable enabled; + +/** +The Snowflake role you wish to configure
+ */ +@property (nonatomic, strong) NSString * _Nullable snowflakeRole; + +@end + +/** +Configure a Snowflake VPC
+ Required parameters: [PrivateLinkVpceId] + */ +@interface AWSFirehoseSnowflakeVpcConfiguration : AWSModel + + +/** +The VPCE ID for Firehose to privately connect with Snowflake. The ID format is com.amazonaws.vpce.[region].vpce-svc-<[id]>. For more information, see Amazon PrivateLink & Snowflake
+ */ +@property (nonatomic, strong) NSString * _Nullable privateLinkVpceId; + +@end + +/** +Details about a Kinesis data stream used as the source for a Firehose delivery stream.
*/ @interface AWSFirehoseSourceDescription : AWSModel @@ -3039,12 +3425,12 @@ typedef NS_ENUM(NSInteger, AWSFirehoseSplunkS3BackupMode) { @property (nonatomic, strong) AWSFirehoseCloudWatchLoggingOptions * _Nullable cloudWatchLoggingOptions; /** -The amount of time that Kinesis Data Firehose waits to receive an acknowledgment from Splunk after it sends it data. At the end of the timeout period, Kinesis Data Firehose either tries to send the data again or considers it an error, based on your retry settings.
+The amount of time that Firehose waits to receive an acknowledgment from Splunk after it sends it data. At the end of the timeout period, Firehose either tries to send the data again or considers it an error, based on your retry settings.
*/ @property (nonatomic, strong) NSNumber * _Nullable HECAcknowledgmentTimeoutInSeconds; /** -The HTTP Event Collector (HEC) endpoint to which Kinesis Data Firehose sends your data.
+The HTTP Event Collector (HEC) endpoint to which Firehose sends your data.
*/ @property (nonatomic, strong) NSString * _Nullable HECEndpoint; @@ -3064,12 +3450,12 @@ typedef NS_ENUM(NSInteger, AWSFirehoseSplunkS3BackupMode) { @property (nonatomic, strong) AWSFirehoseProcessingConfiguration * _Nullable processingConfiguration; /** -The retry behavior in case Kinesis Data Firehose is unable to deliver data to Splunk, or if it doesn't receive an acknowledgment of receipt from Splunk.
+The retry behavior in case Firehose is unable to deliver data to Splunk, or if it doesn't receive an acknowledgment of receipt from Splunk.
*/ @property (nonatomic, strong) AWSFirehoseSplunkRetryOptions * _Nullable retryOptions; /** -Defines how documents should be delivered to Amazon S3. When set to FailedEventsOnly
, Kinesis Data Firehose writes any data that could not be indexed to the configured Amazon S3 destination. When set to AllEvents
, Kinesis Data Firehose delivers all incoming records to Amazon S3, and also writes failed documents to Amazon S3. The default value is FailedEventsOnly
.
You can update this backup mode from FailedEventsOnly
to AllEvents
. You can't update it from AllEvents
to FailedEventsOnly
.
Defines how documents should be delivered to Amazon S3. When set to FailedEventsOnly
, Firehose writes any data that could not be indexed to the configured Amazon S3 destination. When set to AllEvents
, Firehose delivers all incoming records to Amazon S3, and also writes failed documents to Amazon S3. The default value is FailedEventsOnly
.
You can update this backup mode from FailedEventsOnly
to AllEvents
. You can't update it from AllEvents
to FailedEventsOnly
.
The amount of time that Kinesis Data Firehose waits to receive an acknowledgment from Splunk after it sends it data. At the end of the timeout period, Kinesis Data Firehose either tries to send the data again or considers it an error, based on your retry settings.
+The amount of time that Firehose waits to receive an acknowledgment from Splunk after it sends it data. At the end of the timeout period, Firehose either tries to send the data again or considers it an error, based on your retry settings.
*/ @property (nonatomic, strong) NSNumber * _Nullable HECAcknowledgmentTimeoutInSeconds; /** -The HTTP Event Collector (HEC) endpoint to which Kinesis Data Firehose sends your data.
+The HTTP Event Collector (HEC) endpoint to which Firehose sends your data.
*/ @property (nonatomic, strong) NSString * _Nullable HECEndpoint; @@ -3122,12 +3508,12 @@ typedef NS_ENUM(NSInteger, AWSFirehoseSplunkS3BackupMode) { @property (nonatomic, strong) AWSFirehoseProcessingConfiguration * _Nullable processingConfiguration; /** -The retry behavior in case Kinesis Data Firehose is unable to deliver data to Splunk or if it doesn't receive an acknowledgment of receipt from Splunk.
+The retry behavior in case Firehose is unable to deliver data to Splunk or if it doesn't receive an acknowledgment of receipt from Splunk.
*/ @property (nonatomic, strong) AWSFirehoseSplunkRetryOptions * _Nullable retryOptions; /** -Defines how documents should be delivered to Amazon S3. When set to FailedDocumentsOnly
, Kinesis Data Firehose writes any data that could not be indexed to the configured Amazon S3 destination. When set to AllDocuments
, Kinesis Data Firehose delivers all incoming records to Amazon S3, and also writes failed documents to Amazon S3. Default value is FailedDocumentsOnly
.
Defines how documents should be delivered to Amazon S3. When set to FailedDocumentsOnly
, Firehose writes any data that could not be indexed to the configured Amazon S3 destination. When set to AllDocuments
, Firehose delivers all incoming records to Amazon S3, and also writes failed documents to Amazon S3. Default value is FailedDocumentsOnly
.
The amount of time that Kinesis Data Firehose waits to receive an acknowledgment from Splunk after it sends data. At the end of the timeout period, Kinesis Data Firehose either tries to send the data again or considers it an error, based on your retry settings.
+The amount of time that Firehose waits to receive an acknowledgment from Splunk after it sends data. At the end of the timeout period, Firehose either tries to send the data again or considers it an error, based on your retry settings.
*/ @property (nonatomic, strong) NSNumber * _Nullable HECAcknowledgmentTimeoutInSeconds; /** -The HTTP Event Collector (HEC) endpoint to which Kinesis Data Firehose sends your data.
+The HTTP Event Collector (HEC) endpoint to which Firehose sends your data.
*/ @property (nonatomic, strong) NSString * _Nullable HECEndpoint; @@ -3180,12 +3566,12 @@ typedef NS_ENUM(NSInteger, AWSFirehoseSplunkS3BackupMode) { @property (nonatomic, strong) AWSFirehoseProcessingConfiguration * _Nullable processingConfiguration; /** -The retry behavior in case Kinesis Data Firehose is unable to deliver data to Splunk or if it doesn't receive an acknowledgment of receipt from Splunk.
+The retry behavior in case Firehose is unable to deliver data to Splunk or if it doesn't receive an acknowledgment of receipt from Splunk.
*/ @property (nonatomic, strong) AWSFirehoseSplunkRetryOptions * _Nullable retryOptions; /** -Specifies how you want Kinesis Data Firehose to back up documents to Amazon S3. When set to FailedDocumentsOnly
, Kinesis Data Firehose writes any data that could not be indexed to the configured Amazon S3 destination. When set to AllEvents
, Kinesis Data Firehose delivers all incoming records to Amazon S3, and also writes failed documents to Amazon S3. The default value is FailedEventsOnly
.
You can update this backup mode from FailedEventsOnly
to AllEvents
. You can't update it from AllEvents
to FailedEventsOnly
.
Specifies how you want Firehose to back up documents to Amazon S3. When set to FailedDocumentsOnly
, Firehose writes any data that could not be indexed to the configured Amazon S3 destination. When set to AllEvents
, Firehose delivers all incoming records to Amazon S3, and also writes failed documents to Amazon S3. The default value is FailedEventsOnly
.
You can update this backup mode from FailedEventsOnly
to AllEvents
. You can't update it from AllEvents
to FailedEventsOnly
.
Configures retry behavior in case Kinesis Data Firehose is unable to deliver documents to Splunk, or if it doesn't receive an acknowledgment from Splunk.
+Configures retry behavior in case Firehose is unable to deliver documents to Splunk, or if it doesn't receive an acknowledgment from Splunk.
*/ @interface AWSFirehoseSplunkRetryOptions : AWSModel /** -The total amount of time that Kinesis Data Firehose spends on retries. This duration starts after the initial attempt to send data to Splunk fails. It doesn't include the periods during which Kinesis Data Firehose waits for acknowledgment from Splunk after each attempt.
+The total amount of time that Firehose spends on retries. This duration starts after the initial attempt to send data to Splunk fails. It doesn't include the periods during which Firehose waits for acknowledgment from Splunk after each attempt.
*/ @property (nonatomic, strong) NSNumber * _Nullable durationInSeconds; @@ -3383,6 +3769,11 @@ typedef NS_ENUM(NSInteger, AWSFirehoseSplunkS3BackupMode) { */ @property (nonatomic, strong) AWSFirehoseS3DestinationUpdate * _Nullable s3DestinationUpdate; +/** +Update to the Snowflake destination condiguration settings
+ */ +@property (nonatomic, strong) AWSFirehoseSnowflakeDestinationUpdate * _Nullable snowflakeDestinationUpdate; + /**Describes an update for a destination in Splunk.
*/ @@ -3406,17 +3797,17 @@ typedef NS_ENUM(NSInteger, AWSFirehoseSplunkS3BackupMode) { /** -The ARN of the IAM role that you want the delivery stream to use to create endpoints in the destination VPC. You can use your existing Kinesis Data Firehose delivery role or you can specify a new role. In either case, make sure that the role trusts the Kinesis Data Firehose service principal and that it grants the following permissions:
ec2:DescribeVpcs
ec2:DescribeVpcAttribute
ec2:DescribeSubnets
ec2:DescribeSecurityGroups
ec2:DescribeNetworkInterfaces
ec2:CreateNetworkInterface
ec2:CreateNetworkInterfacePermission
ec2:DeleteNetworkInterface
If you revoke these permissions after you create the delivery stream, Kinesis Data Firehose can't scale out by creating more ENIs when necessary. You might therefore see a degradation in performance.
+The ARN of the IAM role that you want the delivery stream to use to create endpoints in the destination VPC. You can use your existing Firehose delivery role or you can specify a new role. In either case, make sure that the role trusts the Firehose service principal and that it grants the following permissions:
ec2:DescribeVpcs
ec2:DescribeVpcAttribute
ec2:DescribeSubnets
ec2:DescribeSecurityGroups
ec2:DescribeNetworkInterfaces
ec2:CreateNetworkInterface
ec2:CreateNetworkInterfacePermission
ec2:DeleteNetworkInterface
When you specify subnets for delivering data to the destination in a private VPC, make sure you have enough number of free IP addresses in chosen subnets. If there is no available free IP address in a specified subnet, Firehose cannot create or add ENIs for the data delivery in the private VPC, and the delivery will be degraded or fail.
The IDs of the security groups that you want Kinesis Data Firehose to use when it creates ENIs in the VPC of the Amazon ES destination. You can use the same security group that the Amazon ES domain uses or different ones. If you specify different security groups here, ensure that they allow outbound HTTPS traffic to the Amazon ES domain's security group. Also ensure that the Amazon ES domain's security group allows HTTPS traffic from the security groups specified here. If you use the same security group for both your delivery stream and the Amazon ES domain, make sure the security group inbound rule allows HTTPS traffic. For more information about security group rules, see Security group rules in the Amazon VPC documentation.
+The IDs of the security groups that you want Firehose to use when it creates ENIs in the VPC of the Amazon ES destination. You can use the same security group that the Amazon ES domain uses or different ones. If you specify different security groups here, ensure that they allow outbound HTTPS traffic to the Amazon ES domain's security group. Also ensure that the Amazon ES domain's security group allows HTTPS traffic from the security groups specified here. If you use the same security group for both your delivery stream and the Amazon ES domain, make sure the security group inbound rule allows HTTPS traffic. For more information about security group rules, see Security group rules in the Amazon VPC documentation.
*/ @property (nonatomic, strong) NSArrayThe IDs of the subnets that you want Kinesis Data Firehose to use to create ENIs in the VPC of the Amazon ES destination. Make sure that the routing tables and inbound and outbound rules allow traffic to flow from the subnets whose IDs are specified here to the subnets that have the destination Amazon ES endpoints. Kinesis Data Firehose creates at least one ENI in each of the subnets that are specified here. Do not delete or modify these ENIs.
The number of ENIs that Kinesis Data Firehose creates in the subnets specified here scales up and down automatically based on throughput. To enable Kinesis Data Firehose to scale up the number of ENIs to match throughput, ensure that you have sufficient quota. To help you calculate the quota you need, assume that Kinesis Data Firehose can create up to three ENIs for this delivery stream for each of the subnets specified here. For more information about ENI quota, see Network Interfaces in the Amazon VPC Quotas topic.
+The IDs of the subnets that you want Firehose to use to create ENIs in the VPC of the Amazon ES destination. Make sure that the routing tables and inbound and outbound rules allow traffic to flow from the subnets whose IDs are specified here to the subnets that have the destination Amazon ES endpoints. Firehose creates at least one ENI in each of the subnets that are specified here. Do not delete or modify these ENIs.
The number of ENIs that Firehose creates in the subnets specified here scales up and down automatically based on throughput. To enable Firehose to scale up the number of ENIs to match throughput, ensure that you have sufficient quota. To help you calculate the quota you need, assume that Firehose can create up to three ENIs for this delivery stream for each of the subnets specified here. For more information about ENI quota, see Network Interfaces in the Amazon VPC Quotas topic.
*/ @property (nonatomic, strong) NSArrayThe ARN of the IAM role that the delivery stream uses to create endpoints in the destination VPC. You can use your existing Kinesis Data Firehose delivery role or you can specify a new role. In either case, make sure that the role trusts the Kinesis Data Firehose service principal and that it grants the following permissions:
ec2:DescribeVpcs
ec2:DescribeVpcAttribute
ec2:DescribeSubnets
ec2:DescribeSecurityGroups
ec2:DescribeNetworkInterfaces
ec2:CreateNetworkInterface
ec2:CreateNetworkInterfacePermission
ec2:DeleteNetworkInterface
If you revoke these permissions after you create the delivery stream, Kinesis Data Firehose can't scale out by creating more ENIs when necessary. You might therefore see a degradation in performance.
+The ARN of the IAM role that the delivery stream uses to create endpoints in the destination VPC. You can use your existing Firehose delivery role or you can specify a new role. In either case, make sure that the role trusts the Firehose service principal and that it grants the following permissions:
ec2:DescribeVpcs
ec2:DescribeVpcAttribute
ec2:DescribeSubnets
ec2:DescribeSecurityGroups
ec2:DescribeNetworkInterfaces
ec2:CreateNetworkInterface
ec2:CreateNetworkInterfacePermission
ec2:DeleteNetworkInterface
If you revoke these permissions after you create the delivery stream, Firehose can't scale out by creating more ENIs when necessary. You might therefore see a degradation in performance.
*/ @property (nonatomic, strong) NSString * _Nullable roleARN; /** -The IDs of the security groups that Kinesis Data Firehose uses when it creates ENIs in the VPC of the Amazon ES destination. You can use the same security group that the Amazon ES domain uses or different ones. If you specify different security groups, ensure that they allow outbound HTTPS traffic to the Amazon ES domain's security group. Also ensure that the Amazon ES domain's security group allows HTTPS traffic from the security groups specified here. If you use the same security group for both your delivery stream and the Amazon ES domain, make sure the security group inbound rule allows HTTPS traffic. For more information about security group rules, see Security group rules in the Amazon VPC documentation.
+The IDs of the security groups that Firehose uses when it creates ENIs in the VPC of the Amazon ES destination. You can use the same security group that the Amazon ES domain uses or different ones. If you specify different security groups, ensure that they allow outbound HTTPS traffic to the Amazon ES domain's security group. Also ensure that the Amazon ES domain's security group allows HTTPS traffic from the security groups specified here. If you use the same security group for both your delivery stream and the Amazon ES domain, make sure the security group inbound rule allows HTTPS traffic. For more information about security group rules, see Security group rules in the Amazon VPC documentation.
*/ @property (nonatomic, strong) NSArrayThe IDs of the subnets that Kinesis Data Firehose uses to create ENIs in the VPC of the Amazon ES destination. Make sure that the routing tables and inbound and outbound rules allow traffic to flow from the subnets whose IDs are specified here to the subnets that have the destination Amazon ES endpoints. Kinesis Data Firehose creates at least one ENI in each of the subnets that are specified here. Do not delete or modify these ENIs.
The number of ENIs that Kinesis Data Firehose creates in the subnets specified here scales up and down automatically based on throughput. To enable Kinesis Data Firehose to scale up the number of ENIs to match throughput, ensure that you have sufficient quota. To help you calculate the quota you need, assume that Kinesis Data Firehose can create up to three ENIs for this delivery stream for each of the subnets specified here. For more information about ENI quota, see Network Interfaces in the Amazon VPC Quotas topic.
+The IDs of the subnets that Firehose uses to create ENIs in the VPC of the Amazon ES destination. Make sure that the routing tables and inbound and outbound rules allow traffic to flow from the subnets whose IDs are specified here to the subnets that have the destination Amazon ES endpoints. Firehose creates at least one ENI in each of the subnets that are specified here. Do not delete or modify these ENIs.
The number of ENIs that Firehose creates in the subnets specified here scales up and down automatically based on throughput. To enable Firehose to scale up the number of ENIs to match throughput, ensure that you have sufficient quota. To help you calculate the quota you need, assume that Firehose can create up to three ENIs for this delivery stream for each of the subnets specified here. For more information about ENI quota, see Network Interfaces in the Amazon VPC Quotas topic.
*/ @property (nonatomic, strong) NSArray<NSString *> * _Nullable subnetIds; ... - \"documentation\":\"Creates a Kinesis Data Firehose delivery stream.
By default, you can create up to 50 delivery streams per Amazon Web Services Region.
This is an asynchronous operation that immediately returns. The initial status of the delivery stream is CREATING
. After the delivery stream is created, its status is ACTIVE
and it now accepts data. If the delivery stream creation fails, the status transitions to CREATING_FAILED
. Attempts to send data to a delivery stream that is not in the ACTIVE
state cause an exception. To check the state of a delivery stream, use DescribeDeliveryStream.
If the status of a delivery stream is CREATING_FAILED
, this status doesn't change, and you can't invoke CreateDeliveryStream
again on it. However, you can invoke the DeleteDeliveryStream operation to delete it.
A Kinesis Data Firehose delivery stream can be configured to receive records directly from providers using PutRecord or PutRecordBatch, or it can be configured to use an existing Kinesis stream as its source. To specify a Kinesis data stream as input, set the DeliveryStreamType
parameter to KinesisStreamAsSource
, and provide the Kinesis stream Amazon Resource Name (ARN) and role ARN in the KinesisStreamSourceConfiguration
parameter.
To create a delivery stream with server-side encryption (SSE) enabled, include DeliveryStreamEncryptionConfigurationInput in your request. This is optional. You can also invoke StartDeliveryStreamEncryption to turn on SSE for an existing delivery stream that doesn't have SSE enabled.
A delivery stream is configured with a single destination, such as Amazon Simple Storage Service (Amazon S3), Amazon Redshift, Amazon OpenSearch Service, Amazon OpenSearch Serverless, Splunk, and any custom HTTP endpoint or HTTP endpoints owned by or supported by third-party service providers, including Datadog, Dynatrace, LogicMonitor, MongoDB, New Relic, and Sumo Logic. You must specify only one of the following destination configuration parameters: ExtendedS3DestinationConfiguration
, S3DestinationConfiguration
, ElasticsearchDestinationConfiguration
, RedshiftDestinationConfiguration
, or SplunkDestinationConfiguration
.
When you specify S3DestinationConfiguration
, you can also provide the following optional values: BufferingHints, EncryptionConfiguration
, and CompressionFormat
. By default, if no BufferingHints
value is provided, Kinesis Data Firehose buffers data up to 5 MB or for 5 minutes, whichever condition is satisfied first. BufferingHints
is a hint, so there are some cases where the service cannot adhere to these conditions strictly. For example, record boundaries might be such that the size is a little over or under the configured buffering size. By default, no encryption is performed. We strongly recommend that you enable encryption to ensure secure data storage in Amazon S3.
A few notes about Amazon Redshift as a destination:
An Amazon Redshift destination requires an S3 bucket as an intermediate location. Kinesis Data Firehose first delivers data to Amazon S3 and then uses COPY
syntax to load data into an Amazon Redshift table. This is specified in the RedshiftDestinationConfiguration.S3Configuration
parameter.
The compression formats SNAPPY
or ZIP
cannot be specified in RedshiftDestinationConfiguration.S3Configuration
because the Amazon Redshift COPY
operation that reads from the S3 bucket doesn't support these compression formats.
We strongly recommend that you use the user name and password you provide exclusively with Kinesis Data Firehose, and that the permissions for the account are restricted for Amazon Redshift INSERT
permissions.
Kinesis Data Firehose assumes the IAM role that is configured as part of the destination. The role should allow the Kinesis Data Firehose principal to assume the role, and the role should have permissions that allow the service to deliver the data. For more information, see Grant Kinesis Data Firehose Access to an Amazon S3 Destination in the Amazon Kinesis Data Firehose Developer Guide.
\"\ + \"documentation\":\"Creates a Firehose delivery stream.
By default, you can create up to 50 delivery streams per Amazon Web Services Region.
This is an asynchronous operation that immediately returns. The initial status of the delivery stream is CREATING
. After the delivery stream is created, its status is ACTIVE
and it now accepts data. If the delivery stream creation fails, the status transitions to CREATING_FAILED
. Attempts to send data to a delivery stream that is not in the ACTIVE
state cause an exception. To check the state of a delivery stream, use DescribeDeliveryStream.
If the status of a delivery stream is CREATING_FAILED
, this status doesn't change, and you can't invoke CreateDeliveryStream
again on it. However, you can invoke the DeleteDeliveryStream operation to delete it.
A Firehose delivery stream can be configured to receive records directly from providers using PutRecord or PutRecordBatch, or it can be configured to use an existing Kinesis stream as its source. To specify a Kinesis data stream as input, set the DeliveryStreamType
parameter to KinesisStreamAsSource
, and provide the Kinesis stream Amazon Resource Name (ARN) and role ARN in the KinesisStreamSourceConfiguration
parameter.
To create a delivery stream with server-side encryption (SSE) enabled, include DeliveryStreamEncryptionConfigurationInput in your request. This is optional. You can also invoke StartDeliveryStreamEncryption to turn on SSE for an existing delivery stream that doesn't have SSE enabled.
A delivery stream is configured with a single destination, such as Amazon Simple Storage Service (Amazon S3), Amazon Redshift, Amazon OpenSearch Service, Amazon OpenSearch Serverless, Splunk, and any custom HTTP endpoint or HTTP endpoints owned by or supported by third-party service providers, including Datadog, Dynatrace, LogicMonitor, MongoDB, New Relic, and Sumo Logic. You must specify only one of the following destination configuration parameters: ExtendedS3DestinationConfiguration
, S3DestinationConfiguration
, ElasticsearchDestinationConfiguration
, RedshiftDestinationConfiguration
, or SplunkDestinationConfiguration
.
When you specify S3DestinationConfiguration
, you can also provide the following optional values: BufferingHints, EncryptionConfiguration
, and CompressionFormat
. By default, if no BufferingHints
value is provided, Firehose buffers data up to 5 MB or for 5 minutes, whichever condition is satisfied first. BufferingHints
is a hint, so there are some cases where the service cannot adhere to these conditions strictly. For example, record boundaries might be such that the size is a little over or under the configured buffering size. By default, no encryption is performed. We strongly recommend that you enable encryption to ensure secure data storage in Amazon S3.
A few notes about Amazon Redshift as a destination:
An Amazon Redshift destination requires an S3 bucket as an intermediate location. Firehose first delivers data to Amazon S3 and then uses COPY
syntax to load data into an Amazon Redshift table. This is specified in the RedshiftDestinationConfiguration.S3Configuration
parameter.
The compression formats SNAPPY
or ZIP
cannot be specified in RedshiftDestinationConfiguration.S3Configuration
because the Amazon Redshift COPY
operation that reads from the S3 bucket doesn't support these compression formats.
We strongly recommend that you use the user name and password you provide exclusively with Firehose, and that the permissions for the account are restricted for Amazon Redshift INSERT
permissions.
Firehose assumes the IAM role that is configured as part of the destination. The role should allow the Firehose principal to assume the role, and the role should have permissions that allow the service to deliver the data. For more information, see Grant Firehose Access to an Amazon S3 Destination in the Amazon Firehose Developer Guide.
\"\ },\ \"DeleteDeliveryStream\":{\ \"name\":\"DeleteDeliveryStream\",\ @@ -99,7 +99,7 @@ - (NSString *)definitionString { {\"shape\":\"ResourceInUseException\"},\ {\"shape\":\"ResourceNotFoundException\"}\ ],\ - \"documentation\":\"Deletes a delivery stream and its data.
To check the state of a delivery stream, use DescribeDeliveryStream. You can delete a delivery stream only if it is in one of the following states: ACTIVE
, DELETING
, CREATING_FAILED
, or DELETING_FAILED
. You can't delete a delivery stream that is in the CREATING
state. While the deletion request is in process, the delivery stream is in the DELETING
state.
While the delivery stream is in the DELETING
state, the service might continue to accept records, but it doesn't make any guarantees with respect to delivering the data. Therefore, as a best practice, first stop any applications that are sending records before you delete a delivery stream.
Deletes a delivery stream and its data.
You can delete a delivery stream only if it is in one of the following states: ACTIVE
, DELETING
, CREATING_FAILED
, or DELETING_FAILED
. You can't delete a delivery stream that is in the CREATING
state. To check the state of a delivery stream, use DescribeDeliveryStream.
DeleteDeliveryStream is an asynchronous API. When an API request to DeleteDeliveryStream succeeds, the delivery stream is marked for deletion, and it goes into the DELETING
state. While the delivery stream is in the DELETING
state, the service might continue to accept records, but it doesn't make any guarantees with respect to delivering the data. Therefore, as a best practice, first stop any applications that are sending records before you delete a delivery stream.
Removal of a delivery stream that is in the DELETING
state is a low priority operation for the service. A stream may remain in the DELETING
state for several minutes. Therefore, as a best practice, applications should not wait for streams in the DELETING
state to be removed.
Writes a single data record into an Amazon Kinesis Data Firehose delivery stream. To write multiple data records into a delivery stream, use PutRecordBatch. Applications using these operations are referred to as producers.
By default, each delivery stream can take in up to 2,000 transactions per second, 5,000 records per second, or 5 MB per second. If you use PutRecord and PutRecordBatch, the limits are an aggregate across these two operations for each delivery stream. For more information about limits and how to request an increase, see Amazon Kinesis Data Firehose Limits.
Kinesis Data Firehose accumulates and publishes a particular metric for a customer account in one-minute intervals. It is possible that the bursts of incoming bytes/records ingested to a delivery stream last only for a few seconds. Due to this, the actual spikes in the traffic might not be fully visible in the customer's 1-minute CloudWatch metrics.
You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KiB in size, and any kind of data. For example, it can be a segment from a log file, geographic location data, website clickstream data, and so on.
Kinesis Data Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\\\\n
) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination.
The PutRecord
operation returns a RecordId
, which is a unique string assigned to each record. Producer applications can use this ID for purposes such as auditability and investigation.
If the PutRecord
operation throws a ServiceUnavailableException
, the API is automatically reinvoked (retried) 3 times. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream.
Re-invoking the Put API operations (for example, PutRecord and PutRecordBatch) can result in data duplicates. For larger data assets, allow for a longer timeout before retrying Put API operations.
Data records sent to Kinesis Data Firehose are stored for 24 hours from the time they are added to a delivery stream as it tries to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available.
Don't concatenate two or more base64 strings to form the data fields of your records. Instead, concatenate the raw data, then perform base64 encoding.
Writes a single data record into an Amazon Firehose delivery stream. To write multiple data records into a delivery stream, use PutRecordBatch. Applications using these operations are referred to as producers.
By default, each delivery stream can take in up to 2,000 transactions per second, 5,000 records per second, or 5 MB per second. If you use PutRecord and PutRecordBatch, the limits are an aggregate across these two operations for each delivery stream. For more information about limits and how to request an increase, see Amazon Firehose Limits.
Firehose accumulates and publishes a particular metric for a customer account in one-minute intervals. It is possible that the bursts of incoming bytes/records ingested to a delivery stream last only for a few seconds. Due to this, the actual spikes in the traffic might not be fully visible in the customer's 1-minute CloudWatch metrics.
You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KiB in size, and any kind of data. For example, it can be a segment from a log file, geographic location data, website clickstream data, and so on.
Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\\\\n
) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination.
The PutRecord
operation returns a RecordId
, which is a unique string assigned to each record. Producer applications can use this ID for purposes such as auditability and investigation.
If the PutRecord
operation throws a ServiceUnavailableException
, the API is automatically reinvoked (retried) 3 times. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream.
Re-invoking the Put API operations (for example, PutRecord and PutRecordBatch) can result in data duplicates. For larger data assets, allow for a longer timeout before retrying Put API operations.
Data records sent to Firehose are stored for 24 hours from the time they are added to a delivery stream as it tries to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available.
Don't concatenate two or more base64 strings to form the data fields of your records. Instead, concatenate the raw data, then perform base64 encoding.
Writes multiple data records into a delivery stream in a single call, which can achieve higher throughput per producer than when writing single records. To write single data records into a delivery stream, use PutRecord. Applications using these operations are referred to as producers.
Kinesis Data Firehose accumulates and publishes a particular metric for a customer account in one-minute intervals. It is possible that the bursts of incoming bytes/records ingested to a delivery stream last only for a few seconds. Due to this, the actual spikes in the traffic might not be fully visible in the customer's 1-minute CloudWatch metrics.
For information about service quota, see Amazon Kinesis Data Firehose Quota.
Each PutRecordBatch request supports up to 500 records. Each record in the request can be as large as 1,000 KB (before base64 encoding), up to a limit of 4 MB for the entire request. These limits cannot be changed.
You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KB in size, and any kind of data. For example, it could be a segment from a log file, geographic location data, website clickstream data, and so on.
Kinesis Data Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\\\\n
) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination.
The PutRecordBatch response includes a count of failed records, FailedPutCount
, and an array of responses, RequestResponses
. Even if the PutRecordBatch call succeeds, the value of FailedPutCount
may be greater than 0, indicating that there are records for which the operation didn't succeed. Each entry in the RequestResponses
array provides additional information about the processed record. It directly correlates with a record in the request array using the same ordering, from the top to the bottom. The response array always includes the same number of records as the request array. RequestResponses
includes both successfully and unsuccessfully processed records. Kinesis Data Firehose tries to process all records in each PutRecordBatch request. A single record failure does not stop the processing of subsequent records.
A successfully processed record includes a RecordId
value, which is unique for the record. An unsuccessfully processed record includes ErrorCode
and ErrorMessage
values. ErrorCode
reflects the type of error, and is one of the following values: ServiceUnavailableException
or InternalFailure
. ErrorMessage
provides more detailed information about the error.
If there is an internal server error or a timeout, the write might have completed or it might have failed. If FailedPutCount
is greater than 0, retry the request, resending only those records that might have failed processing. This minimizes the possible duplicate records and also reduces the total bytes sent (and corresponding charges). We recommend that you handle any duplicates at the destination.
If PutRecordBatch throws ServiceUnavailableException
, the API is automatically reinvoked (retried) 3 times. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream.
Re-invoking the Put API operations (for example, PutRecord and PutRecordBatch) can result in data duplicates. For larger data assets, allow for a longer timeout before retrying Put API operations.
Data records sent to Kinesis Data Firehose are stored for 24 hours from the time they are added to a delivery stream as it attempts to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available.
Don't concatenate two or more base64 strings to form the data fields of your records. Instead, concatenate the raw data, then perform base64 encoding.
Writes multiple data records into a delivery stream in a single call, which can achieve higher throughput per producer than when writing single records. To write single data records into a delivery stream, use PutRecord. Applications using these operations are referred to as producers.
Firehose accumulates and publishes a particular metric for a customer account in one-minute intervals. It is possible that the bursts of incoming bytes/records ingested to a delivery stream last only for a few seconds. Due to this, the actual spikes in the traffic might not be fully visible in the customer's 1-minute CloudWatch metrics.
For information about service quota, see Amazon Firehose Quota.
Each PutRecordBatch request supports up to 500 records. Each record in the request can be as large as 1,000 KB (before base64 encoding), up to a limit of 4 MB for the entire request. These limits cannot be changed.
You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KB in size, and any kind of data. For example, it could be a segment from a log file, geographic location data, website clickstream data, and so on.
Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\\\\n
) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination.
The PutRecordBatch response includes a count of failed records, FailedPutCount
, and an array of responses, RequestResponses
. Even if the PutRecordBatch call succeeds, the value of FailedPutCount
may be greater than 0, indicating that there are records for which the operation didn't succeed. Each entry in the RequestResponses
array provides additional information about the processed record. It directly correlates with a record in the request array using the same ordering, from the top to the bottom. The response array always includes the same number of records as the request array. RequestResponses
includes both successfully and unsuccessfully processed records. Firehose tries to process all records in each PutRecordBatch request. A single record failure does not stop the processing of subsequent records.
A successfully processed record includes a RecordId
value, which is unique for the record. An unsuccessfully processed record includes ErrorCode
and ErrorMessage
values. ErrorCode
reflects the type of error, and is one of the following values: ServiceUnavailableException
or InternalFailure
. ErrorMessage
provides more detailed information about the error.
If there is an internal server error or a timeout, the write might have completed or it might have failed. If FailedPutCount
is greater than 0, retry the request, resending only those records that might have failed processing. This minimizes the possible duplicate records and also reduces the total bytes sent (and corresponding charges). We recommend that you handle any duplicates at the destination.
If PutRecordBatch throws ServiceUnavailableException
, the API is automatically reinvoked (retried) 3 times. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream.
Re-invoking the Put API operations (for example, PutRecord and PutRecordBatch) can result in data duplicates. For larger data assets, allow for a longer timeout before retrying Put API operations.
Data records sent to Firehose are stored for 24 hours from the time they are added to a delivery stream as it attempts to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available.
Don't concatenate two or more base64 strings to form the data fields of your records. Instead, concatenate the raw data, then perform base64 encoding.
Enables server-side encryption (SSE) for the delivery stream.
This operation is asynchronous. It returns immediately. When you invoke it, Kinesis Data Firehose first sets the encryption status of the stream to ENABLING
, and then to ENABLED
. The encryption status of a delivery stream is the Status
property in DeliveryStreamEncryptionConfiguration. If the operation fails, the encryption status changes to ENABLING_FAILED
. You can continue to read and write data to your delivery stream while the encryption status is ENABLING
, but the data is not encrypted. It can take up to 5 seconds after the encryption status changes to ENABLED
before all records written to the delivery stream are encrypted. To find out whether a record or a batch of records was encrypted, check the response elements PutRecordOutput$Encrypted and PutRecordBatchOutput$Encrypted, respectively.
To check the encryption status of a delivery stream, use DescribeDeliveryStream.
Even if encryption is currently enabled for a delivery stream, you can still invoke this operation on it to change the ARN of the CMK or both its type and ARN. If you invoke this method to change the CMK, and the old CMK is of type CUSTOMER_MANAGED_CMK
, Kinesis Data Firehose schedules the grant it had on the old CMK for retirement. If the new CMK is of type CUSTOMER_MANAGED_CMK
, Kinesis Data Firehose creates a grant that enables it to use the new CMK to encrypt and decrypt data and to manage the grant.
For the KMS grant creation to be successful, Kinesis Data Firehose APIs StartDeliveryStreamEncryption
and CreateDeliveryStream
should not be called with session credentials that are more than 6 hours old.
If a delivery stream already has encryption enabled and then you invoke this operation to change the ARN of the CMK or both its type and ARN and you get ENABLING_FAILED
, this only means that the attempt to change the CMK failed. In this case, encryption remains enabled with the old CMK.
If the encryption status of your delivery stream is ENABLING_FAILED
, you can invoke this operation again with a valid CMK. The CMK must be enabled and the key policy mustn't explicitly deny the permission for Kinesis Data Firehose to invoke KMS encrypt and decrypt operations.
You can enable SSE for a delivery stream only if it's a delivery stream that uses DirectPut
as its source.
The StartDeliveryStreamEncryption
and StopDeliveryStreamEncryption
operations have a combined limit of 25 calls per delivery stream per 24 hours. For example, you reach the limit if you call StartDeliveryStreamEncryption
13 times and StopDeliveryStreamEncryption
12 times for the same delivery stream in a 24-hour period.
Enables server-side encryption (SSE) for the delivery stream.
This operation is asynchronous. It returns immediately. When you invoke it, Firehose first sets the encryption status of the stream to ENABLING
, and then to ENABLED
. The encryption status of a delivery stream is the Status
property in DeliveryStreamEncryptionConfiguration. If the operation fails, the encryption status changes to ENABLING_FAILED
. You can continue to read and write data to your delivery stream while the encryption status is ENABLING
, but the data is not encrypted. It can take up to 5 seconds after the encryption status changes to ENABLED
before all records written to the delivery stream are encrypted. To find out whether a record or a batch of records was encrypted, check the response elements PutRecordOutput$Encrypted and PutRecordBatchOutput$Encrypted, respectively.
To check the encryption status of a delivery stream, use DescribeDeliveryStream.
Even if encryption is currently enabled for a delivery stream, you can still invoke this operation on it to change the ARN of the CMK or both its type and ARN. If you invoke this method to change the CMK, and the old CMK is of type CUSTOMER_MANAGED_CMK
, Firehose schedules the grant it had on the old CMK for retirement. If the new CMK is of type CUSTOMER_MANAGED_CMK
, Firehose creates a grant that enables it to use the new CMK to encrypt and decrypt data and to manage the grant.
For the KMS grant creation to be successful, Firehose APIs StartDeliveryStreamEncryption
and CreateDeliveryStream
should not be called with session credentials that are more than 6 hours old.
If a delivery stream already has encryption enabled and then you invoke this operation to change the ARN of the CMK or both its type and ARN and you get ENABLING_FAILED
, this only means that the attempt to change the CMK failed. In this case, encryption remains enabled with the old CMK.
If the encryption status of your delivery stream is ENABLING_FAILED
, you can invoke this operation again with a valid CMK. The CMK must be enabled and the key policy mustn't explicitly deny the permission for Firehose to invoke KMS encrypt and decrypt operations.
You can enable SSE for a delivery stream only if it's a delivery stream that uses DirectPut
as its source.
The StartDeliveryStreamEncryption
and StopDeliveryStreamEncryption
operations have a combined limit of 25 calls per delivery stream per 24 hours. For example, you reach the limit if you call StartDeliveryStreamEncryption
13 times and StopDeliveryStreamEncryption
12 times for the same delivery stream in a 24-hour period.
Disables server-side encryption (SSE) for the delivery stream.
This operation is asynchronous. It returns immediately. When you invoke it, Kinesis Data Firehose first sets the encryption status of the stream to DISABLING
, and then to DISABLED
. You can continue to read and write data to your stream while its status is DISABLING
. It can take up to 5 seconds after the encryption status changes to DISABLED
before all records written to the delivery stream are no longer subject to encryption. To find out whether a record or a batch of records was encrypted, check the response elements PutRecordOutput$Encrypted and PutRecordBatchOutput$Encrypted, respectively.
To check the encryption state of a delivery stream, use DescribeDeliveryStream.
If SSE is enabled using a customer managed CMK and then you invoke StopDeliveryStreamEncryption
, Kinesis Data Firehose schedules the related KMS grant for retirement and then retires it after it ensures that it is finished delivering records to the destination.
The StartDeliveryStreamEncryption
and StopDeliveryStreamEncryption
operations have a combined limit of 25 calls per delivery stream per 24 hours. For example, you reach the limit if you call StartDeliveryStreamEncryption
13 times and StopDeliveryStreamEncryption
12 times for the same delivery stream in a 24-hour period.
Disables server-side encryption (SSE) for the delivery stream.
This operation is asynchronous. It returns immediately. When you invoke it, Firehose first sets the encryption status of the stream to DISABLING
, and then to DISABLED
. You can continue to read and write data to your stream while its status is DISABLING
. It can take up to 5 seconds after the encryption status changes to DISABLED
before all records written to the delivery stream are no longer subject to encryption. To find out whether a record or a batch of records was encrypted, check the response elements PutRecordOutput$Encrypted and PutRecordBatchOutput$Encrypted, respectively.
To check the encryption state of a delivery stream, use DescribeDeliveryStream.
If SSE is enabled using a customer managed CMK and then you invoke StopDeliveryStreamEncryption
, Firehose schedules the related KMS grant for retirement and then retires it after it ensures that it is finished delivering records to the destination.
The StartDeliveryStreamEncryption
and StopDeliveryStreamEncryption
operations have a combined limit of 25 calls per delivery stream per 24 hours. For example, you reach the limit if you call StartDeliveryStreamEncryption
13 times and StopDeliveryStreamEncryption
12 times for the same delivery stream in a 24-hour period.
Updates the specified destination of the specified delivery stream.
Use this operation to change the destination type (for example, to replace the Amazon S3 destination with Amazon Redshift) or change the parameters associated with a destination (for example, to change the bucket name of the Amazon S3 destination). The update might not occur immediately. The target delivery stream remains active while the configurations are updated, so data writes to the delivery stream can continue during this process. The updated configurations are usually effective within a few minutes.
Switching between Amazon OpenSearch Service and other services is not supported. For an Amazon OpenSearch Service destination, you can only update to another Amazon OpenSearch Service destination.
If the destination type is the same, Kinesis Data Firehose merges the configuration parameters specified with the destination configuration that already exists on the delivery stream. If any of the parameters are not specified in the call, the existing values are retained. For example, in the Amazon S3 destination, if EncryptionConfiguration is not specified, then the existing EncryptionConfiguration
is maintained on the destination.
If the destination type is not the same, for example, changing the destination from Amazon S3 to Amazon Redshift, Kinesis Data Firehose does not merge any parameters. In this case, all parameters must be specified.
Kinesis Data Firehose uses CurrentDeliveryStreamVersionId
to avoid race conditions and conflicting merges. This is a required field, and the service updates the configuration only if the existing configuration has a version ID that matches. After the update is applied successfully, the version ID is updated, and can be retrieved using DescribeDeliveryStream. Use the new version ID to set CurrentDeliveryStreamVersionId
in the next call.
Updates the specified destination of the specified delivery stream.
Use this operation to change the destination type (for example, to replace the Amazon S3 destination with Amazon Redshift) or change the parameters associated with a destination (for example, to change the bucket name of the Amazon S3 destination). The update might not occur immediately. The target delivery stream remains active while the configurations are updated, so data writes to the delivery stream can continue during this process. The updated configurations are usually effective within a few minutes.
Switching between Amazon OpenSearch Service and other services is not supported. For an Amazon OpenSearch Service destination, you can only update to another Amazon OpenSearch Service destination.
If the destination type is the same, Firehose merges the configuration parameters specified with the destination configuration that already exists on the delivery stream. If any of the parameters are not specified in the call, the existing values are retained. For example, in the Amazon S3 destination, if EncryptionConfiguration is not specified, then the existing EncryptionConfiguration
is maintained on the destination.
If the destination type is not the same, for example, changing the destination from Amazon S3 to Amazon Redshift, Firehose does not merge any parameters. In this case, all parameters must be specified.
Firehose uses CurrentDeliveryStreamVersionId
to avoid race conditions and conflicting merges. This is a required field, and the service updates the configuration only if the existing configuration has a version ID that matches. After the update is applied successfully, the version ID is updated, and can be retrieved using DescribeDeliveryStream. Use the new version ID to set CurrentDeliveryStreamVersionId
in the next call.
The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data Firehose for calling the Serverless offering for Amazon OpenSearch Service Configuration API and for indexing documents.
\"\ + \"documentation\":\"The Amazon Resource Name (ARN) of the IAM role to be assumed by Firehose for calling the Serverless offering for Amazon OpenSearch Service Configuration API and for indexing documents.
\"\ },\ \"CollectionEndpoint\":{\ \"shape\":\"AmazonOpenSearchServerlessCollectionEndpoint\",\ @@ -318,11 +318,11 @@ - (NSString *)definitionString { },\ \"RetryOptions\":{\ \"shape\":\"AmazonOpenSearchServerlessRetryOptions\",\ - \"documentation\":\"The retry behavior in case Kinesis Data Firehose is unable to deliver documents to the Serverless offering for Amazon OpenSearch Service. The default value is 300 (5 minutes).
\"\ + \"documentation\":\"The retry behavior in case Firehose is unable to deliver documents to the Serverless offering for Amazon OpenSearch Service. The default value is 300 (5 minutes).
\"\ },\ \"S3BackupMode\":{\ \"shape\":\"AmazonOpenSearchServerlessS3BackupMode\",\ - \"documentation\":\"Defines how documents should be delivered to Amazon S3. When it is set to FailedDocumentsOnly, Kinesis Data Firehose writes any documents that could not be indexed to the configured Amazon S3 destination, with AmazonOpenSearchService-failed/ appended to the key prefix. When set to AllDocuments, Kinesis Data Firehose delivers all incoming records to Amazon S3, and also writes failed documents with AmazonOpenSearchService-failed/ appended to the prefix.
\"\ + \"documentation\":\"Defines how documents should be delivered to Amazon S3. When it is set to FailedDocumentsOnly, Firehose writes any documents that could not be indexed to the configured Amazon S3 destination, with AmazonOpenSearchService-failed/ appended to the key prefix. When set to AllDocuments, Firehose delivers all incoming records to Amazon S3, and also writes failed documents with AmazonOpenSearchService-failed/ appended to the prefix.
\"\ },\ \"S3Configuration\":{\"shape\":\"S3DestinationConfiguration\"},\ \"ProcessingConfiguration\":{\"shape\":\"ProcessingConfiguration\"},\ @@ -370,7 +370,7 @@ - (NSString *)definitionString { \"members\":{\ \"RoleARN\":{\ \"shape\":\"RoleARN\",\ - \"documentation\":\"The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data Firehose for calling the Serverless offering for Amazon OpenSearch Service Configuration API and for indexing documents.
\"\ + \"documentation\":\"The Amazon Resource Name (ARN) of the IAM role to be assumed by Firehose for calling the Serverless offering for Amazon OpenSearch Service Configuration API and for indexing documents.
\"\ },\ \"CollectionEndpoint\":{\ \"shape\":\"AmazonOpenSearchServerlessCollectionEndpoint\",\ @@ -386,7 +386,7 @@ - (NSString *)definitionString { },\ \"RetryOptions\":{\ \"shape\":\"AmazonOpenSearchServerlessRetryOptions\",\ - \"documentation\":\"The retry behavior in case Kinesis Data Firehose is unable to deliver documents to the Serverless offering for Amazon OpenSearch Service. The default value is 300 (5 minutes).
\"\ + \"documentation\":\"The retry behavior in case Firehose is unable to deliver documents to the Serverless offering for Amazon OpenSearch Service. The default value is 300 (5 minutes).
\"\ },\ \"S3Update\":{\"shape\":\"S3DestinationUpdate\"},\ \"ProcessingConfiguration\":{\"shape\":\"ProcessingConfiguration\"},\ @@ -410,10 +410,10 @@ - (NSString *)definitionString { \"members\":{\ \"DurationInSeconds\":{\ \"shape\":\"AmazonOpenSearchServerlessRetryDurationInSeconds\",\ - \"documentation\":\"After an initial failure to deliver to the Serverless offering for Amazon OpenSearch Service, the total amount of time during which Kinesis Data Firehose retries delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. Default value is 300 seconds (5 minutes). A value of 0 (zero) results in no retries.
\"\ + \"documentation\":\"After an initial failure to deliver to the Serverless offering for Amazon OpenSearch Service, the total amount of time during which Firehose retries delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. Default value is 300 seconds (5 minutes). A value of 0 (zero) results in no retries.
\"\ }\ },\ - \"documentation\":\"Configures retry behavior in case Kinesis Data Firehose is unable to deliver documents to the Serverless offering for Amazon OpenSearch Service.
\"\ + \"documentation\":\"Configures retry behavior in case Firehose is unable to deliver documents to the Serverless offering for Amazon OpenSearch Service.
\"\ },\ \"AmazonOpenSearchServerlessS3BackupMode\":{\ \"type\":\"string\",\ @@ -462,7 +462,7 @@ - (NSString *)definitionString { \"members\":{\ \"RoleARN\":{\ \"shape\":\"RoleARN\",\ - \"documentation\":\"The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data Firehose for calling the Amazon OpenSearch Service Configuration API and for indexing documents.
\"\ + \"documentation\":\"The Amazon Resource Name (ARN) of the IAM role to be assumed by Firehose for calling the Amazon OpenSearch Service Configuration API and for indexing documents.
\"\ },\ \"DomainARN\":{\ \"shape\":\"AmazonopensearchserviceDomainARN\",\ @@ -478,7 +478,7 @@ - (NSString *)definitionString { },\ \"TypeName\":{\ \"shape\":\"AmazonopensearchserviceTypeName\",\ - \"documentation\":\"The Amazon OpenSearch Service type name. For Elasticsearch 6.x, there can be only one type per index. If you try to specify a new type for an existing index that already has another type, Kinesis Data Firehose returns an error during run time.
\"\ + \"documentation\":\"The Amazon OpenSearch Service type name. For Elasticsearch 6.x, there can be only one type per index. If you try to specify a new type for an existing index that already has another type, Firehose returns an error during run time.
\"\ },\ \"IndexRotationPeriod\":{\ \"shape\":\"AmazonopensearchserviceIndexRotationPeriod\",\ @@ -490,11 +490,11 @@ - (NSString *)definitionString { },\ \"RetryOptions\":{\ \"shape\":\"AmazonopensearchserviceRetryOptions\",\ - \"documentation\":\"The retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon OpenSearch Service. The default value is 300 (5 minutes).
\"\ + \"documentation\":\"The retry behavior in case Firehose is unable to deliver documents to Amazon OpenSearch Service. The default value is 300 (5 minutes).
\"\ },\ \"S3BackupMode\":{\ \"shape\":\"AmazonopensearchserviceS3BackupMode\",\ - \"documentation\":\"Defines how documents should be delivered to Amazon S3. When it is set to FailedDocumentsOnly, Kinesis Data Firehose writes any documents that could not be indexed to the configured Amazon S3 destination, with AmazonOpenSearchService-failed/ appended to the key prefix. When set to AllDocuments, Kinesis Data Firehose delivers all incoming records to Amazon S3, and also writes failed documents with AmazonOpenSearchService-failed/ appended to the prefix.
\"\ + \"documentation\":\"Defines how documents should be delivered to Amazon S3. When it is set to FailedDocumentsOnly, Firehose writes any documents that could not be indexed to the configured Amazon S3 destination, with AmazonOpenSearchService-failed/ appended to the key prefix. When set to AllDocuments, Firehose delivers all incoming records to Amazon S3, and also writes failed documents with AmazonOpenSearchService-failed/ appended to the prefix.
\"\ },\ \"S3Configuration\":{\"shape\":\"S3DestinationConfiguration\"},\ \"ProcessingConfiguration\":{\"shape\":\"ProcessingConfiguration\"},\ @@ -502,7 +502,7 @@ - (NSString *)definitionString { \"VpcConfiguration\":{\"shape\":\"VpcConfiguration\"},\ \"DocumentIdOptions\":{\ \"shape\":\"DocumentIdOptions\",\ - \"documentation\":\"Indicates the method for setting up document ID. The supported methods are Kinesis Data Firehose generated document ID and OpenSearch Service generated document ID.
\"\ + \"documentation\":\"Indicates the method for setting up document ID. The supported methods are Firehose generated document ID and OpenSearch Service generated document ID.
\"\ }\ },\ \"documentation\":\"Describes the configuration of a destination in Amazon OpenSearch Service
\"\ @@ -520,7 +520,7 @@ - (NSString *)definitionString { },\ \"ClusterEndpoint\":{\ \"shape\":\"AmazonopensearchserviceClusterEndpoint\",\ - \"documentation\":\"The endpoint to use when communicating with the cluster. Kinesis Data Firehose uses either this ClusterEndpoint or the DomainARN field to send data to Amazon OpenSearch Service.
\"\ + \"documentation\":\"The endpoint to use when communicating with the cluster. Firehose uses either this ClusterEndpoint or the DomainARN field to send data to Amazon OpenSearch Service.
\"\ },\ \"IndexName\":{\ \"shape\":\"AmazonopensearchserviceIndexName\",\ @@ -552,7 +552,7 @@ - (NSString *)definitionString { \"VpcConfigurationDescription\":{\"shape\":\"VpcConfigurationDescription\"},\ \"DocumentIdOptions\":{\ \"shape\":\"DocumentIdOptions\",\ - \"documentation\":\"Indicates the method for setting up document ID. The supported methods are Kinesis Data Firehose generated document ID and OpenSearch Service generated document ID.
\"\ + \"documentation\":\"Indicates the method for setting up document ID. The supported methods are Firehose generated document ID and OpenSearch Service generated document ID.
\"\ }\ },\ \"documentation\":\"The destination description in Amazon OpenSearch Service.
\"\ @@ -562,7 +562,7 @@ - (NSString *)definitionString { \"members\":{\ \"RoleARN\":{\ \"shape\":\"RoleARN\",\ - \"documentation\":\"The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data Firehose for calling the Amazon OpenSearch Service Configuration API and for indexing documents.
\"\ + \"documentation\":\"The Amazon Resource Name (ARN) of the IAM role to be assumed by Firehose for calling the Amazon OpenSearch Service Configuration API and for indexing documents.
\"\ },\ \"DomainARN\":{\ \"shape\":\"AmazonopensearchserviceDomainARN\",\ @@ -578,7 +578,7 @@ - (NSString *)definitionString { },\ \"TypeName\":{\ \"shape\":\"AmazonopensearchserviceTypeName\",\ - \"documentation\":\"The Amazon OpenSearch Service type name. For Elasticsearch 6.x, there can be only one type per index. If you try to specify a new type for an existing index that already has another type, Kinesis Data Firehose returns an error during runtime.
If you upgrade Elasticsearch from 6.x to 7.x and donât update your delivery stream, Kinesis Data Firehose still delivers data to Elasticsearch with the old index name and type name. If you want to update your delivery stream with a new index name, provide an empty string for TypeName.
\"\ + \"documentation\":\"The Amazon OpenSearch Service type name. For Elasticsearch 6.x, there can be only one type per index. If you try to specify a new type for an existing index that already has another type, Firehose returns an error during runtime.
If you upgrade Elasticsearch from 6.x to 7.x and donât update your delivery stream, Firehose still delivers data to Elasticsearch with the old index name and type name. If you want to update your delivery stream with a new index name, provide an empty string for TypeName.
\"\ },\ \"IndexRotationPeriod\":{\ \"shape\":\"AmazonopensearchserviceIndexRotationPeriod\",\ @@ -590,14 +590,14 @@ - (NSString *)definitionString { },\ \"RetryOptions\":{\ \"shape\":\"AmazonopensearchserviceRetryOptions\",\ - \"documentation\":\"The retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon OpenSearch Service. The default value is 300 (5 minutes).
\"\ + \"documentation\":\"The retry behavior in case Firehose is unable to deliver documents to Amazon OpenSearch Service. The default value is 300 (5 minutes).
\"\ },\ \"S3Update\":{\"shape\":\"S3DestinationUpdate\"},\ \"ProcessingConfiguration\":{\"shape\":\"ProcessingConfiguration\"},\ \"CloudWatchLoggingOptions\":{\"shape\":\"CloudWatchLoggingOptions\"},\ \"DocumentIdOptions\":{\ \"shape\":\"DocumentIdOptions\",\ - \"documentation\":\"Indicates the method for setting up document ID. The supported methods are Kinesis Data Firehose generated document ID and OpenSearch Service generated document ID.
\"\ + \"documentation\":\"Indicates the method for setting up document ID. The supported methods are Firehose generated document ID and OpenSearch Service generated document ID.
\"\ }\ },\ \"documentation\":\"Describes an update for a destination in Amazon OpenSearch Service.
\"\ @@ -634,10 +634,10 @@ - (NSString *)definitionString { \"members\":{\ \"DurationInSeconds\":{\ \"shape\":\"AmazonopensearchserviceRetryDurationInSeconds\",\ - \"documentation\":\"After an initial failure to deliver to Amazon OpenSearch Service, the total amount of time during which Kinesis Data Firehose retries delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. Default value is 300 seconds (5 minutes). A value of 0 (zero) results in no retries.
\"\ + \"documentation\":\"After an initial failure to deliver to Amazon OpenSearch Service, the total amount of time during which Firehose retries delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. Default value is 300 seconds (5 minutes). A value of 0 (zero) results in no retries.
\"\ }\ },\ - \"documentation\":\"Configures retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon OpenSearch Service.
\"\ + \"documentation\":\"Configures retry behavior in case Firehose is unable to deliver documents to Amazon OpenSearch Service.
\"\ },\ \"AmazonopensearchserviceS3BackupMode\":{\ \"type\":\"string\",\ @@ -693,7 +693,7 @@ - (NSString *)definitionString { \"documentation\":\"Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300. This parameter is optional but if you specify a value for it, you must also specify a value for SizeInMBs
, and vice versa.
Describes hints for the buffering to perform before delivering data to the destination. These options are treated as hints, and therefore Kinesis Data Firehose might choose to use different values when it is optimal. The SizeInMBs
and IntervalInSeconds
parameters are optional. However, if you specify a value for one of them, you must also provide a value for the other.
Describes hints for the buffering to perform before delivering data to the destination. These options are treated as hints, and therefore Firehose might choose to use different values when it is optimal. The SizeInMBs
and IntervalInSeconds
parameters are optional. However, if you specify a value for one of them, you must also provide a value for the other.
Optional parameters to use with the Amazon Redshift COPY
command. For more information, see the \\\"Optional Parameters\\\" section of Amazon Redshift COPY command. Some possible examples that would apply to Kinesis Data Firehose are as follows:
delimiter '\\\\t' lzop;
- fields are delimited with \\\"\\\\t\\\" (TAB character) and compressed using lzop.
delimiter '|'
- fields are delimited with \\\"|\\\" (this is the default delimiter).
delimiter '|' escape
- the delimiter should be escaped.
fixedwidth 'venueid:3,venuename:25,venuecity:12,venuestate:2,venueseats:6'
- fields are fixed width in the source, with each width specified after every column in the table.
JSON 's3://mybucket/jsonpaths.txt'
- data is in JSON format, and the path specified is the format of the data.
For more examples, see Amazon Redshift COPY command examples.
\"\ + \"documentation\":\"Optional parameters to use with the Amazon Redshift COPY
command. For more information, see the \\\"Optional Parameters\\\" section of Amazon Redshift COPY command. Some possible examples that would apply to Firehose are as follows:
delimiter '\\\\t' lzop;
- fields are delimited with \\\"\\\\t\\\" (TAB character) and compressed using lzop.
delimiter '|'
- fields are delimited with \\\"|\\\" (this is the default delimiter).
delimiter '|' escape
- the delimiter should be escaped.
fixedwidth 'venueid:3,venuename:25,venuecity:12,venuestate:2,venueseats:6'
- fields are fixed width in the source, with each width specified after every column in the table.
JSON 's3://mybucket/jsonpaths.txt'
- data is in JSON format, and the path specified is the format of the data.
For more examples, see Amazon Redshift COPY command examples.
\"\ }\ },\ \"documentation\":\"Describes a COPY
command for Amazon Redshift.
The destination in the Serverless offering for Amazon OpenSearch Service. You can specify only one destination.
\"\ },\ - \"MSKSourceConfiguration\":{\"shape\":\"MSKSourceConfiguration\"}\ + \"MSKSourceConfiguration\":{\"shape\":\"MSKSourceConfiguration\"},\ + \"SnowflakeDestinationConfiguration\":{\ + \"shape\":\"SnowflakeDestinationConfiguration\",\ + \"documentation\":\"Configure Snowflake destination
\"\ + }\ }\ },\ \"CreateDeliveryStreamOutput\":{\ @@ -853,6 +857,11 @@ - (NSString *)definitionString { }\ }\ },\ + \"CustomTimeZone\":{\ + \"type\":\"string\",\ + \"max\":50,\ + \"min\":0\ + },\ \"Data\":{\ \"type\":\"blob\",\ \"max\":1024000,\ @@ -867,18 +876,18 @@ - (NSString *)definitionString { },\ \"InputFormatConfiguration\":{\ \"shape\":\"InputFormatConfiguration\",\ - \"documentation\":\"Specifies the deserializer that you want Kinesis Data Firehose to use to convert the format of your data from JSON. This parameter is required if Enabled
is set to true.
Specifies the deserializer that you want Firehose to use to convert the format of your data from JSON. This parameter is required if Enabled
is set to true.
Specifies the serializer that you want Kinesis Data Firehose to use to convert the format of your data to the Parquet or ORC format. This parameter is required if Enabled
is set to true.
Specifies the serializer that you want Firehose to use to convert the format of your data to the Parquet or ORC format. This parameter is required if Enabled
is set to true.
Defaults to true
. Set it to false
if you want to disable format conversion while preserving the configuration details.
Specifies that you want Kinesis Data Firehose to convert data from the JSON format to the Parquet or ORC format before writing it to Amazon S3. Kinesis Data Firehose uses the serializer and deserializer that you specify, in addition to the column information from the Amazon Web Services Glue table, to deserialize your input data from JSON and then serialize it to the Parquet or ORC format. For more information, see Kinesis Data Firehose Record Format Conversion.
\"\ + \"documentation\":\"Specifies that you want Firehose to convert data from the JSON format to the Parquet or ORC format before writing it to Amazon S3. Firehose uses the serializer and deserializer that you specify, in addition to the column information from the Amazon Web Services Glue table, to deserialize your input data from JSON and then serialize it to the Parquet or ORC format. For more information, see Firehose Record Format Conversion.
\"\ },\ \"DataTableColumns\":{\ \"type\":\"string\",\ @@ -909,7 +918,7 @@ - (NSString *)definitionString { },\ \"AllowForceDelete\":{\ \"shape\":\"BooleanObject\",\ - \"documentation\":\"Set this to true if you want to delete the delivery stream even if Kinesis Data Firehose is unable to retire the grant for the CMK. Kinesis Data Firehose might be unable to retire the grant due to a customer error, such as when the CMK or the grant are in an invalid state. If you force deletion, you can then use the RevokeGrant operation to revoke the grant you gave to Kinesis Data Firehose. If a failure to retire the grant happens due to an Amazon Web Services KMS issue, Kinesis Data Firehose keeps retrying the delete operation.
The default value is false.
\"\ + \"documentation\":\"Set this to true if you want to delete the delivery stream even if Firehose is unable to retire the grant for the CMK. Firehose might be unable to retire the grant due to a customer error, such as when the CMK or the grant are in an invalid state. If you force deletion, you can then use the RevokeGrant operation to revoke the grant you gave to Firehose. If a failure to retire the grant happens due to an Amazon Web Services KMS issue, Firehose keeps retrying the delete operation.
The default value is false.
\"\ }\ }\ },\ @@ -1016,11 +1025,11 @@ - (NSString *)definitionString { \"members\":{\ \"KeyARN\":{\ \"shape\":\"AWSKMSKeyARN\",\ - \"documentation\":\"If you set KeyType
to CUSTOMER_MANAGED_CMK
, you must specify the Amazon Resource Name (ARN) of the CMK. If you set KeyType
to Amazon Web Services_OWNED_CMK
, Kinesis Data Firehose uses a service-account CMK.
If you set KeyType
to CUSTOMER_MANAGED_CMK
, you must specify the Amazon Resource Name (ARN) of the CMK. If you set KeyType
to Amazon Web Services_OWNED_CMK
, Firehose uses a service-account CMK.
Indicates the type of customer master key (CMK) to use for encryption. The default setting is Amazon Web Services_OWNED_CMK
. For more information about CMKs, see Customer Master Keys (CMKs). When you invoke CreateDeliveryStream or StartDeliveryStreamEncryption with KeyType
set to CUSTOMER_MANAGED_CMK, Kinesis Data Firehose invokes the Amazon KMS operation CreateGrant to create a grant that allows the Kinesis Data Firehose service to use the customer managed CMK to perform encryption and decryption. Kinesis Data Firehose manages that grant.
When you invoke StartDeliveryStreamEncryption to change the CMK for a delivery stream that is encrypted with a customer managed CMK, Kinesis Data Firehose schedules the grant it had on the old CMK for retirement.
You can use a CMK of type CUSTOMER_MANAGED_CMK to encrypt up to 500 delivery streams. If a CreateDeliveryStream or StartDeliveryStreamEncryption operation exceeds this limit, Kinesis Data Firehose throws a LimitExceededException
.
To encrypt your delivery stream, use symmetric CMKs. Kinesis Data Firehose doesn't support asymmetric CMKs. For information about symmetric and asymmetric CMKs, see About Symmetric and Asymmetric CMKs in the Amazon Web Services Key Management Service developer guide.
Indicates the type of customer master key (CMK) to use for encryption. The default setting is AWS_OWNED_CMK
. For more information about CMKs, see Customer Master Keys (CMKs). When you invoke CreateDeliveryStream or StartDeliveryStreamEncryption with KeyType
set to CUSTOMER_MANAGED_CMK, Firehose invokes the Amazon KMS operation CreateGrant to create a grant that allows the Firehose service to use the customer managed CMK to perform encryption and decryption. Firehose manages that grant.
When you invoke StartDeliveryStreamEncryption to change the CMK for a delivery stream that is encrypted with a customer managed CMK, Firehose schedules the grant it had on the old CMK for retirement.
You can use a CMK of type CUSTOMER_MANAGED_CMK to encrypt up to 500 delivery streams. If a CreateDeliveryStream or StartDeliveryStreamEncryption operation exceeds this limit, Firehose throws a LimitExceededException
.
To encrypt your delivery stream, use symmetric CMKs. Firehose doesn't support asymmetric CMKs. For information about symmetric and asymmetric CMKs, see About Symmetric and Asymmetric CMKs in the Amazon Web Services Key Management Service developer guide.
Specifies the type and Amazon Resource Name (ARN) of the CMK to use for Server-Side Encryption (SSE).
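To make the KeyType semantics above concrete, here is a hedged Objective-C sketch of enabling SSE with a customer managed CMK through this SDK. The class, method, and enum names (AWSFirehose, AWSFirehoseStartDeliveryStreamEncryptionInput, AWSFirehoseDeliveryStreamEncryptionConfigurationInput, AWSFirehoseKeyTypeCustomerManagedCmk) are assumed from the generator's usual AWSFirehose<Shape> pattern; they are not part of this diff.

#import <AWSKinesisFirehose/AWSKinesisFirehose.h>

// Sketch: switch a delivery stream to a customer managed CMK.
AWSFirehoseDeliveryStreamEncryptionConfigurationInput *sse =
    [AWSFirehoseDeliveryStreamEncryptionConfigurationInput new];
sse.keyType = AWSFirehoseKeyTypeCustomerManagedCmk; // default is AWS_OWNED_CMK
sse.keyARN = @"arn:aws:kms:us-east-1:111122223333:key/EXAMPLE"; // required only for CUSTOMER_MANAGED_CMK

AWSFirehoseStartDeliveryStreamEncryptionInput *request =
    [AWSFirehoseStartDeliveryStreamEncryptionInput new];
request.deliveryStreamName = @"my-delivery-stream"; // placeholder name
request.deliveryStreamEncryptionConfigurationInput = sse;

[[[AWSFirehose defaultFirehose] startDeliveryStreamEncryption:request]
    continueWithBlock:^id _Nullable(AWSTask * _Nonnull task) {
        if (task.error) {
            NSLog(@"StartDeliveryStreamEncryption failed: %@", task.error);
        }
        return nil;
    }];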
\"\ @@ -1104,7 +1113,7 @@ - (NSString *)definitionString { },\ \"ExclusiveStartDestinationId\":{\ \"shape\":\"DestinationId\",\ - \"documentation\":\"The ID of the destination to start returning the destination information. Kinesis Data Firehose supports one destination per delivery stream.
\"\ + \"documentation\":\"The ID of the destination to start returning the destination information. Firehose supports one destination per delivery stream.
\"\ }\ }\ },\ @@ -1128,14 +1137,14 @@ - (NSString *)definitionString { \"members\":{\ \"OpenXJsonSerDe\":{\ \"shape\":\"OpenXJsonSerDe\",\ - \"documentation\":\"The OpenX SerDe. Used by Kinesis Data Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the native Hive / HCatalog JsonSerDe.
\"\ + \"documentation\":\"The OpenX SerDe. Used by Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the native Hive / HCatalog JsonSerDe.
\"\ },\ \"HiveJsonSerDe\":{\ \"shape\":\"HiveJsonSerDe\",\ - \"documentation\":\"The native Hive / HCatalog JsonSerDe. Used by Kinesis Data Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the OpenX SerDe.
\"\ + \"documentation\":\"The native Hive / HCatalog JsonSerDe. Used by Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the OpenX SerDe.
\"\ }\ },\ - \"documentation\":\"The deserializer you want Kinesis Data Firehose to use for converting the input data from JSON. Kinesis Data Firehose then serializes the data to its final format using the Serializer. Kinesis Data Firehose supports two types of deserializers: the Apache Hive JSON SerDe and the OpenX JSON SerDe.
\"\ + \"documentation\":\"The deserializer you want Firehose to use for converting the input data from JSON. Firehose then serializes the data to its final format using the Serializer. Firehose supports two types of deserializers: the Apache Hive JSON SerDe and the OpenX JSON SerDe.
\"\ },\ \"DestinationDescription\":{\ \"type\":\"structure\",\ @@ -1173,6 +1182,10 @@ - (NSString *)definitionString { \"shape\":\"HttpEndpointDestinationDescription\",\ \"documentation\":\"Describes the specified HTTP endpoint destination.
\"\ },\ + \"SnowflakeDestinationDescription\":{\ + \"shape\":\"SnowflakeDestinationDescription\",\ + \"documentation\":\"Optional description for the destination
\"\ + },\ \"AmazonOpenSearchServerlessDestinationDescription\":{\ \"shape\":\"AmazonOpenSearchServerlessDestinationDescription\",\ \"documentation\":\"The destination in the Serverless offering for Amazon OpenSearch Service.
\"\ @@ -1196,21 +1209,21 @@ - (NSString *)definitionString { \"members\":{\ \"DefaultDocumentIdFormat\":{\ \"shape\":\"DefaultDocumentIdFormat\",\ - \"documentation\":\"When the FIREHOSE_DEFAULT
option is chosen, Kinesis Data Firehose generates a unique document ID for each record based on a unique internal identifier. The generated document ID is stable across multiple delivery attempts, which helps prevent the same record from being indexed multiple times with different document IDs.
When the NO_DOCUMENT_ID
option is chosen, Kinesis Data Firehose does not include any document IDs in the requests it sends to the Amazon OpenSearch Service. This causes the Amazon OpenSearch Service domain to generate document IDs. In case of multiple delivery attempts, this may cause the same record to be indexed more than once with different document IDs. This option enables write-heavy operations, such as the ingestion of logs and observability data, to consume less resources in the Amazon OpenSearch Service domain, resulting in improved performance.
When the FIREHOSE_DEFAULT
option is chosen, Firehose generates a unique document ID for each record based on a unique internal identifier. The generated document ID is stable across multiple delivery attempts, which helps prevent the same record from being indexed multiple times with different document IDs.
When the NO_DOCUMENT_ID
option is chosen, Firehose does not include any document IDs in the requests it sends to the Amazon OpenSearch Service. This causes the Amazon OpenSearch Service domain to generate document IDs. In case of multiple delivery attempts, this may cause the same record to be indexed more than once with different document IDs. This option enables write-heavy operations, such as the ingestion of logs and observability data, to consume less resources in the Amazon OpenSearch Service domain, resulting in improved performance.
Indicates the method for setting up document ID. The supported methods are Kinesis Data Firehose generated document ID and OpenSearch Service generated document ID.
\"\ + \"documentation\":\"Indicates the method for setting up document ID. The supported methods are Firehose generated document ID and OpenSearch Service generated document ID.
\"\ },\ \"DynamicPartitioningConfiguration\":{\ \"type\":\"structure\",\ \"members\":{\ \"RetryOptions\":{\ \"shape\":\"RetryOptions\",\ - \"documentation\":\"The retry behavior in case Kinesis Data Firehose is unable to deliver data to an Amazon S3 prefix.
\"\ + \"documentation\":\"The retry behavior in case Firehose is unable to deliver data to an Amazon S3 prefix.
\"\ },\ \"Enabled\":{\ \"shape\":\"BooleanObject\",\ - \"documentation\":\"Specifies that the dynamic partitioning is enabled for this Kinesis Data Firehose delivery stream.
\"\ + \"documentation\":\"Specifies that the dynamic partitioning is enabled for this Firehose delivery stream.
\"\ }\ },\ \"documentation\":\"The configuration of the dynamic partitioning mechanism that creates smaller data sets from the streaming data by partitioning it based on partition keys. Currently, dynamic partitioning is only supported for Amazon S3 destinations.
\"\ @@ -1255,7 +1268,7 @@ - (NSString *)definitionString { \"members\":{\ \"RoleARN\":{\ \"shape\":\"RoleARN\",\ - \"documentation\":\"The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data Firehose for calling the Amazon ES Configuration API and for indexing documents. For more information, see Grant Kinesis Data Firehose Access to an Amazon S3 Destination and Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.
\"\ + \"documentation\":\"The Amazon Resource Name (ARN) of the IAM role to be assumed by Firehose for calling the Amazon ES Configuration API and for indexing documents. For more information, see Grant Firehose Access to an Amazon S3 Destination and Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.
\"\ },\ \"DomainARN\":{\ \"shape\":\"ElasticsearchDomainARN\",\ @@ -1271,7 +1284,7 @@ - (NSString *)definitionString { },\ \"TypeName\":{\ \"shape\":\"ElasticsearchTypeName\",\ - \"documentation\":\"The Elasticsearch type name. For Elasticsearch 6.x, there can be only one type per index. If you try to specify a new type for an existing index that already has another type, Kinesis Data Firehose returns an error during run time.
For Elasticsearch 7.x, don't specify a TypeName
.
The Elasticsearch type name. For Elasticsearch 6.x, there can be only one type per index. If you try to specify a new type for an existing index that already has another type, Firehose returns an error during run time.
For Elasticsearch 7.x, don't specify a TypeName
.
The retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon ES. The default value is 300 (5 minutes).
\"\ + \"documentation\":\"The retry behavior in case Firehose is unable to deliver documents to Amazon ES. The default value is 300 (5 minutes).
\"\ },\ \"S3BackupMode\":{\ \"shape\":\"ElasticsearchS3BackupMode\",\ - \"documentation\":\"Defines how documents should be delivered to Amazon S3. When it is set to FailedDocumentsOnly
, Kinesis Data Firehose writes any documents that could not be indexed to the configured Amazon S3 destination, with AmazonOpenSearchService-failed/
appended to the key prefix. When set to AllDocuments
, Kinesis Data Firehose delivers all incoming records to Amazon S3, and also writes failed documents with AmazonOpenSearchService-failed/
appended to the prefix. For more information, see Amazon S3 Backup for the Amazon ES Destination. Default value is FailedDocumentsOnly
.
You can't change this backup mode after you create the delivery stream.
\"\ + \"documentation\":\"Defines how documents should be delivered to Amazon S3. When it is set to FailedDocumentsOnly
, Firehose writes any documents that could not be indexed to the configured Amazon S3 destination, with AmazonOpenSearchService-failed/
appended to the key prefix. When set to AllDocuments
, Firehose delivers all incoming records to Amazon S3, and also writes failed documents with AmazonOpenSearchService-failed/
appended to the prefix. For more information, see Amazon S3 Backup for the Amazon ES Destination. Default value is FailedDocumentsOnly
.
You can't change this backup mode after you create the delivery stream.
\"\ },\ \"S3Configuration\":{\ \"shape\":\"S3DestinationConfiguration\",\ @@ -1307,7 +1320,7 @@ - (NSString *)definitionString { },\ \"DocumentIdOptions\":{\ \"shape\":\"DocumentIdOptions\",\ - \"documentation\":\"Indicates the method for setting up document ID. The supported methods are Kinesis Data Firehose generated document ID and OpenSearch Service generated document ID.
\"\ + \"documentation\":\"Indicates the method for setting up document ID. The supported methods are Firehose generated document ID and OpenSearch Service generated document ID.
\"\ }\ },\ \"documentation\":\"Describes the configuration of a destination in Amazon ES.
\"\ @@ -1321,11 +1334,11 @@ - (NSString *)definitionString { },\ \"DomainARN\":{\ \"shape\":\"ElasticsearchDomainARN\",\ - \"documentation\":\"The ARN of the Amazon ES domain. For more information, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.
Kinesis Data Firehose uses either ClusterEndpoint
or DomainARN
to send data to Amazon ES.
The ARN of the Amazon ES domain. For more information, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.
Firehose uses either ClusterEndpoint
or DomainARN
to send data to Amazon ES.
The endpoint to use when communicating with the cluster. Kinesis Data Firehose uses either this ClusterEndpoint
or the DomainARN
field to send data to Amazon ES.
The endpoint to use when communicating with the cluster. Firehose uses either this ClusterEndpoint
or the DomainARN
field to send data to Amazon ES.
Indicates the method for setting up document ID. The supported methods are Kinesis Data Firehose generated document ID and OpenSearch Service generated document ID.
\"\ + \"documentation\":\"Indicates the method for setting up document ID. The supported methods are Firehose generated document ID and OpenSearch Service generated document ID.
\"\ }\ },\ \"documentation\":\"The destination description in Amazon ES.
\"\ @@ -1379,7 +1392,7 @@ - (NSString *)definitionString { \"members\":{\ \"RoleARN\":{\ \"shape\":\"RoleARN\",\ - \"documentation\":\"The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data Firehose for calling the Amazon ES Configuration API and for indexing documents. For more information, see Grant Kinesis Data Firehose Access to an Amazon S3 Destination and Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.
\"\ + \"documentation\":\"The Amazon Resource Name (ARN) of the IAM role to be assumed by Firehose for calling the Amazon ES Configuration API and for indexing documents. For more information, see Grant Firehose Access to an Amazon S3 Destination and Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.
\"\ },\ \"DomainARN\":{\ \"shape\":\"ElasticsearchDomainARN\",\ @@ -1395,7 +1408,7 @@ - (NSString *)definitionString { },\ \"TypeName\":{\ \"shape\":\"ElasticsearchTypeName\",\ - \"documentation\":\"The Elasticsearch type name. For Elasticsearch 6.x, there can be only one type per index. If you try to specify a new type for an existing index that already has another type, Kinesis Data Firehose returns an error during runtime.
If you upgrade Elasticsearch from 6.x to 7.x and don't update your delivery stream, Kinesis Data Firehose still delivers data to Elasticsearch with the old index name and type name. If you want to update your delivery stream with a new index name, provide an empty string for TypeName
.
The Elasticsearch type name. For Elasticsearch 6.x, there can be only one type per index. If you try to specify a new type for an existing index that already has another type, Firehose returns an error during runtime.
If you upgrade Elasticsearch from 6.x to 7.x and don't update your delivery stream, Firehose still delivers data to Elasticsearch with the old index name and type name. If you want to update your delivery stream with a new index name, provide an empty string for TypeName
.
The retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon ES. The default value is 300 (5 minutes).
\"\ + \"documentation\":\"The retry behavior in case Firehose is unable to deliver documents to Amazon ES. The default value is 300 (5 minutes).
\"\ },\ \"S3Update\":{\ \"shape\":\"S3DestinationUpdate\",\ @@ -1423,7 +1436,7 @@ - (NSString *)definitionString { },\ \"DocumentIdOptions\":{\ \"shape\":\"DocumentIdOptions\",\ - \"documentation\":\"Indicates the method for setting up document ID. The supported methods are Kinesis Data Firehose generated document ID and OpenSearch Service generated document ID.
\"\ + \"documentation\":\"Indicates the method for setting up document ID. The supported methods are Firehose generated document ID and OpenSearch Service generated document ID.
\"\ }\ },\ \"documentation\":\"Describes an update for a destination in Amazon ES.
\"\ @@ -1460,10 +1473,10 @@ - (NSString *)definitionString { \"members\":{\ \"DurationInSeconds\":{\ \"shape\":\"ElasticsearchRetryDurationInSeconds\",\ - \"documentation\":\"After an initial failure to deliver to Amazon ES, the total amount of time during which Kinesis Data Firehose retries delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. Default value is 300 seconds (5 minutes). A value of 0 (zero) results in no retries.
\"\ + \"documentation\":\"After an initial failure to deliver to Amazon ES, the total amount of time during which Firehose retries delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. Default value is 300 seconds (5 minutes). A value of 0 (zero) results in no retries.
\"\ }\ },\ - \"documentation\":\"Configures retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon ES.
\"\ + \"documentation\":\"Configures retry behavior in case Firehose is unable to deliver documents to Amazon ES.
\"\ },\ \"ElasticsearchS3BackupMode\":{\ \"type\":\"string\",\ @@ -1521,7 +1534,7 @@ - (NSString *)definitionString { },\ \"ErrorOutputPrefix\":{\ \"shape\":\"ErrorOutputPrefix\",\ - \"documentation\":\"A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.
\"\ + \"documentation\":\"A prefix that Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.
\"\ },\ \"BufferingHints\":{\ \"shape\":\"BufferingHints\",\ @@ -1558,6 +1571,14 @@ - (NSString *)definitionString { \"DynamicPartitioningConfiguration\":{\ \"shape\":\"DynamicPartitioningConfiguration\",\ \"documentation\":\"The configuration of the dynamic partitioning mechanism that creates smaller data sets from the streaming data by partitioning it based on partition keys. Currently, dynamic partitioning is only supported for Amazon S3 destinations.
\"\ + },\ + \"FileExtension\":{\ + \"shape\":\"FileExtension\",\ + \"documentation\":\"Specify a file extension. It will override the default file extension
\"\ + },\ + \"CustomTimeZone\":{\ + \"shape\":\"CustomTimeZone\",\ + \"documentation\":\"The time zone you prefer. UTC is the default.
\"\ }\ },\ \"documentation\":\"Describes the configuration of a destination in Amazon S3.
\"\ @@ -1586,7 +1607,7 @@ - (NSString *)definitionString { },\ \"ErrorOutputPrefix\":{\ \"shape\":\"ErrorOutputPrefix\",\ - \"documentation\":\"A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.
\"\ + \"documentation\":\"A prefix that Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.
\"\ },\ \"BufferingHints\":{\ \"shape\":\"BufferingHints\",\ @@ -1623,6 +1644,14 @@ - (NSString *)definitionString { \"DynamicPartitioningConfiguration\":{\ \"shape\":\"DynamicPartitioningConfiguration\",\ \"documentation\":\"The configuration of the dynamic partitioning mechanism that creates smaller data sets from the streaming data by partitioning it based on partition keys. Currently, dynamic partitioning is only supported for Amazon S3 destinations.
\"\ + },\ + \"FileExtension\":{\ + \"shape\":\"FileExtension\",\ + \"documentation\":\"Specify a file extension. It will override the default file extension
\"\ + },\ + \"CustomTimeZone\":{\ + \"shape\":\"CustomTimeZone\",\ + \"documentation\":\"The time zone you prefer. UTC is the default.
\"\ }\ },\ \"documentation\":\"Describes a destination in Amazon S3.
\"\ @@ -1644,7 +1673,7 @@ - (NSString *)definitionString { },\ \"ErrorOutputPrefix\":{\ \"shape\":\"ErrorOutputPrefix\",\ - \"documentation\":\"A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.
\"\ + \"documentation\":\"A prefix that Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.
\"\ },\ \"BufferingHints\":{\ \"shape\":\"BufferingHints\",\ @@ -1681,6 +1710,14 @@ - (NSString *)definitionString { \"DynamicPartitioningConfiguration\":{\ \"shape\":\"DynamicPartitioningConfiguration\",\ \"documentation\":\"The configuration of the dynamic partitioning mechanism that creates smaller data sets from the streaming data by partitioning it based on partition keys. Currently, dynamic partitioning is only supported for Amazon S3 destinations.
\"\ + },\ + \"FileExtension\":{\ + \"shape\":\"FileExtension\",\ + \"documentation\":\"Specify a file extension. It will override the default file extension
\"\ + },\ + \"CustomTimeZone\":{\ + \"shape\":\"CustomTimeZone\",\ + \"documentation\":\"The time zone you prefer. UTC is the default.
\"\ }\ },\ \"documentation\":\"Describes an update for a destination in Amazon S3.
\"\ @@ -1703,6 +1740,12 @@ - (NSString *)definitionString { },\ \"documentation\":\"Provides details in case one of the following operations fails due to an error related to KMS: CreateDeliveryStream, DeleteDeliveryStream, StartDeliveryStreamEncryption, StopDeliveryStreamEncryption.
\"\ },\ + \"FileExtension\":{\ + \"type\":\"string\",\ + \"max\":128,\ + \"min\":0,\ + \"pattern\":\"^$|\\\\.[0-9a-z!\\\\-_.*'()]+\"\ + },\ \"HECAcknowledgmentTimeoutInSeconds\":{\ \"type\":\"integer\",\ \"max\":600,\ @@ -1732,10 +1775,10 @@ - (NSString *)definitionString { \"members\":{\ \"TimestampFormats\":{\ \"shape\":\"ListOfNonEmptyStrings\",\ - \"documentation\":\"Indicates how you want Kinesis Data Firehose to parse the date and timestamps that may be present in your input data JSON. To specify these format strings, follow the pattern syntax of JodaTime's DateTimeFormat format strings. For more information, see Class DateTimeFormat. You can also use the special value millis
to parse timestamps in epoch milliseconds. If you don't specify a format, Kinesis Data Firehose uses java.sql.Timestamp::valueOf
by default.
Indicates how you want Firehose to parse the date and timestamps that may be present in your input data JSON. To specify these format strings, follow the pattern syntax of JodaTime's DateTimeFormat format strings. For more information, see Class DateTimeFormat. You can also use the special value millis
to parse timestamps in epoch milliseconds. If you don't specify a format, Firehose uses java.sql.Timestamp::valueOf
by default.
The native Hive / HCatalog JsonSerDe. Used by Kinesis Data Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the OpenX SerDe.
\"\ + \"documentation\":\"The native Hive / HCatalog JsonSerDe. Used by Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the OpenX SerDe.
\"\ },\ \"HttpEndpointAccessKey\":{\ \"type\":\"string\",\ @@ -1770,7 +1813,7 @@ - (NSString *)definitionString { \"documentation\":\"Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300 (5 minutes).
\"\ }\ },\ - \"documentation\":\"Describes the buffering options that can be applied before data is delivered to the HTTP endpoint destination. Kinesis Data Firehose treats these options as hints, and it might choose to use more optimal values. The SizeInMBs
and IntervalInSeconds
parameters are optional. However, if specify a value for one of them, you must also provide a value for the other.
Describes the buffering options that can be applied before data is delivered to the HTTP endpoint destination. Firehose treats these options as hints, and it might choose to use more optimal values. The SizeInMBs
and IntervalInSeconds
parameters are optional. However, if specify a value for one of them, you must also provide a value for the other.
The buffering options that can be used before data is delivered to the specified destination. Kinesis Data Firehose treats these options as hints, and it might choose to use more optimal values. The SizeInMBs
and IntervalInSeconds
parameters are optional. However, if you specify a value for one of them, you must also provide a value for the other.
The buffering options that can be used before data is delivered to the specified destination. Firehose treats these options as hints, and it might choose to use more optimal values. The SizeInMBs
and IntervalInSeconds
parameters are optional. However, if you specify a value for one of them, you must also provide a value for the other.
Kinesis Data Firehose uses this IAM role for all the permissions that the delivery stream needs.
\"\ + \"documentation\":\"Firehose uses this IAM role for all the permissions that the delivery stream needs.
\"\ },\ \"RetryOptions\":{\ \"shape\":\"HttpEndpointRetryOptions\",\ - \"documentation\":\"Describes the retry behavior in case Kinesis Data Firehose is unable to deliver data to the specified HTTP endpoint destination, or if it doesn't receive a valid acknowledgment of receipt from the specified HTTP endpoint destination.
\"\ + \"documentation\":\"Describes the retry behavior in case Firehose is unable to deliver data to the specified HTTP endpoint destination, or if it doesn't receive a valid acknowledgment of receipt from the specified HTTP endpoint destination.
\"\ },\ \"S3BackupMode\":{\ \"shape\":\"HttpEndpointS3BackupMode\",\ - \"documentation\":\"Describes the S3 bucket backup options for the data that Kinesis Data Firehose delivers to the HTTP endpoint destination. You can back up all documents (AllData
) or only the documents that Kinesis Data Firehose could not deliver to the specified HTTP endpoint destination (FailedDataOnly
).
Describes the S3 bucket backup options for the data that Firehose delivers to the HTTP endpoint destination. You can back up all documents (AllData
) or only the documents that Firehose could not deliver to the specified HTTP endpoint destination (FailedDataOnly
).
Describes buffering options that can be applied to the data before it is delivered to the HTTPS endpoint destination. Kinesis Data Firehose teats these options as hints, and it might choose to use more optimal values. The SizeInMBs
and IntervalInSeconds
parameters are optional. However, if specify a value for one of them, you must also provide a value for the other.
Describes buffering options that can be applied to the data before it is delivered to the HTTPS endpoint destination. Firehose teats these options as hints, and it might choose to use more optimal values. The SizeInMBs
and IntervalInSeconds
parameters are optional. However, if specify a value for one of them, you must also provide a value for the other.
Kinesis Data Firehose uses this IAM role for all the permissions that the delivery stream needs.
\"\ + \"documentation\":\"Firehose uses this IAM role for all the permissions that the delivery stream needs.
\"\ },\ \"RetryOptions\":{\ \"shape\":\"HttpEndpointRetryOptions\",\ - \"documentation\":\"Describes the retry behavior in case Kinesis Data Firehose is unable to deliver data to the specified HTTP endpoint destination, or if it doesn't receive a valid acknowledgment of receipt from the specified HTTP endpoint destination.
\"\ + \"documentation\":\"Describes the retry behavior in case Firehose is unable to deliver data to the specified HTTP endpoint destination, or if it doesn't receive a valid acknowledgment of receipt from the specified HTTP endpoint destination.
\"\ },\ \"S3BackupMode\":{\ \"shape\":\"HttpEndpointS3BackupMode\",\ - \"documentation\":\"Describes the S3 bucket backup options for the data that Kinesis Firehose delivers to the HTTP endpoint destination. You can back up all documents (AllData
) or only the documents that Kinesis Data Firehose could not deliver to the specified HTTP endpoint destination (FailedDataOnly
).
Describes the S3 bucket backup options for the data that Kinesis Firehose delivers to the HTTP endpoint destination. You can back up all documents (AllData
) or only the documents that Firehose could not deliver to the specified HTTP endpoint destination (FailedDataOnly
).
Describes buffering options that can be applied to the data before it is delivered to the HTTPS endpoint destination. Kinesis Data Firehose teats these options as hints, and it might choose to use more optimal values. The SizeInMBs
and IntervalInSeconds
parameters are optional. However, if specify a value for one of them, you must also provide a value for the other.
Describes buffering options that can be applied to the data before it is delivered to the HTTPS endpoint destination. Firehose teats these options as hints, and it might choose to use more optimal values. The SizeInMBs
and IntervalInSeconds
parameters are optional. However, if specify a value for one of them, you must also provide a value for the other.
Kinesis Data Firehose uses this IAM role for all the permissions that the delivery stream needs.
\"\ + \"documentation\":\"Firehose uses this IAM role for all the permissions that the delivery stream needs.
\"\ },\ \"RetryOptions\":{\ \"shape\":\"HttpEndpointRetryOptions\",\ - \"documentation\":\"Describes the retry behavior in case Kinesis Data Firehose is unable to deliver data to the specified HTTP endpoint destination, or if it doesn't receive a valid acknowledgment of receipt from the specified HTTP endpoint destination.
\"\ + \"documentation\":\"Describes the retry behavior in case Firehose is unable to deliver data to the specified HTTP endpoint destination, or if it doesn't receive a valid acknowledgment of receipt from the specified HTTP endpoint destination.
\"\ },\ \"S3BackupMode\":{\ \"shape\":\"HttpEndpointS3BackupMode\",\ - \"documentation\":\"Describes the S3 bucket backup options for the data that Kinesis Firehose delivers to the HTTP endpoint destination. You can back up all documents (AllData
) or only the documents that Kinesis Data Firehose could not deliver to the specified HTTP endpoint destination (FailedDataOnly
).
Describes the S3 bucket backup options for the data that Kinesis Firehose delivers to the HTTP endpoint destination. You can back up all documents (AllData
) or only the documents that Firehose could not deliver to the specified HTTP endpoint destination (FailedDataOnly
).
Kinesis Data Firehose uses the content encoding to compress the body of a request before sending the request to the destination. For more information, see Content-Encoding in MDN Web Docs, the official Mozilla documentation.
\"\ + \"documentation\":\"Firehose uses the content encoding to compress the body of a request before sending the request to the destination. For more information, see Content-Encoding in MDN Web Docs, the official Mozilla documentation.
\"\ },\ \"CommonAttributes\":{\ \"shape\":\"HttpEndpointCommonAttributesList\",\ @@ -1972,10 +2015,10 @@ - (NSString *)definitionString { \"members\":{\ \"DurationInSeconds\":{\ \"shape\":\"HttpEndpointRetryDurationInSeconds\",\ - \"documentation\":\"The total amount of time that Kinesis Data Firehose spends on retries. This duration starts after the initial attempt to send data to the custom destination via HTTPS endpoint fails. It doesn't include the periods during which Kinesis Data Firehose waits for acknowledgment from the specified destination after each attempt.
\"\ + \"documentation\":\"The total amount of time that Firehose spends on retries. This duration starts after the initial attempt to send data to the custom destination via HTTPS endpoint fails. It doesn't include the periods during which Firehose waits for acknowledgment from the specified destination after each attempt.
\"\ }\ },\ - \"documentation\":\"Describes the retry behavior in case Kinesis Data Firehose is unable to deliver data to the specified HTTP endpoint destination, or if it doesn't receive a valid acknowledgment of receipt from the specified HTTP endpoint destination.
\"\ + \"documentation\":\"Describes the retry behavior in case Firehose is unable to deliver data to the specified HTTP endpoint destination, or if it doesn't receive a valid acknowledgment of receipt from the specified HTTP endpoint destination.
\"\ },\ \"HttpEndpointS3BackupMode\":{\ \"type\":\"string\",\ @@ -2023,7 +2066,7 @@ - (NSString *)definitionString { \"code\":{\"shape\":\"ErrorCode\"},\ \"message\":{\"shape\":\"ErrorMessage\"}\ },\ - \"documentation\":\"Kinesis Data Firehose throws this exception when an attempt to put records or to start or stop delivery stream encryption fails. This happens when the KMS service throws one of the following exception types: AccessDeniedException
, InvalidStateException
, DisabledException
, or NotFoundException
.
Firehose throws this exception when an attempt to put records or to start or stop delivery stream encryption fails. This happens when the KMS service throws one of the following exception types: AccessDeniedException
, InvalidStateException
, DisabledException
, or NotFoundException
.
Kinesis Data Firehose starts retrieving records from the Kinesis data stream starting with this timestamp.
\"\ + \"documentation\":\"Firehose starts retrieving records from the Kinesis data stream starting with this timestamp.
\"\ }\ },\ - \"documentation\":\"Details about a Kinesis data stream used as the source for a Kinesis Data Firehose delivery stream.
\"\ + \"documentation\":\"Details about a Kinesis data stream used as the source for a Firehose delivery stream.
\"\ },\ \"LimitExceededException\":{\ \"type\":\"structure\",\ @@ -2257,10 +2300,10 @@ - (NSString *)definitionString { },\ \"DeliveryStartTimestamp\":{\ \"shape\":\"DeliveryStartTimestamp\",\ - \"documentation\":\"Kinesis Data Firehose starts retrieving records from the topic within the Amazon MSK cluster starting with this timestamp.
\"\ + \"documentation\":\"Firehose starts retrieving records from the topic within the Amazon MSK cluster starting with this timestamp.
\"\ }\ },\ - \"documentation\":\"Details about the Amazon MSK cluster used as the source for a Kinesis Data Firehose delivery stream.
\"\ + \"documentation\":\"Details about the Amazon MSK cluster used as the source for a Firehose delivery stream.
\"\ },\ \"NoEncryptionConfig\":{\ \"type\":\"string\",\ @@ -2287,18 +2330,18 @@ - (NSString *)definitionString { \"members\":{\ \"ConvertDotsInJsonKeysToUnderscores\":{\ \"shape\":\"BooleanObject\",\ - \"documentation\":\"When set to true
, specifies that the names of the keys include dots and that you want Kinesis Data Firehose to replace them with underscores. This is useful because Apache Hive does not allow dots in column names. For example, if the JSON contains a key whose name is \\\"a.b\\\", you can define the column name to be \\\"a_b\\\" when using this option.
The default is false
.
When set to true
, specifies that the names of the keys include dots and that you want Firehose to replace them with underscores. This is useful because Apache Hive does not allow dots in column names. For example, if the JSON contains a key whose name is \\\"a.b\\\", you can define the column name to be \\\"a_b\\\" when using this option.
The default is false
.
When set to true
, which is the default, Kinesis Data Firehose converts JSON keys to lowercase before deserializing them.
When set to true
, which is the default, Firehose converts JSON keys to lowercase before deserializing them.
Maps column names to JSON keys that aren't identical to the column names. This is useful when the JSON contains keys that are Hive keywords. For example, timestamp
is a Hive keyword. If you have a JSON key named timestamp
, set this parameter to {\\\"ts\\\": \\\"timestamp\\\"}
to map this key to a column named ts
.
The OpenX SerDe. Used by Kinesis Data Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the native Hive / HCatalog JsonSerDe.
\"\ + \"documentation\":\"The OpenX SerDe. Used by Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the native Hive / HCatalog JsonSerDe.
\"\ },\ \"OrcCompression\":{\ \"type\":\"string\",\ @@ -2328,7 +2371,7 @@ - (NSString *)definitionString { },\ \"BlockSizeBytes\":{\ \"shape\":\"BlockSizeBytes\",\ - \"documentation\":\"The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.
\"\ + \"documentation\":\"The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Firehose uses this value for padding calculations.
\"\ },\ \"RowIndexStride\":{\ \"shape\":\"OrcRowIndexStride\",\ @@ -2340,7 +2383,7 @@ - (NSString *)definitionString { },\ \"PaddingTolerance\":{\ \"shape\":\"Proportion\",\ - \"documentation\":\"A number between 0 and 1 that defines the tolerance for block padding as a decimal fraction of stripe size. The default value is 0.05, which means 5 percent of stripe size.
For the default values of 64 MiB ORC stripes and 256 MiB HDFS blocks, the default block padding tolerance of 5 percent reserves a maximum of 3.2 MiB for padding within the 256 MiB block. In such a case, if the available size within the block is more than 3.2 MiB, a new, smaller stripe is inserted to fit within that space. This ensures that no stripe crosses block boundaries and causes remote reads within a node-local task.
Kinesis Data Firehose ignores this parameter when OrcSerDe$EnablePadding is false
.
A number between 0 and 1 that defines the tolerance for block padding as a decimal fraction of stripe size. The default value is 0.05, which means 5 percent of stripe size.
For the default values of 64 MiB ORC stripes and 256 MiB HDFS blocks, the default block padding tolerance of 5 percent reserves a maximum of 3.2 MiB for padding within the 256 MiB block. In such a case, if the available size within the block is more than 3.2 MiB, a new, smaller stripe is inserted to fit within that space. This ensures that no stripe crosses block boundaries and causes remote reads within a node-local task.
Firehose ignores this parameter when OrcSerDe$EnablePadding is false
.
The column names for which you want Kinesis Data Firehose to create bloom filters. The default is null
.
The column names for which you want Firehose to create bloom filters. The default is null
.
Specifies which serializer to use. You can choose either the ORC SerDe or the Parquet SerDe. If both are non-null, the server rejects the request.
\"\ }\ },\ - \"documentation\":\"Specifies the serializer that you want Kinesis Data Firehose to use to convert the format of your data before it writes it to Amazon S3. This parameter is required if Enabled
is set to true.
Specifies the serializer that you want Firehose to use to convert the format of your data before it writes it to Amazon S3. This parameter is required if Enabled
is set to true.
The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.
\"\ + \"documentation\":\"The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Firehose uses this value for padding calculations.
\"\ },\ \"PageSizeBytes\":{\ \"shape\":\"ParquetPageSizeBytes\",\ @@ -2468,7 +2511,7 @@ - (NSString *)definitionString { \"documentation\":\"The processor parameters.
\"\ }\ },\ - \"documentation\":\"Describes a data processor.
\"\ + \"documentation\":\"Describes a data processor.
If you want to add a new line delimiter between records in objects that are delivered to Amazon S3, choose AppendDelimiterToRecord
as a processor type. You donât have to put a processor parameter when you select AppendDelimiterToRecord
.
The retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon Redshift. Default value is 3600 (60 minutes).
\"\ + \"documentation\":\"The retry behavior in case Firehose is unable to deliver documents to Amazon Redshift. Default value is 3600 (60 minutes).
\"\ },\ \"S3Configuration\":{\ \"shape\":\"S3DestinationConfiguration\",\ @@ -2732,7 +2777,7 @@ - (NSString *)definitionString { },\ \"RetryOptions\":{\ \"shape\":\"RedshiftRetryOptions\",\ - \"documentation\":\"The retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon Redshift. Default value is 3600 (60 minutes).
\"\ + \"documentation\":\"The retry behavior in case Firehose is unable to deliver documents to Amazon Redshift. Default value is 3600 (60 minutes).
\"\ },\ \"S3DestinationDescription\":{\ \"shape\":\"S3DestinationDescription\",\ @@ -2782,7 +2827,7 @@ - (NSString *)definitionString { },\ \"RetryOptions\":{\ \"shape\":\"RedshiftRetryOptions\",\ - \"documentation\":\"The retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon Redshift. Default value is 3600 (60 minutes).
\"\ + \"documentation\":\"The retry behavior in case Firehose is unable to deliver documents to Amazon Redshift. Default value is 3600 (60 minutes).
\"\ },\ \"S3Update\":{\ \"shape\":\"S3DestinationUpdate\",\ @@ -2817,10 +2862,10 @@ - (NSString *)definitionString { \"members\":{\ \"DurationInSeconds\":{\ \"shape\":\"RedshiftRetryDurationInSeconds\",\ - \"documentation\":\"The length of time during which Kinesis Data Firehose retries delivery after a failure, starting from the initial request and including the first attempt. The default value is 3600 seconds (60 minutes). Kinesis Data Firehose does not retry if the value of DurationInSeconds
is 0 (zero) or if the first delivery attempt takes longer than the current value.
The length of time during which Firehose retries delivery after a failure, starting from the initial request and including the first attempt. The default value is 3600 seconds (60 minutes). Firehose does not retry if the value of DurationInSeconds
is 0 (zero) or if the first delivery attempt takes longer than the current value.
Configures retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon Redshift.
\"\ + \"documentation\":\"Configures retry behavior in case Firehose is unable to deliver documents to Amazon Redshift.
\"\ },\ \"RedshiftS3BackupMode\":{\ \"type\":\"string\",\ @@ -2861,10 +2906,10 @@ - (NSString *)definitionString { \"members\":{\ \"DurationInSeconds\":{\ \"shape\":\"RetryDurationInSeconds\",\ - \"documentation\":\"The period of time during which Kinesis Data Firehose retries to deliver data to the specified Amazon S3 prefix.
\"\ + \"documentation\":\"The period of time during which Firehose retries to deliver data to the specified Amazon S3 prefix.
\"\ }\ },\ - \"documentation\":\"The retry behavior in case Kinesis Data Firehose is unable to deliver data to an Amazon S3 prefix.
\"\ + \"documentation\":\"The retry behavior in case Firehose is unable to deliver data to an Amazon S3 prefix.
\"\ },\ \"RoleARN\":{\ \"type\":\"string\",\ @@ -2900,7 +2945,7 @@ - (NSString *)definitionString { },\ \"ErrorOutputPrefix\":{\ \"shape\":\"ErrorOutputPrefix\",\ - \"documentation\":\"A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.
\"\ + \"documentation\":\"A prefix that Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.
\"\ },\ \"BufferingHints\":{\ \"shape\":\"BufferingHints\",\ @@ -2945,7 +2990,7 @@ - (NSString *)definitionString { },\ \"ErrorOutputPrefix\":{\ \"shape\":\"ErrorOutputPrefix\",\ - \"documentation\":\"A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.
\"\ + \"documentation\":\"A prefix that Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.
\"\ },\ \"BufferingHints\":{\ \"shape\":\"BufferingHints\",\ @@ -2983,7 +3028,7 @@ - (NSString *)definitionString { },\ \"ErrorOutputPrefix\":{\ \"shape\":\"ErrorOutputPrefix\",\ - \"documentation\":\"A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.
\"\ + \"documentation\":\"A prefix that Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.
\"\ },\ \"BufferingHints\":{\ \"shape\":\"BufferingHints\",\ @@ -3009,7 +3054,7 @@ - (NSString *)definitionString { \"members\":{\ \"RoleARN\":{\ \"shape\":\"NonEmptyStringWithoutWhitespace\",\ - \"documentation\":\"The role that Kinesis Data Firehose can use to access Amazon Web Services Glue. This role must be in the same account you use for Kinesis Data Firehose. Cross-account roles aren't allowed.
If the SchemaConfiguration
request parameter is used as part of invoking the CreateDeliveryStream
API, then the RoleARN
property is required and its value must be specified.
The role that Firehose can use to access Amazon Web Services Glue. This role must be in the same account you use for Firehose. Cross-account roles aren't allowed.
If the SchemaConfiguration
request parameter is used as part of invoking the CreateDeliveryStream
API, then the RoleARN
property is required and its value must be specified.
Specifies the table version for the output data schema. If you don't specify this version ID, or if you set it to LATEST
, Kinesis Data Firehose uses the most recent version. This means that any updates to the table are automatically picked up.
Specifies the table version for the output data schema. If you don't specify this version ID, or if you set it to LATEST
, Firehose uses the most recent version. This means that any updates to the table are automatically picked up.
Specifies the schema to which you want Kinesis Data Firehose to configure your data before it writes it to Amazon S3. This parameter is required if Enabled
is set to true.
Specifies the schema to which you want Firehose to configure your data before it writes it to Amazon S3. This parameter is required if Enabled
is set to true.
A serializer to use for converting data to the ORC format before storing it in Amazon S3. For more information, see Apache ORC.
\"\ }\ },\ - \"documentation\":\"The serializer that you want Kinesis Data Firehose to use to convert data to the target format before writing it to Amazon S3. Kinesis Data Firehose supports two types of serializers: the ORC SerDe and the Parquet SerDe.
\"\ + \"documentation\":\"The serializer that you want Firehose to use to convert data to the target format before writing it to Amazon S3. Firehose supports two types of serializers: the ORC SerDe and the Parquet SerDe.
\"\ },\ \"ServiceUnavailableException\":{\ \"type\":\"structure\",\ @@ -3062,7 +3107,7 @@ - (NSString *)definitionString { \"documentation\":\"A message that provides information about the error.
\"\ }\ },\ - \"documentation\":\"The service is unavailable. Back off and retry the operation. If you continue to see the exception, throughput limits for the delivery stream may have been exceeded. For more information about limits and how to request an increase, see Amazon Kinesis Data Firehose Limits.
\",\ + \"documentation\":\"The service is unavailable. Back off and retry the operation. If you continue to see the exception, throughput limits for the delivery stream may have been exceeded. For more information about limits and how to request an increase, see Amazon Firehose Limits.
\",\ \"exception\":true,\ \"fault\":true\ },\ @@ -3071,6 +3116,335 @@ - (NSString *)definitionString { \"max\":128,\ \"min\":1\ },\ + \"SnowflakeAccountUrl\":{\ + \"type\":\"string\",\ + \"max\":2048,\ + \"min\":24,\ + \"pattern\":\".+?\\\\.snowflakecomputing\\\\.com\",\ + \"sensitive\":true\ + },\ + \"SnowflakeContentColumnName\":{\ + \"type\":\"string\",\ + \"max\":255,\ + \"min\":1,\ + \"sensitive\":true\ + },\ + \"SnowflakeDataLoadingOption\":{\ + \"type\":\"string\",\ + \"enum\":[\ + \"JSON_MAPPING\",\ + \"VARIANT_CONTENT_MAPPING\",\ + \"VARIANT_CONTENT_AND_METADATA_MAPPING\"\ + ]\ + },\ + \"SnowflakeDatabase\":{\ + \"type\":\"string\",\ + \"max\":255,\ + \"min\":1,\ + \"sensitive\":true\ + },\ + \"SnowflakeDestinationConfiguration\":{\ + \"type\":\"structure\",\ + \"required\":[\ + \"AccountUrl\",\ + \"PrivateKey\",\ + \"User\",\ + \"Database\",\ + \"Schema\",\ + \"Table\",\ + \"RoleARN\",\ + \"S3Configuration\"\ + ],\ + \"members\":{\ + \"AccountUrl\":{\ + \"shape\":\"SnowflakeAccountUrl\",\ + \"documentation\":\"URL for accessing your Snowflake account. This URL must include your account identifier. Note that the protocol (https://) and port number are optional.
\"\ + },\ + \"PrivateKey\":{\ + \"shape\":\"SnowflakePrivateKey\",\ + \"documentation\":\"The private key used to encrypt your Snowflake client. For information, see Using Key Pair Authentication & Key Rotation.
\"\ + },\ + \"KeyPassphrase\":{\ + \"shape\":\"SnowflakeKeyPassphrase\",\ + \"documentation\":\"Passphrase to decrypt the private key when the key is encrypted. For information, see Using Key Pair Authentication & Key Rotation.
\"\ + },\ + \"User\":{\ + \"shape\":\"SnowflakeUser\",\ + \"documentation\":\"User login name for the Snowflake account.
\"\ + },\ + \"Database\":{\ + \"shape\":\"SnowflakeDatabase\",\ + \"documentation\":\"All data in Snowflake is maintained in databases.
\"\ + },\ + \"Schema\":{\ + \"shape\":\"SnowflakeSchema\",\ + \"documentation\":\"Each database consists of one or more schemas, which are logical groupings of database objects, such as tables and views
\"\ + },\ + \"Table\":{\ + \"shape\":\"SnowflakeTable\",\ + \"documentation\":\"All data in Snowflake is stored in database tables, logically structured as collections of columns and rows.
\"\ + },\ + \"SnowflakeRoleConfiguration\":{\ + \"shape\":\"SnowflakeRoleConfiguration\",\ + \"documentation\":\"Optionally configure a Snowflake role. Otherwise the default user role will be used.
\"\ + },\ + \"DataLoadingOption\":{\ + \"shape\":\"SnowflakeDataLoadingOption\",\ + \"documentation\":\"Choose to load JSON keys mapped to table column names or choose to split the JSON payload where content is mapped to a record content column and source metadata is mapped to a record metadata column.
\"\ + },\ + \"MetaDataColumnName\":{\ + \"shape\":\"SnowflakeMetaDataColumnName\",\ + \"documentation\":\"The name of the record metadata column
\"\ + },\ + \"ContentColumnName\":{\ + \"shape\":\"SnowflakeContentColumnName\",\ + \"documentation\":\"The name of the record content column
\"\ + },\ + \"SnowflakeVpcConfiguration\":{\ + \"shape\":\"SnowflakeVpcConfiguration\",\ + \"documentation\":\"The VPCE ID for Firehose to privately connect with Snowflake. The ID format is com.amazonaws.vpce.[region].vpce-svc-<[id]>. For more information, see Amazon PrivateLink & Snowflake
\"\ + },\ + \"CloudWatchLoggingOptions\":{\"shape\":\"CloudWatchLoggingOptions\"},\ + \"ProcessingConfiguration\":{\"shape\":\"ProcessingConfiguration\"},\ + \"RoleARN\":{\ + \"shape\":\"RoleARN\",\ + \"documentation\":\"The Amazon Resource Name (ARN) of the Snowflake role
\"\ + },\ + \"RetryOptions\":{\ + \"shape\":\"SnowflakeRetryOptions\",\ + \"documentation\":\"The time period where Firehose will retry sending data to the chosen HTTP endpoint.
\"\ + },\ + \"S3BackupMode\":{\ + \"shape\":\"SnowflakeS3BackupMode\",\ + \"documentation\":\"Choose an S3 backup mode
\"\ + },\ + \"S3Configuration\":{\"shape\":\"S3DestinationConfiguration\"}\ + },\ + \"documentation\":\"Configure Snowflake destination
\"\ + },\ + \"SnowflakeDestinationDescription\":{\ + \"type\":\"structure\",\ + \"members\":{\ + \"AccountUrl\":{\ + \"shape\":\"SnowflakeAccountUrl\",\ + \"documentation\":\"URL for accessing your Snowflake account. This URL must include your account identifier. Note that the protocol (https://) and port number are optional.
\"\ + },\ + \"User\":{\ + \"shape\":\"SnowflakeUser\",\ + \"documentation\":\"User login name for the Snowflake account.
\"\ + },\ + \"Database\":{\ + \"shape\":\"SnowflakeDatabase\",\ + \"documentation\":\"All data in Snowflake is maintained in databases.
\"\ + },\ + \"Schema\":{\ + \"shape\":\"SnowflakeSchema\",\ + \"documentation\":\"Each database consists of one or more schemas, which are logical groupings of database objects, such as tables and views
\"\ + },\ + \"Table\":{\ + \"shape\":\"SnowflakeTable\",\ + \"documentation\":\"All data in Snowflake is stored in database tables, logically structured as collections of columns and rows.
\"\ + },\ + \"SnowflakeRoleConfiguration\":{\ + \"shape\":\"SnowflakeRoleConfiguration\",\ + \"documentation\":\"Optionally configure a Snowflake role. Otherwise the default user role will be used.
\"\ + },\ + \"DataLoadingOption\":{\ + \"shape\":\"SnowflakeDataLoadingOption\",\ + \"documentation\":\"Choose to load JSON keys mapped to table column names or choose to split the JSON payload where content is mapped to a record content column and source metadata is mapped to a record metadata column.
\"\ + },\ + \"MetaDataColumnName\":{\ + \"shape\":\"SnowflakeMetaDataColumnName\",\ + \"documentation\":\"The name of the record metadata column
\"\ + },\ + \"ContentColumnName\":{\ + \"shape\":\"SnowflakeContentColumnName\",\ + \"documentation\":\"The name of the record content column
\"\ + },\ + \"SnowflakeVpcConfiguration\":{\ + \"shape\":\"SnowflakeVpcConfiguration\",\ + \"documentation\":\"The VPCE ID for Firehose to privately connect with Snowflake. The ID format is com.amazonaws.vpce.[region].vpce-svc-<[id]>. For more information, see Amazon PrivateLink & Snowflake
\"\ + },\ + \"CloudWatchLoggingOptions\":{\"shape\":\"CloudWatchLoggingOptions\"},\ + \"ProcessingConfiguration\":{\"shape\":\"ProcessingConfiguration\"},\ + \"RoleARN\":{\ + \"shape\":\"RoleARN\",\ + \"documentation\":\"The Amazon Resource Name (ARN) of the Snowflake role
\"\ + },\ + \"RetryOptions\":{\ + \"shape\":\"SnowflakeRetryOptions\",\ + \"documentation\":\"The time period where Firehose will retry sending data to the chosen HTTP endpoint.
\"\ + },\ + \"S3BackupMode\":{\ + \"shape\":\"SnowflakeS3BackupMode\",\ + \"documentation\":\"Choose an S3 backup mode
\"\ + },\ + \"S3DestinationDescription\":{\"shape\":\"S3DestinationDescription\"}\ + },\ + \"documentation\":\"Optional Snowflake destination description
\"\ + },\ + \"SnowflakeDestinationUpdate\":{\ + \"type\":\"structure\",\ + \"members\":{\ + \"AccountUrl\":{\ + \"shape\":\"SnowflakeAccountUrl\",\ + \"documentation\":\"URL for accessing your Snowflake account. This URL must include your account identifier. Note that the protocol (https://) and port number are optional.
\"\ + },\ + \"PrivateKey\":{\ + \"shape\":\"SnowflakePrivateKey\",\ + \"documentation\":\"The private key used to encrypt your Snowflake client. For information, see Using Key Pair Authentication & Key Rotation.
\"\ + },\ + \"KeyPassphrase\":{\ + \"shape\":\"SnowflakeKeyPassphrase\",\ + \"documentation\":\"Passphrase to decrypt the private key when the key is encrypted. For information, see Using Key Pair Authentication & Key Rotation.
\"\ + },\ + \"User\":{\ + \"shape\":\"SnowflakeUser\",\ + \"documentation\":\"User login name for the Snowflake account.
\"\ + },\ + \"Database\":{\ + \"shape\":\"SnowflakeDatabase\",\ + \"documentation\":\"All data in Snowflake is maintained in databases.
\"\ + },\ + \"Schema\":{\ + \"shape\":\"SnowflakeSchema\",\ + \"documentation\":\"Each database consists of one or more schemas, which are logical groupings of database objects, such as tables and views
\"\ + },\ + \"Table\":{\ + \"shape\":\"SnowflakeTable\",\ + \"documentation\":\"All data in Snowflake is stored in database tables, logically structured as collections of columns and rows.
\"\ + },\ + \"SnowflakeRoleConfiguration\":{\ + \"shape\":\"SnowflakeRoleConfiguration\",\ + \"documentation\":\"Optionally configure a Snowflake role. Otherwise the default user role will be used.
\"\ + },\ + \"DataLoadingOption\":{\ + \"shape\":\"SnowflakeDataLoadingOption\",\ + \"documentation\":\"JSON keys mapped to table column names or choose to split the JSON payload where content is mapped to a record content column and source metadata is mapped to a record metadata column.
\"\ + },\ + \"MetaDataColumnName\":{\ + \"shape\":\"SnowflakeMetaDataColumnName\",\ + \"documentation\":\"The name of the record metadata column
\"\ + },\ + \"ContentColumnName\":{\ + \"shape\":\"SnowflakeContentColumnName\",\ + \"documentation\":\"The name of the content metadata column
\"\ + },\ + \"CloudWatchLoggingOptions\":{\"shape\":\"CloudWatchLoggingOptions\"},\ + \"ProcessingConfiguration\":{\"shape\":\"ProcessingConfiguration\"},\ + \"RoleARN\":{\ + \"shape\":\"RoleARN\",\ + \"documentation\":\"The Amazon Resource Name (ARN) of the Snowflake role
\"\ + },\ + \"RetryOptions\":{\ + \"shape\":\"SnowflakeRetryOptions\",\ + \"documentation\":\"Specify how long Firehose retries sending data to the New Relic HTTP endpoint. After sending data, Firehose first waits for an acknowledgment from the HTTP endpoint. If an error occurs or the acknowledgment doesnât arrive within the acknowledgment timeout period, Firehose starts the retry duration counter. It keeps retrying until the retry duration expires. After that, Firehose considers it a data delivery failure and backs up the data to your Amazon S3 bucket. Every time that Firehose sends data to the HTTP endpoint (either the initial attempt or a retry), it restarts the acknowledgement timeout counter and waits for an acknowledgement from the HTTP endpoint. Even if the retry duration expires, Firehose still waits for the acknowledgment until it receives it or the acknowledgement timeout period is reached. If the acknowledgment times out, Firehose determines whether there's time left in the retry counter. If there is time left, it retries again and repeats the logic until it receives an acknowledgment or determines that the retry time has expired. If you don't want Firehose to retry sending data, set this value to 0.
\"\ + },\ + \"S3BackupMode\":{\ + \"shape\":\"SnowflakeS3BackupMode\",\ + \"documentation\":\"Choose an S3 backup mode
\"\ + },\ + \"S3Update\":{\"shape\":\"S3DestinationUpdate\"}\ + },\ + \"documentation\":\"Update to configuration settings
\"\ + },\ + \"SnowflakeKeyPassphrase\":{\ + \"type\":\"string\",\ + \"max\":255,\ + \"min\":7,\ + \"sensitive\":true\ + },\ + \"SnowflakeMetaDataColumnName\":{\ + \"type\":\"string\",\ + \"max\":255,\ + \"min\":1,\ + \"sensitive\":true\ + },\ + \"SnowflakePrivateKey\":{\ + \"type\":\"string\",\ + \"max\":4096,\ + \"min\":256,\ + \"pattern\":\"^(?:[A-Za-z0-9+\\\\/]{4})*(?:[A-Za-z0-9+\\\\/]{2}==|[A-Za-z0-9+\\\\/]{3}=)?$\",\ + \"sensitive\":true\ + },\ + \"SnowflakePrivateLinkVpceId\":{\ + \"type\":\"string\",\ + \"max\":255,\ + \"min\":47,\ + \"pattern\":\"([a-zA-Z0-9\\\\-\\\\_]+\\\\.){2,3}vpce\\\\.[a-zA-Z0-9\\\\-]*\\\\.vpce-svc\\\\-[a-zA-Z0-9\\\\-]{17}$\",\ + \"sensitive\":true\ + },\ + \"SnowflakeRetryDurationInSeconds\":{\ + \"type\":\"integer\",\ + \"max\":7200,\ + \"min\":0\ + },\ + \"SnowflakeRetryOptions\":{\ + \"type\":\"structure\",\ + \"members\":{\ + \"DurationInSeconds\":{\ + \"shape\":\"SnowflakeRetryDurationInSeconds\",\ + \"documentation\":\"the time period where Firehose will retry sending data to the chosen HTTP endpoint.
\"\ + }\ + },\ + \"documentation\":\"Specify how long Firehose retries sending data to the New Relic HTTP endpoint. After sending data, Firehose first waits for an acknowledgment from the HTTP endpoint. If an error occurs or the acknowledgment doesnât arrive within the acknowledgment timeout period, Firehose starts the retry duration counter. It keeps retrying until the retry duration expires. After that, Firehose considers it a data delivery failure and backs up the data to your Amazon S3 bucket. Every time that Firehose sends data to the HTTP endpoint (either the initial attempt or a retry), it restarts the acknowledgement timeout counter and waits for an acknowledgement from the HTTP endpoint. Even if the retry duration expires, Firehose still waits for the acknowledgment until it receives it or the acknowledgement timeout period is reached. If the acknowledgment times out, Firehose determines whether there's time left in the retry counter. If there is time left, it retries again and repeats the logic until it receives an acknowledgment or determines that the retry time has expired. If you don't want Firehose to retry sending data, set this value to 0.
\"\ + },\ + \"SnowflakeRole\":{\ + \"type\":\"string\",\ + \"max\":255,\ + \"min\":1,\ + \"sensitive\":true\ + },\ + \"SnowflakeRoleConfiguration\":{\ + \"type\":\"structure\",\ + \"members\":{\ + \"Enabled\":{\ + \"shape\":\"BooleanObject\",\ + \"documentation\":\"Enable Snowflake role
\"\ + },\ + \"SnowflakeRole\":{\ + \"shape\":\"SnowflakeRole\",\ + \"documentation\":\"The Snowflake role you wish to configure
\"\ + }\ + },\ + \"documentation\":\"Optionally configure a Snowflake role. Otherwise the default user role will be used.
\"\ + },\ + \"SnowflakeS3BackupMode\":{\ + \"type\":\"string\",\ + \"enum\":[\ + \"FailedDataOnly\",\ + \"AllData\"\ + ]\ + },\ + \"SnowflakeSchema\":{\ + \"type\":\"string\",\ + \"max\":255,\ + \"min\":1,\ + \"sensitive\":true\ + },\ + \"SnowflakeTable\":{\ + \"type\":\"string\",\ + \"max\":255,\ + \"min\":1,\ + \"sensitive\":true\ + },\ + \"SnowflakeUser\":{\ + \"type\":\"string\",\ + \"max\":255,\ + \"min\":1,\ + \"sensitive\":true\ + },\ + \"SnowflakeVpcConfiguration\":{\ + \"type\":\"structure\",\ + \"required\":[\"PrivateLinkVpceId\"],\ + \"members\":{\ + \"PrivateLinkVpceId\":{\ + \"shape\":\"SnowflakePrivateLinkVpceId\",\ + \"documentation\":\"The VPCE ID for Firehose to privately connect with Snowflake. The ID format is com.amazonaws.vpce.[region].vpce-svc-<[id]>. For more information, see Amazon PrivateLink & Snowflake
\"\ + }\ + },\ + \"documentation\":\"Configure a Snowflake VPC
\"\ + },\ \"SourceDescription\":{\ \"type\":\"structure\",\ \"members\":{\ @@ -3083,7 +3457,7 @@ - (NSString *)definitionString { \"documentation\":\"The configuration description for the Amazon MSK cluster to be used as the source for a delivery stream.
\"\ }\ },\ - \"documentation\":\"Details about a Kinesis data stream used as the source for a Kinesis Data Firehose delivery stream.
\"\ + \"documentation\":\"Details about a Kinesis data stream used as the source for a Firehose delivery stream.
\"\ },\ \"SplunkBufferingHints\":{\ \"type\":\"structure\",\ @@ -3120,7 +3494,7 @@ - (NSString *)definitionString { \"members\":{\ \"HECEndpoint\":{\ \"shape\":\"HECEndpoint\",\ - \"documentation\":\"The HTTP Event Collector (HEC) endpoint to which Kinesis Data Firehose sends your data.
\"\ + \"documentation\":\"The HTTP Event Collector (HEC) endpoint to which Firehose sends your data.
\"\ },\ \"HECEndpointType\":{\ \"shape\":\"HECEndpointType\",\ @@ -3132,15 +3506,15 @@ - (NSString *)definitionString { },\ \"HECAcknowledgmentTimeoutInSeconds\":{\ \"shape\":\"HECAcknowledgmentTimeoutInSeconds\",\ - \"documentation\":\"The amount of time that Kinesis Data Firehose waits to receive an acknowledgment from Splunk after it sends it data. At the end of the timeout period, Kinesis Data Firehose either tries to send the data again or considers it an error, based on your retry settings.
\"\ + \"documentation\":\"The amount of time that Firehose waits to receive an acknowledgment from Splunk after it sends it data. At the end of the timeout period, Firehose either tries to send the data again or considers it an error, based on your retry settings.
\"\ },\ \"RetryOptions\":{\ \"shape\":\"SplunkRetryOptions\",\ - \"documentation\":\"The retry behavior in case Kinesis Data Firehose is unable to deliver data to Splunk, or if it doesn't receive an acknowledgment of receipt from Splunk.
\"\ + \"documentation\":\"The retry behavior in case Firehose is unable to deliver data to Splunk, or if it doesn't receive an acknowledgment of receipt from Splunk.
\"\ },\ \"S3BackupMode\":{\ \"shape\":\"SplunkS3BackupMode\",\ - \"documentation\":\"Defines how documents should be delivered to Amazon S3. When set to FailedEventsOnly
, Kinesis Data Firehose writes any data that could not be indexed to the configured Amazon S3 destination. When set to AllEvents
, Kinesis Data Firehose delivers all incoming records to Amazon S3, and also writes failed documents to Amazon S3. The default value is FailedEventsOnly
.
You can update this backup mode from FailedEventsOnly
to AllEvents
. You can't update it from AllEvents
to FailedEventsOnly
.
Defines how documents should be delivered to Amazon S3. When set to FailedEventsOnly
, Firehose writes any data that could not be indexed to the configured Amazon S3 destination. When set to AllEvents
, Firehose delivers all incoming records to Amazon S3, and also writes failed documents to Amazon S3. The default value is FailedEventsOnly
.
You can update this backup mode from FailedEventsOnly
to AllEvents
. You can't update it from AllEvents
to FailedEventsOnly
.
The HTTP Event Collector (HEC) endpoint to which Kinesis Data Firehose sends your data.
\"\ + \"documentation\":\"The HTTP Event Collector (HEC) endpoint to which Firehose sends your data.
\"\ },\ \"HECEndpointType\":{\ \"shape\":\"HECEndpointType\",\ @@ -3178,15 +3552,15 @@ - (NSString *)definitionString { },\ \"HECAcknowledgmentTimeoutInSeconds\":{\ \"shape\":\"HECAcknowledgmentTimeoutInSeconds\",\ - \"documentation\":\"The amount of time that Kinesis Data Firehose waits to receive an acknowledgment from Splunk after it sends it data. At the end of the timeout period, Kinesis Data Firehose either tries to send the data again or considers it an error, based on your retry settings.
\"\ + \"documentation\":\"The amount of time that Firehose waits to receive an acknowledgment from Splunk after it sends it data. At the end of the timeout period, Firehose either tries to send the data again or considers it an error, based on your retry settings.
\"\ },\ \"RetryOptions\":{\ \"shape\":\"SplunkRetryOptions\",\ - \"documentation\":\"The retry behavior in case Kinesis Data Firehose is unable to deliver data to Splunk or if it doesn't receive an acknowledgment of receipt from Splunk.
\"\ + \"documentation\":\"The retry behavior in case Firehose is unable to deliver data to Splunk or if it doesn't receive an acknowledgment of receipt from Splunk.
\"\ },\ \"S3BackupMode\":{\ \"shape\":\"SplunkS3BackupMode\",\ - \"documentation\":\"Defines how documents should be delivered to Amazon S3. When set to FailedDocumentsOnly
, Kinesis Data Firehose writes any data that could not be indexed to the configured Amazon S3 destination. When set to AllDocuments
, Kinesis Data Firehose delivers all incoming records to Amazon S3, and also writes failed documents to Amazon S3. Default value is FailedDocumentsOnly
.
Defines how documents should be delivered to Amazon S3. When set to FailedDocumentsOnly
, Firehose writes any data that could not be indexed to the configured Amazon S3 destination. When set to AllDocuments
, Firehose delivers all incoming records to Amazon S3, and also writes failed documents to Amazon S3. Default value is FailedDocumentsOnly
.
The HTTP Event Collector (HEC) endpoint to which Kinesis Data Firehose sends your data.
\"\ + \"documentation\":\"The HTTP Event Collector (HEC) endpoint to which Firehose sends your data.
\"\ },\ \"HECEndpointType\":{\ \"shape\":\"HECEndpointType\",\ @@ -3224,15 +3598,15 @@ - (NSString *)definitionString { },\ \"HECAcknowledgmentTimeoutInSeconds\":{\ \"shape\":\"HECAcknowledgmentTimeoutInSeconds\",\ - \"documentation\":\"The amount of time that Kinesis Data Firehose waits to receive an acknowledgment from Splunk after it sends data. At the end of the timeout period, Kinesis Data Firehose either tries to send the data again or considers it an error, based on your retry settings.
\"\ + \"documentation\":\"The amount of time that Firehose waits to receive an acknowledgment from Splunk after it sends data. At the end of the timeout period, Firehose either tries to send the data again or considers it an error, based on your retry settings.
\"\ },\ \"RetryOptions\":{\ \"shape\":\"SplunkRetryOptions\",\ - \"documentation\":\"The retry behavior in case Kinesis Data Firehose is unable to deliver data to Splunk or if it doesn't receive an acknowledgment of receipt from Splunk.
\"\ + \"documentation\":\"The retry behavior in case Firehose is unable to deliver data to Splunk or if it doesn't receive an acknowledgment of receipt from Splunk.
\"\ },\ \"S3BackupMode\":{\ \"shape\":\"SplunkS3BackupMode\",\ - \"documentation\":\"Specifies how you want Kinesis Data Firehose to back up documents to Amazon S3. When set to FailedDocumentsOnly
, Kinesis Data Firehose writes any data that could not be indexed to the configured Amazon S3 destination. When set to AllEvents
, Kinesis Data Firehose delivers all incoming records to Amazon S3, and also writes failed documents to Amazon S3. The default value is FailedEventsOnly
.
You can update this backup mode from FailedEventsOnly
to AllEvents
. You can't update it from AllEvents
to FailedEventsOnly
.
Specifies how you want Firehose to back up documents to Amazon S3. When set to FailedDocumentsOnly
, Firehose writes any data that could not be indexed to the configured Amazon S3 destination. When set to AllEvents
, Firehose delivers all incoming records to Amazon S3, and also writes failed documents to Amazon S3. The default value is FailedEventsOnly
.
You can update this backup mode from FailedEventsOnly
to AllEvents
. You can't update it from AllEvents
to FailedEventsOnly
.
The total amount of time that Kinesis Data Firehose spends on retries. This duration starts after the initial attempt to send data to Splunk fails. It doesn't include the periods during which Kinesis Data Firehose waits for acknowledgment from Splunk after each attempt.
\"\ + \"documentation\":\"The total amount of time that Firehose spends on retries. This duration starts after the initial attempt to send data to Splunk fails. It doesn't include the periods during which Firehose waits for acknowledgment from Splunk after each attempt.
\"\ }\ },\ - \"documentation\":\"Configures retry behavior in case Kinesis Data Firehose is unable to deliver documents to Splunk, or if it doesn't receive an acknowledgment from Splunk.
\"\ + \"documentation\":\"Configures retry behavior in case Firehose is unable to deliver documents to Splunk, or if it doesn't receive an acknowledgment from Splunk.
\"\ },\ \"SplunkS3BackupMode\":{\ \"type\":\"string\",\ @@ -3457,6 +3831,10 @@ - (NSString *)definitionString { \"AmazonOpenSearchServerlessDestinationUpdate\":{\ \"shape\":\"AmazonOpenSearchServerlessDestinationUpdate\",\ \"documentation\":\"Describes an update for a destination in the Serverless offering for Amazon OpenSearch Service.
\"\ + },\ + \"SnowflakeDestinationUpdate\":{\ + \"shape\":\"SnowflakeDestinationUpdate\",\ + \"documentation\":\"Update to the Snowflake destination condiguration settings
\"\ }\ }\ },\ @@ -3482,15 +3860,15 @@ - (NSString *)definitionString { \"members\":{\ \"SubnetIds\":{\ \"shape\":\"SubnetIdList\",\ - \"documentation\":\"The IDs of the subnets that you want Kinesis Data Firehose to use to create ENIs in the VPC of the Amazon ES destination. Make sure that the routing tables and inbound and outbound rules allow traffic to flow from the subnets whose IDs are specified here to the subnets that have the destination Amazon ES endpoints. Kinesis Data Firehose creates at least one ENI in each of the subnets that are specified here. Do not delete or modify these ENIs.
The number of ENIs that Kinesis Data Firehose creates in the subnets specified here scales up and down automatically based on throughput. To enable Kinesis Data Firehose to scale up the number of ENIs to match throughput, ensure that you have sufficient quota. To help you calculate the quota you need, assume that Kinesis Data Firehose can create up to three ENIs for this delivery stream for each of the subnets specified here. For more information about ENI quota, see Network Interfaces in the Amazon VPC Quotas topic.
\"\ + \"documentation\":\"The IDs of the subnets that you want Firehose to use to create ENIs in the VPC of the Amazon ES destination. Make sure that the routing tables and inbound and outbound rules allow traffic to flow from the subnets whose IDs are specified here to the subnets that have the destination Amazon ES endpoints. Firehose creates at least one ENI in each of the subnets that are specified here. Do not delete or modify these ENIs.
The number of ENIs that Firehose creates in the subnets specified here scales up and down automatically based on throughput. To enable Firehose to scale up the number of ENIs to match throughput, ensure that you have sufficient quota. To help you calculate the quota you need, assume that Firehose can create up to three ENIs for this delivery stream for each of the subnets specified here. For more information about ENI quota, see Network Interfaces in the Amazon VPC Quotas topic.
\"\ },\ \"RoleARN\":{\ \"shape\":\"RoleARN\",\ - \"documentation\":\"The ARN of the IAM role that you want the delivery stream to use to create endpoints in the destination VPC. You can use your existing Kinesis Data Firehose delivery role or you can specify a new role. In either case, make sure that the role trusts the Kinesis Data Firehose service principal and that it grants the following permissions:
ec2:DescribeVpcs
ec2:DescribeVpcAttribute
ec2:DescribeSubnets
ec2:DescribeSecurityGroups
ec2:DescribeNetworkInterfaces
ec2:CreateNetworkInterface
ec2:CreateNetworkInterfacePermission
ec2:DeleteNetworkInterface
If you revoke these permissions after you create the delivery stream, Kinesis Data Firehose can't scale out by creating more ENIs when necessary. You might therefore see a degradation in performance.
\"\ + \"documentation\":\"The ARN of the IAM role that you want the delivery stream to use to create endpoints in the destination VPC. You can use your existing Firehose delivery role or you can specify a new role. In either case, make sure that the role trusts the Firehose service principal and that it grants the following permissions:
ec2:DescribeVpcs
ec2:DescribeVpcAttribute
ec2:DescribeSubnets
ec2:DescribeSecurityGroups
ec2:DescribeNetworkInterfaces
ec2:CreateNetworkInterface
ec2:CreateNetworkInterfacePermission
ec2:DeleteNetworkInterface
When you specify subnets for delivering data to the destination in a private VPC, make sure you have enough number of free IP addresses in chosen subnets. If there is no available free IP address in a specified subnet, Firehose cannot create or add ENIs for the data delivery in the private VPC, and the delivery will be degraded or fail.
The IDs of the security groups that you want Kinesis Data Firehose to use when it creates ENIs in the VPC of the Amazon ES destination. You can use the same security group that the Amazon ES domain uses or different ones. If you specify different security groups here, ensure that they allow outbound HTTPS traffic to the Amazon ES domain's security group. Also ensure that the Amazon ES domain's security group allows HTTPS traffic from the security groups specified here. If you use the same security group for both your delivery stream and the Amazon ES domain, make sure the security group inbound rule allows HTTPS traffic. For more information about security group rules, see Security group rules in the Amazon VPC documentation.
\"\ + \"documentation\":\"The IDs of the security groups that you want Firehose to use when it creates ENIs in the VPC of the Amazon ES destination. You can use the same security group that the Amazon ES domain uses or different ones. If you specify different security groups here, ensure that they allow outbound HTTPS traffic to the Amazon ES domain's security group. Also ensure that the Amazon ES domain's security group allows HTTPS traffic from the security groups specified here. If you use the same security group for both your delivery stream and the Amazon ES domain, make sure the security group inbound rule allows HTTPS traffic. For more information about security group rules, see Security group rules in the Amazon VPC documentation.
\"\ }\ },\ \"documentation\":\"The details of the VPC of the Amazon OpenSearch or Amazon OpenSearch Serverless destination.
\"\ @@ -3506,15 +3884,15 @@ - (NSString *)definitionString { \"members\":{\ \"SubnetIds\":{\ \"shape\":\"SubnetIdList\",\ - \"documentation\":\"The IDs of the subnets that Kinesis Data Firehose uses to create ENIs in the VPC of the Amazon ES destination. Make sure that the routing tables and inbound and outbound rules allow traffic to flow from the subnets whose IDs are specified here to the subnets that have the destination Amazon ES endpoints. Kinesis Data Firehose creates at least one ENI in each of the subnets that are specified here. Do not delete or modify these ENIs.
The number of ENIs that Kinesis Data Firehose creates in the subnets specified here scales up and down automatically based on throughput. To enable Kinesis Data Firehose to scale up the number of ENIs to match throughput, ensure that you have sufficient quota. To help you calculate the quota you need, assume that Kinesis Data Firehose can create up to three ENIs for this delivery stream for each of the subnets specified here. For more information about ENI quota, see Network Interfaces in the Amazon VPC Quotas topic.
\"\ + \"documentation\":\"The IDs of the subnets that Firehose uses to create ENIs in the VPC of the Amazon ES destination. Make sure that the routing tables and inbound and outbound rules allow traffic to flow from the subnets whose IDs are specified here to the subnets that have the destination Amazon ES endpoints. Firehose creates at least one ENI in each of the subnets that are specified here. Do not delete or modify these ENIs.
The number of ENIs that Firehose creates in the subnets specified here scales up and down automatically based on throughput. To enable Firehose to scale up the number of ENIs to match throughput, ensure that you have sufficient quota. To help you calculate the quota you need, assume that Firehose can create up to three ENIs for this delivery stream for each of the subnets specified here. For more information about ENI quota, see Network Interfaces in the Amazon VPC Quotas topic.
\"\ },\ \"RoleARN\":{\ \"shape\":\"RoleARN\",\ - \"documentation\":\"The ARN of the IAM role that the delivery stream uses to create endpoints in the destination VPC. You can use your existing Kinesis Data Firehose delivery role or you can specify a new role. In either case, make sure that the role trusts the Kinesis Data Firehose service principal and that it grants the following permissions:
ec2:DescribeVpcs
ec2:DescribeVpcAttribute
ec2:DescribeSubnets
ec2:DescribeSecurityGroups
ec2:DescribeNetworkInterfaces
ec2:CreateNetworkInterface
ec2:CreateNetworkInterfacePermission
ec2:DeleteNetworkInterface
If you revoke these permissions after you create the delivery stream, Kinesis Data Firehose can't scale out by creating more ENIs when necessary. You might therefore see a degradation in performance.
\"\ + \"documentation\":\"The ARN of the IAM role that the delivery stream uses to create endpoints in the destination VPC. You can use your existing Firehose delivery role or you can specify a new role. In either case, make sure that the role trusts the Firehose service principal and that it grants the following permissions:
ec2:DescribeVpcs
ec2:DescribeVpcAttribute
ec2:DescribeSubnets
ec2:DescribeSecurityGroups
ec2:DescribeNetworkInterfaces
ec2:CreateNetworkInterface
ec2:CreateNetworkInterfacePermission
ec2:DeleteNetworkInterface
If you revoke these permissions after you create the delivery stream, Firehose can't scale out by creating more ENIs when necessary. You might therefore see a degradation in performance.
\"\ },\ \"SecurityGroupIds\":{\ \"shape\":\"SecurityGroupIdList\",\ - \"documentation\":\"The IDs of the security groups that Kinesis Data Firehose uses when it creates ENIs in the VPC of the Amazon ES destination. You can use the same security group that the Amazon ES domain uses or different ones. If you specify different security groups, ensure that they allow outbound HTTPS traffic to the Amazon ES domain's security group. Also ensure that the Amazon ES domain's security group allows HTTPS traffic from the security groups specified here. If you use the same security group for both your delivery stream and the Amazon ES domain, make sure the security group inbound rule allows HTTPS traffic. For more information about security group rules, see Security group rules in the Amazon VPC documentation.
\"\ + \"documentation\":\"The IDs of the security groups that Firehose uses when it creates ENIs in the VPC of the Amazon ES destination. You can use the same security group that the Amazon ES domain uses or different ones. If you specify different security groups, ensure that they allow outbound HTTPS traffic to the Amazon ES domain's security group. Also ensure that the Amazon ES domain's security group allows HTTPS traffic from the security groups specified here. If you use the same security group for both your delivery stream and the Amazon ES domain, make sure the security group inbound rule allows HTTPS traffic. For more information about security group rules, see Security group rules in the Amazon VPC documentation.
\"\ },\ \"VpcId\":{\ \"shape\":\"NonEmptyStringWithoutWhitespace\",\ @@ -3524,7 +3902,7 @@ - (NSString *)definitionString { \"documentation\":\"The details of the VPC of the Amazon ES destination.
\"\ }\ },\ - \"documentation\":\"Amazon Kinesis Data Firehose is a fully managed service that delivers real-time streaming data to destinations such as Amazon Simple Storage Service (Amazon S3), Amazon OpenSearch Service, Amazon Redshift, Splunk, and various other supportd destinations.
\"\ + \"documentation\":\"Amazon Data Firehose is a fully managed service that delivers real-time streaming data to destinations such as Amazon Simple Storage Service (Amazon S3), Amazon OpenSearch Service, Amazon Redshift, Splunk, and various other supportd destinations.
\"\ }\ "; } diff --git a/AWSKinesis/AWSFirehoseService.h b/AWSKinesis/AWSFirehoseService.h index 328dbc5a4cc..b23be4c85bc 100644 --- a/AWSKinesis/AWSFirehoseService.h +++ b/AWSKinesis/AWSFirehoseService.h @@ -1,5 +1,5 @@ // -// Copyright 2010-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. +// Copyright 2010-2024 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"). // You may not use this file except in compliance with the License. @@ -24,7 +24,7 @@ NS_ASSUME_NONNULL_BEGIN FOUNDATION_EXPORT NSString *const AWSFirehoseSDKVersion; /** -Amazon Kinesis Data Firehose is a fully managed service that delivers real-time streaming data to destinations such as Amazon Simple Storage Service (Amazon S3), Amazon OpenSearch Service, Amazon Redshift, Splunk, and various other supportd destinations.
+Amazon Data Firehose is a fully managed service that delivers real-time streaming data to destinations such as Amazon Simple Storage Service (Amazon S3), Amazon OpenSearch Service, Amazon Redshift, Splunk, and various other supportd destinations.
*/ @interface AWSFirehose : AWSService @@ -175,7 +175,7 @@ FOUNDATION_EXPORT NSString *const AWSFirehoseSDKVersion; + (void)removeFirehoseForKey:(NSString *)key; /** -Creates a Kinesis Data Firehose delivery stream.
By default, you can create up to 50 delivery streams per Amazon Web Services Region.
This is an asynchronous operation that immediately returns. The initial status of the delivery stream is CREATING
. After the delivery stream is created, its status is ACTIVE
and it now accepts data. If the delivery stream creation fails, the status transitions to CREATING_FAILED
. Attempts to send data to a delivery stream that is not in the ACTIVE
state cause an exception. To check the state of a delivery stream, use DescribeDeliveryStream.
If the status of a delivery stream is CREATING_FAILED
, this status doesn't change, and you can't invoke CreateDeliveryStream
again on it. However, you can invoke the DeleteDeliveryStream operation to delete it.
A Kinesis Data Firehose delivery stream can be configured to receive records directly from providers using PutRecord or PutRecordBatch, or it can be configured to use an existing Kinesis stream as its source. To specify a Kinesis data stream as input, set the DeliveryStreamType
parameter to KinesisStreamAsSource
, and provide the Kinesis stream Amazon Resource Name (ARN) and role ARN in the KinesisStreamSourceConfiguration
parameter.
To create a delivery stream with server-side encryption (SSE) enabled, include DeliveryStreamEncryptionConfigurationInput in your request. This is optional. You can also invoke StartDeliveryStreamEncryption to turn on SSE for an existing delivery stream that doesn't have SSE enabled.
A delivery stream is configured with a single destination, such as Amazon Simple Storage Service (Amazon S3), Amazon Redshift, Amazon OpenSearch Service, Amazon OpenSearch Serverless, Splunk, and any custom HTTP endpoint or HTTP endpoints owned by or supported by third-party service providers, including Datadog, Dynatrace, LogicMonitor, MongoDB, New Relic, and Sumo Logic. You must specify only one of the following destination configuration parameters: ExtendedS3DestinationConfiguration
, S3DestinationConfiguration
, ElasticsearchDestinationConfiguration
, RedshiftDestinationConfiguration
, or SplunkDestinationConfiguration
.
When you specify S3DestinationConfiguration
, you can also provide the following optional values: BufferingHints, EncryptionConfiguration
, and CompressionFormat
. By default, if no BufferingHints
value is provided, Kinesis Data Firehose buffers data up to 5 MB or for 5 minutes, whichever condition is satisfied first. BufferingHints
is a hint, so there are some cases where the service cannot adhere to these conditions strictly. For example, record boundaries might be such that the size is a little over or under the configured buffering size. By default, no encryption is performed. We strongly recommend that you enable encryption to ensure secure data storage in Amazon S3.
A few notes about Amazon Redshift as a destination:
An Amazon Redshift destination requires an S3 bucket as intermediate location. Kinesis Data Firehose first delivers data to Amazon S3 and then uses COPY
syntax to load data into an Amazon Redshift table. This is specified in the RedshiftDestinationConfiguration.S3Configuration
parameter.
The compression formats SNAPPY
or ZIP
cannot be specified in RedshiftDestinationConfiguration.S3Configuration
because the Amazon Redshift COPY
operation that reads from the S3 bucket doesn't support these compression formats.
We strongly recommend that you use the user name and password you provide exclusively with Kinesis Data Firehose, and that the permissions for the account are restricted for Amazon Redshift INSERT
permissions.
Kinesis Data Firehose assumes the IAM role that is configured as part of the destination. The role should allow the Kinesis Data Firehose principal to assume the role, and the role should have permissions that allow the service to deliver the data. For more information, see Grant Kinesis Data Firehose Access to an Amazon S3 Destination in the Amazon Kinesis Data Firehose Developer Guide.
+Creates a Firehose delivery stream.
By default, you can create up to 50 delivery streams per Amazon Web Services Region.
This is an asynchronous operation that immediately returns. The initial status of the delivery stream is CREATING
. After the delivery stream is created, its status is ACTIVE
and it now accepts data. If the delivery stream creation fails, the status transitions to CREATING_FAILED
. Attempts to send data to a delivery stream that is not in the ACTIVE
state cause an exception. To check the state of a delivery stream, use DescribeDeliveryStream.
If the status of a delivery stream is CREATING_FAILED
, this status doesn't change, and you can't invoke CreateDeliveryStream
again on it. However, you can invoke the DeleteDeliveryStream operation to delete it.
A Firehose delivery stream can be configured to receive records directly from providers using PutRecord or PutRecordBatch, or it can be configured to use an existing Kinesis stream as its source. To specify a Kinesis data stream as input, set the DeliveryStreamType
parameter to KinesisStreamAsSource
, and provide the Kinesis stream Amazon Resource Name (ARN) and role ARN in the KinesisStreamSourceConfiguration
parameter.
To create a delivery stream with server-side encryption (SSE) enabled, include DeliveryStreamEncryptionConfigurationInput in your request. This is optional. You can also invoke StartDeliveryStreamEncryption to turn on SSE for an existing delivery stream that doesn't have SSE enabled.
A delivery stream is configured with a single destination, such as Amazon Simple Storage Service (Amazon S3), Amazon Redshift, Amazon OpenSearch Service, Amazon OpenSearch Serverless, Splunk, and any custom HTTP endpoint or HTTP endpoints owned by or supported by third-party service providers, including Datadog, Dynatrace, LogicMonitor, MongoDB, New Relic, and Sumo Logic. You must specify only one of the following destination configuration parameters: ExtendedS3DestinationConfiguration
, S3DestinationConfiguration
, ElasticsearchDestinationConfiguration
, RedshiftDestinationConfiguration
, or SplunkDestinationConfiguration
.
When you specify S3DestinationConfiguration
, you can also provide the following optional values: BufferingHints, EncryptionConfiguration
, and CompressionFormat
. By default, if no BufferingHints
value is provided, Firehose buffers data up to 5 MB or for 5 minutes, whichever condition is satisfied first. BufferingHints
is a hint, so there are some cases where the service cannot adhere to these conditions strictly. For example, record boundaries might be such that the size is a little over or under the configured buffering size. By default, no encryption is performed. We strongly recommend that you enable encryption to ensure secure data storage in Amazon S3.
A few notes about Amazon Redshift as a destination:
An Amazon Redshift destination requires an S3 bucket as intermediate location. Firehose first delivers data to Amazon S3 and then uses COPY
syntax to load data into an Amazon Redshift table. This is specified in the RedshiftDestinationConfiguration.S3Configuration
parameter.
The compression formats SNAPPY
or ZIP
cannot be specified in RedshiftDestinationConfiguration.S3Configuration
because the Amazon Redshift COPY
operation that reads from the S3 bucket doesn't support these compression formats.
We strongly recommend that you use the user name and password you provide exclusively with Firehose, and that the permissions for the account are restricted for Amazon Redshift INSERT
permissions.
Firehose assumes the IAM role that is configured as part of the destination. The role should allow the Firehose principal to assume the role, and the role should have permissions that allow the service to deliver the data. For more information, see Grant Firehose Access to an Amazon S3 Destination in the Amazon Firehose Developer Guide.
@param request A container for the necessary parameters to execute the CreateDeliveryStream service method. @@ -187,7 +187,7 @@ FOUNDATION_EXPORT NSString *const AWSFirehoseSDKVersion; - (AWSTaskCreates a Kinesis Data Firehose delivery stream.
By default, you can create up to 50 delivery streams per Amazon Web Services Region.
This is an asynchronous operation that immediately returns. The initial status of the delivery stream is CREATING
. After the delivery stream is created, its status is ACTIVE
and it now accepts data. If the delivery stream creation fails, the status transitions to CREATING_FAILED
. Attempts to send data to a delivery stream that is not in the ACTIVE
state cause an exception. To check the state of a delivery stream, use DescribeDeliveryStream.
If the status of a delivery stream is CREATING_FAILED
, this status doesn't change, and you can't invoke CreateDeliveryStream
again on it. However, you can invoke the DeleteDeliveryStream operation to delete it.
A Kinesis Data Firehose delivery stream can be configured to receive records directly from providers using PutRecord or PutRecordBatch, or it can be configured to use an existing Kinesis stream as its source. To specify a Kinesis data stream as input, set the DeliveryStreamType
parameter to KinesisStreamAsSource
, and provide the Kinesis stream Amazon Resource Name (ARN) and role ARN in the KinesisStreamSourceConfiguration
parameter.
To create a delivery stream with server-side encryption (SSE) enabled, include DeliveryStreamEncryptionConfigurationInput in your request. This is optional. You can also invoke StartDeliveryStreamEncryption to turn on SSE for an existing delivery stream that doesn't have SSE enabled.
A delivery stream is configured with a single destination, such as Amazon Simple Storage Service (Amazon S3), Amazon Redshift, Amazon OpenSearch Service, Amazon OpenSearch Serverless, Splunk, and any custom HTTP endpoint or HTTP endpoints owned by or supported by third-party service providers, including Datadog, Dynatrace, LogicMonitor, MongoDB, New Relic, and Sumo Logic. You must specify only one of the following destination configuration parameters: ExtendedS3DestinationConfiguration
, S3DestinationConfiguration
, ElasticsearchDestinationConfiguration
, RedshiftDestinationConfiguration
, or SplunkDestinationConfiguration
.
When you specify S3DestinationConfiguration
, you can also provide the following optional values: BufferingHints, EncryptionConfiguration
, and CompressionFormat
. By default, if no BufferingHints
value is provided, Kinesis Data Firehose buffers data up to 5 MB or for 5 minutes, whichever condition is satisfied first. BufferingHints
is a hint, so there are some cases where the service cannot adhere to these conditions strictly. For example, record boundaries might be such that the size is a little over or under the configured buffering size. By default, no encryption is performed. We strongly recommend that you enable encryption to ensure secure data storage in Amazon S3.
A few notes about Amazon Redshift as a destination:
An Amazon Redshift destination requires an S3 bucket as intermediate location. Kinesis Data Firehose first delivers data to Amazon S3 and then uses COPY
syntax to load data into an Amazon Redshift table. This is specified in the RedshiftDestinationConfiguration.S3Configuration
parameter.
The compression formats SNAPPY
or ZIP
cannot be specified in RedshiftDestinationConfiguration.S3Configuration
because the Amazon Redshift COPY
operation that reads from the S3 bucket doesn't support these compression formats.
We strongly recommend that you use the user name and password you provide exclusively with Kinesis Data Firehose, and that the permissions for the account are restricted for Amazon Redshift INSERT
permissions.
Kinesis Data Firehose assumes the IAM role that is configured as part of the destination. The role should allow the Kinesis Data Firehose principal to assume the role, and the role should have permissions that allow the service to deliver the data. For more information, see Grant Kinesis Data Firehose Access to an Amazon S3 Destination in the Amazon Kinesis Data Firehose Developer Guide.
+Creates a Firehose delivery stream.
By default, you can create up to 50 delivery streams per Amazon Web Services Region.
This is an asynchronous operation that immediately returns. The initial status of the delivery stream is CREATING
. After the delivery stream is created, its status is ACTIVE
and it now accepts data. If the delivery stream creation fails, the status transitions to CREATING_FAILED
. Attempts to send data to a delivery stream that is not in the ACTIVE
state cause an exception. To check the state of a delivery stream, use DescribeDeliveryStream.
If the status of a delivery stream is CREATING_FAILED
, this status doesn't change, and you can't invoke CreateDeliveryStream
again on it. However, you can invoke the DeleteDeliveryStream operation to delete it.
A Firehose delivery stream can be configured to receive records directly from providers using PutRecord or PutRecordBatch, or it can be configured to use an existing Kinesis stream as its source. To specify a Kinesis data stream as input, set the DeliveryStreamType
parameter to KinesisStreamAsSource
, and provide the Kinesis stream Amazon Resource Name (ARN) and role ARN in the KinesisStreamSourceConfiguration
parameter.
To create a delivery stream with server-side encryption (SSE) enabled, include DeliveryStreamEncryptionConfigurationInput in your request. This is optional. You can also invoke StartDeliveryStreamEncryption to turn on SSE for an existing delivery stream that doesn't have SSE enabled.
A delivery stream is configured with a single destination, such as Amazon Simple Storage Service (Amazon S3), Amazon Redshift, Amazon OpenSearch Service, Amazon OpenSearch Serverless, Splunk, and any custom HTTP endpoint or HTTP endpoints owned by or supported by third-party service providers, including Datadog, Dynatrace, LogicMonitor, MongoDB, New Relic, and Sumo Logic. You must specify only one of the following destination configuration parameters: ExtendedS3DestinationConfiguration
, S3DestinationConfiguration
, ElasticsearchDestinationConfiguration
, RedshiftDestinationConfiguration
, or SplunkDestinationConfiguration
.
When you specify S3DestinationConfiguration
, you can also provide the following optional values: BufferingHints, EncryptionConfiguration
, and CompressionFormat
. By default, if no BufferingHints
value is provided, Firehose buffers data up to 5 MB or for 5 minutes, whichever condition is satisfied first. BufferingHints
is a hint, so there are some cases where the service cannot adhere to these conditions strictly. For example, record boundaries might be such that the size is a little over or under the configured buffering size. By default, no encryption is performed. We strongly recommend that you enable encryption to ensure secure data storage in Amazon S3.
A few notes about Amazon Redshift as a destination:
An Amazon Redshift destination requires an S3 bucket as intermediate location. Firehose first delivers data to Amazon S3 and then uses COPY
syntax to load data into an Amazon Redshift table. This is specified in the RedshiftDestinationConfiguration.S3Configuration
parameter.
The compression formats SNAPPY
or ZIP
cannot be specified in RedshiftDestinationConfiguration.S3Configuration
because the Amazon Redshift COPY
operation that reads from the S3 bucket doesn't support these compression formats.
We strongly recommend that you use the user name and password you provide exclusively with Firehose, and that the permissions for the account are restricted for Amazon Redshift INSERT
permissions.
Firehose assumes the IAM role that is configured as part of the destination. The role should allow the Firehose principal to assume the role, and the role should have permissions that allow the service to deliver the data. For more information, see Grant Firehose Access to an Amazon S3 Destination in the Amazon Firehose Developer Guide.
@param request A container for the necessary parameters to execute the CreateDeliveryStream service method. @param completionHandler The completion handler to call when the load request is complete. @@ -200,7 +200,7 @@ FOUNDATION_EXPORT NSString *const AWSFirehoseSDKVersion; - (void)createDeliveryStream:(AWSFirehoseCreateDeliveryStreamInput *)request completionHandler:(void (^ _Nullable)(AWSFirehoseCreateDeliveryStreamOutput * _Nullable response, NSError * _Nullable error))completionHandler; /** -Deletes a delivery stream and its data.
To check the state of a delivery stream, use DescribeDeliveryStream. You can delete a delivery stream only if it is in one of the following states: ACTIVE
, DELETING
, CREATING_FAILED
, or DELETING_FAILED
. You can't delete a delivery stream that is in the CREATING
state. While the deletion request is in process, the delivery stream is in the DELETING
state.
While the delivery stream is in the DELETING
state, the service might continue to accept records, but it doesn't make any guarantees with respect to delivering the data. Therefore, as a best practice, first stop any applications that are sending records before you delete a delivery stream.
Deletes a delivery stream and its data.
You can delete a delivery stream only if it is in one of the following states: ACTIVE
, DELETING
, CREATING_FAILED
, or DELETING_FAILED
. You can't delete a delivery stream that is in the CREATING
state. To check the state of a delivery stream, use DescribeDeliveryStream.
DeleteDeliveryStream is an asynchronous API. When an API request to DeleteDeliveryStream succeeds, the delivery stream is marked for deletion, and it goes into the DELETING
state. While the delivery stream is in the DELETING
state, the service might continue to accept records, but it doesn't make any guarantees with respect to delivering the data. Therefore, as a best practice, first stop any applications that are sending records before you delete a delivery stream.
Removal of a delivery stream that is in the DELETING
state is a low priority operation for the service. A stream may remain in the DELETING
state for several minutes. Therefore, as a best practice, applications should not wait for streams in the DELETING
state to be removed.
Deletes a delivery stream and its data.
To check the state of a delivery stream, use DescribeDeliveryStream. You can delete a delivery stream only if it is in one of the following states: ACTIVE
, DELETING
, CREATING_FAILED
, or DELETING_FAILED
. You can't delete a delivery stream that is in the CREATING
state. While the deletion request is in process, the delivery stream is in the DELETING
state.
While the delivery stream is in the DELETING
state, the service might continue to accept records, but it doesn't make any guarantees with respect to delivering the data. Therefore, as a best practice, first stop any applications that are sending records before you delete a delivery stream.
Deletes a delivery stream and its data.
You can delete a delivery stream only if it is in one of the following states: ACTIVE
, DELETING
, CREATING_FAILED
, or DELETING_FAILED
. You can't delete a delivery stream that is in the CREATING
state. To check the state of a delivery stream, use DescribeDeliveryStream.
DeleteDeliveryStream is an asynchronous API. When an API request to DeleteDeliveryStream succeeds, the delivery stream is marked for deletion, and it goes into the DELETING
state.While the delivery stream is in the DELETING
state, the service might continue to accept records, but it doesn't make any guarantees with respect to delivering the data. Therefore, as a best practice, first stop any applications that are sending records before you delete a delivery stream.
Removal of a delivery stream that is in the DELETING
state is a low priority operation for the service. A stream may remain in the DELETING
state for several minutes. Therefore, as a best practice, applications should not wait for streams in the DELETING
state to be removed.
Writes a single data record into an Amazon Kinesis Data Firehose delivery stream. To write multiple data records into a delivery stream, use PutRecordBatch. Applications using these operations are referred to as producers.
By default, each delivery stream can take in up to 2,000 transactions per second, 5,000 records per second, or 5 MB per second. If you use PutRecord and PutRecordBatch, the limits are an aggregate across these two operations for each delivery stream. For more information about limits and how to request an increase, see Amazon Kinesis Data Firehose Limits.
Kinesis Data Firehose accumulates and publishes a particular metric for a customer account in one minute intervals. It is possible that the bursts of incoming bytes/records ingested to a delivery stream last only for a few seconds. Due to this, the actual spikes in the traffic might not be fully visible in the customer's 1 minute CloudWatch metrics.
You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KiB in size, and any kind of data. For example, it can be a segment from a log file, geographic location data, website clickstream data, and so on.
Kinesis Data Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\n
) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination.
The PutRecord
operation returns a RecordId
, which is a unique string assigned to each record. Producer applications can use this ID for purposes such as auditability and investigation.
If the PutRecord
operation throws a ServiceUnavailableException
, the API is automatically reinvoked (retried) 3 times. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream.
Re-invoking the Put API operations (for example, PutRecord and PutRecordBatch) can result in data duplicates. For larger data assets, allow for a longer time out before retrying Put API operations.
Data records sent to Kinesis Data Firehose are stored for 24 hours from the time they are added to a delivery stream as it tries to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available.
Don't concatenate two or more base64 strings to form the data fields of your records. Instead, concatenate the raw data, then perform base64 encoding.
Writes a single data record into an Amazon Firehose delivery stream. To write multiple data records into a delivery stream, use PutRecordBatch. Applications using these operations are referred to as producers.
By default, each delivery stream can take in up to 2,000 transactions per second, 5,000 records per second, or 5 MB per second. If you use PutRecord and PutRecordBatch, the limits are an aggregate across these two operations for each delivery stream. For more information about limits and how to request an increase, see Amazon Firehose Limits.
Firehose accumulates and publishes a particular metric for a customer account in one minute intervals. It is possible that the bursts of incoming bytes/records ingested to a delivery stream last only for a few seconds. Due to this, the actual spikes in the traffic might not be fully visible in the customer's 1 minute CloudWatch metrics.
You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KiB in size, and any kind of data. For example, it can be a segment from a log file, geographic location data, website clickstream data, and so on.
Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\n
) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination.
The PutRecord
operation returns a RecordId
, which is a unique string assigned to each record. Producer applications can use this ID for purposes such as auditability and investigation.
If the PutRecord
operation throws a ServiceUnavailableException
, the API is automatically reinvoked (retried) 3 times. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream.
Re-invoking the Put API operations (for example, PutRecord and PutRecordBatch) can result in data duplicates. For larger data assets, allow for a longer time out before retrying Put API operations.
Data records sent to Firehose are stored for 24 hours from the time they are added to a delivery stream as it tries to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available.
Don't concatenate two or more base64 strings to form the data fields of your records. Instead, concatenate the raw data, then perform base64 encoding.
Writes multiple data records into a delivery stream in a single call, which can achieve higher throughput per producer than when writing single records. To write single data records into a delivery stream, use PutRecord. Applications using these operations are referred to as producers.
Kinesis Data Firehose accumulates and publishes a particular metric for a customer account in one-minute intervals. Because bursts of incoming bytes/records ingested to a delivery stream may last only a few seconds, the actual traffic spikes might not be fully visible in the customer's one-minute CloudWatch metrics.
For information about service quotas, see Amazon Kinesis Data Firehose Quota.
Each PutRecordBatch request supports up to 500 records. Each record in the request can be as large as 1,000 KB (before base64 encoding), up to a limit of 4 MB for the entire request. These limits cannot be changed.
You must specify the name of the delivery stream and the data record when using PutRecordBatch. Each data record consists of a data blob that can be up to 1,000 KB in size, and any kind of data. For example, it could be a segment from a log file, geographic location data, website clickstream data, and so on.
Kinesis Data Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\n
) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination.
The PutRecordBatch response includes a count of failed records, FailedPutCount
, and an array of responses, RequestResponses
. Even if the PutRecordBatch call succeeds, the value of FailedPutCount
may be greater than 0, indicating that there are records for which the operation didn't succeed. Each entry in the RequestResponses
array provides additional information about the processed record. It directly correlates with a record in the request array using the same ordering, from the top to the bottom. The response array always includes the same number of records as the request array. RequestResponses
includes both successfully and unsuccessfully processed records. Kinesis Data Firehose tries to process all records in each PutRecordBatch request. A single record failure does not stop the processing of subsequent records.
A successfully processed record includes a RecordId
value, which is unique for the record. An unsuccessfully processed record includes ErrorCode
and ErrorMessage
values. ErrorCode
reflects the type of error, and is one of the following values: ServiceUnavailableException
or InternalFailure
. ErrorMessage
provides more detailed information about the error.
If there is an internal server error or a timeout, the write might have completed or it might have failed. If FailedPutCount
is greater than 0, retry the request, resending only those records that might have failed processing. This minimizes the possible duplicate records and also reduces the total bytes sent (and corresponding charges). We recommend that you handle any duplicates at the destination.
If PutRecordBatch throws ServiceUnavailableException
, the API is automatically reinvoked (retried) 3 times. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream.
Re-invoking the Put API operations (for example, PutRecord and PutRecordBatch) can result in data duplicates. For larger data assets, allow for a longer timeout before retrying Put API operations.
Data records sent to Kinesis Data Firehose are stored for 24 hours from the time they are added to a delivery stream as it attempts to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available.
Don't concatenate two or more base64 strings to form the data fields of your records. Instead, concatenate the raw data, then perform base64 encoding.
Writes multiple data records into a delivery stream in a single call, which can achieve higher throughput per producer than when writing single records. To write single data records into a delivery stream, use PutRecord. Applications using these operations are referred to as producers.
Firehose accumulates and publishes a particular metric for a customer account in one-minute intervals. Because bursts of incoming bytes/records ingested to a delivery stream may last only a few seconds, the actual traffic spikes might not be fully visible in the customer's one-minute CloudWatch metrics.
For information about service quotas, see Amazon Firehose Quota.
Each PutRecordBatch request supports up to 500 records. Each record in the request can be as large as 1,000 KB (before base64 encoding), up to a limit of 4 MB for the entire request. These limits cannot be changed.
You must specify the name of the delivery stream and the data record when using PutRecordBatch. Each data record consists of a data blob that can be up to 1,000 KB in size, and any kind of data. For example, it could be a segment from a log file, geographic location data, website clickstream data, and so on.
Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\n
) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination.
The PutRecordBatch response includes a count of failed records, FailedPutCount
, and an array of responses, RequestResponses
. Even if the PutRecordBatch call succeeds, the value of FailedPutCount
may be greater than 0, indicating that there are records for which the operation didn't succeed. Each entry in the RequestResponses
array provides additional information about the processed record. It directly correlates with a record in the request array using the same ordering, from the top to the bottom. The response array always includes the same number of records as the request array. RequestResponses
includes both successfully and unsuccessfully processed records. Firehose tries to process all records in each PutRecordBatch request. A single record failure does not stop the processing of subsequent records.
A successfully processed record includes a RecordId
value, which is unique for the record. An unsuccessfully processed record includes ErrorCode
and ErrorMessage
values. ErrorCode
reflects the type of error, and is one of the following values: ServiceUnavailableException
or InternalFailure
. ErrorMessage
provides more detailed information about the error.
If there is an internal server error or a timeout, the write might have completed or it might have failed. If FailedPutCount
is greater than 0, retry the request, resending only those records that might have failed processing. This minimizes the possible duplicate records and also reduces the total bytes sent (and corresponding charges). We recommend that you handle any duplicates at the destination.
If PutRecordBatch throws ServiceUnavailableException
, the API is automatically reinvoked (retried) 3 times. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream.
Re-invoking the Put API operations (for example, PutRecord and PutRecordBatch) can result in data duplicates. For larger data assets, allow for a longer timeout before retrying Put API operations.
Data records sent to Firehose are stored for 24 hours from the time they are added to a delivery stream as it attempts to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available.
Don't concatenate two or more base64 strings to form the data fields of your records. Instead, concatenate the raw data, then perform base64 encoding.
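A sketch of the selective-retry pattern described above, again assuming the generated AWSFirehose client; the stream name is a placeholder and the backoff strategy is left to the caller.

#import <AWSKinesis/AWSKinesis.h>

static void putBatchWithSelectiveRetry(NSArray<AWSFirehoseRecord *> *records) {
    AWSFirehosePutRecordBatchInput *input = [AWSFirehosePutRecordBatchInput new];
    input.deliveryStreamName = @"my-delivery-stream"; // placeholder
    input.records = records;

    [[[AWSFirehose defaultFirehose] putRecordBatch:input]
     continueWithSuccessBlock:^id _Nullable(AWSTask<AWSFirehosePutRecordBatchOutput *> * _Nonnull task) {
        if (task.result.failedPutCount.integerValue > 0) {
            // RequestResponses is ordered like the request array, so the
            // index identifies which record each entry describes.
            NSMutableArray<AWSFirehoseRecord *> *toRetry = [NSMutableArray array];
            [task.result.requestResponses enumerateObjectsUsingBlock:
             ^(AWSFirehosePutRecordBatchResponseEntry *entry, NSUInteger idx, BOOL *stop) {
                if (entry.errorCode != nil) { // nil for successfully processed records
                    [toRetry addObject:records[idx]];
                }
            }];
            // Resend only `toRetry` after a backoff. Duplicates remain
            // possible, so deduplicate at the destination.
        }
        return nil;
    }];
}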
Enables server-side encryption (SSE) for the delivery stream.
This operation is asynchronous. It returns immediately. When you invoke it, Kinesis Data Firehose first sets the encryption status of the stream to ENABLING
, and then to ENABLED
. The encryption status of a delivery stream is the Status
property in DeliveryStreamEncryptionConfiguration. If the operation fails, the encryption status changes to ENABLING_FAILED
. You can continue to read and write data to your delivery stream while the encryption status is ENABLING
, but the data is not encrypted. It can take up to 5 seconds after the encryption status changes to ENABLED
before all records written to the delivery stream are encrypted. To find out whether a record or a batch of records was encrypted, check the response elements PutRecordOutput$Encrypted and PutRecordBatchOutput$Encrypted, respectively.
To check the encryption status of a delivery stream, use DescribeDeliveryStream.
Even if encryption is currently enabled for a delivery stream, you can still invoke this operation on it to change the ARN of the CMK or both its type and ARN. If you invoke this method to change the CMK, and the old CMK is of type CUSTOMER_MANAGED_CMK
, Kinesis Data Firehose schedules the grant it had on the old CMK for retirement. If the new CMK is of type CUSTOMER_MANAGED_CMK
, Kinesis Data Firehose creates a grant that enables it to use the new CMK to encrypt and decrypt data and to manage the grant.
For the KMS grant creation to be successful, Kinesis Data Firehose APIs StartDeliveryStreamEncryption
and CreateDeliveryStream
should not be called with session credentials that are more than 6 hours old.
If a delivery stream already has encryption enabled and then you invoke this operation to change the ARN of the CMK or both its type and ARN and you get ENABLING_FAILED
, this only means that the attempt to change the CMK failed. In this case, encryption remains enabled with the old CMK.
If the encryption status of your delivery stream is ENABLING_FAILED
, you can invoke this operation again with a valid CMK. The CMK must be enabled and the key policy mustn't explicitly deny the permission for Kinesis Data Firehose to invoke KMS encrypt and decrypt operations.
You can enable SSE for a delivery stream only if it uses DirectPut
as its source.
The StartDeliveryStreamEncryption
and StopDeliveryStreamEncryption
operations have a combined limit of 25 calls per delivery stream per 24 hours. For example, you reach the limit if you call StartDeliveryStreamEncryption
13 times and StopDeliveryStreamEncryption
12 times for the same delivery stream in a 24-hour period.
Enables server-side encryption (SSE) for the delivery stream.
This operation is asynchronous. It returns immediately. When you invoke it, Firehose first sets the encryption status of the stream to ENABLING
, and then to ENABLED
. The encryption status of a delivery stream is the Status
property in DeliveryStreamEncryptionConfiguration. If the operation fails, the encryption status changes to ENABLING_FAILED
. You can continue to read and write data to your delivery stream while the encryption status is ENABLING
, but the data is not encrypted. It can take up to 5 seconds after the encryption status changes to ENABLED
before all records written to the delivery stream are encrypted. To find out whether a record or a batch of records was encrypted, check the response elements PutRecordOutput$Encrypted and PutRecordBatchOutput$Encrypted, respectively.
To check the encryption status of a delivery stream, use DescribeDeliveryStream.
Even if encryption is currently enabled for a delivery stream, you can still invoke this operation on it to change the ARN of the CMK or both its type and ARN. If you invoke this method to change the CMK, and the old CMK is of type CUSTOMER_MANAGED_CMK
, Firehose schedules the grant it had on the old CMK for retirement. If the new CMK is of type CUSTOMER_MANAGED_CMK
, Firehose creates a grant that enables it to use the new CMK to encrypt and decrypt data and to manage the grant.
For the KMS grant creation to be successful, Firehose APIs StartDeliveryStreamEncryption
and CreateDeliveryStream
should not be called with session credentials that are more than 6 hours old.
If a delivery stream already has encryption enabled and then you invoke this operation to change the ARN of the CMK or both its type and ARN and you get ENABLING_FAILED
, this only means that the attempt to change the CMK failed. In this case, encryption remains enabled with the old CMK.
If the encryption status of your delivery stream is ENABLING_FAILED
, you can invoke this operation again with a valid CMK. The CMK must be enabled and the key policy mustn't explicitly deny the permission for Firehose to invoke KMS encrypt and decrypt operations.
You can enable SSE for a delivery stream only if it uses DirectPut
as its source.
The StartDeliveryStreamEncryption
and StopDeliveryStreamEncryption
operations have a combined limit of 25 calls per delivery stream per 24 hours. For example, you reach the limit if you call StartDeliveryStreamEncryption
13 times and StopDeliveryStreamEncryption
12 times for the same delivery stream in a 24-hour period.
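As an illustration of the asynchronous flow, a hedged sketch that starts SSE with a customer managed CMK and then checks the status through DescribeDeliveryStream; the key ARN and stream name are placeholders, and the generated Firehose model names are assumed from this SDK.

#import <AWSKinesis/AWSKinesis.h>

static void enableStreamEncryption(void) {
    AWSFirehoseDeliveryStreamEncryptionConfigurationInput *keyConfig =
        [AWSFirehoseDeliveryStreamEncryptionConfigurationInput new];
    keyConfig.keyType = AWSFirehoseKeyTypeCustomerManagedCmk;
    keyConfig.keyARN = @"arn:aws:kms:us-east-1:111122223333:key/EXAMPLE"; // placeholder

    AWSFirehoseStartDeliveryStreamEncryptionInput *start =
        [AWSFirehoseStartDeliveryStreamEncryptionInput new];
    start.deliveryStreamName = @"my-delivery-stream"; // placeholder
    start.deliveryStreamEncryptionConfigurationInput = keyConfig;

    [[[AWSFirehose defaultFirehose] startDeliveryStreamEncryption:start]
     continueWithSuccessBlock:^id _Nullable(AWSTask * _Nonnull task) {
        // The call returns immediately; poll DescribeDeliveryStream until the
        // encryption status reaches ENABLED (or ENABLING_FAILED).
        AWSFirehoseDescribeDeliveryStreamInput *describe =
            [AWSFirehoseDescribeDeliveryStreamInput new];
        describe.deliveryStreamName = start.deliveryStreamName;
        return [[AWSFirehose defaultFirehose] describeDeliveryStream:describe];
    }];
}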
Disables server-side encryption (SSE) for the delivery stream.
This operation is asynchronous. It returns immediately. When you invoke it, Kinesis Data Firehose first sets the encryption status of the stream to DISABLING
, and then to DISABLED
. You can continue to read and write data to your stream while its status is DISABLING
. It can take up to 5 seconds after the encryption status changes to DISABLED
before all records written to the delivery stream are no longer subject to encryption. To find out whether a record or a batch of records was encrypted, check the response elements PutRecordOutput$Encrypted and PutRecordBatchOutput$Encrypted, respectively.
To check the encryption state of a delivery stream, use DescribeDeliveryStream.
If SSE is enabled using a customer managed CMK and then you invoke StopDeliveryStreamEncryption
, Kinesis Data Firehose schedules the related KMS grant for retirement and then retires it once it has finished delivering records to the destination.
The StartDeliveryStreamEncryption
and StopDeliveryStreamEncryption
operations have a combined limit of 25 calls per delivery stream per 24 hours. For example, you reach the limit if you call StartDeliveryStreamEncryption
13 times and StopDeliveryStreamEncryption
12 times for the same delivery stream in a 24-hour period.
Disables server-side encryption (SSE) for the delivery stream.
This operation is asynchronous. It returns immediately. When you invoke it, Firehose first sets the encryption status of the stream to DISABLING
, and then to DISABLED
. You can continue to read and write data to your stream while its status is DISABLING
. It can take up to 5 seconds after the encryption status changes to DISABLED
before all records written to the delivery stream are no longer subject to encryption. To find out whether a record or a batch of records was encrypted, check the response elements PutRecordOutput$Encrypted and PutRecordBatchOutput$Encrypted, respectively.
To check the encryption state of a delivery stream, use DescribeDeliveryStream.
If SSE is enabled using a customer managed CMK and then you invoke StopDeliveryStreamEncryption
, Firehose schedules the related KMS grant for retirement and then retires it once it has finished delivering records to the destination.
The StartDeliveryStreamEncryption
and StopDeliveryStreamEncryption
operations have a combined limit of 25 calls per delivery stream per 24 hours. For example, you reach the limit if you call StartDeliveryStreamEncryption
13 times and StopDeliveryStreamEncryption
12 times for the same delivery stream in a 24-hour period.
Updates the specified destination of the specified delivery stream.
Use this operation to change the destination type (for example, to replace the Amazon S3 destination with Amazon Redshift) or change the parameters associated with a destination (for example, to change the bucket name of the Amazon S3 destination). The update might not occur immediately. The target delivery stream remains active while the configurations are updated, so data writes to the delivery stream can continue during this process. The updated configurations are usually effective within a few minutes.
Switching between Amazon OpenSearch Service and other services is not supported. For an Amazon OpenSearch Service destination, you can only update to another Amazon OpenSearch Service destination.
If the destination type is the same, Kinesis Data Firehose merges the configuration parameters specified with the destination configuration that already exists on the delivery stream. If any of the parameters are not specified in the call, the existing values are retained. For example, in the Amazon S3 destination, if EncryptionConfiguration is not specified, then the existing EncryptionConfiguration
is maintained on the destination.
If the destination type is not the same, for example, changing the destination from Amazon S3 to Amazon Redshift, Kinesis Data Firehose does not merge any parameters. In this case, all parameters must be specified.
Kinesis Data Firehose uses CurrentDeliveryStreamVersionId
to avoid race conditions and conflicting merges. This is a required field, and the service updates the configuration only if the existing configuration has a version ID that matches. After the update is applied successfully, the version ID is updated, and can be retrieved using DescribeDeliveryStream. Use the new version ID to set CurrentDeliveryStreamVersionId
in the next call.
Updates the specified destination of the specified delivery stream.
Use this operation to change the destination type (for example, to replace the Amazon S3 destination with Amazon Redshift) or change the parameters associated with a destination (for example, to change the bucket name of the Amazon S3 destination). The update might not occur immediately. The target delivery stream remains active while the configurations are updated, so data writes to the delivery stream can continue during this process. The updated configurations are usually effective within a few minutes.
Switching between Amazon OpenSearch Service and other services is not supported. For an Amazon OpenSearch Service destination, you can only update to another Amazon OpenSearch Service destination.
If the destination type is the same, Firehose merges the configuration parameters specified with the destination configuration that already exists on the delivery stream. If any of the parameters are not specified in the call, the existing values are retained. For example, in the Amazon S3 destination, if EncryptionConfiguration is not specified, then the existing EncryptionConfiguration
is maintained on the destination.
If the destination type is not the same, for example, changing the destination from Amazon S3 to Amazon Redshift, Firehose does not merge any parameters. In this case, all parameters must be specified.
Firehose uses CurrentDeliveryStreamVersionId
to avoid race conditions and conflicting merges. This is a required field, and the service updates the configuration only if the existing configuration has a version ID that matches. After the update is applied successfully, the version ID is updated, and can be retrieved using DescribeDeliveryStream. Use the new version ID to set CurrentDeliveryStreamVersionId
in the next call.
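The version-ID handshake described above looks roughly like this in Objective-C, assuming the generated AWSFirehose client; the stream name and bucket ARN are placeholders.

#import <AWSKinesis/AWSKinesis.h>

static void updateS3Destination(void) {
    AWSFirehoseDescribeDeliveryStreamInput *describe =
        [AWSFirehoseDescribeDeliveryStreamInput new];
    describe.deliveryStreamName = @"my-delivery-stream"; // placeholder

    [[[AWSFirehose defaultFirehose] describeDeliveryStream:describe]
     continueWithSuccessBlock:^id _Nullable(AWSTask<AWSFirehoseDescribeDeliveryStreamOutput *> * _Nonnull task) {
        AWSFirehoseDeliveryStreamDescription *stream = task.result.deliveryStreamDescription;

        AWSFirehoseUpdateDestinationInput *update = [AWSFirehoseUpdateDestinationInput new];
        update.deliveryStreamName = describe.deliveryStreamName;
        update.currentDeliveryStreamVersionId = stream.versionId; // required handshake
        update.destinationId = stream.destinations.firstObject.destinationId;

        // Same destination type, so unspecified parameters keep their values.
        AWSFirehoseExtendedS3DestinationUpdate *s3Update =
            [AWSFirehoseExtendedS3DestinationUpdate new];
        s3Update.bucketARN = @"arn:aws:s3:::my-new-bucket"; // placeholder
        update.extendedS3DestinationUpdate = s3Update;

        return [[AWSFirehose defaultFirehose] updateDestination:update];
    }];
}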
(Kinesis and DynamoDB Streams only) A standard Amazon SQS queue or standard Amazon SNS topic destination for discarded records.
+(Kinesis, DynamoDB Streams, Amazon MSK, and self-managed Kafka only) A configuration object that specifies the destination of an event after Lambda processes it.
*/
@property (nonatomic, strong) AWSLambdaDestinationConfig * _Nullable destinationConfig;
@@ -957,7 +958,7 @@ typedef NS_ENUM(NSInteger, AWSLambdaUpdateRuntimeOn) {
@property (nonatomic, strong) NSNumber * _Nullable enabled;
/**
-The Amazon Resource Name (ARN) of the event source.
Amazon Kinesis – The ARN of the data stream or a stream consumer.
Amazon DynamoDB Streams – The ARN of the stream.
Amazon Simple Queue Service – The ARN of the queue.
Amazon Managed Streaming for Apache Kafka – The ARN of the cluster.
Amazon MQ – The ARN of the broker.
Amazon DocumentDB – The ARN of the DocumentDB change stream.
The Amazon Resource Name (ARN) of the event source.
Amazon Kinesis – The ARN of the data stream or a stream consumer.
Amazon DynamoDB Streams – The ARN of the stream.
Amazon Simple Queue Service – The ARN of the queue.
Amazon Managed Streaming for Apache Kafka – The ARN of the cluster or the ARN of the VPC connection (for cross-account event source mappings).
Amazon MQ – The ARN of the broker.
Amazon DocumentDB – The ARN of the DocumentDB change stream.
The size of the function's /tmp
directory in MB. The default value is 512, but can be any whole number between 512 and 10,240 MB.
The size of the function's /tmp
directory in MB. The default value is 512, but can be any whole number between 512 and 10,240 MB. For more information, see Configuring ephemeral storage (console).
The size of the function's /tmp
directory in MB. The default value is 512, but it can be any whole number between 512 and 10,240 MB.
The size of the function's /tmp
directory in MB. The default value is 512, but can be any whole number between 512 and 10,240 MB. For more information, see Configuring ephemeral storage (console).
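A brief sketch of raising the /tmp size through the generated Lambda client; the function name and size are placeholders, and the AWSLambdaEphemeralStorage model name is assumed from this SDK.

#import <AWSLambda/AWSLambda.h>

static void raiseEphemeralStorage(void) {
    AWSLambdaEphemeralStorage *storage = [AWSLambdaEphemeralStorage new];
    storage.size = @2048; // MB; any whole number between 512 and 10,240

    AWSLambdaUpdateFunctionConfigurationRequest *request =
        [AWSLambdaUpdateFunctionConfigurationRequest new];
    request.functionName = @"my-function"; // placeholder
    request.ephemeralStorage = storage;

    [[AWSLambda defaultLambda] updateFunctionConfiguration:request];
}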
(Kinesis and DynamoDB Streams only) An Amazon SQS queue or Amazon SNS topic destination for discarded records.
+(Kinesis, DynamoDB Streams, Amazon MSK, and self-managed Apache Kafka event sources only) A configuration object that specifies the destination of an event after Lambda processes it.
*/
@property (nonatomic, strong) AWSLambdaDestinationConfig * _Nullable destinationConfig;
@@ -1813,7 +1814,7 @@ typedef NS_ENUM(NSInteger, AWSLambdaUpdateRuntimeOn) {
@property (nonatomic, strong) AWSLambdaEnvironmentResponse * _Nullable environment;
/**
-The size of the function’s /tmp
directory in MB. The default value is 512, but it can be any whole number between 512 and 10,240 MB.
The size of the function's /tmp
directory in MB. The default value is 512, but can be any whole number between 512 and 10,240 MB. For more information, see Configuring ephemeral storage (console).
Up to 3,583 bytes of base64-encoded data about the invoking client to pass to the function in the context object.
+Up to 3,583 bytes of base64-encoded data about the invoking client to pass to the function in the context object. Lambda passes the ClientContext
object to your function for synchronous invocations only.
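A minimal sketch of passing a client context on a synchronous invocation, assuming the generated AWSLambda client; the function name and context payload are placeholders.

#import <AWSLambda/AWSLambda.h>

static void invokeWithClientContext(void) {
    // Build the client context as JSON, then base64-encode it. Lambda only
    // forwards it for synchronous (RequestResponse) invocations.
    NSDictionary *context = @{@"custom": @{@"source": @"ios-app"}}; // placeholder
    NSData *json = [NSJSONSerialization dataWithJSONObject:context
                                                   options:0
                                                     error:NULL];

    AWSLambdaInvocationRequest *request = [AWSLambdaInvocationRequest new];
    request.functionName = @"my-function"; // placeholder
    request.invocationType = AWSLambdaInvocationTypeRequestResponse; // synchronous
    request.clientContext = [json base64EncodedStringWithOptions:0];
    request.payload = @{@"hello": @"world"};

    [[AWSLambda defaultLambda] invoke:request];
}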
The Amazon Resource Name (ARN) of the event source.
Amazon Kinesis – The ARN of the data stream or a stream consumer.
Amazon DynamoDB Streams – The ARN of the stream.
Amazon Simple Queue Service – The ARN of the queue.
Amazon Managed Streaming for Apache Kafka – The ARN of the cluster.
Amazon MQ – The ARN of the broker.
Amazon DocumentDB – The ARN of the DocumentDB change stream.
The Amazon Resource Name (ARN) of the event source.
Amazon Kinesis – The ARN of the data stream or a stream consumer.
Amazon DynamoDB Streams – The ARN of the stream.
Amazon Simple Queue Service – The ARN of the queue.
Amazon Managed Streaming for Apache Kafka – The ARN of the cluster or the ARN of the VPC connection (for cross-account event source mappings).
Amazon MQ – The ARN of the broker.
Amazon DocumentDB – The ARN of the DocumentDB change stream.
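A sketch of wiring one of the ARN formats above to a function, assuming the generated AWSLambda event source mapping request; the function name and stream ARN are placeholders.

#import <AWSLambda/AWSLambda.h>

static void mapKinesisStreamToFunction(void) {
    // The ARN format determines the event source type, per the list above.
    AWSLambdaCreateEventSourceMappingRequest *mapping =
        [AWSLambdaCreateEventSourceMappingRequest new];
    mapping.functionName = @"my-function"; // placeholder
    mapping.eventSourceArn =
        @"arn:aws:kinesis:us-east-1:111122223333:stream/my-stream"; // placeholder
    mapping.startingPosition = AWSLambdaEventSourcePositionLatest;
    mapping.batchSize = @100;

    [[AWSLambda defaultLambda] createEventSourceMapping:mapping];
}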
A runtime identifier. For example, go1.x
.
The following list includes deprecated runtimes. For more information, see Runtime deprecation policy.
+A runtime identifier. For example, java21
.
The following list includes deprecated runtimes. For more information, see Runtime deprecation policy.
*/
@property (nonatomic, assign) AWSLambdaRuntime compatibleRuntime;
@@ -3373,7 +3374,7 @@ typedef NS_ENUM(NSInteger, AWSLambdaUpdateRuntimeOn) {
@property (nonatomic, assign) AWSLambdaArchitecture compatibleArchitecture;
/**
-A runtime identifier. For example, go1.x
.
The following list includes deprecated runtimes. For more information, see Runtime deprecation policy.
+A runtime identifier. For example, java21
.
The following list includes deprecated runtimes. For more information, see Runtime deprecation policy.
*/
@property (nonatomic, assign) AWSLambdaRuntime compatibleRuntime;
@@ -3522,7 +3523,7 @@ typedef NS_ENUM(NSInteger, AWSLambdaUpdateRuntimeOn) {
/**
-Set this property to filter the application logs for your function that Lambda sends to CloudWatch. Lambda only sends application logs at the selected level and lower.
+Set this property to filter the application logs for your function that Lambda sends to CloudWatch. Lambda only sends application logs at the selected level of detail and lower, where TRACE
is the highest level and FATAL
is the lowest.
Set this property to filter the system logs for your function that Lambda sends to CloudWatch. Lambda only sends system logs at the selected level and lower.
+Set this property to filter the system logs for your function that Lambda sends to CloudWatch. Lambda only sends system logs at the selected level of detail and lower, where DEBUG
is the highest level and WARN
is the lowest.
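A hedged sketch of these filters, assuming the generated AWSLambdaLoggingConfig model and enum names from this SDK version; the function name is a placeholder.

#import <AWSLambda/AWSLambda.h>

static void filterFunctionLogs(void) {
    AWSLambdaLoggingConfig *logging = [AWSLambdaLoggingConfig new];
    logging.logFormat = AWSLambdaLogFormatJSON; // level filtering applies to JSON-format logs
    logging.applicationLogLevel = AWSLambdaApplicationLogLevelWarn; // keeps WARN, ERROR, FATAL
    logging.systemLogLevel = AWSLambdaSystemLogLevelInfo;           // keeps INFO and WARN

    AWSLambdaUpdateFunctionConfigurationRequest *request =
        [AWSLambdaUpdateFunctionConfigurationRequest new];
    request.functionName = @"my-function"; // placeholder
    request.loggingConfig = logging;

    [[AWSLambda defaultLambda] updateFunctionConfiguration:request];
}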
The Amazon Resource Name (ARN) of the destination resource.
+The Amazon Resource Name (ARN) of the destination resource.
To retain records of asynchronous invocations, you can configure an Amazon SNS topic, Amazon SQS queue, Lambda function, or Amazon EventBridge event bus as the destination.
To retain records of failed invocations from Kinesis and DynamoDB event sources, you can configure an Amazon SNS topic or Amazon SQS queue as the destination.
To retain records of failed invocations from self-managed Kafka or Amazon MSK, you can configure an Amazon SNS topic, Amazon SQS queue, or Amazon S3 bucket as the destination.
*/
@property (nonatomic, strong) NSString * _Nullable destination;
@@ -4268,7 +4269,7 @@ typedef NS_ENUM(NSInteger, AWSLambdaUpdateRuntimeOn) {
@property (nonatomic, strong) NSNumber * _Nullable bisectBatchOnFunctionError;
/**
-(Kinesis and DynamoDB Streams only) A standard Amazon SQS queue or standard Amazon SNS topic destination for discarded records.
+(Kinesis, DynamoDB Streams, Amazon MSK, and self-managed Kafka only) A configuration object that specifies the destination of an event after Lambda processes it.
*/
@property (nonatomic, strong) AWSLambdaDestinationConfig * _Nullable destinationConfig;
@@ -4419,7 +4420,7 @@ typedef NS_ENUM(NSInteger, AWSLambdaUpdateRuntimeOn) {
@property (nonatomic, strong) AWSLambdaEnvironment * _Nullable environment;
/**
-The size of the function's /tmp
directory in MB. The default value is 512, but can be any whole number between 512 and 10,240 MB.
The size of the function's /tmp
directory in MB. The default value is 512, but can be any whole number between 512 and 10,240 MB. For more information, see Configuring ephemeral storage (console).
For asynchronous function invocation, use Invoke.
Invokes a function asynchronously.
\",\
+ \"documentation\":\"For asynchronous function invocation, use Invoke.
Invokes a function asynchronously.
If you do use the InvokeAsync action, note that it doesn't support the use of X-Ray active tracing. Trace ID is not propagated to the function, even if X-Ray active tracing is turned on.
The Amazon Resource Name (ARN) of the event source.
Amazon Kinesis – The ARN of the data stream or a stream consumer.
Amazon DynamoDB Streams – The ARN of the stream.
Amazon Simple Queue Service – The ARN of the queue.
Amazon Managed Streaming for Apache Kafka – The ARN of the cluster.
Amazon MQ – The ARN of the broker.
Amazon DocumentDB – The ARN of the DocumentDB change stream.
The Amazon Resource Name (ARN) of the event source.
Amazon Kinesis – The ARN of the data stream or a stream consumer.
Amazon DynamoDB Streams – The ARN of the stream.
Amazon Simple Queue Service – The ARN of the queue.
Amazon Managed Streaming for Apache Kafka – The ARN of the cluster or the ARN of the VPC connection (for cross-account event source mappings).
Amazon MQ – The ARN of the broker.
Amazon DocumentDB – The ARN of the DocumentDB change stream.
(Kinesis and DynamoDB Streams only) A standard Amazon SQS queue or standard Amazon SNS topic destination for discarded records.
\"\
+ \"documentation\":\"(Kinesis, DynamoDB Streams, Amazon MSK, and self-managed Kafka only) A configuration object that specifies the destination of an event after Lambda processes it.
\"\
},\
\"MaximumRecordAgeInSeconds\":{\
\"shape\":\"MaximumRecordAgeInSeconds\",\
@@ -1986,7 +1986,7 @@ - (NSString *)definitionString {
},\
\"EphemeralStorage\":{\
\"shape\":\"EphemeralStorage\",\
- \"documentation\":\"The size of the function's /tmp
directory in MB. The default value is 512, but can be any whole number between 512 and 10,240 MB.
The size of the function's /tmp
directory in MB. The default value is 512, but can be any whole number between 512 and 10,240 MB. For more information, see Configuring ephemeral storage (console).
The size of the function's /tmp
directory.
The size of the function's /tmp
directory in MB. The default value is 512, but it can be any whole number between 512 and 10,240 MB.
The size of the function's /tmp
directory in MB. The default value is 512, but can be any whole number between 512 and 10,240 MB. For more information, see Configuring ephemeral storage (console).
(Kinesis and DynamoDB Streams only) An Amazon SQS queue or Amazon SNS topic destination for discarded records.
\"\
+ \"documentation\":\"(Kinesis, DynamoDB Streams, Amazon MSK, and self-managed Apache Kafka event sources only) A configuration object that specifies the destination of an event after Lambda processes it.
\"\
},\
\"Topics\":{\
\"shape\":\"Topics\",\
@@ -2850,7 +2850,7 @@ - (NSString *)definitionString {
},\
\"EphemeralStorage\":{\
\"shape\":\"EphemeralStorage\",\
- \"documentation\":\"The size of the function’s /tmp
directory in MB. The default value is 512, but it can be any whole number between 512 and 10,240 MB.
The size of the function's /tmp
directory in MB. The default value is 512, but can be any whole number between 512 and 10,240 MB. For more information, see Configuring ephemeral storage (console).
Up to 3,583 bytes of base64-encoded data about the invoking client to pass to the function in the context object.
\",\
+ \"documentation\":\"Up to 3,583 bytes of base64-encoded data about the invoking client to pass to the function in the context object. Lambda passes the ClientContext
object to your function for synchronous invocations only.
The Amazon Resource Name (ARN) of the event source.
Amazon Kinesis – The ARN of the data stream or a stream consumer.
Amazon DynamoDB Streams – The ARN of the stream.
Amazon Simple Queue Service – The ARN of the queue.
Amazon Managed Streaming for Apache Kafka – The ARN of the cluster.
Amazon MQ – The ARN of the broker.
Amazon DocumentDB – The ARN of the DocumentDB change stream.
The Amazon Resource Name (ARN) of the event source.
Amazon Kinesis – The ARN of the data stream or a stream consumer.
Amazon DynamoDB Streams – The ARN of the stream.
Amazon Simple Queue Service – The ARN of the queue.
Amazon Managed Streaming for Apache Kafka – The ARN of the cluster or the ARN of the VPC connection (for cross-account event source mappings).
Amazon MQ – The ARN of the broker.
Amazon DocumentDB – The ARN of the DocumentDB change stream.
A runtime identifier. For example, go1.x
.
The following list includes deprecated runtimes. For more information, see Runtime deprecation policy.
\",\
+ \"documentation\":\"A runtime identifier. For example, java21
.
The following list includes deprecated runtimes. For more information, see Runtime deprecation policy.
\",\
\"location\":\"querystring\",\
\"locationName\":\"CompatibleRuntime\"\
},\
@@ -4417,7 +4417,7 @@ - (NSString *)definitionString {
\"members\":{\
\"CompatibleRuntime\":{\
\"shape\":\"Runtime\",\
- \"documentation\":\"A runtime identifier. For example, go1.x
.
The following list includes deprecated runtimes. For more information, see Runtime deprecation policy.
\",\
+ \"documentation\":\"A runtime identifier. For example, java21
.
The following list includes deprecated runtimes. For more information, see Runtime deprecation policy.
\",\
\"location\":\"querystring\",\
\"locationName\":\"CompatibleRuntime\"\
},\
@@ -4583,11 +4583,11 @@ - (NSString *)definitionString {
},\
\"ApplicationLogLevel\":{\
\"shape\":\"ApplicationLogLevel\",\
- \"documentation\":\"Set this property to filter the application logs for your function that Lambda sends to CloudWatch. Lambda only sends application logs at the selected level and lower.
\"\
+ \"documentation\":\"Set this property to filter the application logs for your function that Lambda sends to CloudWatch. Lambda only sends application logs at the selected level of detail and lower, where TRACE
is the highest level and FATAL
is the lowest.
Set this property to filter the system logs for your function that Lambda sends to CloudWatch. Lambda only sends system logs at the selected level and lower.
\"\
+ \"documentation\":\"Set this property to filter the system logs for your function that Lambda sends to CloudWatch. Lambda only sends system logs at the selected level of detail and lower, where DEBUG
is the highest level and WARN
is the lowest.
The Amazon Resource Name (ARN) of the destination resource.
\"\
+ \"documentation\":\"The Amazon Resource Name (ARN) of the destination resource.
To retain records of asynchronous invocations, you can configure an Amazon SNS topic, Amazon SQS queue, Lambda function, or Amazon EventBridge event bus as the destination.
To retain records of failed invocations from Kinesis and DynamoDB event sources, you can configure an Amazon SNS topic or Amazon SQS queue as the destination.
To retain records of failed invocations from self-managed Kafka or Amazon MSK, you can configure an Amazon SNS topic, Amazon SQS queue, or Amazon S3 bucket as the destination.
\"\
}\
},\
\"documentation\":\"A destination for events that failed processing.
\"\
@@ -5333,6 +5333,7 @@ - (NSString *)definitionString {
\"dotnetcore2.1\",\
\"dotnetcore3.1\",\
\"dotnet6\",\
+ \"dotnet8\",\
\"nodejs4.3-edge\",\
\"go1.x\",\
\"ruby2.5\",\
@@ -5888,7 +5889,7 @@ - (NSString *)definitionString {
},\
\"DestinationConfig\":{\
\"shape\":\"DestinationConfig\",\
- \"documentation\":\"(Kinesis and DynamoDB Streams only) A standard Amazon SQS queue or standard Amazon SNS topic destination for discarded records.
\"\ + \"documentation\":\"(Kinesis, DynamoDB Streams, Amazon MSK, and self-managed Kafka only) A configuration object that specifies the destination of an event after Lambda processes it.
\"\ },\ \"MaximumRecordAgeInSeconds\":{\ \"shape\":\"MaximumRecordAgeInSeconds\",\ @@ -6048,7 +6049,7 @@ - (NSString *)definitionString { },\ \"EphemeralStorage\":{\ \"shape\":\"EphemeralStorage\",\ - \"documentation\":\"The size of the function's /tmp
directory in MB. The default value is 512, but can be any whole number between 512 and 10,240 MB.
The size of the function's /tmp
directory in MB. The default value is 512, but can be any whole number between 512 and 10,240 MB. For more information, see Configuring ephemeral storage (console).
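The /tmp sizing described above maps to the ephemeralStorage member on the function-configuration requests. A minimal sketch, assuming the generated AWSLambdaEphemeralStorage model and a placeholder function name:

#import <AWSLambda/AWSLambda.h>

// Sketch: grow /tmp from the 512 MB default to 2,048 MB (valid range 512-10,240).
AWSLambdaUpdateFunctionConfigurationRequest *config = [AWSLambdaUpdateFunctionConfigurationRequest new];
config.functionName = @"MyFunction"; // placeholder
AWSLambdaEphemeralStorage *storage = [AWSLambdaEphemeralStorage new];
storage.size = @2048; // MB
config.ephemeralStorage = storage;
[[AWSLambda defaultLambda] updateFunctionConfiguration:config];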
For asynchronous function invocation, use Invoke.
Invokes a function asynchronously.
+For asynchronous function invocation, use Invoke.
Invokes a function asynchronously.
If you do use the InvokeAsync action, note that it doesn't support the use of X-Ray active tracing. Trace ID is not propagated to the function, even if X-Ray active tracing is turned on.
For asynchronous function invocation, use Invoke.
Invokes a function asynchronously.
+For asynchronous function invocation, use Invoke.
Invokes a function asynchronously.
If you do use the InvokeAsync action, note that it doesn't support the use of X-Ray active tracing. Trace ID is not propagated to the function, even if X-Ray active tracing is turned on.
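As the deprecation note says, the replacement for InvokeAsync is Invoke with the Event invocation type. A minimal sketch using the SDK's AWSLambdaInvoker wrapper (its JSON-object payload handling is assumed); the function name and payload are placeholders.

#import <AWSLambda/AWSLambda.h>

// Sketch: asynchronous invocation via Invoke instead of the deprecated InvokeAsync.
AWSLambdaInvokerInvocationRequest *invocation = [AWSLambdaInvokerInvocationRequest new];
invocation.functionName = @"MyFunction"; // placeholder
invocation.invocationType = AWSLambdaInvocationTypeEvent; // fire-and-forget
invocation.payload = @{@"orderId": @"12345"}; // placeholder JSON payload
[[[AWSLambdaInvoker defaultLambdaInvoker] invoke:invocation]
 continueWithBlock:^id _Nullable(AWSTask<AWSLambdaInvokerInvocationResponse *> * _Nonnull task) {
    if (task.error) { NSLog(@"Invoke failed: %@", task.error); }
    return nil;
}];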
For a list of attributes, see SetPlatformApplicationAttributes.
+For a list of attributes, see SetPlatformApplicationAttributes
.
PlatformApplicationArn is returned.
+PlatformApplicationArn
is returned.
For a list of attributes, see SetEndpointAttributes.
+For a list of attributes, see SetEndpointAttributes
.
PlatformApplicationArn returned from CreatePlatformApplication is used to create a an endpoint.
+PlatformApplicationArn
returned from CreatePlatformApplication is used to create an endpoint.
A map of attributes with their corresponding values.
The following lists the names, descriptions, and values of the special request parameters that the CreateTopic
action uses:
DeliveryPolicy
– The policy that defines how Amazon SNS retries failed deliveries to HTTP/S endpoints.
DisplayName
– The display name to use for a topic with SMS subscriptions.
FifoTopic
– Set to true to create a FIFO topic.
Policy
– The policy that defines who can access your topic. By default, only the topic owner can publish or subscribe to the topic.
SignatureVersion
– The signature version corresponds to the hashing algorithm used while creating the signature of the notifications, subscription confirmations, or unsubscribe confirmation messages sent by Amazon SNS. By default, SignatureVersion
is set to 1
.
TracingConfig
– Tracing mode of an Amazon SNS topic. By default TracingConfig
is set to PassThrough
, and the topic passes through the tracing header it receives from an Amazon SNS publisher to its subscriptions. If set to Active
, Amazon SNS will vend X-Ray segment data to topic owner account if the sampled flag in the tracing header is true. This is only supported on standard topics.
The following attribute applies only to server-side encryption:
KmsMasterKeyId
– The ID of an Amazon Web Services managed customer master key (CMK) for Amazon SNS or a custom CMK. For more information, see Key Terms. For more examples, see KeyId in the Key Management Service API Reference.
The following attributes apply only to FIFO topics:
FifoTopic
– When this is set to true
, a FIFO topic is created.
ContentBasedDeduplication
– Enables content-based deduplication for FIFO topics.
By default, ContentBasedDeduplication
is set to false
. If you create a FIFO topic and this attribute is false
, you must specify a value for the MessageDeduplicationId
parameter for the Publish action.
When you set ContentBasedDeduplication
to true
, Amazon SNS uses a SHA-256 hash to generate the MessageDeduplicationId
using the body of the message (but not the attributes of the message).
(Optional) To override the generated value, you can specify a value for the MessageDeduplicationId
parameter for the Publish
action.
A map of attributes with their corresponding values.
The following lists the names, descriptions, and values of the special request parameters that the CreateTopic
action uses:
DeliveryPolicy
– The policy that defines how Amazon SNS retries failed deliveries to HTTP/S endpoints.
DisplayName
– The display name to use for a topic with SMS subscriptions.
FifoTopic
– Set to true to create a FIFO topic.
Policy
– The policy that defines who can access your topic. By default, only the topic owner can publish or subscribe to the topic.
SignatureVersion
– The signature version corresponds to the hashing algorithm used while creating the signature of the notifications, subscription confirmations, or unsubscribe confirmation messages sent by Amazon SNS. By default, SignatureVersion
is set to 1
.
TracingConfig
– Tracing mode of an Amazon SNS topic. By default TracingConfig
is set to PassThrough
, and the topic passes through the tracing header it receives from an Amazon SNS publisher to its subscriptions. If set to Active
, Amazon SNS will vend X-Ray segment data to topic owner account if the sampled flag in the tracing header is true. This is only supported on standard topics.
The following attribute applies only to server-side encryption:
KmsMasterKeyId
– The ID of an Amazon Web Services managed customer master key (CMK) for Amazon SNS or a custom CMK. For more information, see Key Terms. For more examples, see KeyId in the Key Management Service API Reference.
The following attributes apply only to FIFO topics:
ArchivePolicy
– Adds or updates an inline policy document to archive messages stored in the specified Amazon SNS topic.
BeginningArchiveTime
– The earliest starting point at which a message in the topic’s archive can be replayed from. This point in time is based on the configured message retention period set by the topic’s message archiving policy.
ContentBasedDeduplication
– Enables content-based deduplication for FIFO topics.
By default, ContentBasedDeduplication
is set to false
. If you create a FIFO topic and this attribute is false
, you must specify a value for the MessageDeduplicationId
parameter for the Publish action.
When you set ContentBasedDeduplication
to true
, Amazon SNS uses a SHA-256 hash to generate the MessageDeduplicationId
using the body of the message (but not the attributes of the message).
(Optional) To override the generated value, you can specify a value for the MessageDeduplicationId
parameter for the Publish
action.
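The FIFO attributes above are passed as plain strings in the attributes map. A minimal sketch, assuming the generated AWSSNSCreateTopicInput model and a placeholder topic name:

#import <AWSSNS/AWSSNS.h>

// Sketch: create a FIFO topic with content-based deduplication enabled.
AWSSNSCreateTopicInput *topic = [AWSSNSCreateTopicInput new];
topic.name = @"orders.fifo"; // placeholder; FIFO topic names must end in .fifo
topic.attributes = @{@"FifoTopic": @"true",
                     @"ContentBasedDeduplication": @"true"};
[[[AWSSNS defaultSNS] createTopic:topic]
 continueWithBlock:^id _Nullable(AWSTask<AWSSNSCreateTopicResponse *> * _Nonnull task) {
    NSLog(@"TopicArn: %@", task.result.topicArn);
    return nil;
}];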
Input for DeleteEndpoint action.
+Input for DeleteEndpoint
action.
EndpointArn of endpoint to delete.
+EndpointArn
of endpoint to delete.
Input for DeletePlatformApplication action.
+Input for DeletePlatformApplication
action.
PlatformApplicationArn of platform application object to delete.
+PlatformApplicationArn
of platform application object to delete.
Input for GetEndpointAttributes action.
+Input for GetEndpointAttributes
action.
EndpointArn for GetEndpointAttributes input.
+EndpointArn
for GetEndpointAttributes
input.
Response from GetEndpointAttributes of the EndpointArn.
+Response from GetEndpointAttributes
of the EndpointArn
.
Input for GetPlatformApplicationAttributes action.
+Input for GetPlatformApplicationAttributes
action.
PlatformApplicationArn for GetPlatformApplicationAttributesInput.
+PlatformApplicationArn
for GetPlatformApplicationAttributesInput.
Response for GetPlatformApplicationAttributes action.
+Response for GetPlatformApplicationAttributes
action.
Attributes include the following:
AppleCertificateExpiryDate
– The expiry date of the SSL certificate used to configure certificate-based authentication.
ApplePlatformTeamID
– The Apple developer account ID used to configure token-based authentication.
ApplePlatformBundleID
– The app identifier used to configure token-based authentication.
EventEndpointCreated
– Topic ARN to which EndpointCreated event notifications should be sent.
EventEndpointDeleted
– Topic ARN to which EndpointDeleted event notifications should be sent.
EventEndpointUpdated
– Topic ARN to which EndpointUpdate event notifications should be sent.
EventDeliveryFailure
– Topic ARN to which DeliveryFailure event notifications should be sent upon Direct Publish delivery failure (permanent) to one of the application's endpoints.
Attributes include the following:
AppleCertificateExpiryDate
– The expiry date of the SSL certificate used to configure certificate-based authentication.
ApplePlatformTeamID
– The Apple developer account ID used to configure token-based authentication.
ApplePlatformBundleID
– The app identifier used to configure token-based authentication.
AuthenticationMethod
– Returns the credential type used when sending push notifications from application to APNS/APNS_Sandbox, or application to GCM.
APNS – Returns the token or certificate.
GCM – Returns the token or key.
EventEndpointCreated
– Topic ARN to which EndpointCreated event notifications should be sent.
EventEndpointDeleted
– Topic ARN to which EndpointDeleted event notifications should be sent.
EventEndpointUpdated
– Topic ARN to which EndpointUpdate event notifications should be sent.
EventDeliveryFailure
– Topic ARN to which DeliveryFailure event notifications should be sent upon Direct Publish delivery failure (permanent) to one of the application's endpoints.
Input for ListEndpointsByPlatformApplication action.
+Input for ListEndpointsByPlatformApplication
action.
NextToken string is used when calling ListEndpointsByPlatformApplication action to retrieve additional records that are available after the first page results.
+NextToken
string is used when calling ListEndpointsByPlatformApplication
action to retrieve additional records that are available after the first page results.
PlatformApplicationArn for ListEndpointsByPlatformApplicationInput action.
+PlatformApplicationArn
for ListEndpointsByPlatformApplicationInput
action.
Response for ListEndpointsByPlatformApplication action.
+Response for ListEndpointsByPlatformApplication
action.
Endpoints returned for ListEndpointsByPlatformApplication action.
+Endpoints returned for ListEndpointsByPlatformApplication
action.
NextToken string is returned when calling ListEndpointsByPlatformApplication action if additional records are available after the first page results.
+NextToken
string is returned when calling ListEndpointsByPlatformApplication
action if additional records are available after the first page results.
Input for ListPlatformApplications action.
+Input for ListPlatformApplications
action.
NextToken string is used when calling ListPlatformApplications action to retrieve additional records that are available after the first page results.
+NextToken
string is used when calling ListPlatformApplications
action to retrieve additional records that are available after the first page results.
Response for ListPlatformApplications action.
+Response for ListPlatformApplications
action.
NextToken string is returned when calling ListPlatformApplications action if additional records are available after the first page results.
+NextToken
string is returned when calling ListPlatformApplications
action if additional records are available after the first page results.
Platform applications returned when calling ListPlatformApplications action.
+Platform applications returned when calling ListPlatformApplications
action.
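The NextToken contract described above is the usual request/response pagination pair. A hedged paging sketch over ListPlatformApplications, assuming the generated input/response models:

#import <AWSSNS/AWSSNS.h>

// Sketch: walk every page of platform applications by chaining NextToken.
__block void (^listPage)(NSString * _Nullable) = nil;
listPage = ^(NSString * _Nullable nextToken) {
    AWSSNSListPlatformApplicationsInput *input = [AWSSNSListPlatformApplicationsInput new];
    input.nextToken = nextToken;
    [[[AWSSNS defaultSNS] listPlatformApplications:input]
     continueWithSuccessBlock:^id _Nullable(AWSTask<AWSSNSListPlatformApplicationsResponse *> * _Nonnull task) {
        for (AWSSNSPlatformApplication *app in task.result.platformApplications) {
            NSLog(@"%@", app.platformApplicationArn);
        }
        if (task.result.nextToken) { listPage(task.result.nextToken); } // more pages remain
        return nil;
    }];
};
listPage(nil); // the first page is requested without a token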
Input for SetEndpointAttributes action.
+Input for SetEndpointAttributes
action.
EndpointArn used for SetEndpointAttributes action.
+EndpointArn used for SetEndpointAttributes
action.
Input for SetPlatformApplicationAttributes action.
+Input for SetPlatformApplicationAttributes
action.
A map of the platform application attributes. Attributes in this map include the following:
PlatformCredential
– The credential received from the notification service.
For ADM, PlatformCredential
is client secret.
For Apple Services using certificate credentials, PlatformCredential
is private key.
For Apple Services using token credentials, PlatformCredential
is signing key.
For GCM (Firebase Cloud Messaging), PlatformCredential
is API key.
PlatformPrincipal
– The principal received from the notification service.
For ADM, PlatformPrincipal
is client id.
For Apple Services using certificate credentials, PlatformPrincipal
is SSL certificate.
For Apple Services using token credentials, PlatformPrincipal
is signing key ID.
For GCM (Firebase Cloud Messaging), there is no PlatformPrincipal
.
EventEndpointCreated
– Topic ARN to which EndpointCreated
event notifications are sent.
EventEndpointDeleted
– Topic ARN to which EndpointDeleted
event notifications are sent.
EventEndpointUpdated
– Topic ARN to which EndpointUpdate
event notifications are sent.
EventDeliveryFailure
– Topic ARN to which DeliveryFailure
event notifications are sent upon Direct Publish delivery failure (permanent) to one of the application's endpoints.
SuccessFeedbackRoleArn
– IAM role ARN used to give Amazon SNS write access to use CloudWatch Logs on your behalf.
FailureFeedbackRoleArn
– IAM role ARN used to give Amazon SNS write access to use CloudWatch Logs on your behalf.
SuccessFeedbackSampleRate
– Sample rate percentage (0-100) of successfully delivered messages.
The following attributes only apply to APNs
token-based authentication:
ApplePlatformTeamID
– The identifier that's assigned to your Apple developer account team.
ApplePlatformBundleID
– The bundle identifier that's assigned to your iOS app.
A map of the platform application attributes. Attributes in this map include the following:
PlatformCredential
– The credential received from the notification service.
For ADM, PlatformCredential
is client secret.
For Apple Services using certificate credentials, PlatformCredential
is private key.
For Apple Services using token credentials, PlatformCredential
is signing key.
For GCM (Firebase Cloud Messaging) using key credentials, there is no PlatformPrincipal
. The PlatformCredential
is API key
.
For GCM (Firebase Cloud Messaging) using token credentials, there is no PlatformPrincipal
. The PlatformCredential
is a JSON formatted private key file. When using the Amazon Web Services CLI, the file must be in string format and special characters must be ignored. To format the file correctly, Amazon SNS recommends using the following command: SERVICE_JSON=`jq @json <<< cat service.json`
.
PlatformPrincipal
– The principal received from the notification service.
For ADM, PlatformPrincipal
is client id.
For Apple Services using certificate credentials, PlatformPrincipal
is SSL certificate.
For Apple Services using token credentials, PlatformPrincipal
is signing key ID.
For GCM (Firebase Cloud Messaging), there is no PlatformPrincipal
.
EventEndpointCreated
– Topic ARN to which EndpointCreated
event notifications are sent.
EventEndpointDeleted
– Topic ARN to which EndpointDeleted
event notifications are sent.
EventEndpointUpdated
– Topic ARN to which EndpointUpdate
event notifications are sent.
EventDeliveryFailure
– Topic ARN to which DeliveryFailure
event notifications are sent upon Direct Publish delivery failure (permanent) to one of the application's endpoints.
SuccessFeedbackRoleArn
– IAM role ARN used to give Amazon SNS write access to use CloudWatch Logs on your behalf.
FailureFeedbackRoleArn
– IAM role ARN used to give Amazon SNS write access to use CloudWatch Logs on your behalf.
SuccessFeedbackSampleRate
– Sample rate percentage (0-100) of successfully delivered messages.
The following attributes only apply to APNs
token-based authentication:
ApplePlatformTeamID
– The identifier that's assigned to your Apple developer account team.
ApplePlatformBundleID
– The bundle identifier that's assigned to your iOS app.
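A hedged sketch of pushing a few of these attributes with the generated AWSSNSSetPlatformApplicationAttributesInput; the application ARN, team ID, and bundle ID are placeholders.

#import <AWSSNS/AWSSNS.h>

// Sketch: update APNs token-based authentication attributes on a platform application.
AWSSNSSetPlatformApplicationAttributesInput *update = [AWSSNSSetPlatformApplicationAttributesInput new];
update.platformApplicationArn = @"arn:aws:sns:us-east-1:123456789012:app/APNS/MyApp"; // placeholder
update.attributes = @{@"ApplePlatformTeamID": @"TEAMID1234",        // placeholder team ID
                      @"ApplePlatformBundleID": @"com.example.app", // placeholder bundle ID
                      @"SuccessFeedbackSampleRate": @"100"};
[[AWSSNS defaultSNS] setPlatformApplicationAttributes:update];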
PlatformApplicationArn for SetPlatformApplicationAttributes action.
+PlatformApplicationArn
for SetPlatformApplicationAttributes
action.
A map of attributes with their corresponding values.
The following lists the names, descriptions, and values of the special request parameters that the Subscribe
action uses:
DeliveryPolicy
– The policy that defines how Amazon SNS retries failed deliveries to HTTP/S endpoints.
FilterPolicy
– The simple JSON object that lets your subscriber receive only a subset of messages, rather than receiving every message published to the topic.
FilterPolicyScope
– This attribute lets you choose the filtering scope by using one of the following string value types:
MessageAttributes
(default) – The filter is applied on the message attributes.
MessageBody
– The filter is applied on the message body.
RawMessageDelivery
– When set to true
, enables raw message delivery to Amazon SQS or HTTP/S endpoints. This eliminates the need for the endpoints to process JSON formatting, which is otherwise created for Amazon SNS metadata.
RedrivePolicy
– When specified, sends undeliverable messages to the specified Amazon SQS dead-letter queue. Messages that can't be delivered due to client errors (for example, when the subscribed endpoint is unreachable) or server errors (for example, when the service that powers the subscribed endpoint becomes unavailable) are held in the dead-letter queue for further analysis or reprocessing.
The following attribute applies only to Amazon Kinesis Data Firehose delivery stream subscriptions:
SubscriptionRoleArn
– The ARN of the IAM role that has the following:
Permission to write to the Kinesis Data Firehose delivery stream
Amazon SNS listed as a trusted entity
Specifying a valid ARN for this attribute is required for Kinesis Data Firehose delivery stream subscriptions. For more information, see Fanout to Kinesis Data Firehose delivery streams in the Amazon SNS Developer Guide.
A map of attributes with their corresponding values.
The following lists the names, descriptions, and values of the special request parameters that the Subscribe
action uses:
DeliveryPolicy
– The policy that defines how Amazon SNS retries failed deliveries to HTTP/S endpoints.
FilterPolicy
– The simple JSON object that lets your subscriber receive only a subset of messages, rather than receiving every message published to the topic.
FilterPolicyScope
– This attribute lets you choose the filtering scope by using one of the following string value types:
MessageAttributes
(default) – The filter is applied on the message attributes.
MessageBody
– The filter is applied on the message body.
RawMessageDelivery
– When set to true
, enables raw message delivery to Amazon SQS or HTTP/S endpoints. This eliminates the need for the endpoints to process JSON formatting, which is otherwise created for Amazon SNS metadata.
RedrivePolicy
– When specified, sends undeliverable messages to the specified Amazon SQS dead-letter queue. Messages that can't be delivered due to client errors (for example, when the subscribed endpoint is unreachable) or server errors (for example, when the service that powers the subscribed endpoint becomes unavailable) are held in the dead-letter queue for further analysis or reprocessing.
The following attribute applies only to Amazon Kinesis Data Firehose delivery stream subscriptions:
SubscriptionRoleArn
– The ARN of the IAM role that has the following:
Permission to write to the Kinesis Data Firehose delivery stream
Amazon SNS listed as a trusted entity
Specifying a valid ARN for this attribute is required for Kinesis Data Firehose delivery stream subscriptions. For more information, see Fanout to Kinesis Data Firehose delivery streams in the Amazon SNS Developer Guide.
The following attributes apply only to FIFO topics:
ReplayPolicy
– Adds or updates an inline policy document for a subscription to replay messages stored in the specified Amazon SNS topic.
ReplayStatus
– Retrieves the status of the subscription message replay, which can be one of the following:
Completed
– The replay has successfully redelivered all messages, and is now delivering newly published messages. If an ending point was specified in the ReplayPolicy
then the subscription will no longer receive newly published messages.
In progress
– The replay is currently replaying the selected messages.
Failed
– The replay was unable to complete.
Pending
– The default state while the replay initiates.
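These subscription attributes are likewise plain strings on the request. A minimal sketch, assuming the generated AWSSNSSubscribeInput (where the Protocol member appears to be surfaced as protocols) and placeholder ARNs:

#import <AWSSNS/AWSSNS.h>

// Sketch: subscribe an SQS queue with raw delivery and a body-scoped filter policy.
AWSSNSSubscribeInput *subscription = [AWSSNSSubscribeInput new];
subscription.topicArn = @"arn:aws:sns:us-east-1:123456789012:MyTopic"; // placeholder
subscription.protocols = @"sqs"; // generated name for the Protocol member (assumed)
subscription.endpoint = @"arn:aws:sqs:us-east-1:123456789012:MyQueue"; // placeholder
subscription.attributes = @{@"RawMessageDelivery": @"true",
                            @"FilterPolicyScope": @"MessageBody",
                            @"FilterPolicy": @"{\"eventType\":[\"order_placed\"]}"};
[[AWSSNS defaultSNS] subscribe:subscription];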
Verifies an endpoint owner's intent to receive messages by validating the token sent to the endpoint by an earlier Subscribe
action. If the token is valid, the action creates a new subscription and returns its Amazon Resource Name (ARN). This call requires an AWS signature only when the AuthenticateOnUnsubscribe
flag is set to \\\"true\\\".
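A hedged sketch of the confirmation round-trip, written as a helper method so the token your endpoint received comes in as a parameter rather than an invented value:

#import <AWSSNS/AWSSNS.h>

// Sketch: confirm a pending subscription with the token delivered to the endpoint.
- (void)confirmSubscriptionWithToken:(NSString *)token topicArn:(NSString *)topicArn {
    AWSSNSConfirmSubscriptionInput *confirmation = [AWSSNSConfirmSubscriptionInput new];
    confirmation.topicArn = topicArn;
    confirmation.token = token; // valid only for a limited window, so confirm promptly
    confirmation.authenticateOnUnsubscribe = @"true"; // unsubscribing then requires an AWS signature
    [[[AWSSNS defaultSNS] confirmSubscription:confirmation]
     continueWithBlock:^id _Nullable(AWSTask<AWSSNSConfirmSubscriptionResponse *> * _Nonnull task) {
        NSLog(@"SubscriptionArn: %@", task.result.subscriptionArn);
        return nil;
    }];
}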
Creates a platform application object for one of the supported push notification services, such as APNS and GCM (Firebase Cloud Messaging), to which devices and mobile apps may register. You must specify PlatformPrincipal
and PlatformCredential
attributes when using the CreatePlatformApplication
action.
PlatformPrincipal
and PlatformCredential
are received from the notification service.
For ADM
, PlatformPrincipal
is client id
and PlatformCredential
is client secret
.
For Baidu
, PlatformPrincipal
is API key
and PlatformCredential
is secret key
.
For APNS
and APNS_SANDBOX
using certificate credentials, PlatformPrincipal
is SSL certificate
and PlatformCredential
is private key
.
For APNS
and APNS_SANDBOX
using token credentials, PlatformPrincipal
is signing key ID
and PlatformCredential
is signing key
.
For GCM
(Firebase Cloud Messaging), there is no PlatformPrincipal
and the PlatformCredential
is API key
.
For MPNS
, PlatformPrincipal
is TLS certificate
and PlatformCredential
is private key
.
For WNS
, PlatformPrincipal
is Package Security Identifier
and PlatformCredential
is secret key
.
You can use the returned PlatformApplicationArn
as an attribute for the CreatePlatformEndpoint
action.
Creates a platform application object for one of the supported push notification services, such as APNS and GCM (Firebase Cloud Messaging), to which devices and mobile apps may register. You must specify PlatformPrincipal
and PlatformCredential
attributes when using the CreatePlatformApplication
action.
PlatformPrincipal
and PlatformCredential
are received from the notification service.
For ADM
, PlatformPrincipal
is client id
and PlatformCredential
is client secret
.
For Baidu
, PlatformPrincipal
is API key
and PlatformCredential
is secret key
.
For APNS
and APNS_SANDBOX
using certificate credentials, PlatformPrincipal
is SSL certificate
and PlatformCredential
is private key
.
For APNS
and APNS_SANDBOX
using token credentials, PlatformPrincipal
is signing key ID
and PlatformCredential
is signing key
.
For GCM (Firebase Cloud Messaging) using key credentials, there is no PlatformPrincipal
. The PlatformCredential
is API key
.
For GCM (Firebase Cloud Messaging) using token credentials, there is no PlatformPrincipal
. The PlatformCredential
is a JSON formatted private key file. When using the Amazon Web Services CLI, the file must be in string format and special characters must be ignored. To format the file correctly, Amazon SNS recommends using the following command: SERVICE_JSON=`jq @json <<< cat service.json`
.
For MPNS
, PlatformPrincipal
is TLS certificate
and PlatformCredential
is private key
.
For WNS
, PlatformPrincipal
is Package Security Identifier
and PlatformCredential
is secret key
.
You can use the returned PlatformApplicationArn
as an attribute for the CreatePlatformEndpoint
action.
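A hedged sketch of registering an APNs application with token credentials, again written as a helper so the signing key and key ID are parameters rather than invented values; the application name is a placeholder.

#import <AWSSNS/AWSSNS.h>

// Sketch: create a platform application using APNs token credentials.
- (void)registerAPNSApplicationWithSigningKey:(NSString *)signingKey keyId:(NSString *)keyId {
    AWSSNSCreatePlatformApplicationInput *application = [AWSSNSCreatePlatformApplicationInput new];
    application.name = @"MyApp"; // placeholder name
    application.platform = @"APNS";
    application.attributes = @{@"PlatformPrincipal": keyId,        // signing key ID
                               @"PlatformCredential": signingKey}; // contents of the .p8 signing key
    [[[AWSSNS defaultSNS] createPlatformApplication:application]
     continueWithBlock:^id _Nullable(AWSTask<AWSSNSCreatePlatformApplicationResponse *> * _Nonnull task) {
        NSLog(@"PlatformApplicationArn: %@", task.result.platformApplicationArn);
        return nil;
    }];
}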
Subscribes an endpoint to an Amazon SNS topic. If the endpoint type is HTTP/S or email, or if the endpoint and the topic are not in the same Amazon Web Services account, the endpoint owner must run the ConfirmSubscription
action to confirm the subscription.
You call the ConfirmSubscription
action with the token from the subscription response. Confirmation tokens are valid for three days.
This action is throttled at 100 transactions per second (TPS).
\"\ + \"documentation\":\"Subscribes an endpoint to an Amazon SNS topic. If the endpoint type is HTTP/S or email, or if the endpoint and the topic are not in the same Amazon Web Services account, the endpoint owner must run the ConfirmSubscription
action to confirm the subscription.
You call the ConfirmSubscription
action with the token from the subscription response. Confirmation tokens are valid for two days.
This action is throttled at 100 transactions per second (TPS).
\"\ },\ \"TagResource\":{\ \"name\":\"TagResource\",\ @@ -1081,7 +1085,7 @@ - (NSString *)definitionString { },\ \"Attributes\":{\ \"shape\":\"MapStringToString\",\ - \"documentation\":\"For a list of attributes, see SetPlatformApplicationAttributes.
\"\ + \"documentation\":\"For a list of attributes, see SetPlatformApplicationAttributes
.
Input for CreatePlatformApplication action.
\"\ @@ -1091,7 +1095,7 @@ - (NSString *)definitionString { \"members\":{\ \"PlatformApplicationArn\":{\ \"shape\":\"String\",\ - \"documentation\":\"PlatformApplicationArn is returned.
\"\ + \"documentation\":\" PlatformApplicationArn
is returned.
Response from CreatePlatformApplication action.
\"\ @@ -1105,7 +1109,7 @@ - (NSString *)definitionString { \"members\":{\ \"PlatformApplicationArn\":{\ \"shape\":\"String\",\ - \"documentation\":\"PlatformApplicationArn returned from CreatePlatformApplication is used to create a an endpoint.
\"\ + \"documentation\":\" PlatformApplicationArn
returned from CreatePlatformApplication is used to create an endpoint.
For a list of attributes, see SetEndpointAttributes.
\"\ + \"documentation\":\"For a list of attributes, see SetEndpointAttributes
.
Input for CreatePlatformEndpoint action.
\"\ @@ -1151,7 +1155,7 @@ - (NSString *)definitionString { },\ \"Attributes\":{\ \"shape\":\"TopicAttributesMap\",\ - \"documentation\":\"A map of attributes with their corresponding values.
The following lists the names, descriptions, and values of the special request parameters that the CreateTopic
action uses:
DeliveryPolicy
– The policy that defines how Amazon SNS retries failed deliveries to HTTP/S endpoints.
DisplayName
– The display name to use for a topic with SMS subscriptions.
FifoTopic
– Set to true to create a FIFO topic.
Policy
– The policy that defines who can access your topic. By default, only the topic owner can publish or subscribe to the topic.
SignatureVersion
– The signature version corresponds to the hashing algorithm used while creating the signature of the notifications, subscription confirmations, or unsubscribe confirmation messages sent by Amazon SNS. By default, SignatureVersion
is set to 1
.
TracingConfig
– Tracing mode of an Amazon SNS topic. By default TracingConfig
is set to PassThrough
, and the topic passes through the tracing header it receives from an Amazon SNS publisher to its subscriptions. If set to Active
, Amazon SNS will vend X-Ray segment data to topic owner account if the sampled flag in the tracing header is true. This is only supported on standard topics.
The following attribute applies only to server-side encryption:
KmsMasterKeyId
– The ID of an Amazon Web Services managed customer master key (CMK) for Amazon SNS or a custom CMK. For more information, see Key Terms. For more examples, see KeyId in the Key Management Service API Reference.
The following attributes apply only to FIFO topics:
FifoTopic
– When this is set to true
, a FIFO topic is created.
ContentBasedDeduplication
– Enables content-based deduplication for FIFO topics.
By default, ContentBasedDeduplication
is set to false
. If you create a FIFO topic and this attribute is false
, you must specify a value for the MessageDeduplicationId
parameter for the Publish action.
When you set ContentBasedDeduplication
to true
, Amazon SNS uses a SHA-256 hash to generate the MessageDeduplicationId
using the body of the message (but not the attributes of the message).
(Optional) To override the generated value, you can specify a value for the MessageDeduplicationId
parameter for the Publish
action.
A map of attributes with their corresponding values.
The following lists the names, descriptions, and values of the special request parameters that the CreateTopic
action uses:
DeliveryPolicy
– The policy that defines how Amazon SNS retries failed deliveries to HTTP/S endpoints.
DisplayName
– The display name to use for a topic with SMS subscriptions.
FifoTopic
– Set to true to create a FIFO topic.
Policy
– The policy that defines who can access your topic. By default, only the topic owner can publish or subscribe to the topic.
SignatureVersion
– The signature version corresponds to the hashing algorithm used while creating the signature of the notifications, subscription confirmations, or unsubscribe confirmation messages sent by Amazon SNS. By default, SignatureVersion
is set to 1
.
TracingConfig
– Tracing mode of an Amazon SNS topic. By default TracingConfig
is set to PassThrough
, and the topic passes through the tracing header it receives from an Amazon SNS publisher to its subscriptions. If set to Active
, Amazon SNS will vend X-Ray segment data to topic owner account if the sampled flag in the tracing header is true. This is only supported on standard topics.
The following attribute applies only to server-side encryption:
KmsMasterKeyId
– The ID of an Amazon Web Services managed customer master key (CMK) for Amazon SNS or a custom CMK. For more information, see Key Terms. For more examples, see KeyId in the Key Management Service API Reference.
The following attributes apply only to FIFO topics:
ArchivePolicy
– Adds or updates an inline policy document to archive messages stored in the specified Amazon SNS topic.
BeginningArchiveTime
– The earliest starting point at which a message in the topic’s archive can be replayed from. This point in time is based on the configured message retention period set by the topic’s message archiving policy.
ContentBasedDeduplication
– Enables content-based deduplication for FIFO topics.
By default, ContentBasedDeduplication
is set to false
. If you create a FIFO topic and this attribute is false
, you must specify a value for the MessageDeduplicationId
parameter for the Publish action.
When you set ContentBasedDeduplication
to true
, Amazon SNS uses a SHA-256 hash to generate the MessageDeduplicationId
using the body of the message (but not the attributes of the message).
(Optional) To override the generated value, you can specify a value for the MessageDeduplicationId
parameter for the Publish
action.
EndpointArn of endpoint to delete.
\"\ + \"documentation\":\" EndpointArn
of endpoint to delete.
Input for DeleteEndpoint action.
\"\ + \"documentation\":\"Input for DeleteEndpoint
action.
PlatformApplicationArn of platform application object to delete.
\"\ + \"documentation\":\" PlatformApplicationArn
of platform application object to delete.
Input for DeletePlatformApplication action.
\"\ + \"documentation\":\"Input for DeletePlatformApplication
action.
EndpointArn for GetEndpointAttributes input.
\"\ + \"documentation\":\" EndpointArn
for GetEndpointAttributes
input.
Input for GetEndpointAttributes action.
\"\ + \"documentation\":\"Input for GetEndpointAttributes
action.
Attributes include the following:
CustomUserData
– arbitrary user data to associate with the endpoint. Amazon SNS does not use this data. The data must be in UTF-8 format and less than 2KB.
Enabled
– flag that enables/disables delivery to the endpoint. Amazon SNS will set this to false when a notification service indicates to Amazon SNS that the endpoint is invalid. Users can set it back to true, typically after updating Token.
Token
– device token, also referred to as a registration id, for an app and mobile device. This is returned from the notification service when an app and mobile device are registered with the notification service.
The device token for the iOS platform is returned in lowercase.
Response from GetEndpointAttributes of the EndpointArn.
\"\ + \"documentation\":\"Response from GetEndpointAttributes
of the EndpointArn
.
PlatformApplicationArn for GetPlatformApplicationAttributesInput.
\"\ + \"documentation\":\" PlatformApplicationArn
for GetPlatformApplicationAttributesInput.
Input for GetPlatformApplicationAttributes action.
\"\ + \"documentation\":\"Input for GetPlatformApplicationAttributes
action.
Attributes include the following:
AppleCertificateExpiryDate
– The expiry date of the SSL certificate used to configure certificate-based authentication.
ApplePlatformTeamID
– The Apple developer account ID used to configure token-based authentication.
ApplePlatformBundleID
– The app identifier used to configure token-based authentication.
EventEndpointCreated
– Topic ARN to which EndpointCreated event notifications should be sent.
EventEndpointDeleted
– Topic ARN to which EndpointDeleted event notifications should be sent.
EventEndpointUpdated
– Topic ARN to which EndpointUpdate event notifications should be sent.
EventDeliveryFailure
– Topic ARN to which DeliveryFailure event notifications should be sent upon Direct Publish delivery failure (permanent) to one of the application's endpoints.
Attributes include the following:
AppleCertificateExpiryDate
– The expiry date of the SSL certificate used to configure certificate-based authentication.
ApplePlatformTeamID
– The Apple developer account ID used to configure token-based authentication.
ApplePlatformBundleID
– The app identifier used to configure token-based authentication.
AuthenticationMethod
– Returns the credential type used when sending push notifications from application to APNS/APNS_Sandbox, or application to GCM.
APNS – Returns the token or certificate.
GCM – Returns the token or key.
EventEndpointCreated
– Topic ARN to which EndpointCreated event notifications should be sent.
EventEndpointDeleted
– Topic ARN to which EndpointDeleted event notifications should be sent.
EventEndpointUpdated
– Topic ARN to which EndpointUpdate event notifications should be sent.
EventDeliveryFailure
– Topic ARN to which DeliveryFailure event notifications should be sent upon Direct Publish delivery failure (permanent) to one of the application's endpoints.
Response for GetPlatformApplicationAttributes action.
\"\ + \"documentation\":\"Response for GetPlatformApplicationAttributes
action.
Indicates that the specified state is not a valid state for an event source.
\",\ + \"error\":{\ + \"code\":\"InvalidState\",\ + \"httpStatusCode\":400,\ + \"senderFault\":true\ + },\ + \"exception\":true\ + },\ \"Iso2CountryCode\":{\ \"type\":\"string\",\ \"documentation\":\"The two-character code, in ISO 3166-1 alpha-2 format, for the country or region. For example, GB or US.\",\ @@ -1511,7 +1528,7 @@ - (NSString *)definitionString { \"members\":{\ \"message\":{\"shape\":\"string\"}\ },\ - \"documentation\":\"The request was rejected because the specified customer master key (CMK) isn't enabled.
\",\ + \"documentation\":\"The request was rejected because the specified Amazon Web Services KMS key isn't enabled.
\",\ \"error\":{\ \"code\":\"KMSDisabled\",\ \"httpStatusCode\":400,\ @@ -1524,7 +1541,7 @@ - (NSString *)definitionString { \"members\":{\ \"message\":{\"shape\":\"string\"}\ },\ - \"documentation\":\"The request was rejected because the state of the specified resource isn't valid for this request. For more information, see How Key State Affects Use of a Customer Master Key in the Key Management Service Developer Guide.
\",\ + \"documentation\":\"The request was rejected because the state of the specified resource isn't valid for this request. For more information, see Key states of Amazon Web Services KMS keys in the Key Management Service Developer Guide.
\",\ \"error\":{\ \"code\":\"KMSInvalidState\",\ \"httpStatusCode\":400,\ @@ -1596,28 +1613,28 @@ - (NSString *)definitionString { \"members\":{\ \"PlatformApplicationArn\":{\ \"shape\":\"String\",\ - \"documentation\":\"PlatformApplicationArn for ListEndpointsByPlatformApplicationInput action.
\"\ + \"documentation\":\" PlatformApplicationArn
for ListEndpointsByPlatformApplicationInput
action.
NextToken string is used when calling ListEndpointsByPlatformApplication action to retrieve additional records that are available after the first page results.
\"\ + \"documentation\":\" NextToken
string is used when calling ListEndpointsByPlatformApplication
action to retrieve additional records that are available after the first page results.
Input for ListEndpointsByPlatformApplication action.
\"\ + \"documentation\":\"Input for ListEndpointsByPlatformApplication
action.
Endpoints returned for ListEndpointsByPlatformApplication action.
\"\ + \"documentation\":\"Endpoints returned for ListEndpointsByPlatformApplication
action.
NextToken string is returned when calling ListEndpointsByPlatformApplication action if additional records are available after the first page results.
\"\ + \"documentation\":\" NextToken
string is returned when calling ListEndpointsByPlatformApplication
action if additional records are available after the first page results.
Response for ListEndpointsByPlatformApplication action.
\"\ + \"documentation\":\"Response for ListEndpointsByPlatformApplication
action.
NextToken string is used when calling ListPlatformApplications action to retrieve additional records that are available after the first page results.
\"\ + \"documentation\":\" NextToken
string is used when calling ListPlatformApplications
action to retrieve additional records that are available after the first page results.
Input for ListPlatformApplications action.
\"\ + \"documentation\":\"Input for ListPlatformApplications
action.
Platform applications returned when calling ListPlatformApplications action.
\"\ + \"documentation\":\"Platform applications returned when calling ListPlatformApplications
action.
NextToken string is returned when calling ListPlatformApplications action if additional records are available after the first page results.
\"\ + \"documentation\":\" NextToken
string is returned when calling ListPlatformApplications
action if additional records are available after the first page results.
Response for ListPlatformApplications action.
\"\ + \"documentation\":\"Response for ListPlatformApplications
action.
The date and time when the phone number was created.
\"\ },\ \"PhoneNumber\":{\ - \"shape\":\"String\",\ + \"shape\":\"PhoneNumber\",\ \"documentation\":\"The phone number.
\"\ },\ \"Status\":{\ @@ -1979,7 +1999,8 @@ - (NSString *)definitionString { \"PhoneNumberString\":{\ \"type\":\"string\",\ \"max\":20,\ - \"pattern\":\"^(\\\\+[0-9]{8,}|[0-9]{0,9})$\"\ + \"pattern\":\"^(\\\\+[0-9]{8,}|[0-9]{0,9})$\",\ + \"sensitive\":true\ },\ \"PlatformApplication\":{\ \"type\":\"structure\",\ @@ -2118,7 +2139,7 @@ - (NSString *)definitionString { \"documentation\":\"If you don't specify a value for the TargetArn
parameter, you must specify a value for the PhoneNumber
or TopicArn
parameters.
The phone number to which you want to deliver an SMS message. Use E.164 format.
If you don't specify a value for the PhoneNumber
parameter, you must specify a value for the TargetArn
or TopicArn
parameters.
Input for RemovePermission action.
\"\ },\ + \"ReplayLimitExceededException\":{\ + \"type\":\"structure\",\ + \"members\":{\ + \"message\":{\"shape\":\"string\"}\ + },\ + \"documentation\":\"Indicates that the request parameter has exceeded the maximum number of concurrent message replays.
\",\ + \"error\":{\ + \"code\":\"ReplayLimitExceeded\",\ + \"httpStatusCode\":403,\ + \"senderFault\":true\ + },\ + \"exception\":true\ + },\ \"ResourceNotFoundException\":{\ \"type\":\"structure\",\ \"members\":{\ @@ -2254,14 +2288,14 @@ - (NSString *)definitionString { \"members\":{\ \"EndpointArn\":{\ \"shape\":\"String\",\ - \"documentation\":\"EndpointArn used for SetEndpointAttributes action.
\"\ + \"documentation\":\"EndpointArn used for SetEndpointAttributes
action.
A map of the endpoint attributes. Attributes in this map include the following:
CustomUserData
– arbitrary user data to associate with the endpoint. Amazon SNS does not use this data. The data must be in UTF-8 format and less than 2KB.
Enabled
– flag that enables/disables delivery to the endpoint. Amazon SNS will set this to false when a notification service indicates to Amazon SNS that the endpoint is invalid. Users can set it back to true, typically after updating Token.
Token
– device token, also referred to as a registration id, for an app and mobile device. This is returned from the notification service when an app and mobile device are registered with the notification service.
Input for SetEndpointAttributes action.
\"\ + \"documentation\":\"Input for SetEndpointAttributes
action.
PlatformApplicationArn for SetPlatformApplicationAttributes action.
\"\ + \"documentation\":\" PlatformApplicationArn
for SetPlatformApplicationAttributes
action.
A map of the platform application attributes. Attributes in this map include the following:
PlatformCredential
– The credential received from the notification service.
For ADM, PlatformCredential
is client secret.
For Apple Services using certificate credentials, PlatformCredential
is private key.
For Apple Services using token credentials, PlatformCredential
is signing key.
For GCM (Firebase Cloud Messaging), PlatformCredential
is API key.
PlatformPrincipal
– The principal received from the notification service.
For ADM, PlatformPrincipal
is client id.
For Apple Services using certificate credentials, PlatformPrincipal
is SSL certificate.
For Apple Services using token credentials, PlatformPrincipal
is signing key ID.
For GCM (Firebase Cloud Messaging), there is no PlatformPrincipal
.
EventEndpointCreated
– Topic ARN to which EndpointCreated
event notifications are sent.
EventEndpointDeleted
– Topic ARN to which EndpointDeleted
event notifications are sent.
EventEndpointUpdated
– Topic ARN to which EndpointUpdate
event notifications are sent.
EventDeliveryFailure
– Topic ARN to which DeliveryFailure
event notifications are sent upon Direct Publish delivery failure (permanent) to one of the application's endpoints.
SuccessFeedbackRoleArn
– IAM role ARN used to give Amazon SNS write access to use CloudWatch Logs on your behalf.
FailureFeedbackRoleArn
– IAM role ARN used to give Amazon SNS write access to use CloudWatch Logs on your behalf.
SuccessFeedbackSampleRate
– Sample rate percentage (0-100) of successfully delivered messages.
The following attributes only apply to APNs
token-based authentication:
ApplePlatformTeamID
– The identifier that's assigned to your Apple developer account team.
ApplePlatformBundleID
– The bundle identifier that's assigned to your iOS app.
A map of the platform application attributes. Attributes in this map include the following:
PlatformCredential
– The credential received from the notification service.
For ADM, PlatformCredential
is client secret.
For Apple Services using certificate credentials, PlatformCredential
is private key.
For Apple Services using token credentials, PlatformCredential
is signing key.
For GCM (Firebase Cloud Messaging) using key credentials, there is no PlatformPrincipal
. The PlatformCredential
is API key
.
For GCM (Firebase Cloud Messaging) using token credentials, there is no PlatformPrincipal
. The PlatformCredential
is a JSON formatted private key file. When using the Amazon Web Services CLI, the file must be in string format and special characters must be ignored. To format the file correctly, Amazon SNS recommends using the following command: SERVICE_JSON=`jq @json <<< cat service.json`
.
PlatformPrincipal
– The principal received from the notification service.
For ADM, PlatformPrincipal
is client id.
For Apple Services using certificate credentials, PlatformPrincipal
is SSL certificate.
For Apple Services using token credentials, PlatformPrincipal
is signing key ID.
For GCM (Firebase Cloud Messaging), there is no PlatformPrincipal
.
EventEndpointCreated
– Topic ARN to which EndpointCreated
event notifications are sent.
EventEndpointDeleted
– Topic ARN to which EndpointDeleted
event notifications are sent.
EventEndpointUpdated
– Topic ARN to which EndpointUpdate
event notifications are sent.
EventDeliveryFailure
– Topic ARN to which DeliveryFailure
event notifications are sent upon Direct Publish delivery failure (permanent) to one of the application's endpoints.
SuccessFeedbackRoleArn
– IAM role ARN used to give Amazon SNS write access to use CloudWatch Logs on your behalf.
FailureFeedbackRoleArn
– IAM role ARN used to give Amazon SNS write access to use CloudWatch Logs on your behalf.
SuccessFeedbackSampleRate
– Sample rate percentage (0-100) of successfully delivered messages.
The following attributes only apply to APNs
token-based authentication:
ApplePlatformTeamID
– The identifier that's assigned to your Apple developer account team.
ApplePlatformBundleID
– The bundle identifier that's assigned to your iOS app.
Input for SetPlatformApplicationAttributes action.
\"\ + \"documentation\":\"Input for SetPlatformApplicationAttributes
action.
A map of attributes with their corresponding values.
The following lists the names, descriptions, and values of the special request parameters that the Subscribe
action uses:
DeliveryPolicy
– The policy that defines how Amazon SNS retries failed deliveries to HTTP/S endpoints.
FilterPolicy
– The simple JSON object that lets your subscriber receive only a subset of messages, rather than receiving every message published to the topic.
FilterPolicyScope
– This attribute lets you choose the filtering scope by using one of the following string value types:
MessageAttributes
(default) – The filter is applied on the message attributes.
MessageBody
– The filter is applied on the message body.
RawMessageDelivery
– When set to true
, enables raw message delivery to Amazon SQS or HTTP/S endpoints. This eliminates the need for the endpoints to process JSON formatting, which is otherwise created for Amazon SNS metadata.
RedrivePolicy
– When specified, sends undeliverable messages to the specified Amazon SQS dead-letter queue. Messages that can't be delivered due to client errors (for example, when the subscribed endpoint is unreachable) or server errors (for example, when the service that powers the subscribed endpoint becomes unavailable) are held in the dead-letter queue for further analysis or reprocessing.
The following attribute applies only to Amazon Kinesis Data Firehose delivery stream subscriptions:
SubscriptionRoleArn
– The ARN of the IAM role that has the following:
Permission to write to the Kinesis Data Firehose delivery stream
Amazon SNS listed as a trusted entity
Specifying a valid ARN for this attribute is required for Kinesis Data Firehose delivery stream subscriptions. For more information, see Fanout to Kinesis Data Firehose delivery streams in the Amazon SNS Developer Guide.
A map of attributes with their corresponding values.
The following lists the names, descriptions, and values of the special request parameters that the Subscribe
action uses:
DeliveryPolicy
– The policy that defines how Amazon SNS retries failed deliveries to HTTP/S endpoints.
FilterPolicy
– The simple JSON object that lets your subscriber receive only a subset of messages, rather than receiving every message published to the topic.
FilterPolicyScope
– This attribute lets you choose the filtering scope by using one of the following string value types:
MessageAttributes
(default) – The filter is applied on the message attributes.
MessageBody
– The filter is applied on the message body.
RawMessageDelivery
– When set to true
, enables raw message delivery to Amazon SQS or HTTP/S endpoints. This eliminates the need for the endpoints to process JSON formatting, which is otherwise created for Amazon SNS metadata.
RedrivePolicy
– When specified, sends undeliverable messages to the specified Amazon SQS dead-letter queue. Messages that can't be delivered due to client errors (for example, when the subscribed endpoint is unreachable) or server errors (for example, when the service that powers the subscribed endpoint becomes unavailable) are held in the dead-letter queue for further analysis or reprocessing.
The following attribute applies only to Amazon Kinesis Data Firehose delivery stream subscriptions:
SubscriptionRoleArn
– The ARN of the IAM role that has the following:
Permission to write to the Kinesis Data Firehose delivery stream
Amazon SNS listed as a trusted entity
Specifying a valid ARN for this attribute is required for Kinesis Data Firehose delivery stream subscriptions. For more information, see Fanout to Kinesis Data Firehose delivery streams in the Amazon SNS Developer Guide.
The following attributes apply only to FIFO topics:
ReplayPolicy
– Adds or updates an inline policy document for a subscription to replay messages stored in the specified Amazon SNS topic.
ReplayStatus
– Retrieves the status of the subscription message replay, which can be one of the following:
Completed
– The replay has successfully redelivered all messages, and is now delivering newly published messages. If an ending point was specified in the ReplayPolicy
then the subscription will no longer receive newly published messages.
In progress
– The replay is currently replaying the selected messages.
Failed
– The replay was unable to complete.
Pending
– The default state while the replay initiates.
Creates a platform application object for one of the supported push notification services, such as APNS and GCM (Firebase Cloud Messaging), to which devices and mobile apps may register. You must specify PlatformPrincipal
and PlatformCredential
attributes when using the CreatePlatformApplication
action.
PlatformPrincipal
and PlatformCredential
are received from the notification service.
For ADM
, PlatformPrincipal
is client id
and PlatformCredential
is client secret
.
For Baidu
, PlatformPrincipal
is API key
and PlatformCredential
is secret key
.
For APNS
and APNS_SANDBOX
using certificate credentials, PlatformPrincipal
is SSL certificate
and PlatformCredential
is private key
.
For APNS
and APNS_SANDBOX
using token credentials, PlatformPrincipal
is signing key ID
and PlatformCredential
is signing key
.
For GCM
(Firebase Cloud Messaging), there is no PlatformPrincipal
and the PlatformCredential
is API key
.
For MPNS
, PlatformPrincipal
is TLS certificate
and PlatformCredential
is private key
.
For WNS
, PlatformPrincipal
is Package Security Identifier
and PlatformCredential
is secret key
.
You can use the returned PlatformApplicationArn
as an attribute for the CreatePlatformEndpoint
action.
Creates a platform application object for one of the supported push notification services, such as APNS and GCM (Firebase Cloud Messaging), to which devices and mobile apps may register. You must specify PlatformPrincipal
and PlatformCredential
attributes when using the CreatePlatformApplication
action.
PlatformPrincipal
and PlatformCredential
are received from the notification service.
For ADM
, PlatformPrincipal
is client id
and PlatformCredential
is client secret
.
For Baidu
, PlatformPrincipal
is API key
and PlatformCredential
is secret key
.
For APNS
and APNS_SANDBOX
using certificate credentials, PlatformPrincipal
is SSL certificate
and PlatformCredential
is private key
.
For APNS
and APNS_SANDBOX
using token credentials, PlatformPrincipal
is signing key ID
and PlatformCredential
is signing key
.
For GCM (Firebase Cloud Messaging) using key credentials, there is no PlatformPrincipal
. The PlatformCredential
is API key
.
For GCM (Firebase Cloud Messaging) using token credentials, there is no PlatformPrincipal
. The PlatformCredential
is a JSON formatted private key file. When using the Amazon Web Services CLI, the file must be in string format and special characters must be ignored. To format the file correctly, Amazon SNS recommends using the following command: SERVICE_JSON=`jq @json <<< cat service.json`
.
For MPNS
, PlatformPrincipal
is TLS certificate
and PlatformCredential
is private key
.
For WNS
, PlatformPrincipal
is Package Security Identifier
and PlatformCredential
is secret key
.
You can use the returned PlatformApplicationArn
as an attribute for the CreatePlatformEndpoint
action.
Creates a platform application object for one of the supported push notification services, such as APNS and GCM (Firebase Cloud Messaging), to which devices and mobile apps may register. You must specify PlatformPrincipal
and PlatformCredential
attributes when using the CreatePlatformApplication
action.
PlatformPrincipal
and PlatformCredential
are received from the notification service.
For ADM
, PlatformPrincipal
is client id
and PlatformCredential
is client secret
.
For Baidu
, PlatformPrincipal
is API key
and PlatformCredential
is secret key
.
For APNS
and APNS_SANDBOX
using certificate credentials, PlatformPrincipal
is SSL certificate
and PlatformCredential
is private key
.
For APNS
and APNS_SANDBOX
using token credentials, PlatformPrincipal
is signing key ID
and PlatformCredential
is signing key
.
For GCM
(Firebase Cloud Messaging), there is no PlatformPrincipal
and the PlatformCredential
is API key
.
For MPNS
, PlatformPrincipal
is TLS certificate
and PlatformCredential
is private key
.
For WNS
, PlatformPrincipal
is Package Security Identifier
and PlatformCredential
is secret key
.
You can use the returned PlatformApplicationArn
as an attribute for the CreatePlatformEndpoint
action.
Creates a platform application object for one of the supported push notification services, such as APNS and GCM (Firebase Cloud Messaging), to which devices and mobile apps may register. You must specify PlatformPrincipal
and PlatformCredential
attributes when using the CreatePlatformApplication
action.
PlatformPrincipal
and PlatformCredential
are received from the notification service.
For ADM
, PlatformPrincipal
is client id
and PlatformCredential
is client secret
.
For Baidu
, PlatformPrincipal
is API key
and PlatformCredential
is secret key
.
For APNS
and APNS_SANDBOX
using certificate credentials, PlatformPrincipal
is SSL certificate
and PlatformCredential
is private key
.
For APNS
and APNS_SANDBOX
using token credentials, PlatformPrincipal
is signing key ID
and PlatformCredential
is signing key
.
For GCM (Firebase Cloud Messaging) using key credentials, there is no PlatformPrincipal
. The PlatformCredential
is API key
.
For GCM (Firebase Cloud Messaging) using token credentials, there is no PlatformPrincipal
. The PlatformCredential
is a JSON formatted private key file. When using the Amazon Web Services CLI, the file must be in string format and special characters must be ignored. To format the file correctly, Amazon SNS recommends using the following command: SERVICE_JSON=`jq @json <<< cat service.json`
.
For MPNS
, PlatformPrincipal
is TLS certificate
and PlatformCredential
is private key
.
For WNS
, PlatformPrincipal
is Package Security Identifier
and PlatformCredential
is secret key
.
You can use the returned PlatformApplicationArn
as an attribute for the CreatePlatformEndpoint
action.
Subscribes an endpoint to an Amazon SNS topic. If the endpoint type is HTTP/S or email, or if the endpoint and the topic are not in the same Amazon Web Services account, the endpoint owner must run the ConfirmSubscription
action to confirm the subscription.
You call the ConfirmSubscription
action with the token from the subscription response. Confirmation tokens are valid for three days.
This action is throttled at 100 transactions per second (TPS).
+Subscribes an endpoint to an Amazon SNS topic. If the endpoint type is HTTP/S or email, or if the endpoint and the topic are not in the same Amazon Web Services account, the endpoint owner must run the ConfirmSubscription
action to confirm the subscription.
You call the ConfirmSubscription
action with the token from the subscription response. Confirmation tokens are valid for two days.
This action is throttled at 100 transactions per second (TPS).
@param request A container for the necessary parameters to execute the Subscribe service method. - @return An instance of `AWSTask`. On successful execution, `task.result` will contain an instance of `AWSSNSSubscribeResponse`. On failed execution, `task.error` may contain an `NSError` with `AWSSNSErrorDomain` domain and the following error code: `AWSSNSErrorSubscriptionLimitExceeded`, `AWSSNSErrorFilterPolicyLimitExceeded`, `AWSSNSErrorInvalidParameter`, `AWSSNSErrorInternalError`, `AWSSNSErrorNotFound`, `AWSSNSErrorAuthorizationError`, `AWSSNSErrorInvalidSecurity`. + @return An instance of `AWSTask`. On successful execution, `task.result` will contain an instance of `AWSSNSSubscribeResponse`. On failed execution, `task.error` may contain an `NSError` with `AWSSNSErrorDomain` domain and the following error code: `AWSSNSErrorSubscriptionLimitExceeded`, `AWSSNSErrorFilterPolicyLimitExceeded`, `AWSSNSErrorReplayLimitExceeded`, `AWSSNSErrorInvalidParameter`, `AWSSNSErrorInternalError`, `AWSSNSErrorNotFound`, `AWSSNSErrorAuthorizationError`, `AWSSNSErrorInvalidSecurity`. @see AWSSNSSubscribeInput @see AWSSNSSubscribeResponse @@ -1082,12 +1082,12 @@ FOUNDATION_EXPORT NSString *const AWSSNSSDKVersion; - (AWSTaskSubscribes an endpoint to an Amazon SNS topic. If the endpoint type is HTTP/S or email, or if the endpoint and the topic are not in the same Amazon Web Services account, the endpoint owner must run the ConfirmSubscription
action to confirm the subscription.
You call the ConfirmSubscription
action with the token from the subscription response. Confirmation tokens are valid for three days.
This action is throttled at 100 transactions per second (TPS).
+Subscribes an endpoint to an Amazon SNS topic. If the endpoint type is HTTP/S or email, or if the endpoint and the topic are not in the same Amazon Web Services account, the endpoint owner must run the ConfirmSubscription
action to confirm the subscription.
You call the ConfirmSubscription
action with the token from the subscription response. Confirmation tokens are valid for two days.
This action is throttled at 100 transactions per second (TPS).
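This second hunk applies the same wording change to the completion-handler variant of the method, and the hunk below adds `AWSSNSErrorReplayLimitExceeded` to the error codes a caller may see. A sketch, under the same placeholder assumptions as above, of checking for that newly mapped code:

```objc
// Sketch only; input values are the same placeholders as in the previous example.
AWSSNSSubscribeInput *input = [AWSSNSSubscribeInput new];
input.topicArn = @"arn:aws:sns:us-east-1:123456789012:MyTopic";
input.protocols = @"https";
input.endpoint = @"https://example.com/sns-handler";

[[AWSSNS defaultSNS] subscribe:input completionHandler:^(AWSSNSSubscribeResponse * _Nullable response, NSError * _Nullable error) {
    if (error) {
        if ([error.domain isEqualToString:AWSSNSErrorDomain] && error.code == AWSSNSErrorReplayLimitExceeded) {
            // New in this release: ReplayLimitExceeded now maps to its own enum
            // value instead of falling through as an unrecognized error.
            NSLog(@"Replay limit exceeded for this subscription");
        } else {
            NSLog(@"Subscribe failed: %@", error);
        }
    } else {
        NSLog(@"SubscriptionArn: %@", response.subscriptionArn);
    }
}];
```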
@param request A container for the necessary parameters to execute the Subscribe service method.
@param completionHandler The completion handler to call when the load request is complete.
                         `response` - A response object, or `nil` if the request failed.
-                        `error` - An error object that indicates why the request failed, or `nil` if the request was successful. On failed execution, `error` may contain an `NSError` with `AWSSNSErrorDomain` domain and the following error code: `AWSSNSErrorSubscriptionLimitExceeded`, `AWSSNSErrorFilterPolicyLimitExceeded`, `AWSSNSErrorInvalidParameter`, `AWSSNSErrorInternalError`, `AWSSNSErrorNotFound`, `AWSSNSErrorAuthorizationError`, `AWSSNSErrorInvalidSecurity`.
+                        `error` - An error object that indicates why the request failed, or `nil` if the request was successful. On failed execution, `error` may contain an `NSError` with `AWSSNSErrorDomain` domain and the following error code: `AWSSNSErrorSubscriptionLimitExceeded`, `AWSSNSErrorFilterPolicyLimitExceeded`, `AWSSNSErrorReplayLimitExceeded`, `AWSSNSErrorInvalidParameter`, `AWSSNSErrorInternalError`, `AWSSNSErrorNotFound`, `AWSSNSErrorAuthorizationError`, `AWSSNSErrorInvalidSecurity`.

@see AWSSNSSubscribeInput
@see AWSSNSSubscribeResponse
diff --git a/AWSSNS/AWSSNSService.m b/AWSSNS/AWSSNSService.m
index e6a8ecc1ae4..4afd42e3ddc 100644
--- a/AWSSNS/AWSSNSService.m
+++ b/AWSSNS/AWSSNSService.m
@@ -1,5 +1,5 @@
 //
-// Copyright 2010-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+// Copyright 2010-2024 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License").
 // You may not use this file except in compliance with the License.
@@ -25,7 +25,7 @@
 #import "AWSSNSResources.h"

 static NSString *const AWSInfoSNS = @"SNS";
-NSString *const AWSSNSSDKVersion = @"2.33.10";
+NSString *const AWSSNSSDKVersion = @"2.34.0";

 @interface AWSSNSResponseSerializer : AWSXMLResponseSerializer

@@ -51,6 +51,7 @@ + (void)initialize {
                             @"InvalidParameter" : @(AWSSNSErrorInvalidParameter),
                             @"ParameterValueInvalid" : @(AWSSNSErrorInvalidParameterValue),
                             @"InvalidSecurity" : @(AWSSNSErrorInvalidSecurity),
+                            @"InvalidState" : @(AWSSNSErrorInvalidState),
                             @"KMSAccessDenied" : @(AWSSNSErrorKMSAccessDenied),
                             @"KMSDisabled" : @(AWSSNSErrorKMSDisabled),
                             @"KMSInvalidState" : @(AWSSNSErrorKMSInvalidState),
@@ -60,6 +61,7 @@ + (void)initialize {
                             @"NotFound" : @(AWSSNSErrorNotFound),
                             @"OptedOut" : @(AWSSNSErrorOptedOut),
                             @"PlatformApplicationDisabled" : @(AWSSNSErrorPlatformApplicationDisabled),
+                            @"ReplayLimitExceeded" : @(AWSSNSErrorReplayLimitExceeded),
                             @"ResourceNotFound" : @(AWSSNSErrorResourceNotFound),
                             @"StaleTag" : @(AWSSNSErrorStaleTag),
                             @"SubscriptionLimitExceeded" : @(AWSSNSErrorSubscriptionLimitExceeded),
diff --git a/AWSSNS/Info.plist b/AWSSNS/Info.plist
index 00a733ed3c8..f13059556e3 100644
--- a/AWSSNS/Info.plist
+++ b/AWSSNS/Info.plist
@@ -15,7 +15,7 @@