aws.kinesis.FirehoseDeliveryStream

AWS Classic v6.3.0 published on Thursday, Sep 28, 2023 by Pulumi

Try AWS Native preview for resources not in the classic version.

    Provides a Kinesis Firehose Delivery Stream resource. Amazon Kinesis Firehose is a fully managed, elastic service for delivering real-time data streams to destinations such as Amazon S3 and Amazon Redshift.

    For more details, see the Amazon Kinesis Firehose Documentation.
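
    As a minimal sketch in TypeScript (assuming an existing S3 bucket, bucket, and an IAM role, firehoseRole, that Firehose is allowed to assume, both created in the examples below), the smallest useful stream just names a destination and wires in the role and bucket ARNs:

    import * as aws from "@pulumi/aws";

    // Minimal extended_s3 stream: no processing configuration, default buffering.
    // firehoseRole and bucket are assumed to be defined as in the examples below.
    const minimalStream = new aws.kinesis.FirehoseDeliveryStream("minimalStream", {
        destination: "extended_s3",
        extendedS3Configuration: {
            roleArn: firehoseRole.arn,
            bucketArn: bucket.arn,
        },
    });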

    Example Usage

    Extended S3 Destination

    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Aws = Pulumi.Aws;
    
    return await Deployment.RunAsync(() => 
    {
        var bucket = new Aws.S3.BucketV2("bucket");
    
        var firehoseAssumeRole = Aws.Iam.GetPolicyDocument.Invoke(new()
        {
            Statements = new[]
            {
                new Aws.Iam.Inputs.GetPolicyDocumentStatementInputArgs
                {
                    Effect = "Allow",
                    Principals = new[]
                    {
                        new Aws.Iam.Inputs.GetPolicyDocumentStatementPrincipalInputArgs
                        {
                            Type = "Service",
                            Identifiers = new[]
                            {
                                "firehose.amazonaws.com",
                            },
                        },
                    },
                    Actions = new[]
                    {
                        "sts:AssumeRole",
                    },
                },
            },
        });
    
        var firehoseRole = new Aws.Iam.Role("firehoseRole", new()
        {
            AssumeRolePolicy = firehoseAssumeRole.Apply(getPolicyDocumentResult => getPolicyDocumentResult.Json),
        });
    
        var lambdaAssumeRole = Aws.Iam.GetPolicyDocument.Invoke(new()
        {
            Statements = new[]
            {
                new Aws.Iam.Inputs.GetPolicyDocumentStatementInputArgs
                {
                    Effect = "Allow",
                    Principals = new[]
                    {
                        new Aws.Iam.Inputs.GetPolicyDocumentStatementPrincipalInputArgs
                        {
                            Type = "Service",
                            Identifiers = new[]
                            {
                                "lambda.amazonaws.com",
                            },
                        },
                    },
                    Actions = new[]
                    {
                        "sts:AssumeRole",
                    },
                },
            },
        });
    
        var lambdaIam = new Aws.Iam.Role("lambdaIam", new()
        {
            AssumeRolePolicy = lambdaAssumeRole.Apply(getPolicyDocumentResult => getPolicyDocumentResult.Json),
        });
    
        var lambdaProcessor = new Aws.Lambda.Function("lambdaProcessor", new()
        {
            Code = new FileArchive("lambda.zip"),
            Role = lambdaIam.Arn,
            Handler = "exports.handler",
            Runtime = "nodejs16.x",
        });
    
        var extendedS3Stream = new Aws.Kinesis.FirehoseDeliveryStream("extendedS3Stream", new()
        {
            Destination = "extended_s3",
            ExtendedS3Configuration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationArgs
            {
                RoleArn = firehoseRole.Arn,
                BucketArn = bucket.Arn,
                ProcessingConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationArgs
                {
                    Enabled = true,
                    Processors = new[]
                    {
                        new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs
                        {
                            Type = "Lambda",
                            Parameters = new[]
                            {
                                new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs
                                {
                                    ParameterName = "LambdaArn",
                                    ParameterValue = lambdaProcessor.Arn.Apply(arn => $"{arn}:$LATEST"),
                                },
                            },
                        },
                    },
                },
            },
        });
    
        var bucketAcl = new Aws.S3.BucketAclV2("bucketAcl", new()
        {
            Bucket = bucket.Id,
            Acl = "private",
        });
    
    });
    
    package main
    
    import (
    	"fmt"
    
    	"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/iam"
    	"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/kinesis"
    	"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/lambda"
    	"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/s3"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		bucket, err := s3.NewBucketV2(ctx, "bucket", nil)
    		if err != nil {
    			return err
    		}
    		firehoseAssumeRole, err := iam.GetPolicyDocument(ctx, &iam.GetPolicyDocumentArgs{
    			Statements: []iam.GetPolicyDocumentStatement{
    				{
    					Effect: pulumi.StringRef("Allow"),
    					Principals: []iam.GetPolicyDocumentStatementPrincipal{
    						{
    							Type: "Service",
    							Identifiers: []string{
    								"firehose.amazonaws.com",
    							},
    						},
    					},
    					Actions: []string{
    						"sts:AssumeRole",
    					},
    				},
    			},
    		}, nil)
    		if err != nil {
    			return err
    		}
    		firehoseRole, err := iam.NewRole(ctx, "firehoseRole", &iam.RoleArgs{
    			AssumeRolePolicy: pulumi.String(firehoseAssumeRole.Json),
    		})
    		if err != nil {
    			return err
    		}
    		lambdaAssumeRole, err := iam.GetPolicyDocument(ctx, &iam.GetPolicyDocumentArgs{
    			Statements: []iam.GetPolicyDocumentStatement{
    				{
    					Effect: pulumi.StringRef("Allow"),
    					Principals: []iam.GetPolicyDocumentStatementPrincipal{
    						{
    							Type: "Service",
    							Identifiers: []string{
    								"lambda.amazonaws.com",
    							},
    						},
    					},
    					Actions: []string{
    						"sts:AssumeRole",
    					},
    				},
    			},
    		}, nil)
    		if err != nil {
    			return err
    		}
    		lambdaIam, err := iam.NewRole(ctx, "lambdaIam", &iam.RoleArgs{
    			AssumeRolePolicy: pulumi.String(lambdaAssumeRole.Json),
    		})
    		if err != nil {
    			return err
    		}
    		lambdaProcessor, err := lambda.NewFunction(ctx, "lambdaProcessor", &lambda.FunctionArgs{
    			Code:    pulumi.NewFileArchive("lambda.zip"),
    			Role:    lambdaIam.Arn,
    			Handler: pulumi.String("exports.handler"),
    			Runtime: pulumi.String("nodejs16.x"),
    		})
    		if err != nil {
    			return err
    		}
    		_, err = kinesis.NewFirehoseDeliveryStream(ctx, "extendedS3Stream", &kinesis.FirehoseDeliveryStreamArgs{
    			Destination: pulumi.String("extended_s3"),
    			ExtendedS3Configuration: &kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationArgs{
    				RoleArn:   firehoseRole.Arn,
    				BucketArn: bucket.Arn,
    				ProcessingConfiguration: &kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationArgs{
    					Enabled: pulumi.Bool(true),
    					Processors: kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArray{
    						&kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs{
    							Type: pulumi.String("Lambda"),
    							Parameters: kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArray{
    								&kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs{
    									ParameterName: pulumi.String("LambdaArn"),
    									ParameterValue: lambdaProcessor.Arn.ApplyT(func(arn string) (string, error) {
    										return fmt.Sprintf("%v:$LATEST", arn), nil
    									}).(pulumi.StringOutput),
    								},
    							},
    						},
    					},
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		_, err = s3.NewBucketAclV2(ctx, "bucketAcl", &s3.BucketAclV2Args{
    			Bucket: bucket.ID(),
    			Acl:    pulumi.String("private"),
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.aws.s3.BucketV2;
    import com.pulumi.aws.iam.IamFunctions;
    import com.pulumi.aws.iam.inputs.GetPolicyDocumentArgs;
    import com.pulumi.aws.iam.inputs.GetPolicyDocumentStatementArgs;
    import com.pulumi.aws.iam.inputs.GetPolicyDocumentStatementPrincipalArgs;
    import com.pulumi.aws.iam.Role;
    import com.pulumi.aws.iam.RoleArgs;
    import com.pulumi.aws.lambda.Function;
    import com.pulumi.aws.lambda.FunctionArgs;
    import com.pulumi.aws.kinesis.FirehoseDeliveryStream;
    import com.pulumi.aws.kinesis.FirehoseDeliveryStreamArgs;
    import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamExtendedS3ConfigurationArgs;
    import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationArgs;
    import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs;
    import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs;
    import com.pulumi.aws.s3.BucketAclV2;
    import com.pulumi.aws.s3.BucketAclV2Args;
    import com.pulumi.asset.FileArchive;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var bucket = new BucketV2("bucket");
    
            final var firehoseAssumeRole = IamFunctions.getPolicyDocument(GetPolicyDocumentArgs.builder()
                .statements(GetPolicyDocumentStatementArgs.builder()
                    .effect("Allow")
                    .principals(GetPolicyDocumentStatementPrincipalArgs.builder()
                        .type("Service")
                        .identifiers("firehose.amazonaws.com")
                        .build())
                    .actions("sts:AssumeRole")
                    .build())
                .build());
    
            var firehoseRole = new Role("firehoseRole", RoleArgs.builder()        
                .assumeRolePolicy(firehoseAssumeRole.applyValue(getPolicyDocumentResult -> getPolicyDocumentResult.json()))
                .build());
    
            final var lambdaAssumeRole = IamFunctions.getPolicyDocument(GetPolicyDocumentArgs.builder()
                .statements(GetPolicyDocumentStatementArgs.builder()
                    .effect("Allow")
                    .principals(GetPolicyDocumentStatementPrincipalArgs.builder()
                        .type("Service")
                        .identifiers("lambda.amazonaws.com")
                        .build())
                    .actions("sts:AssumeRole")
                    .build())
                .build());
    
            var lambdaIam = new Role("lambdaIam", RoleArgs.builder()        
                .assumeRolePolicy(lambdaAssumeRole.applyValue(getPolicyDocumentResult -> getPolicyDocumentResult.json()))
                .build());
    
            var lambdaProcessor = new Function("lambdaProcessor", FunctionArgs.builder()        
                .code(new FileArchive("lambda.zip"))
                .role(lambdaIam.arn())
                .handler("exports.handler")
                .runtime("nodejs16.x")
                .build());
    
            var extendedS3Stream = new FirehoseDeliveryStream("extendedS3Stream", FirehoseDeliveryStreamArgs.builder()        
                .destination("extended_s3")
                .extendedS3Configuration(FirehoseDeliveryStreamExtendedS3ConfigurationArgs.builder()
                    .roleArn(firehoseRole.arn())
                    .bucketArn(bucket.arn())
                    .processingConfiguration(FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationArgs.builder()
                        .enabled("true")
                        .processors(FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs.builder()
                            .type("Lambda")
                            .parameters(FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs.builder()
                                .parameterName("LambdaArn")
                                .parameterValue(lambdaProcessor.arn().applyValue(arn -> String.format("%s:$LATEST", arn)))
                                .build())
                            .build())
                        .build())
                    .build())
                .build());
    
            var bucketAcl = new BucketAclV2("bucketAcl", BucketAclV2Args.builder()        
                .bucket(bucket.id())
                .acl("private")
                .build());
    
        }
    }
    
    import pulumi
    import pulumi_aws as aws
    
    bucket = aws.s3.BucketV2("bucket")
    firehose_assume_role = aws.iam.get_policy_document(statements=[aws.iam.GetPolicyDocumentStatementArgs(
        effect="Allow",
        principals=[aws.iam.GetPolicyDocumentStatementPrincipalArgs(
            type="Service",
            identifiers=["firehose.amazonaws.com"],
        )],
        actions=["sts:AssumeRole"],
    )])
    firehose_role = aws.iam.Role("firehoseRole", assume_role_policy=firehose_assume_role.json)
    lambda_assume_role = aws.iam.get_policy_document(statements=[aws.iam.GetPolicyDocumentStatementArgs(
        effect="Allow",
        principals=[aws.iam.GetPolicyDocumentStatementPrincipalArgs(
            type="Service",
            identifiers=["lambda.amazonaws.com"],
        )],
        actions=["sts:AssumeRole"],
    )])
    lambda_iam = aws.iam.Role("lambdaIam", assume_role_policy=lambda_assume_role.json)
    lambda_processor = aws.lambda_.Function("lambdaProcessor",
        code=pulumi.FileArchive("lambda.zip"),
        role=lambda_iam.arn,
        handler="exports.handler",
        runtime="nodejs16.x")
    extended_s3_stream = aws.kinesis.FirehoseDeliveryStream("extendedS3Stream",
        destination="extended_s3",
        extended_s3_configuration=aws.kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationArgs(
            role_arn=firehose_role.arn,
            bucket_arn=bucket.arn,
            processing_configuration=aws.kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationArgs(
                enabled=True,
                processors=[aws.kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs(
                    type="Lambda",
                    parameters=[aws.kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs(
                        parameter_name="LambdaArn",
                        parameter_value=lambda_processor.arn.apply(lambda arn: f"{arn}:$LATEST"),
                    )],
                )],
            ),
        ))
    bucket_acl = aws.s3.BucketAclV2("bucketAcl",
        bucket=bucket.id,
        acl="private")
    
    import * as pulumi from "@pulumi/pulumi";
    import * as aws from "@pulumi/aws";
    
    const bucket = new aws.s3.BucketV2("bucket", {});
    const firehoseAssumeRole = aws.iam.getPolicyDocument({
        statements: [{
            effect: "Allow",
            principals: [{
                type: "Service",
                identifiers: ["firehose.amazonaws.com"],
            }],
            actions: ["sts:AssumeRole"],
        }],
    });
    const firehoseRole = new aws.iam.Role("firehoseRole", {assumeRolePolicy: firehoseAssumeRole.then(firehoseAssumeRole => firehoseAssumeRole.json)});
    const lambdaAssumeRole = aws.iam.getPolicyDocument({
        statements: [{
            effect: "Allow",
            principals: [{
                type: "Service",
                identifiers: ["lambda.amazonaws.com"],
            }],
            actions: ["sts:AssumeRole"],
        }],
    });
    const lambdaIam = new aws.iam.Role("lambdaIam", {assumeRolePolicy: lambdaAssumeRole.then(lambdaAssumeRole => lambdaAssumeRole.json)});
    const lambdaProcessor = new aws.lambda.Function("lambdaProcessor", {
        code: new pulumi.asset.FileArchive("lambda.zip"),
        role: lambdaIam.arn,
        handler: "exports.handler",
        runtime: "nodejs16.x",
    });
    const extendedS3Stream = new aws.kinesis.FirehoseDeliveryStream("extendedS3Stream", {
        destination: "extended_s3",
        extendedS3Configuration: {
            roleArn: firehoseRole.arn,
            bucketArn: bucket.arn,
            processingConfiguration: {
                enabled: true,
                processors: [{
                    type: "Lambda",
                    parameters: [{
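                        // Firehose expects a Lambda ARN qualified with a version or alias, hence the ":$LATEST" suffix below.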
                        parameterName: "LambdaArn",
                        parameterValue: pulumi.interpolate`${lambdaProcessor.arn}:$LATEST`,
                    }],
                }],
            },
        },
    });
    const bucketAcl = new aws.s3.BucketAclV2("bucketAcl", {
        bucket: bucket.id,
        acl: "private",
    });
    
    resources:
      extendedS3Stream:
        type: aws:kinesis:FirehoseDeliveryStream
        properties:
          destination: extended_s3
          extendedS3Configuration:
            roleArn: ${firehoseRole.arn}
            bucketArn: ${bucket.arn}
            processingConfiguration:
              enabled: true
              processors:
                - type: Lambda
                  parameters:
                    - parameterName: LambdaArn
                      parameterValue: ${lambdaProcessor.arn}:$LATEST
      bucket:
        type: aws:s3:BucketV2
      bucketAcl:
        type: aws:s3:BucketAclV2
        properties:
          bucket: ${bucket.id}
          acl: private
      firehoseRole:
        type: aws:iam:Role
        properties:
          assumeRolePolicy: ${firehoseAssumeRole.json}
      lambdaIam:
        type: aws:iam:Role
        properties:
          assumeRolePolicy: ${lambdaAssumeRole.json}
      lambdaProcessor:
        type: aws:lambda:Function
        properties:
          code:
            fn::FileArchive: lambda.zip
          role: ${lambdaIam.arn}
          handler: exports.handler
          runtime: nodejs16.x
    variables:
      firehoseAssumeRole:
        fn::invoke:
          Function: aws:iam:getPolicyDocument
          Arguments:
            statements:
              - effect: Allow
                principals:
                  - type: Service
                    identifiers:
                      - firehose.amazonaws.com
                actions:
                  - sts:AssumeRole
      lambdaAssumeRole:
        fn::invoke:
          Function: aws:iam:getPolicyDocument
          Arguments:
            statements:
              - effect: Allow
                principals:
                  - type: Service
                    identifiers:
                      - lambda.amazonaws.com
                actions:
                  - sts:AssumeRole
    

    Extended S3 Destination with dynamic partitioning

    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Aws = Pulumi.Aws;
    
    return await Deployment.RunAsync(() => 
    {
        var extendedS3Stream = new Aws.Kinesis.FirehoseDeliveryStream("extendedS3Stream", new()
        {
            Destination = "extended_s3",
            ExtendedS3Configuration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationArgs
            {
                RoleArn = aws_iam_role.Firehose_role.Arn,
                BucketArn = aws_s3_bucket.Bucket.Arn,
                BufferingSize = 64,
                DynamicPartitioningConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationDynamicPartitioningConfigurationArgs
                {
                    Enabled = true,
                },
                Prefix = "data/customer_id=!{partitionKeyFromQuery:customer_id}/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/",
                ErrorOutputPrefix = "errors/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/!{firehose:error-output-type}/",
                ProcessingConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationArgs
                {
                    Enabled = true,
                    Processors = new[]
                    {
                        new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs
                        {
                            Type = "RecordDeAggregation",
                            Parameters = new[]
                            {
                                new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs
                                {
                                    ParameterName = "SubRecordType",
                                    ParameterValue = "JSON",
                                },
                            },
                        },
                        new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs
                        {
                            Type = "AppendDelimiterToRecord",
                        },
                        new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs
                        {
                            Type = "MetadataExtraction",
                            Parameters = new[]
                            {
                                new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs
                                {
                                    ParameterName = "JsonParsingEngine",
                                    ParameterValue = "JQ-1.6",
                                },
                                new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs
                                {
                                    ParameterName = "MetadataExtractionQuery",
                                    ParameterValue = "{customer_id:.customer_id}",
                                },
                            },
                        },
                    },
                },
            },
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/kinesis"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := kinesis.NewFirehoseDeliveryStream(ctx, "extendedS3Stream", &kinesis.FirehoseDeliveryStreamArgs{
    			Destination: pulumi.String("extended_s3"),
    			ExtendedS3Configuration: &kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationArgs{
    				RoleArn:       pulumi.Any(aws_iam_role.Firehose_role.Arn),
    				BucketArn:     pulumi.Any(aws_s3_bucket.Bucket.Arn),
    				BufferingSize: pulumi.Int(64),
    				DynamicPartitioningConfiguration: &kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationDynamicPartitioningConfigurationArgs{
    					Enabled: pulumi.Bool(true),
    				},
    				Prefix:            pulumi.String("data/customer_id=!{partitionKeyFromQuery:customer_id}/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/"),
    				ErrorOutputPrefix: pulumi.String("errors/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/!{firehose:error-output-type}/"),
    				ProcessingConfiguration: &kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationArgs{
    					Enabled: pulumi.Bool(true),
    					Processors: kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArray{
    						&kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs{
    							Type: pulumi.String("RecordDeAggregation"),
    							Parameters: kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArray{
    								&kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs{
    									ParameterName:  pulumi.String("SubRecordType"),
    									ParameterValue: pulumi.String("JSON"),
    								},
    							},
    						},
    						&kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs{
    							Type: pulumi.String("AppendDelimiterToRecord"),
    						},
    						&kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs{
    							Type: pulumi.String("MetadataExtraction"),
    							Parameters: kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArray{
    								&kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs{
    									ParameterName:  pulumi.String("JsonParsingEngine"),
    									ParameterValue: pulumi.String("JQ-1.6"),
    								},
    								&kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs{
    									ParameterName:  pulumi.String("MetadataExtractionQuery"),
    									ParameterValue: pulumi.String("{customer_id:.customer_id}"),
    								},
    							},
    						},
    					},
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.aws.kinesis.FirehoseDeliveryStream;
    import com.pulumi.aws.kinesis.FirehoseDeliveryStreamArgs;
    import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamExtendedS3ConfigurationArgs;
    import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamExtendedS3ConfigurationDynamicPartitioningConfigurationArgs;
    import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationArgs;
    import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs;
    import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var extendedS3Stream = new FirehoseDeliveryStream("extendedS3Stream", FirehoseDeliveryStreamArgs.builder()        
                .destination("extended_s3")
                .extendedS3Configuration(FirehoseDeliveryStreamExtendedS3ConfigurationArgs.builder()
                    .roleArn(aws_iam_role.firehose_role().arn())
                    .bucketArn(aws_s3_bucket.bucket().arn())
                    .bufferingSize(64)
                    .dynamicPartitioningConfiguration(FirehoseDeliveryStreamExtendedS3ConfigurationDynamicPartitioningConfigurationArgs.builder()
                        .enabled("true")
                        .build())
                    .prefix("data/customer_id=!{partitionKeyFromQuery:customer_id}/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/")
                    .errorOutputPrefix("errors/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/!{firehose:error-output-type}/")
                    .processingConfiguration(FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationArgs.builder()
                        .enabled("true")
                        .processors(                    
                            FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs.builder()
                                .type("RecordDeAggregation")
                                .parameters(FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs.builder()
                                    .parameterName("SubRecordType")
                                    .parameterValue("JSON")
                                    .build())
                                .build(),
                            FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs.builder()
                                .type("AppendDelimiterToRecord")
                                .build(),
                            FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs.builder()
                                .type("MetadataExtraction")
                                .parameters(                            
                                    FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs.builder()
                                        .parameterName("JsonParsingEngine")
                                        .parameterValue("JQ-1.6")
                                        .build(),
                                    FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs.builder()
                                        .parameterName("MetadataExtractionQuery")
                                        .parameterValue("{customer_id:.customer_id}")
                                        .build())
                                .build())
                        .build())
                    .build())
                .build());
    
        }
    }
    
    import pulumi
    import pulumi_aws as aws
    
    extended_s3_stream = aws.kinesis.FirehoseDeliveryStream("extendedS3Stream",
        destination="extended_s3",
        extended_s3_configuration=aws.kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationArgs(
            role_arn=aws_iam_role["firehose_role"]["arn"],
            bucket_arn=aws_s3_bucket["bucket"]["arn"],
            buffering_size=64,
            dynamic_partitioning_configuration=aws.kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationDynamicPartitioningConfigurationArgs(
                enabled=True,
            ),
            prefix="data/customer_id=!{partitionKeyFromQuery:customer_id}/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/",
            error_output_prefix="errors/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/!{firehose:error-output-type}/",
            processing_configuration=aws.kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationArgs(
                enabled=True,
                processors=[
                    aws.kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs(
                        type="RecordDeAggregation",
                        parameters=[aws.kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs(
                            parameter_name="SubRecordType",
                            parameter_value="JSON",
                        )],
                    ),
                    aws.kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs(
                        type="AppendDelimiterToRecord",
                    ),
                    aws.kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs(
                        type="MetadataExtraction",
                        parameters=[
                            aws.kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs(
                                parameter_name="JsonParsingEngine",
                                parameter_value="JQ-1.6",
                            ),
                            aws.kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs(
                                parameter_name="MetadataExtractionQuery",
                                parameter_value="{customer_id:.customer_id}",
                            ),
                        ],
                    ),
                ],
            ),
        ))
    
    import * as pulumi from "@pulumi/pulumi";
    import * as aws from "@pulumi/aws";
    
    const extendedS3Stream = new aws.kinesis.FirehoseDeliveryStream("extendedS3Stream", {
        destination: "extended_s3",
        extendedS3Configuration: {
            roleArn: aws_iam_role.firehose_role.arn,
            bucketArn: aws_s3_bucket.bucket.arn,
            bufferingSize: 64,
            dynamicPartitioningConfiguration: {
                enabled: true,
            },
            prefix: "data/customer_id=!{partitionKeyFromQuery:customer_id}/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/",
            errorOutputPrefix: "errors/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/!{firehose:error-output-type}/",
            processingConfiguration: {
                enabled: true,
                processors: [
                    {
                        type: "RecordDeAggregation",
                        parameters: [{
                            parameterName: "SubRecordType",
                            parameterValue: "JSON",
                        }],
                    },
                    {
                        type: "AppendDelimiterToRecord",
                    },
                    {
                        type: "MetadataExtraction",
                        parameters: [
                            {
                                parameterName: "JsonParsingEngine",
                                parameterValue: "JQ-1.6",
                            },
                            {
                                parameterName: "MetadataExtractionQuery",
                                parameterValue: "{customer_id:.customer_id}",
                            },
                        ],
                    },
                ],
            },
        },
    });
    
    resources:
      extendedS3Stream:
        type: aws:kinesis:FirehoseDeliveryStream
        properties:
          destination: extended_s3
          extendedS3Configuration:
            roleArn: ${aws_iam_role.firehose_role.arn}
            bucketArn: ${aws_s3_bucket.bucket.arn}
            bufferingSize: 64
            dynamicPartitioningConfiguration:
              enabled: true
            prefix: data/customer_id=!{partitionKeyFromQuery:customer_id}/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/
            errorOutputPrefix: errors/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/!{firehose:error-output-type}/
            processingConfiguration:
              enabled: true
              processors:
                - type: RecordDeAggregation
                  parameters:
                    - parameterName: SubRecordType
                      parameterValue: JSON
                - type: AppendDelimiterToRecord
                - type: MetadataExtraction
                  parameters:
                    - parameterName: JsonParsingEngine
                      parameterValue: JQ-1.6
                    - parameterName: MetadataExtractionQuery
                      parameterValue: '{customer_id:.customer_id}'
    

    Multiple dynamic partitioning keys (a maximum of 50) can be added by comma-separating them in the MetadataExtractionQuery parameter value; each extracted key can then be referenced with partitionKeyFromQuery to add it to the S3 prefix, as in the following example.

    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Aws = Pulumi.Aws;
    
    return await Deployment.RunAsync(() => 
    {
        var extendedS3Stream = new Aws.Kinesis.FirehoseDeliveryStream("extendedS3Stream", new()
        {
            Destination = "extended_s3",
            ExtendedS3Configuration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationArgs
            {
                RoleArn = aws_iam_role.Firehose_role.Arn,
                BucketArn = aws_s3_bucket.Bucket.Arn,
                BufferingSize = 64,
                DynamicPartitioningConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationDynamicPartitioningConfigurationArgs
                {
                    Enabled = true,
                },
                Prefix = "data/store_id=!{partitionKeyFromQuery:store_id}/customer_id=!{partitionKeyFromQuery:customer_id}/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/",
                ErrorOutputPrefix = "errors/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/!{firehose:error-output-type}/",
                ProcessingConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationArgs
                {
                    Enabled = true,
                    Processors = new[]
                    {
                        new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs
                        {
                            Type = "MetadataExtraction",
                            Parameters = new[]
                            {
                                new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs
                                {
                                    ParameterName = "JsonParsingEngine",
                                    ParameterValue = "JQ-1.6",
                                },
                                new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs
                                {
                                    ParameterName = "MetadataExtractionQuery",
                                    ParameterValue = "{store_id:.store_id,customer_id:.customer_id}",
                                },
                            },
                        },
                    },
                },
            },
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/kinesis"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := kinesis.NewFirehoseDeliveryStream(ctx, "extendedS3Stream", &kinesis.FirehoseDeliveryStreamArgs{
    			Destination: pulumi.String("extended_s3"),
    			ExtendedS3Configuration: &kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationArgs{
    				RoleArn:       pulumi.Any(aws_iam_role.Firehose_role.Arn),
    				BucketArn:     pulumi.Any(aws_s3_bucket.Bucket.Arn),
    				BufferingSize: pulumi.Int(64),
    				DynamicPartitioningConfiguration: &kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationDynamicPartitioningConfigurationArgs{
    					Enabled: pulumi.Bool(true),
    				},
    				Prefix:            pulumi.String("data/store_id=!{partitionKeyFromQuery:store_id}/customer_id=!{partitionKeyFromQuery:customer_id}/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/"),
    				ErrorOutputPrefix: pulumi.String("errors/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/!{firehose:error-output-type}/"),
    				ProcessingConfiguration: &kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationArgs{
    					Enabled: pulumi.Bool(true),
    					Processors: kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArray{
    						&kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs{
    							Type: pulumi.String("MetadataExtraction"),
    							Parameters: kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArray{
    								&kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs{
    									ParameterName:  pulumi.String("JsonParsingEngine"),
    									ParameterValue: pulumi.String("JQ-1.6"),
    								},
    								&kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs{
    									ParameterName:  pulumi.String("MetadataExtractionQuery"),
    									ParameterValue: pulumi.String("{store_id:.store_id,customer_id:.customer_id}"),
    								},
    							},
    						},
    					},
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.aws.kinesis.FirehoseDeliveryStream;
    import com.pulumi.aws.kinesis.FirehoseDeliveryStreamArgs;
    import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamExtendedS3ConfigurationArgs;
    import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamExtendedS3ConfigurationDynamicPartitioningConfigurationArgs;
    import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationArgs;
    import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs;
    import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var extendedS3Stream = new FirehoseDeliveryStream("extendedS3Stream", FirehoseDeliveryStreamArgs.builder()        
                .destination("extended_s3")
                .extendedS3Configuration(FirehoseDeliveryStreamExtendedS3ConfigurationArgs.builder()
                    .roleArn(aws_iam_role.firehose_role().arn())
                    .bucketArn(aws_s3_bucket.bucket().arn())
                    .bufferingSize(64)
                    .dynamicPartitioningConfiguration(FirehoseDeliveryStreamExtendedS3ConfigurationDynamicPartitioningConfigurationArgs.builder()
                        .enabled("true")
                        .build())
                    .prefix("data/store_id=!{partitionKeyFromQuery:store_id}/customer_id=!{partitionKeyFromQuery:customer_id}/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/")
                    .errorOutputPrefix("errors/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/!{firehose:error-output-type}/")
                    .processingConfiguration(FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationArgs.builder()
                        .enabled("true")
                        .processors(FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs.builder()
                            .type("MetadataExtraction")
                            .parameters(                        
                                FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs.builder()
                                    .parameterName("JsonParsingEngine")
                                    .parameterValue("JQ-1.6")
                                    .build(),
                                FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs.builder()
                                    .parameterName("MetadataExtractionQuery")
                                    .parameterValue("{store_id:.store_id,customer_id:.customer_id}")
                                    .build())
                            .build())
                        .build())
                    .build())
                .build());
    
        }
    }
    
    import pulumi
    import pulumi_aws as aws
    
    extended_s3_stream = aws.kinesis.FirehoseDeliveryStream("extendedS3Stream",
        destination="extended_s3",
        extended_s3_configuration=aws.kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationArgs(
            role_arn=aws_iam_role["firehose_role"]["arn"],
            bucket_arn=aws_s3_bucket["bucket"]["arn"],
            buffering_size=64,
            dynamic_partitioning_configuration=aws.kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationDynamicPartitioningConfigurationArgs(
                enabled=True,
            ),
            prefix="data/store_id=!{partitionKeyFromQuery:store_id}/customer_id=!{partitionKeyFromQuery:customer_id}/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/",
            error_output_prefix="errors/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/!{firehose:error-output-type}/",
            processing_configuration=aws.kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationArgs(
                enabled=True,
                processors=[aws.kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs(
                    type="MetadataExtraction",
                    parameters=[
                        aws.kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs(
                            parameter_name="JsonParsingEngine",
                            parameter_value="JQ-1.6",
                        ),
                        aws.kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs(
                            parameter_name="MetadataExtractionQuery",
                            parameter_value="{store_id:.store_id,customer_id:.customer_id}",
                        ),
                    ],
                )],
            ),
        ))
    
    import * as pulumi from "@pulumi/pulumi";
    import * as aws from "@pulumi/aws";
    
    const extendedS3Stream = new aws.kinesis.FirehoseDeliveryStream("extendedS3Stream", {
        destination: "extended_s3",
        extendedS3Configuration: {
            roleArn: aws_iam_role.firehose_role.arn,
            bucketArn: aws_s3_bucket.bucket.arn,
            bufferingSize: 64,
            dynamicPartitioningConfiguration: {
                enabled: true,
            },
            prefix: "data/store_id=!{partitionKeyFromQuery:store_id}/customer_id=!{partitionKeyFromQuery:customer_id}/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/",
            errorOutputPrefix: "errors/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/!{firehose:error-output-type}/",
            processingConfiguration: {
                enabled: true,
                processors: [{
                    type: "MetadataExtraction",
                    parameters: [
                        {
                            parameterName: "JsonParsingEngine",
                            parameterValue: "JQ-1.6",
                        },
                        {
                            parameterName: "MetadataExtractionQuery",
                            parameterValue: "{store_id:.store_id,customer_id:.customer_id}",
                        },
                    ],
                }],
            },
        },
    });
    
    resources:
      extendedS3Stream:
        type: aws:kinesis:FirehoseDeliveryStream
        properties:
          destination: extended_s3
          extendedS3Configuration:
            roleArn: ${aws_iam_role.firehose_role.arn}
            bucketArn: ${aws_s3_bucket.bucket.arn}
            bufferingSize: 64
            dynamicPartitioningConfiguration:
              enabled: true
            prefix: data/store_id=!{partitionKeyFromQuery:store_id}/customer_id=!{partitionKeyFromQuery:customer_id}/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/
            errorOutputPrefix: errors/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/!{firehose:error-output-type}/
            processingConfiguration:
              enabled: true
              processors:
                - type: MetadataExtraction
                  parameters:
                    - parameterName: JsonParsingEngine
                      parameterValue: JQ-1.6
                    - parameterName: MetadataExtractionQuery
                      parameterValue: '{store_id:.store_id,customer_id:.customer_id}'
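
    For a concrete sense of what the MetadataExtraction processor above does, here is a hypothetical walkthrough (sample data invented for illustration): the jq-1.6 query {store_id:.store_id,customer_id:.customer_id} pulls both keys out of each record, and each partitionKeyFromQuery reference in the prefix is substituted at delivery time.

    Hypothetical input record:  {"store_id": "1234", "customer_id": "5678", "event": "purchase"}
    Extracted partition keys:   store_id=1234, customer_id=5678
    Resulting S3 key prefix:    data/store_id=1234/customer_id=5678/year=2023/month=09/day=28/hour=14/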
    

    Redshift Destination

    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Aws = Pulumi.Aws;
    
    return await Deployment.RunAsync(() => 
    {
        var testCluster = new Aws.RedShift.Cluster("testCluster", new()
        {
            ClusterIdentifier = "tf-redshift-cluster",
            DatabaseName = "test",
            MasterUsername = "testuser",
            MasterPassword = "T3stPass",
            NodeType = "dc1.large",
            ClusterType = "single-node",
        });
    
        var testStream = new Aws.Kinesis.FirehoseDeliveryStream("testStream", new()
        {
            Destination = "redshift",
            RedshiftConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamRedshiftConfigurationArgs
            {
                RoleArn = aws_iam_role.Firehose_role.Arn,
                ClusterJdbcurl = Output.Tuple(testCluster.Endpoint, testCluster.DatabaseName).Apply(values =>
                {
                    var endpoint = values.Item1;
                    var databaseName = values.Item2;
                    return $"jdbc:redshift://{endpoint}/{databaseName}";
                }),
                Username = "testuser",
                Password = "T3stPass",
                DataTableName = "test-table",
                CopyOptions = "delimiter '|'",
                DataTableColumns = "test-col",
                S3BackupMode = "Enabled",
                S3Configuration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamRedshiftConfigurationS3ConfigurationArgs
                {
                    RoleArn = aws_iam_role.Firehose_role.Arn,
                    BucketArn = aws_s3_bucket.Bucket.Arn,
                    BufferingSize = 10,
                    BufferingInterval = 400,
                    CompressionFormat = "GZIP",
                },
                S3BackupConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamRedshiftConfigurationS3BackupConfigurationArgs
                {
                    RoleArn = aws_iam_role.Firehose_role.Arn,
                    BucketArn = aws_s3_bucket.Bucket.Arn,
                    BufferingSize = 15,
                    BufferingInterval = 300,
                    CompressionFormat = "GZIP",
                },
            },
        });
    
    });
    
    package main
    
    import (
    	"fmt"
    
    	"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/kinesis"
    	"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/redshift"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		testCluster, err := redshift.NewCluster(ctx, "testCluster", &redshift.ClusterArgs{
    			ClusterIdentifier: pulumi.String("tf-redshift-cluster"),
    			DatabaseName:      pulumi.String("test"),
    			MasterUsername:    pulumi.String("testuser"),
    			MasterPassword:    pulumi.String("T3stPass"),
    			NodeType:          pulumi.String("dc1.large"),
    			ClusterType:       pulumi.String("single-node"),
    		})
    		if err != nil {
    			return err
    		}
    		_, err = kinesis.NewFirehoseDeliveryStream(ctx, "testStream", &kinesis.FirehoseDeliveryStreamArgs{
    			Destination: pulumi.String("redshift"),
    			RedshiftConfiguration: &kinesis.FirehoseDeliveryStreamRedshiftConfigurationArgs{
    				RoleArn: pulumi.Any(aws_iam_role.Firehose_role.Arn),
    				ClusterJdbcurl: pulumi.All(testCluster.Endpoint, testCluster.DatabaseName).ApplyT(func(_args []interface{}) (string, error) {
    					endpoint := _args[0].(string)
    					databaseName := _args[1].(string)
    					return fmt.Sprintf("jdbc:redshift://%v/%v", endpoint, databaseName), nil
    				}).(pulumi.StringOutput),
    				Username:         pulumi.String("testuser"),
    				Password:         pulumi.String("T3stPass"),
    				DataTableName:    pulumi.String("test-table"),
    				CopyOptions:      pulumi.String("delimiter '|'"),
    				DataTableColumns: pulumi.String("test-col"),
    				S3BackupMode:     pulumi.String("Enabled"),
    				S3Configuration: &kinesis.FirehoseDeliveryStreamRedshiftConfigurationS3ConfigurationArgs{
    					RoleArn:           pulumi.Any(aws_iam_role.Firehose_role.Arn),
    					BucketArn:         pulumi.Any(aws_s3_bucket.Bucket.Arn),
    					BufferingSize:     pulumi.Int(10),
    					BufferingInterval: pulumi.Int(400),
    					CompressionFormat: pulumi.String("GZIP"),
    				},
    				S3BackupConfiguration: &kinesis.FirehoseDeliveryStreamRedshiftConfigurationS3BackupConfigurationArgs{
    					RoleArn:           pulumi.Any(aws_iam_role.Firehose_role.Arn),
    					BucketArn:         pulumi.Any(aws_s3_bucket.Bucket.Arn),
    					BufferingSize:     pulumi.Int(15),
    					BufferingInterval: pulumi.Int(300),
    					CompressionFormat: pulumi.String("GZIP"),
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.aws.redshift.Cluster;
    import com.pulumi.aws.redshift.ClusterArgs;
    import com.pulumi.aws.kinesis.FirehoseDeliveryStream;
    import com.pulumi.aws.kinesis.FirehoseDeliveryStreamArgs;
    import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamRedshiftConfigurationArgs;
    import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamRedshiftConfigurationS3ConfigurationArgs;
    import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamRedshiftConfigurationS3BackupConfigurationArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var testCluster = new Cluster("testCluster", ClusterArgs.builder()        
                .clusterIdentifier("tf-redshift-cluster")
                .databaseName("test")
                .masterUsername("testuser")
                .masterPassword("T3stPass")
                .nodeType("dc1.large")
                .clusterType("single-node")
                .build());
    
            var testStream = new FirehoseDeliveryStream("testStream", FirehoseDeliveryStreamArgs.builder()        
                .destination("redshift")
                .redshiftConfiguration(FirehoseDeliveryStreamRedshiftConfigurationArgs.builder()
                    .roleArn(aws_iam_role.firehose_role().arn())
                    .clusterJdbcurl(Output.tuple(testCluster.endpoint(), testCluster.databaseName()).applyValue(values -> {
                        var endpoint = values.t1;
                        var databaseName = values.t2;
                        return String.format("jdbc:redshift://%s/%s", endpoint,databaseName);
                    }))
                    .username("testuser")
                    .password("T3stPass")
                    .dataTableName("test-table")
                    .copyOptions("delimiter '|'")
                    .dataTableColumns("test-col")
                    .s3BackupMode("Enabled")
                    .s3Configuration(FirehoseDeliveryStreamRedshiftConfigurationS3ConfigurationArgs.builder()
                        .roleArn(aws_iam_role.firehose_role().arn())
                        .bucketArn(aws_s3_bucket.bucket().arn())
                        .bufferingSize(10)
                        .bufferingInterval(400)
                        .compressionFormat("GZIP")
                        .build())
                    .s3BackupConfiguration(FirehoseDeliveryStreamRedshiftConfigurationS3BackupConfigurationArgs.builder()
                        .roleArn(aws_iam_role.firehose_role().arn())
                        .bucketArn(aws_s3_bucket.bucket().arn())
                        .bufferingSize(15)
                        .bufferingInterval(300)
                        .compressionFormat("GZIP")
                        .build())
                    .build())
                .build());
    
        }
    }
    
    import pulumi
    import pulumi_aws as aws
    
    test_cluster = aws.redshift.Cluster("testCluster",
        cluster_identifier="tf-redshift-cluster",
        database_name="test",
        master_username="testuser",
        master_password="T3stPass",
        node_type="dc1.large",
        cluster_type="single-node")
    test_stream = aws.kinesis.FirehoseDeliveryStream("testStream",
        destination="redshift",
        redshift_configuration=aws.kinesis.FirehoseDeliveryStreamRedshiftConfigurationArgs(
            role_arn=aws_iam_role["firehose_role"]["arn"],
        cluster_jdbcurl=pulumi.Output.all(test_cluster.endpoint, test_cluster.database_name).apply(lambda args: f"jdbc:redshift://{args[0]}/{args[1]}"),
            username="testuser",
            password="T3stPass",
            data_table_name="test-table",
            copy_options="delimiter '|'",
            data_table_columns="test-col",
            s3_backup_mode="Enabled",
            s3_configuration=aws.kinesis.FirehoseDeliveryStreamRedshiftConfigurationS3ConfigurationArgs(
                role_arn=aws_iam_role["firehose_role"]["arn"],
                bucket_arn=aws_s3_bucket["bucket"]["arn"],
                buffering_size=10,
                buffering_interval=400,
                compression_format="GZIP",
            ),
            s3_backup_configuration=aws.kinesis.FirehoseDeliveryStreamRedshiftConfigurationS3BackupConfigurationArgs(
                role_arn=aws_iam_role["firehose_role"]["arn"],
                bucket_arn=aws_s3_bucket["bucket"]["arn"],
                buffering_size=15,
                buffering_interval=300,
                compression_format="GZIP",
            ),
        ))
    
    import * as pulumi from "@pulumi/pulumi";
    import * as aws from "@pulumi/aws";
    
    const testCluster = new aws.redshift.Cluster("testCluster", {
        clusterIdentifier: "tf-redshift-cluster",
        databaseName: "test",
        masterUsername: "testuser",
        masterPassword: "T3stPass",
        nodeType: "dc1.large",
        clusterType: "single-node",
    });
    const testStream = new aws.kinesis.FirehoseDeliveryStream("testStream", {
        destination: "redshift",
        redshiftConfiguration: {
            roleArn: aws_iam_role.firehose_role.arn,
            clusterJdbcurl: pulumi.interpolate`jdbc:redshift://${testCluster.endpoint}/${testCluster.databaseName}`,
            username: "testuser",
            password: "T3stPass",
            dataTableName: "test-table",
            copyOptions: "delimiter '|'",
            dataTableColumns: "test-col",
            s3BackupMode: "Enabled",
            s3Configuration: {
                roleArn: aws_iam_role.firehose_role.arn,
                bucketArn: aws_s3_bucket.bucket.arn,
                bufferingSize: 10,
                bufferingInterval: 400,
                compressionFormat: "GZIP",
            },
            s3BackupConfiguration: {
                roleArn: aws_iam_role.firehose_role.arn,
                bucketArn: aws_s3_bucket.bucket.arn,
                bufferingSize: 15,
                bufferingInterval: 300,
                compressionFormat: "GZIP",
            },
        },
    });
    
    resources:
      testCluster:
        type: aws:redshift:Cluster
        properties:
          clusterIdentifier: tf-redshift-cluster
          databaseName: test
          masterUsername: testuser
          masterPassword: T3stPass
          nodeType: dc1.large
          clusterType: single-node
      testStream:
        type: aws:kinesis:FirehoseDeliveryStream
        properties:
          destination: redshift
          redshiftConfiguration:
            roleArn: ${aws_iam_role.firehose_role.arn}
            clusterJdbcurl: jdbc:redshift://${testCluster.endpoint}/${testCluster.databaseName}
            username: testuser
            password: T3stPass
            dataTableName: test-table
            copyOptions: delimiter '|'
            dataTableColumns: test-col
            s3BackupMode: Enabled
            s3Configuration:
              roleArn: ${aws_iam_role.firehose_role.arn}
              bucketArn: ${aws_s3_bucket.bucket.arn}
              bufferingSize: 10
              bufferingInterval: 400
              compressionFormat: GZIP
            s3BackupConfiguration:
              roleArn: ${aws_iam_role.firehose_role.arn}
              bucketArn: ${aws_s3_bucket.bucket.arn}
              bufferingSize: 15
              bufferingInterval: 300
              compressionFormat: GZIP
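
    The Redshift examples above embed the master password as a literal for brevity. In a real program the credentials would more likely come from stack configuration as secrets. The TypeScript sketch below is illustrative and assumes a redshiftPassword key created with pulumi config set --secret redshiftPassword <value>.

    import * as pulumi from "@pulumi/pulumi";

    // Assumed config key; the value is stored encrypted when set with --secret.
    const config = new pulumi.Config();
    const redshiftPassword = config.requireSecret("redshiftPassword");

    // This secret Output can then stand in for the literal "T3stPass" in both
    // the cluster's masterPassword and the redshiftConfiguration password.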
    

    Elasticsearch Destination

    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Aws = Pulumi.Aws;
    
    return await Deployment.RunAsync(() => 
    {
        var testCluster = new Aws.ElasticSearch.Domain("testCluster");
    
        var testStream = new Aws.Kinesis.FirehoseDeliveryStream("testStream", new()
        {
            Destination = "elasticsearch",
            ElasticsearchConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamElasticsearchConfigurationArgs
            {
                DomainArn = testCluster.Arn,
                RoleArn = aws_iam_role.Firehose_role.Arn,
                IndexName = "test",
                TypeName = "test",
                S3Configuration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamElasticsearchConfigurationS3ConfigurationArgs
                {
                    RoleArn = aws_iam_role.Firehose_role.Arn,
                    BucketArn = aws_s3_bucket.Bucket.Arn,
                    BufferingSize = 10,
                    BufferingInterval = 400,
                    CompressionFormat = "GZIP",
                },
                ProcessingConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationArgs
                {
                    Enabled = true,
                    Processors = new[]
                    {
                        new Aws.Kinesis.Inputs.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorArgs
                        {
                            Type = "Lambda",
                            Parameters = new[]
                            {
                                new Aws.Kinesis.Inputs.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorParameterArgs
                                {
                                    ParameterName = "LambdaArn",
                                    ParameterValue = $"{aws_lambda_function.Lambda_processor.Arn}:$LATEST",
                                },
                            },
                        },
                    },
                },
            },
        });
    
    });
    
    package main
    
    import (
    	"fmt"
    
    	"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/elasticsearch"
    	"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/kinesis"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		testCluster, err := elasticsearch.NewDomain(ctx, "testCluster", nil)
    		if err != nil {
    			return err
    		}
    		_, err = kinesis.NewFirehoseDeliveryStream(ctx, "testStream", &kinesis.FirehoseDeliveryStreamArgs{
    			Destination: pulumi.String("elasticsearch"),
    			ElasticsearchConfiguration: &kinesis.FirehoseDeliveryStreamElasticsearchConfigurationArgs{
    				DomainArn: testCluster.Arn,
    				RoleArn:   pulumi.Any(aws_iam_role.Firehose_role.Arn),
    				IndexName: pulumi.String("test"),
    				TypeName:  pulumi.String("test"),
    				S3Configuration: &kinesis.FirehoseDeliveryStreamElasticsearchConfigurationS3ConfigurationArgs{
    					RoleArn:           pulumi.Any(aws_iam_role.Firehose_role.Arn),
    					BucketArn:         pulumi.Any(aws_s3_bucket.Bucket.Arn),
    					BufferingSize:     pulumi.Int(10),
    					BufferingInterval: pulumi.Int(400),
    					CompressionFormat: pulumi.String("GZIP"),
    				},
    				ProcessingConfiguration: &kinesis.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationArgs{
    					Enabled: pulumi.Bool(true),
    					Processors: kinesis.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorArray{
    						&kinesis.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorArgs{
    							Type: pulumi.String("Lambda"),
    							Parameters: kinesis.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorParameterArray{
    								&kinesis.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorParameterArgs{
    									ParameterName:  pulumi.String("LambdaArn"),
    									ParameterValue: pulumi.String(fmt.Sprintf("%v:$LATEST", aws_lambda_function.Lambda_processor.Arn)),
    								},
    							},
    						},
    					},
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.aws.elasticsearch.Domain;
    import com.pulumi.aws.kinesis.FirehoseDeliveryStream;
    import com.pulumi.aws.kinesis.FirehoseDeliveryStreamArgs;
    import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamElasticsearchConfigurationArgs;
    import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamElasticsearchConfigurationS3ConfigurationArgs;
    import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationArgs;
    import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorArgs;
    import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorParameterArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var testCluster = new Domain("testCluster");
    
            var testStream = new FirehoseDeliveryStream("testStream", FirehoseDeliveryStreamArgs.builder()        
                .destination("elasticsearch")
                .elasticsearchConfiguration(FirehoseDeliveryStreamElasticsearchConfigurationArgs.builder()
                    .domainArn(testCluster.arn())
                    .roleArn(aws_iam_role.firehose_role().arn())
                    .indexName("test")
                    .typeName("test")
                    .s3Configuration(FirehoseDeliveryStreamElasticsearchConfigurationS3ConfigurationArgs.builder()
                        .roleArn(aws_iam_role.firehose_role().arn())
                        .bucketArn(aws_s3_bucket.bucket().arn())
                        .bufferingSize(10)
                        .bufferingInterval(400)
                        .compressionFormat("GZIP")
                        .build())
                    .processingConfiguration(FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationArgs.builder()
                        .enabled("true")
                        .processors(FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorArgs.builder()
                            .type("Lambda")
                            .parameters(FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorParameterArgs.builder()
                                .parameterName("LambdaArn")
                                .parameterValue(String.format("%s:$LATEST", aws_lambda_function.lambda_processor().arn()))
                                .build())
                            .build())
                        .build())
                    .build())
                .build());
    
        }
    }
    
    import pulumi
    import pulumi_aws as aws
    
    test_cluster = aws.elasticsearch.Domain("testCluster")
    test_stream = aws.kinesis.FirehoseDeliveryStream("testStream",
        destination="elasticsearch",
        elasticsearch_configuration=aws.kinesis.FirehoseDeliveryStreamElasticsearchConfigurationArgs(
            domain_arn=test_cluster.arn,
            role_arn=aws_iam_role["firehose_role"]["arn"],
            index_name="test",
            type_name="test",
            s3_configuration=aws.kinesis.FirehoseDeliveryStreamElasticsearchConfigurationS3ConfigurationArgs(
                role_arn=aws_iam_role["firehose_role"]["arn"],
                bucket_arn=aws_s3_bucket["bucket"]["arn"],
                buffering_size=10,
                buffering_interval=400,
                compression_format="GZIP",
            ),
            processing_configuration=aws.kinesis.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationArgs(
                enabled=True,
                processors=[aws.kinesis.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorArgs(
                    type="Lambda",
                    parameters=[aws.kinesis.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorParameterArgs(
                        parameter_name="LambdaArn",
                        parameter_value=f"{aws_lambda_function['lambda_processor']['arn']}:$LATEST",
                    )],
                )],
            ),
        ))
    
    import * as pulumi from "@pulumi/pulumi";
    import * as aws from "@pulumi/aws";
    
    const testCluster = new aws.elasticsearch.Domain("testCluster", {});
    const testStream = new aws.kinesis.FirehoseDeliveryStream("testStream", {
        destination: "elasticsearch",
        elasticsearchConfiguration: {
            domainArn: testCluster.arn,
            roleArn: aws_iam_role.firehose_role.arn,
            indexName: "test",
            typeName: "test",
            s3Configuration: {
                roleArn: aws_iam_role.firehose_role.arn,
                bucketArn: aws_s3_bucket.bucket.arn,
                bufferingSize: 10,
                bufferingInterval: 400,
                compressionFormat: "GZIP",
            },
            processingConfiguration: {
                enabled: true,
                processors: [{
                    type: "Lambda",
                    parameters: [{
                        parameterName: "LambdaArn",
                        parameterValue: `${aws_lambda_function.lambda_processor.arn}:$LATEST`,
                    }],
                }],
            },
        },
    });
    
    resources:
      testCluster:
        type: aws:elasticsearch:Domain
      testStream:
        type: aws:kinesis:FirehoseDeliveryStream
        properties:
          destination: elasticsearch
          elasticsearchConfiguration:
            domainArn: ${testCluster.arn}
            roleArn: ${aws_iam_role.firehose_role.arn}
            indexName: test
            typeName: test
            s3Configuration:
              roleArn: ${aws_iam_role.firehose_role.arn}
              bucketArn: ${aws_s3_bucket.bucket.arn}
              bufferingSize: 10
              bufferingInterval: 400
              compressionFormat: GZIP
            processingConfiguration:
              enabled: true
              processors:
                - type: Lambda
                  parameters:
                    - parameterName: LambdaArn
                      parameterValue: ${aws_lambda_function.lambda_processor.arn}:$LATEST
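
    The LambdaArn processor parameter above renders a converted Terraform reference (aws_lambda_function.lambda_processor). When the processor function is defined in the same program, the qualified ARN can be built from the resource itself. A minimal TypeScript sketch follows; the role and function names are illustrative.

    import * as pulumi from "@pulumi/pulumi";
    import * as aws from "@pulumi/aws";

    // Illustrative execution role that lambda.amazonaws.com may assume.
    const lambdaRole = new aws.iam.Role("lambdaRole", {
        assumeRolePolicy: JSON.stringify({
            Version: "2012-10-17",
            Statement: [{
                Effect: "Allow",
                Principal: { Service: "lambda.amazonaws.com" },
                Action: "sts:AssumeRole",
            }],
        }),
    });

    const lambdaProcessor = new aws.lambda.Function("lambdaProcessor", {
        code: new pulumi.asset.FileArchive("lambda.zip"),
        role: lambdaRole.arn,
        handler: "exports.handler",
        runtime: "nodejs16.x",
    });

    // Matches the `${arn}:$LATEST` form used by the examples on this page.
    const lambdaArnParameter = pulumi.interpolate`${lambdaProcessor.arn}:$LATEST`;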
    

    Elasticsearch Destination With VPC

    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Aws = Pulumi.Aws;
    
    return await Deployment.RunAsync(() => 
    {
        var testCluster = new Aws.ElasticSearch.Domain("testCluster", new()
        {
            ClusterConfig = new Aws.ElasticSearch.Inputs.DomainClusterConfigArgs
            {
                InstanceCount = 2,
                ZoneAwarenessEnabled = true,
                InstanceType = "t2.small.elasticsearch",
            },
            EbsOptions = new Aws.ElasticSearch.Inputs.DomainEbsOptionsArgs
            {
                EbsEnabled = true,
                VolumeSize = 10,
            },
            VpcOptions = new Aws.ElasticSearch.Inputs.DomainVpcOptionsArgs
            {
                SecurityGroupIds = new[]
                {
                    aws_security_group.First.Id,
                },
                SubnetIds = new[]
                {
                    aws_subnet.First.Id,
                    aws_subnet.Second.Id,
                },
            },
        });
    
        var firehose_elasticsearchPolicyDocument = Aws.Iam.GetPolicyDocument.Invoke(new()
        {
            Statements = new[]
            {
                new Aws.Iam.Inputs.GetPolicyDocumentStatementInputArgs
                {
                    Effect = "Allow",
                    Actions = new[]
                    {
                        "es:*",
                    },
                    Resources = new[]
                    {
                        testCluster.Arn,
                        $"{testCluster.Arn}/*",
                    },
                },
                new Aws.Iam.Inputs.GetPolicyDocumentStatementInputArgs
                {
                    Effect = "Allow",
                    Actions = new[]
                    {
                        "ec2:DescribeVpcs",
                        "ec2:DescribeVpcAttribute",
                        "ec2:DescribeSubnets",
                        "ec2:DescribeSecurityGroups",
                        "ec2:DescribeNetworkInterfaces",
                        "ec2:CreateNetworkInterface",
                        "ec2:CreateNetworkInterfacePermission",
                        "ec2:DeleteNetworkInterface",
                    },
                    Resources = new[]
                    {
                        "*",
                    },
                },
            },
        });
    
        var firehose_elasticsearchRolePolicy = new Aws.Iam.RolePolicy("firehose-elasticsearchRolePolicy", new()
        {
            Role = aws_iam_role.Firehose.Id,
        Policy = firehose_elasticsearchPolicyDocument.Apply(getPolicyDocumentResult => getPolicyDocumentResult.Json),
        });
    
        var test = new Aws.Kinesis.FirehoseDeliveryStream("test", new()
        {
            Destination = "elasticsearch",
            ElasticsearchConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamElasticsearchConfigurationArgs
            {
                DomainArn = testCluster.Arn,
                RoleArn = aws_iam_role.Firehose.Arn,
                IndexName = "test",
                TypeName = "test",
                S3Configuration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamElasticsearchConfigurationS3ConfigurationArgs
                {
                    RoleArn = aws_iam_role.Firehose.Arn,
                    BucketArn = aws_s3_bucket.Bucket.Arn,
                },
                VpcConfig = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamElasticsearchConfigurationVpcConfigArgs
                {
                    SubnetIds = new[]
                    {
                        aws_subnet.First.Id,
                        aws_subnet.Second.Id,
                    },
                    SecurityGroupIds = new[]
                    {
                        aws_security_group.First.Id,
                    },
                    RoleArn = aws_iam_role.Firehose.Arn,
                },
            },
        }, new CustomResourceOptions
        {
            DependsOn = new[]
            {
                firehose_elasticsearchRolePolicy,
            },
        });
    
    });
    
    package main
    
    import (
    	"fmt"
    
    	"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/elasticsearch"
    	"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/iam"
    	"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/kinesis"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		testCluster, err := elasticsearch.NewDomain(ctx, "testCluster", &elasticsearch.DomainArgs{
    			ClusterConfig: &elasticsearch.DomainClusterConfigArgs{
    				InstanceCount:        pulumi.Int(2),
    				ZoneAwarenessEnabled: pulumi.Bool(true),
    				InstanceType:         pulumi.String("t2.small.elasticsearch"),
    			},
    			EbsOptions: &elasticsearch.DomainEbsOptionsArgs{
    				EbsEnabled: pulumi.Bool(true),
    				VolumeSize: pulumi.Int(10),
    			},
    			VpcOptions: &elasticsearch.DomainVpcOptionsArgs{
    				SecurityGroupIds: pulumi.StringArray{
    					aws_security_group.First.Id,
    				},
    				SubnetIds: pulumi.StringArray{
    					aws_subnet.First.Id,
    					aws_subnet.Second.Id,
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		firehose_elasticsearchPolicyDocument := iam.GetPolicyDocumentOutput(ctx, iam.GetPolicyDocumentOutputArgs{
    			Statements: iam.GetPolicyDocumentStatementArray{
    				&iam.GetPolicyDocumentStatementArgs{
    					Effect: pulumi.String("Allow"),
    					Actions: pulumi.StringArray{
    						pulumi.String("es:*"),
    					},
    					Resources: pulumi.StringArray{
    						testCluster.Arn,
    						testCluster.Arn.ApplyT(func(arn string) (string, error) {
    							return fmt.Sprintf("%v/*", arn), nil
    						}).(pulumi.StringOutput),
    					},
    				},
    				&iam.GetPolicyDocumentStatementArgs{
    					Effect: pulumi.String("Allow"),
    					Actions: pulumi.StringArray{
    						pulumi.String("ec2:DescribeVpcs"),
    						pulumi.String("ec2:DescribeVpcAttribute"),
    						pulumi.String("ec2:DescribeSubnets"),
    						pulumi.String("ec2:DescribeSecurityGroups"),
    						pulumi.String("ec2:DescribeNetworkInterfaces"),
    						pulumi.String("ec2:CreateNetworkInterface"),
    						pulumi.String("ec2:CreateNetworkInterfacePermission"),
    						pulumi.String("ec2:DeleteNetworkInterface"),
    					},
    					Resources: pulumi.StringArray{
    						pulumi.String("*"),
    					},
    				},
    			},
    		}, nil)
    		_, err = iam.NewRolePolicy(ctx, "firehose-elasticsearchRolePolicy", &iam.RolePolicyArgs{
    			Role: pulumi.Any(aws_iam_role.Firehose.Id),
    			Policy: firehose_elasticsearchPolicyDocument.ApplyT(func(firehose_elasticsearchPolicyDocument iam.GetPolicyDocumentResult) (*string, error) {
    				return &firehose_elasticsearchPolicyDocument.Json, nil
    			}).(pulumi.StringPtrOutput),
    		})
    		if err != nil {
    			return err
    		}
    		_, err = kinesis.NewFirehoseDeliveryStream(ctx, "test", &kinesis.FirehoseDeliveryStreamArgs{
    			Destination: pulumi.String("elasticsearch"),
    			ElasticsearchConfiguration: &kinesis.FirehoseDeliveryStreamElasticsearchConfigurationArgs{
    				DomainArn: testCluster.Arn,
    				RoleArn:   pulumi.Any(aws_iam_role.Firehose.Arn),
    				IndexName: pulumi.String("test"),
    				TypeName:  pulumi.String("test"),
    				S3Configuration: &kinesis.FirehoseDeliveryStreamElasticsearchConfigurationS3ConfigurationArgs{
    					RoleArn:   pulumi.Any(aws_iam_role.Firehose.Arn),
    					BucketArn: pulumi.Any(aws_s3_bucket.Bucket.Arn),
    				},
    				VpcConfig: &kinesis.FirehoseDeliveryStreamElasticsearchConfigurationVpcConfigArgs{
    					SubnetIds: pulumi.StringArray{
    						aws_subnet.First.Id,
    						aws_subnet.Second.Id,
    					},
    					SecurityGroupIds: pulumi.StringArray{
    						aws_security_group.First.Id,
    					},
    					RoleArn: pulumi.Any(aws_iam_role.Firehose.Arn),
    				},
    			},
    		}, pulumi.DependsOn([]pulumi.Resource{
    			firehose_elasticsearchRolePolicy,
    		}))
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.aws.elasticsearch.Domain;
    import com.pulumi.aws.elasticsearch.DomainArgs;
    import com.pulumi.aws.elasticsearch.inputs.DomainClusterConfigArgs;
    import com.pulumi.aws.elasticsearch.inputs.DomainEbsOptionsArgs;
    import com.pulumi.aws.elasticsearch.inputs.DomainVpcOptionsArgs;
    import com.pulumi.aws.iam.IamFunctions;
    import com.pulumi.aws.iam.inputs.GetPolicyDocumentArgs;
    import com.pulumi.aws.iam.inputs.GetPolicyDocumentStatementArgs;
    import com.pulumi.aws.iam.RolePolicy;
    import com.pulumi.aws.iam.RolePolicyArgs;
    import com.pulumi.aws.kinesis.FirehoseDeliveryStream;
    import com.pulumi.aws.kinesis.FirehoseDeliveryStreamArgs;
    import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamElasticsearchConfigurationArgs;
    import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamElasticsearchConfigurationS3ConfigurationArgs;
    import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamElasticsearchConfigurationVpcConfigArgs;
    import com.pulumi.resources.CustomResourceOptions;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var testCluster = new Domain("testCluster", DomainArgs.builder()        
                .clusterConfig(DomainClusterConfigArgs.builder()
                    .instanceCount(2)
                    .zoneAwarenessEnabled(true)
                    .instanceType("t2.small.elasticsearch")
                    .build())
                .ebsOptions(DomainEbsOptionsArgs.builder()
                    .ebsEnabled(true)
                    .volumeSize(10)
                    .build())
                .vpcOptions(DomainVpcOptionsArgs.builder()
                    .securityGroupIds(aws_security_group.first().id())
                    .subnetIds(                
                        aws_subnet.first().id(),
                        aws_subnet.second().id())
                    .build())
                .build());
    
        final var firehose_elasticsearchPolicyDocument = IamFunctions.getPolicyDocument(GetPolicyDocumentArgs.builder()
                .statements(            
                    GetPolicyDocumentStatementArgs.builder()
                        .effect("Allow")
                        .actions("es:*")
                        .resources(                    
                            testCluster.arn(),
                            testCluster.arn().applyValue(arn -> String.format("%s/*", arn)))
                        .build(),
                    GetPolicyDocumentStatementArgs.builder()
                        .effect("Allow")
                        .actions(                    
                            "ec2:DescribeVpcs",
                            "ec2:DescribeVpcAttribute",
                            "ec2:DescribeSubnets",
                            "ec2:DescribeSecurityGroups",
                            "ec2:DescribeNetworkInterfaces",
                            "ec2:CreateNetworkInterface",
                            "ec2:CreateNetworkInterfacePermission",
                            "ec2:DeleteNetworkInterface")
                        .resources("*")
                        .build())
                .build());
    
            var firehose_elasticsearchRolePolicy = new RolePolicy("firehose-elasticsearchRolePolicy", RolePolicyArgs.builder()        
                .role(aws_iam_role.firehose().id())
            .policy(firehose_elasticsearchPolicyDocument.applyValue(doc -> doc.json()))
                .build());
    
            var test = new FirehoseDeliveryStream("test", FirehoseDeliveryStreamArgs.builder()        
                .destination("elasticsearch")
                .elasticsearchConfiguration(FirehoseDeliveryStreamElasticsearchConfigurationArgs.builder()
                    .domainArn(testCluster.arn())
                    .roleArn(aws_iam_role.firehose().arn())
                    .indexName("test")
                    .typeName("test")
                    .s3Configuration(FirehoseDeliveryStreamElasticsearchConfigurationS3ConfigurationArgs.builder()
                        .roleArn(aws_iam_role.firehose().arn())
                        .bucketArn(aws_s3_bucket.bucket().arn())
                        .build())
                    .vpcConfig(FirehoseDeliveryStreamElasticsearchConfigurationVpcConfigArgs.builder()
                        .subnetIds(                    
                            aws_subnet.first().id(),
                            aws_subnet.second().id())
                        .securityGroupIds(aws_security_group.first().id())
                        .roleArn(aws_iam_role.firehose().arn())
                        .build())
                    .build())
                .build(), CustomResourceOptions.builder()
                    .dependsOn(firehose_elasticsearchRolePolicy)
                    .build());
    
        }
    }
    
    import pulumi
    import pulumi_aws as aws
    
    test_cluster = aws.elasticsearch.Domain("testCluster",
        cluster_config=aws.elasticsearch.DomainClusterConfigArgs(
            instance_count=2,
            zone_awareness_enabled=True,
            instance_type="t2.small.elasticsearch",
        ),
        ebs_options=aws.elasticsearch.DomainEbsOptionsArgs(
            ebs_enabled=True,
            volume_size=10,
        ),
        vpc_options=aws.elasticsearch.DomainVpcOptionsArgs(
            security_group_ids=[aws_security_group["first"]["id"]],
            subnet_ids=[
                aws_subnet["first"]["id"],
                aws_subnet["second"]["id"],
            ],
        ))
    firehose_elasticsearch_policy_document = aws.iam.get_policy_document_output(statements=[
        aws.iam.GetPolicyDocumentStatementArgs(
            effect="Allow",
            actions=["es:*"],
            resources=[
                test_cluster.arn,
                test_cluster.arn.apply(lambda arn: f"{arn}/*"),
            ],
        ),
        aws.iam.GetPolicyDocumentStatementArgs(
            effect="Allow",
            actions=[
                "ec2:DescribeVpcs",
                "ec2:DescribeVpcAttribute",
                "ec2:DescribeSubnets",
                "ec2:DescribeSecurityGroups",
                "ec2:DescribeNetworkInterfaces",
                "ec2:CreateNetworkInterface",
                "ec2:CreateNetworkInterfacePermission",
                "ec2:DeleteNetworkInterface",
            ],
            resources=["*"],
        ),
    ])
    firehose_elasticsearch_role_policy = aws.iam.RolePolicy("firehose-elasticsearchRolePolicy",
        role=aws_iam_role["firehose"]["id"],
        policy=firehose_elasticsearch_policy_document.json)
    test = aws.kinesis.FirehoseDeliveryStream("test",
        destination="elasticsearch",
        elasticsearch_configuration=aws.kinesis.FirehoseDeliveryStreamElasticsearchConfigurationArgs(
            domain_arn=test_cluster.arn,
            role_arn=aws_iam_role["firehose"]["arn"],
            index_name="test",
            type_name="test",
            s3_configuration=aws.kinesis.FirehoseDeliveryStreamElasticsearchConfigurationS3ConfigurationArgs(
                role_arn=aws_iam_role["firehose"]["arn"],
                bucket_arn=aws_s3_bucket["bucket"]["arn"],
            ),
            vpc_config=aws.kinesis.FirehoseDeliveryStreamElasticsearchConfigurationVpcConfigArgs(
                subnet_ids=[
                    aws_subnet["first"]["id"],
                    aws_subnet["second"]["id"],
                ],
                security_group_ids=[aws_security_group["first"]["id"]],
                role_arn=aws_iam_role["firehose"]["arn"],
            ),
        ),
        opts=pulumi.ResourceOptions(depends_on=[firehose_elasticsearch_role_policy]))
    
    import * as pulumi from "@pulumi/pulumi";
    import * as aws from "@pulumi/aws";
    
    const testCluster = new aws.elasticsearch.Domain("testCluster", {
        clusterConfig: {
            instanceCount: 2,
            zoneAwarenessEnabled: true,
            instanceType: "t2.small.elasticsearch",
        },
        ebsOptions: {
            ebsEnabled: true,
            volumeSize: 10,
        },
        vpcOptions: {
            securityGroupIds: [aws_security_group.first.id],
            subnetIds: [
                aws_subnet.first.id,
                aws_subnet.second.id,
            ],
        },
    });
    const firehose_elasticsearchPolicyDocument = aws.iam.getPolicyDocumentOutput({
        statements: [
            {
                effect: "Allow",
                actions: ["es:*"],
                resources: [
                    testCluster.arn,
                    pulumi.interpolate`${testCluster.arn}/*`,
                ],
            },
            {
                effect: "Allow",
                actions: [
                    "ec2:DescribeVpcs",
                    "ec2:DescribeVpcAttribute",
                    "ec2:DescribeSubnets",
                    "ec2:DescribeSecurityGroups",
                    "ec2:DescribeNetworkInterfaces",
                    "ec2:CreateNetworkInterface",
                    "ec2:CreateNetworkInterfacePermission",
                    "ec2:DeleteNetworkInterface",
                ],
                resources: ["*"],
            },
        ],
    });
    const firehose_elasticsearchRolePolicy = new aws.iam.RolePolicy("firehose-elasticsearchRolePolicy", {
        role: aws_iam_role.firehose.id,
        policy: firehose_elasticsearchPolicyDocument.apply(doc => doc.json),
    });
    const test = new aws.kinesis.FirehoseDeliveryStream("test", {
        destination: "elasticsearch",
        elasticsearchConfiguration: {
            domainArn: testCluster.arn,
            roleArn: aws_iam_role.firehose.arn,
            indexName: "test",
            typeName: "test",
            s3Configuration: {
                roleArn: aws_iam_role.firehose.arn,
                bucketArn: aws_s3_bucket.bucket.arn,
            },
            vpcConfig: {
                subnetIds: [
                    aws_subnet.first.id,
                    aws_subnet.second.id,
                ],
                securityGroupIds: [aws_security_group.first.id],
                roleArn: aws_iam_role.firehose.arn,
            },
        },
    }, {
        dependsOn: [firehose_elasticsearchRolePolicy],
    });
    
    resources:
      testCluster:
        type: aws:elasticsearch:Domain
        properties:
          clusterConfig:
            instanceCount: 2
            zoneAwarenessEnabled: true
            instanceType: t2.small.elasticsearch
          ebsOptions:
            ebsEnabled: true
            volumeSize: 10
          vpcOptions:
            securityGroupIds:
              - ${aws_security_group.first.id}
            subnetIds:
              - ${aws_subnet.first.id}
              - ${aws_subnet.second.id}
      firehose-elasticsearchRolePolicy:
        type: aws:iam:RolePolicy
        properties:
          role: ${aws_iam_role.firehose.id}
          policy: ${["firehose-elasticsearchPolicyDocument"].json}
      test:
        type: aws:kinesis:FirehoseDeliveryStream
        properties:
          destination: elasticsearch
          elasticsearchConfiguration:
            domainArn: ${testCluster.arn}
            roleArn: ${aws_iam_role.firehose.arn}
            indexName: test
            typeName: test
            s3Configuration:
              roleArn: ${aws_iam_role.firehose.arn}
              bucketArn: ${aws_s3_bucket.bucket.arn}
            vpcConfig:
              subnetIds:
                - ${aws_subnet.first.id}
                - ${aws_subnet.second.id}
              securityGroupIds:
                - ${aws_security_group.first.id}
              roleArn: ${aws_iam_role.firehose.arn}
        options:
          dependsOn:
            - ${["firehose-elasticsearchRolePolicy"]}
    variables:
      firehose-elasticsearchPolicyDocument:
        fn::invoke:
          Function: aws:iam:getPolicyDocument
          Arguments:
            statements:
              - effect: Allow
                actions:
                  - es:*
                resources:
                  - ${testCluster.arn}
                  - ${testCluster.arn}/*
              - effect: Allow
                actions:
                  - ec2:DescribeVpcs
                  - ec2:DescribeVpcAttribute
                  - ec2:DescribeSubnets
                  - ec2:DescribeSecurityGroups
                  - ec2:DescribeNetworkInterfaces
                  - ec2:CreateNetworkInterface
                  - ec2:CreateNetworkInterfacePermission
                  - ec2:DeleteNetworkInterface
                resources:
                  - '*'
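
    The aws_iam_role.firehose references in this example are converted Terraform references to a role defined elsewhere. The role they stand in for is an ordinary IAM role that firehose.amazonaws.com can assume, with the firehose-elasticsearch policy above attached to it. A minimal TypeScript sketch, with the resource name being illustrative:

    import * as aws from "@pulumi/aws";

    // Illustrative Firehose service role; the delivery and VPC permissions
    // shown above would be attached to it with aws.iam.RolePolicy.
    const firehose = new aws.iam.Role("firehose", {
        assumeRolePolicy: JSON.stringify({
            Version: "2012-10-17",
            Statement: [{
                Effect: "Allow",
                Principal: { Service: "firehose.amazonaws.com" },
                Action: "sts:AssumeRole",
            }],
        }),
    });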
    

    Opensearch Destination

    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Aws = Pulumi.Aws;
    
    return await Deployment.RunAsync(() => 
    {
        var testCluster = new Aws.OpenSearch.Domain("testCluster");
    
        var testStream = new Aws.Kinesis.FirehoseDeliveryStream("testStream", new()
        {
            Destination = "opensearch",
            OpensearchConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamOpensearchConfigurationArgs
            {
                DomainArn = testCluster.Arn,
                RoleArn = aws_iam_role.Firehose_role.Arn,
                IndexName = "test",
                S3Configuration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamOpensearchConfigurationS3ConfigurationArgs
                {
                    RoleArn = aws_iam_role.Firehose_role.Arn,
                    BucketArn = aws_s3_bucket.Bucket.Arn,
                    BufferingSize = 10,
                    BufferingInterval = 400,
                    CompressionFormat = "GZIP",
                },
                ProcessingConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationArgs
                {
                    Enabled = true,
                    Processors = new[]
                    {
                        new Aws.Kinesis.Inputs.FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationProcessorArgs
                        {
                            Type = "Lambda",
                            Parameters = new[]
                            {
                                new Aws.Kinesis.Inputs.FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationProcessorParameterArgs
                                {
                                    ParameterName = "LambdaArn",
                                    ParameterValue = $"{aws_lambda_function.Lambda_processor.Arn}:$LATEST",
                                },
                            },
                        },
                    },
                },
            },
        });
    
    });
    
    package main
    
    import (
    	"fmt"
    
    	"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/kinesis"
    	"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/opensearch"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		testCluster, err := opensearch.NewDomain(ctx, "testCluster", nil)
    		if err != nil {
    			return err
    		}
    		_, err = kinesis.NewFirehoseDeliveryStream(ctx, "testStream", &kinesis.FirehoseDeliveryStreamArgs{
    			Destination: pulumi.String("opensearch"),
    			OpensearchConfiguration: &kinesis.FirehoseDeliveryStreamOpensearchConfigurationArgs{
    				DomainArn: testCluster.Arn,
    				RoleArn:   pulumi.Any(aws_iam_role.Firehose_role.Arn),
    				IndexName: pulumi.String("test"),
    				S3Configuration: &kinesis.FirehoseDeliveryStreamOpensearchConfigurationS3ConfigurationArgs{
    					RoleArn:           pulumi.Any(aws_iam_role.Firehose_role.Arn),
    					BucketArn:         pulumi.Any(aws_s3_bucket.Bucket.Arn),
    					BufferingSize:     pulumi.Int(10),
    					BufferingInterval: pulumi.Int(400),
    					CompressionFormat: pulumi.String("GZIP"),
    				},
    				ProcessingConfiguration: &kinesis.FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationArgs{
    					Enabled: pulumi.Bool(true),
    					Processors: kinesis.FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationProcessorArray{
    						&kinesis.FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationProcessorArgs{
    							Type: pulumi.String("Lambda"),
    							Parameters: kinesis.FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationProcessorParameterArray{
    								&kinesis.FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationProcessorParameterArgs{
    									ParameterName:  pulumi.String("LambdaArn"),
    									ParameterValue: pulumi.String(fmt.Sprintf("%v:$LATEST", aws_lambda_function.Lambda_processor.Arn)),
    								},
    							},
    						},
    					},
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.aws.opensearch.Domain;
    import com.pulumi.aws.kinesis.FirehoseDeliveryStream;
    import com.pulumi.aws.kinesis.FirehoseDeliveryStreamArgs;
    import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamOpensearchConfigurationArgs;
    import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamOpensearchConfigurationS3ConfigurationArgs;
    import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationArgs;
    import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationProcessorArgs;
    import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationProcessorParameterArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var testCluster = new Domain("testCluster");
    
            var testStream = new FirehoseDeliveryStream("testStream", FirehoseDeliveryStreamArgs.builder()        
                .destination("opensearch")
                .opensearchConfiguration(FirehoseDeliveryStreamOpensearchConfigurationArgs.builder()
                    .domainArn(testCluster.arn())
                    .roleArn(aws_iam_role.firehose_role().arn())
                    .indexName("test")
                    .s3Configuration(FirehoseDeliveryStreamOpensearchConfigurationS3ConfigurationArgs.builder()
                        .roleArn(aws_iam_role.firehose_role().arn())
                        .bucketArn(aws_s3_bucket.bucket().arn())
                        .bufferingSize(10)
                        .bufferingInterval(400)
                        .compressionFormat("GZIP")
                        .build())
                    .processingConfiguration(FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationArgs.builder()
                        .enabled("true")
                        .processors(FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationProcessorArgs.builder()
                            .type("Lambda")
                            .parameters(FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationProcessorParameterArgs.builder()
                                .parameterName("LambdaArn")
                                .parameterValue(String.format("%s:$LATEST", aws_lambda_function.lambda_processor().arn()))
                                .build())
                            .build())
                        .build())
                    .build())
                .build());
    
        }
    }
    
    import pulumi
    import pulumi_aws as aws
    
    test_cluster = aws.opensearch.Domain("testCluster")
    test_stream = aws.kinesis.FirehoseDeliveryStream("testStream",
        destination="opensearch",
        opensearch_configuration=aws.kinesis.FirehoseDeliveryStreamOpensearchConfigurationArgs(
            domain_arn=test_cluster.arn,
            role_arn=aws_iam_role["firehose_role"]["arn"],
            index_name="test",
            s3_configuration=aws.kinesis.FirehoseDeliveryStreamOpensearchConfigurationS3ConfigurationArgs(
                role_arn=aws_iam_role["firehose_role"]["arn"],
                bucket_arn=aws_s3_bucket["bucket"]["arn"],
                buffering_size=10,
                buffering_interval=400,
                compression_format="GZIP",
            ),
            processing_configuration=aws.kinesis.FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationArgs(
                enabled=True,
                processors=[aws.kinesis.FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationProcessorArgs(
                    type="Lambda",
                    parameters=[aws.kinesis.FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationProcessorParameterArgs(
                        parameter_name="LambdaArn",
                        parameter_value=f"{aws_lambda_function['lambda_processor']['arn']}:$LATEST",
                    )],
                )],
            ),
        ))
    
    import * as pulumi from "@pulumi/pulumi";
    import * as aws from "@pulumi/aws";
    
    const testCluster = new aws.opensearch.Domain("testCluster", {});
    const testStream = new aws.kinesis.FirehoseDeliveryStream("testStream", {
        destination: "opensearch",
        opensearchConfiguration: {
            domainArn: testCluster.arn,
            roleArn: aws_iam_role.firehose_role.arn,
            indexName: "test",
            s3Configuration: {
                roleArn: aws_iam_role.firehose_role.arn,
                bucketArn: aws_s3_bucket.bucket.arn,
                bufferingSize: 10,
                bufferingInterval: 400,
                compressionFormat: "GZIP",
            },
            processingConfiguration: {
                enabled: true,
                processors: [{
                    type: "Lambda",
                    parameters: [{
                        parameterName: "LambdaArn",
                        parameterValue: `${aws_lambda_function.lambda_processor.arn}:$LATEST`,
                    }],
                }],
            },
        },
    });
    
    resources:
      testCluster:
        type: aws:opensearch:Domain
      testStream:
        type: aws:kinesis:FirehoseDeliveryStream
        properties:
          destination: opensearch
          opensearchConfiguration:
            domainArn: ${testCluster.arn}
            roleArn: ${aws_iam_role.firehose_role.arn}
            indexName: test
            s3Configuration:
              roleArn: ${aws_iam_role.firehose_role.arn}
              bucketArn: ${aws_s3_bucket.bucket.arn}
              bufferingSize: 10
              bufferingInterval: 400
              compressionFormat: GZIP
            processingConfiguration:
              enabled: true
              processors:
                - type: Lambda
                  parameters:
                    - parameterName: LambdaArn
                      parameterValue: ${aws_lambda_function.lambda_processor.arn}:$LATEST
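
    Similarly, aws_s3_bucket.bucket throughout these examples is a converted reference. When the backing bucket is managed in the same program it can be declared and referenced directly; a minimal TypeScript sketch (the bucket name is illustrative):

    import * as aws from "@pulumi/aws";

    // Illustrative backup bucket; its ARN feeds s3Configuration.bucketArn.
    const bucket = new aws.s3.BucketV2("bucket");
    export const bucketArn = bucket.arn;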
    

    Opensearch Destination With VPC

    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Aws = Pulumi.Aws;
    
    return await Deployment.RunAsync(() => 
    {
        var testCluster = new Aws.OpenSearch.Domain("testCluster", new()
        {
            ClusterConfig = new Aws.OpenSearch.Inputs.DomainClusterConfigArgs
            {
                InstanceCount = 2,
                ZoneAwarenessEnabled = true,
                InstanceType = "m4.large.search",
            },
            EbsOptions = new Aws.OpenSearch.Inputs.DomainEbsOptionsArgs
            {
                EbsEnabled = true,
                VolumeSize = 10,
            },
            VpcOptions = new Aws.OpenSearch.Inputs.DomainVpcOptionsArgs
            {
                SecurityGroupIds = new[]
                {
                    aws_security_group.First.Id,
                },
                SubnetIds = new[]
                {
                    aws_subnet.First.Id,
                    aws_subnet.Second.Id,
                },
            },
        });
    
        var firehose_opensearch = new Aws.Iam.RolePolicy("firehose-opensearch", new()
        {
            Role = aws_iam_role.Firehose.Id,
            Policy = Output.Tuple(testCluster.Arn, testCluster.Arn).Apply(values =>
            {
                var testClusterArn = values.Item1;
                var testClusterArn1 = values.Item2;
                return @$"{{
      ""Version"": ""2012-10-17"",
      ""Statement"": [
        {{
          ""Effect"": ""Allow"",
          ""Action"": [
            ""es:*""
          ],
          ""Resource"": [
            ""{testClusterArn}"",
            ""{testClusterArn1}/*""
          ]
            }},
            {{
              ""Effect"": ""Allow"",
              ""Action"": [
                ""ec2:DescribeVpcs"",
                ""ec2:DescribeVpcAttribute"",
                ""ec2:DescribeSubnets"",
                ""ec2:DescribeSecurityGroups"",
                ""ec2:DescribeNetworkInterfaces"",
                ""ec2:CreateNetworkInterface"",
                ""ec2:CreateNetworkInterfacePermission"",
                ""ec2:DeleteNetworkInterface""
              ],
              ""Resource"": [
                ""*""
              ]
            }}
      ]
    }}
    ";
            }),
        });
    
        var test = new Aws.Kinesis.FirehoseDeliveryStream("test", new()
        {
            Destination = "opensearch",
            OpensearchConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamOpensearchConfigurationArgs
            {
                DomainArn = testCluster.Arn,
                RoleArn = aws_iam_role.Firehose.Arn,
                IndexName = "test",
                S3Configuration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamOpensearchConfigurationS3ConfigurationArgs
                {
                    RoleArn = aws_iam_role.Firehose.Arn,
                    BucketArn = aws_s3_bucket.Bucket.Arn,
                },
                VpcConfig = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamOpensearchConfigurationVpcConfigArgs
                {
                    SubnetIds = new[]
                    {
                        aws_subnet.First.Id,
                        aws_subnet.Second.Id,
                    },
                    SecurityGroupIds = new[]
                    {
                        aws_security_group.First.Id,
                    },
                    RoleArn = aws_iam_role.Firehose.Arn,
                },
            },
        }, new CustomResourceOptions
        {
            DependsOn = new[]
            {
                firehose_opensearch,
            },
        });
    
    });
    
    package main
    
    import (
    	"fmt"
    
    	"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/iam"
    	"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/kinesis"
    	"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/opensearch"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		testCluster, err := opensearch.NewDomain(ctx, "testCluster", &opensearch.DomainArgs{
    			ClusterConfig: &opensearch.DomainClusterConfigArgs{
    				InstanceCount:        pulumi.Int(2),
    				ZoneAwarenessEnabled: pulumi.Bool(true),
    				InstanceType:         pulumi.String("m4.large.search"),
    			},
    			EbsOptions: &opensearch.DomainEbsOptionsArgs{
    				EbsEnabled: pulumi.Bool(true),
    				VolumeSize: pulumi.Int(10),
    			},
    			VpcOptions: &opensearch.DomainVpcOptionsArgs{
    				SecurityGroupIds: pulumi.StringArray{
    					aws_security_group.First.Id,
    				},
    				SubnetIds: pulumi.StringArray{
    					aws_subnet.First.Id,
    					aws_subnet.Second.Id,
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		_, err = iam.NewRolePolicy(ctx, "firehose-opensearch", &iam.RolePolicyArgs{
    			Role: pulumi.Any(aws_iam_role.Firehose.Id),
    			Policy: pulumi.All(testCluster.Arn, testCluster.Arn).ApplyT(func(_args []interface{}) (string, error) {
    				testClusterArn := _args[0].(string)
    				testClusterArn1 := _args[1].(string)
    				return fmt.Sprintf(`{
      "Version": "2012-10-17",
      "Statement": [
        {
          "Effect": "Allow",
          "Action": [
            "es:*"
          ],
          "Resource": [
            "%v",
            "%v/*"
          ]
            },
            {
              "Effect": "Allow",
              "Action": [
                "ec2:DescribeVpcs",
                "ec2:DescribeVpcAttribute",
                "ec2:DescribeSubnets",
                "ec2:DescribeSecurityGroups",
                "ec2:DescribeNetworkInterfaces",
                "ec2:CreateNetworkInterface",
                "ec2:CreateNetworkInterfacePermission",
                "ec2:DeleteNetworkInterface"
              ],
              "Resource": [
                "*"
              ]
            }
      ]
    }
    `, testClusterArn, testClusterArn1), nil
    			}).(pulumi.StringOutput),
    		})
    		if err != nil {
    			return err
    		}
    		_, err = kinesis.NewFirehoseDeliveryStream(ctx, "test", &kinesis.FirehoseDeliveryStreamArgs{
    			Destination: pulumi.String("opensearch"),
    			OpensearchConfiguration: &kinesis.FirehoseDeliveryStreamOpensearchConfigurationArgs{
    				DomainArn: testCluster.Arn,
    				RoleArn:   pulumi.Any(aws_iam_role.Firehose.Arn),
    				IndexName: pulumi.String("test"),
    				S3Configuration: &kinesis.FirehoseDeliveryStreamOpensearchConfigurationS3ConfigurationArgs{
    					RoleArn:   pulumi.Any(aws_iam_role.Firehose.Arn),
    					BucketArn: pulumi.Any(aws_s3_bucket.Bucket.Arn),
    				},
    				VpcConfig: &kinesis.FirehoseDeliveryStreamOpensearchConfigurationVpcConfigArgs{
    					SubnetIds: pulumi.StringArray{
    						aws_subnet.First.Id,
    						aws_subnet.Second.Id,
    					},
    					SecurityGroupIds: pulumi.StringArray{
    						aws_security_group.First.Id,
    					},
    					RoleArn: pulumi.Any(aws_iam_role.Firehose.Arn),
    				},
    			},
    		}, pulumi.DependsOn([]pulumi.Resource{
    			firehose_opensearch,
    		}))
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.aws.opensearch.Domain;
    import com.pulumi.aws.opensearch.DomainArgs;
    import com.pulumi.aws.opensearch.inputs.DomainClusterConfigArgs;
    import com.pulumi.aws.opensearch.inputs.DomainEbsOptionsArgs;
    import com.pulumi.aws.opensearch.inputs.DomainVpcOptionsArgs;
    import com.pulumi.aws.iam.RolePolicy;
    import com.pulumi.aws.iam.RolePolicyArgs;
    import com.pulumi.aws.kinesis.FirehoseDeliveryStream;
    import com.pulumi.aws.kinesis.FirehoseDeliveryStreamArgs;
    import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamOpensearchConfigurationArgs;
    import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamOpensearchConfigurationS3ConfigurationArgs;
    import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamOpensearchConfigurationVpcConfigArgs;
    import com.pulumi.resources.CustomResourceOptions;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var testCluster = new Domain("testCluster", DomainArgs.builder()        
                .clusterConfig(DomainClusterConfigArgs.builder()
                    .instanceCount(2)
                    .zoneAwarenessEnabled(true)
                    .instanceType("m4.large.search")
                    .build())
                .ebsOptions(DomainEbsOptionsArgs.builder()
                    .ebsEnabled(true)
                    .volumeSize(10)
                    .build())
                .vpcOptions(DomainVpcOptionsArgs.builder()
                    .securityGroupIds(aws_security_group.first().id())
                    .subnetIds(                
                        aws_subnet.first().id(),
                        aws_subnet.second().id())
                    .build())
                .build());
    
            var firehose_opensearch = new RolePolicy("firehose-opensearch", RolePolicyArgs.builder()        
                .role(aws_iam_role.firehose().id())
                .policy(Output.tuple(testCluster.arn(), testCluster.arn()).applyValue(values -> {
                    var testClusterArn = values.t1;
                    var testClusterArn1 = values.t2;
                    return """
    {
      "Version": "2012-10-17",
      "Statement": [
        {
          "Effect": "Allow",
          "Action": [
            "es:*"
          ],
          "Resource": [
            "%s",
            "%s/*"
          ]
        },
        {
          "Effect": "Allow",
          "Action": [
            "ec2:DescribeVpcs",
            "ec2:DescribeVpcAttribute",
            "ec2:DescribeSubnets",
            "ec2:DescribeSecurityGroups",
            "ec2:DescribeNetworkInterfaces",
            "ec2:CreateNetworkInterface",
            "ec2:CreateNetworkInterfacePermission",
            "ec2:DeleteNetworkInterface"
          ],
          "Resource": [
            "*"
          ]
        }
      ]
    }
    ", testClusterArn,testClusterArn1);
                }))
                .build());
    
            var test = new FirehoseDeliveryStream("test", FirehoseDeliveryStreamArgs.builder()        
                .destination("opensearch")
                .opensearchConfiguration(FirehoseDeliveryStreamOpensearchConfigurationArgs.builder()
                    .domainArn(testCluster.arn())
                    .roleArn(aws_iam_role.firehose().arn())
                    .indexName("test")
                    .s3Configuration(FirehoseDeliveryStreamOpensearchConfigurationS3ConfigurationArgs.builder()
                        .roleArn(aws_iam_role.firehose().arn())
                        .bucketArn(aws_s3_bucket.bucket().arn())
                        .build())
                    .vpcConfig(FirehoseDeliveryStreamOpensearchConfigurationVpcConfigArgs.builder()
                        .subnetIds(                    
                            aws_subnet.first().id(),
                            aws_subnet.second().id())
                        .securityGroupIds(aws_security_group.first().id())
                        .roleArn(aws_iam_role.firehose().arn())
                        .build())
                    .build())
                .build(), CustomResourceOptions.builder()
                    .dependsOn(firehose_opensearch)
                    .build());
    
        }
    }
    
    import pulumi
    import pulumi_aws as aws
    
    test_cluster = aws.opensearch.Domain("testCluster",
        cluster_config=aws.opensearch.DomainClusterConfigArgs(
            instance_count=2,
            zone_awareness_enabled=True,
            instance_type="m4.large.search",
        ),
        ebs_options=aws.opensearch.DomainEbsOptionsArgs(
            ebs_enabled=True,
            volume_size=10,
        ),
        vpc_options=aws.opensearch.DomainVpcOptionsArgs(
            security_group_ids=[aws_security_group["first"]["id"]],
            subnet_ids=[
                aws_subnet["first"]["id"],
                aws_subnet["second"]["id"],
            ],
        ))
    firehose_opensearch = aws.iam.RolePolicy("firehose-opensearch",
        role=aws_iam_role["firehose"]["id"],
    policy=pulumi.Output.all(test_cluster.arn, test_cluster.arn).apply(lambda args: f"""{{
      "Version": "2012-10-17",
      "Statement": [
        {{
          "Effect": "Allow",
          "Action": [
            "es:*"
          ],
          "Resource": [
            "{test_cluster_arn}",
            "{test_cluster_arn1}/*"
          ]
        }},
        {{
          "Effect": "Allow",
          "Action": [
            "ec2:DescribeVpcs",
            "ec2:DescribeVpcAttribute",
            "ec2:DescribeSubnets",
            "ec2:DescribeSecurityGroups",
            "ec2:DescribeNetworkInterfaces",
            "ec2:CreateNetworkInterface",
            "ec2:CreateNetworkInterfacePermission",
            "ec2:DeleteNetworkInterface"
          ],
          "Resource": [
            "*"
          ]
        }}
      ]
    }}
    """))
    test = aws.kinesis.FirehoseDeliveryStream("test",
        destination="opensearch",
        opensearch_configuration=aws.kinesis.FirehoseDeliveryStreamOpensearchConfigurationArgs(
            domain_arn=test_cluster.arn,
            role_arn=aws_iam_role["firehose"]["arn"],
            index_name="test",
            s3_configuration=aws.kinesis.FirehoseDeliveryStreamOpensearchConfigurationS3ConfigurationArgs(
                role_arn=aws_iam_role["firehose"]["arn"],
                bucket_arn=aws_s3_bucket["bucket"]["arn"],
            ),
            vpc_config=aws.kinesis.FirehoseDeliveryStreamOpensearchConfigurationVpcConfigArgs(
                subnet_ids=[
                    aws_subnet["first"]["id"],
                    aws_subnet["second"]["id"],
                ],
                security_group_ids=[aws_security_group["first"]["id"]],
                role_arn=aws_iam_role["firehose"]["arn"],
            ),
        ),
        opts=pulumi.ResourceOptions(depends_on=[firehose_opensearch]))
    
    import * as pulumi from "@pulumi/pulumi";
    import * as aws from "@pulumi/aws";
    
    const testCluster = new aws.opensearch.Domain("testCluster", {
        clusterConfig: {
            instanceCount: 2,
            zoneAwarenessEnabled: true,
            instanceType: "m4.large.search",
        },
        ebsOptions: {
            ebsEnabled: true,
            volumeSize: 10,
        },
        vpcOptions: {
            securityGroupIds: [aws_security_group.first.id],
            subnetIds: [
                aws_subnet.first.id,
                aws_subnet.second.id,
            ],
        },
    });
    const firehose_opensearch = new aws.iam.RolePolicy("firehose-opensearch", {
        role: aws_iam_role.firehose.id,
        policy: pulumi.interpolate`{
      "Version": "2012-10-17",
      "Statement": [
        {
          "Effect": "Allow",
          "Action": [
            "es:*"
          ],
          "Resource": [
            "${testCluster.arn}",
            "${testCluster.arn}/*"
          ]
        },
        {
          "Effect": "Allow",
          "Action": [
            "ec2:DescribeVpcs",
            "ec2:DescribeVpcAttribute",
            "ec2:DescribeSubnets",
            "ec2:DescribeSecurityGroups",
            "ec2:DescribeNetworkInterfaces",
            "ec2:CreateNetworkInterface",
            "ec2:CreateNetworkInterfacePermission",
            "ec2:DeleteNetworkInterface"
          ],
          "Resource": [
            "*"
          ]
        }
      ]
    }
    `,
    });
    const test = new aws.kinesis.FirehoseDeliveryStream("test", {
        destination: "opensearch",
        opensearchConfiguration: {
            domainArn: testCluster.arn,
            roleArn: aws_iam_role.firehose.arn,
            indexName: "test",
            s3Configuration: {
                roleArn: aws_iam_role.firehose.arn,
                bucketArn: aws_s3_bucket.bucket.arn,
            },
            vpcConfig: {
                subnetIds: [
                    aws_subnet.first.id,
                    aws_subnet.second.id,
                ],
                securityGroupIds: [aws_security_group.first.id],
                roleArn: aws_iam_role.firehose.arn,
            },
        },
    }, {
        dependsOn: [firehose_opensearch],
    });
    
    resources:
      testCluster:
        type: aws:opensearch:Domain
        properties:
          clusterConfig:
            instanceCount: 2
            zoneAwarenessEnabled: true
            instanceType: m4.large.search
          ebsOptions:
            ebsEnabled: true
            volumeSize: 10
          vpcOptions:
            securityGroupIds:
              - ${aws_security_group.first.id}
            subnetIds:
              - ${aws_subnet.first.id}
              - ${aws_subnet.second.id}
      firehose-opensearch:
        type: aws:iam:RolePolicy
        properties:
          role: ${aws_iam_role.firehose.id}
          policy: |
            {
              "Version": "2012-10-17",
              "Statement": [
                {
                  "Effect": "Allow",
                  "Action": [
                    "es:*"
                  ],
                  "Resource": [
                    "${testCluster.arn}",
                    "${testCluster.arn}/*"
                  ]
                },
                {
                  "Effect": "Allow",
                  "Action": [
                    "ec2:DescribeVpcs",
                    "ec2:DescribeVpcAttribute",
                    "ec2:DescribeSubnets",
                    "ec2:DescribeSecurityGroups",
                    "ec2:DescribeNetworkInterfaces",
                    "ec2:CreateNetworkInterface",
                    "ec2:CreateNetworkInterfacePermission",
                    "ec2:DeleteNetworkInterface"
                  ],
                  "Resource": [
                    "*"
                  ]
                }
              ]
            }        
      test:
        type: aws:kinesis:FirehoseDeliveryStream
        properties:
          destination: opensearch
          opensearchConfiguration:
            domainArn: ${testCluster.arn}
            roleArn: ${aws_iam_role.firehose.arn}
            indexName: test
            s3Configuration:
              roleArn: ${aws_iam_role.firehose.arn}
              bucketArn: ${aws_s3_bucket.bucket.arn}
            vpcConfig:
              subnetIds:
                - ${aws_subnet.first.id}
                - ${aws_subnet.second.id}
              securityGroupIds:
                - ${aws_security_group.first.id}
              roleArn: ${aws_iam_role.firehose.arn}
        options:
      dependsOn:
            - ${["firehose-opensearch"]}
    

    Splunk Destination

    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Aws = Pulumi.Aws;
    
    return await Deployment.RunAsync(() => 
    {
        var testStream = new Aws.Kinesis.FirehoseDeliveryStream("testStream", new()
        {
            Destination = "splunk",
            SplunkConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamSplunkConfigurationArgs
            {
                HecEndpoint = "https://http-inputs-mydomain.splunkcloud.com:443",
                HecToken = "51D4DA16-C61B-4F5F-8EC7-ED4301342A4A",
                HecAcknowledgmentTimeout = 600,
                HecEndpointType = "Event",
                S3BackupMode = "FailedEventsOnly",
                S3Configuration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamSplunkConfigurationS3ConfigurationArgs
                {
                    RoleArn = aws_iam_role.Firehose.Arn,
                    BucketArn = aws_s3_bucket.Bucket.Arn,
                    BufferingSize = 10,
                    BufferingInterval = 400,
                    CompressionFormat = "GZIP",
                },
            },
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/kinesis"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := kinesis.NewFirehoseDeliveryStream(ctx, "testStream", &kinesis.FirehoseDeliveryStreamArgs{
    			Destination: pulumi.String("splunk"),
    			SplunkConfiguration: &kinesis.FirehoseDeliveryStreamSplunkConfigurationArgs{
    				HecEndpoint:              pulumi.String("https://http-inputs-mydomain.splunkcloud.com:443"),
    				HecToken:                 pulumi.String("51D4DA16-C61B-4F5F-8EC7-ED4301342A4A"),
    				HecAcknowledgmentTimeout: pulumi.Int(600),
    				HecEndpointType:          pulumi.String("Event"),
    				S3BackupMode:             pulumi.String("FailedEventsOnly"),
    				S3Configuration: &kinesis.FirehoseDeliveryStreamSplunkConfigurationS3ConfigurationArgs{
    					RoleArn:           pulumi.Any(aws_iam_role.Firehose.Arn),
    					BucketArn:         pulumi.Any(aws_s3_bucket.Bucket.Arn),
    					BufferingSize:     pulumi.Int(10),
    					BufferingInterval: pulumi.Int(400),
    					CompressionFormat: pulumi.String("GZIP"),
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.aws.kinesis.FirehoseDeliveryStream;
    import com.pulumi.aws.kinesis.FirehoseDeliveryStreamArgs;
    import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamSplunkConfigurationArgs;
    import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamSplunkConfigurationS3ConfigurationArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var testStream = new FirehoseDeliveryStream("testStream", FirehoseDeliveryStreamArgs.builder()        
                .destination("splunk")
                .splunkConfiguration(FirehoseDeliveryStreamSplunkConfigurationArgs.builder()
                    .hecEndpoint("https://http-inputs-mydomain.splunkcloud.com:443")
                    .hecToken("51D4DA16-C61B-4F5F-8EC7-ED4301342A4A")
                    .hecAcknowledgmentTimeout(600)
                    .hecEndpointType("Event")
                    .s3BackupMode("FailedEventsOnly")
                    .s3Configuration(FirehoseDeliveryStreamSplunkConfigurationS3ConfigurationArgs.builder()
                        .roleArn(aws_iam_role.firehose().arn())
                        .bucketArn(aws_s3_bucket.bucket().arn())
                        .bufferingSize(10)
                        .bufferingInterval(400)
                        .compressionFormat("GZIP")
                        .build())
                    .build())
                .build());
    
        }
    }
    
    import pulumi
    import pulumi_aws as aws
    
    test_stream = aws.kinesis.FirehoseDeliveryStream("testStream",
        destination="splunk",
        splunk_configuration=aws.kinesis.FirehoseDeliveryStreamSplunkConfigurationArgs(
            hec_endpoint="https://http-inputs-mydomain.splunkcloud.com:443",
            hec_token="51D4DA16-C61B-4F5F-8EC7-ED4301342A4A",
            hec_acknowledgment_timeout=600,
            hec_endpoint_type="Event",
            s3_backup_mode="FailedEventsOnly",
            s3_configuration=aws.kinesis.FirehoseDeliveryStreamSplunkConfigurationS3ConfigurationArgs(
                role_arn=aws_iam_role["firehose"]["arn"],
                bucket_arn=aws_s3_bucket["bucket"]["arn"],
                buffering_size=10,
                buffering_interval=400,
                compression_format="GZIP",
            ),
        ))
    
    import * as pulumi from "@pulumi/pulumi";
    import * as aws from "@pulumi/aws";
    
    const testStream = new aws.kinesis.FirehoseDeliveryStream("testStream", {
        destination: "splunk",
        splunkConfiguration: {
            hecEndpoint: "https://http-inputs-mydomain.splunkcloud.com:443",
            hecToken: "51D4DA16-C61B-4F5F-8EC7-ED4301342A4A",
            hecAcknowledgmentTimeout: 600,
            hecEndpointType: "Event",
            s3BackupMode: "FailedEventsOnly",
            s3Configuration: {
                roleArn: aws_iam_role.firehose.arn,
                bucketArn: aws_s3_bucket.bucket.arn,
                bufferingSize: 10,
                bufferingInterval: 400,
                compressionFormat: "GZIP",
            },
        },
    });
    
    resources:
      testStream:
        type: aws:kinesis:FirehoseDeliveryStream
        properties:
          destination: splunk
          splunkConfiguration:
            hecEndpoint: https://http-inputs-mydomain.splunkcloud.com:443
            hecToken: 51D4DA16-C61B-4F5F-8EC7-ED4301342A4A
            hecAcknowledgmentTimeout: 600
            hecEndpointType: Event
            s3BackupMode: FailedEventsOnly
            s3Configuration:
              roleArn: ${aws_iam_role.firehose.arn}
              bucketArn: ${aws_s3_bucket.bucket.arn}
              bufferingSize: 10
              bufferingInterval: 400
              compressionFormat: GZIP
    

    HTTP Endpoint (e.g., New Relic) Destination

    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Aws = Pulumi.Aws;
    
    return await Deployment.RunAsync(() => 
    {
        var testStream = new Aws.Kinesis.FirehoseDeliveryStream("testStream", new()
        {
            Destination = "http_endpoint",
            HttpEndpointConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamHttpEndpointConfigurationArgs
            {
                Url = "https://aws-api.newrelic.com/firehose/v1",
                Name = "New Relic",
                AccessKey = "my-key",
                BufferingSize = 15,
                BufferingInterval = 600,
                RoleArn = aws_iam_role.Firehose.Arn,
                S3BackupMode = "FailedDataOnly",
                S3Configuration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamHttpEndpointConfigurationS3ConfigurationArgs
                {
                    RoleArn = aws_iam_role.Firehose.Arn,
                    BucketArn = aws_s3_bucket.Bucket.Arn,
                    BufferingSize = 10,
                    BufferingInterval = 400,
                    CompressionFormat = "GZIP",
                },
                RequestConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamHttpEndpointConfigurationRequestConfigurationArgs
                {
                    ContentEncoding = "GZIP",
                    CommonAttributes = new[]
                    {
                        new Aws.Kinesis.Inputs.FirehoseDeliveryStreamHttpEndpointConfigurationRequestConfigurationCommonAttributeArgs
                        {
                            Name = "testname",
                            Value = "testvalue",
                        },
                        new Aws.Kinesis.Inputs.FirehoseDeliveryStreamHttpEndpointConfigurationRequestConfigurationCommonAttributeArgs
                        {
                            Name = "testname2",
                            Value = "testvalue2",
                        },
                    },
                },
            },
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/kinesis"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := kinesis.NewFirehoseDeliveryStream(ctx, "testStream", &kinesis.FirehoseDeliveryStreamArgs{
    			Destination: pulumi.String("http_endpoint"),
    			HttpEndpointConfiguration: &kinesis.FirehoseDeliveryStreamHttpEndpointConfigurationArgs{
    				Url:               pulumi.String("https://aws-api.newrelic.com/firehose/v1"),
    				Name:              pulumi.String("New Relic"),
    				AccessKey:         pulumi.String("my-key"),
    				BufferingSize:     pulumi.Int(15),
    				BufferingInterval: pulumi.Int(600),
    				RoleArn:           pulumi.Any(aws_iam_role.Firehose.Arn),
    				S3BackupMode:      pulumi.String("FailedDataOnly"),
    				S3Configuration: &kinesis.FirehoseDeliveryStreamHttpEndpointConfigurationS3ConfigurationArgs{
    					RoleArn:           pulumi.Any(aws_iam_role.Firehose.Arn),
    					BucketArn:         pulumi.Any(aws_s3_bucket.Bucket.Arn),
    					BufferingSize:     pulumi.Int(10),
    					BufferingInterval: pulumi.Int(400),
    					CompressionFormat: pulumi.String("GZIP"),
    				},
    				RequestConfiguration: &kinesis.FirehoseDeliveryStreamHttpEndpointConfigurationRequestConfigurationArgs{
    					ContentEncoding: pulumi.String("GZIP"),
    					CommonAttributes: kinesis.FirehoseDeliveryStreamHttpEndpointConfigurationRequestConfigurationCommonAttributeArray{
    						&kinesis.FirehoseDeliveryStreamHttpEndpointConfigurationRequestConfigurationCommonAttributeArgs{
    							Name:  pulumi.String("testname"),
    							Value: pulumi.String("testvalue"),
    						},
    						&kinesis.FirehoseDeliveryStreamHttpEndpointConfigurationRequestConfigurationCommonAttributeArgs{
    							Name:  pulumi.String("testname2"),
    							Value: pulumi.String("testvalue2"),
    						},
    					},
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.aws.kinesis.FirehoseDeliveryStream;
    import com.pulumi.aws.kinesis.FirehoseDeliveryStreamArgs;
    import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamHttpEndpointConfigurationArgs;
    import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamHttpEndpointConfigurationS3ConfigurationArgs;
    import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamHttpEndpointConfigurationRequestConfigurationArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var testStream = new FirehoseDeliveryStream("testStream", FirehoseDeliveryStreamArgs.builder()        
                .destination("http_endpoint")
                .httpEndpointConfiguration(FirehoseDeliveryStreamHttpEndpointConfigurationArgs.builder()
                    .url("https://aws-api.newrelic.com/firehose/v1")
                    .name("New Relic")
                    .accessKey("my-key")
                    .bufferingSize(15)
                    .bufferingInterval(600)
                    .roleArn(aws_iam_role.firehose().arn())
                    .s3BackupMode("FailedDataOnly")
                    .s3Configuration(FirehoseDeliveryStreamHttpEndpointConfigurationS3ConfigurationArgs.builder()
                        .roleArn(aws_iam_role.firehose().arn())
                        .bucketArn(aws_s3_bucket.bucket().arn())
                        .bufferingSize(10)
                        .bufferingInterval(400)
                        .compressionFormat("GZIP")
                        .build())
                    .requestConfiguration(FirehoseDeliveryStreamHttpEndpointConfigurationRequestConfigurationArgs.builder()
                        .contentEncoding("GZIP")
                        .commonAttributes(                    
                            FirehoseDeliveryStreamHttpEndpointConfigurationRequestConfigurationCommonAttributeArgs.builder()
                                .name("testname")
                                .value("testvalue")
                                .build(),
                            FirehoseDeliveryStreamHttpEndpointConfigurationRequestConfigurationCommonAttributeArgs.builder()
                                .name("testname2")
                                .value("testvalue2")
                                .build())
                        .build())
                    .build())
                .build());
    
        }
    }
    
    import pulumi
    import pulumi_aws as aws
    
    test_stream = aws.kinesis.FirehoseDeliveryStream("testStream",
        destination="http_endpoint",
        http_endpoint_configuration=aws.kinesis.FirehoseDeliveryStreamHttpEndpointConfigurationArgs(
            url="https://aws-api.newrelic.com/firehose/v1",
            name="New Relic",
            access_key="my-key",
            buffering_size=15,
            buffering_interval=600,
            role_arn=aws_iam_role["firehose"]["arn"],
            s3_backup_mode="FailedDataOnly",
            s3_configuration=aws.kinesis.FirehoseDeliveryStreamHttpEndpointConfigurationS3ConfigurationArgs(
                role_arn=aws_iam_role["firehose"]["arn"],
                bucket_arn=aws_s3_bucket["bucket"]["arn"],
                buffering_size=10,
                buffering_interval=400,
                compression_format="GZIP",
            ),
            request_configuration=aws.kinesis.FirehoseDeliveryStreamHttpEndpointConfigurationRequestConfigurationArgs(
                content_encoding="GZIP",
                common_attributes=[
                    aws.kinesis.FirehoseDeliveryStreamHttpEndpointConfigurationRequestConfigurationCommonAttributeArgs(
                        name="testname",
                        value="testvalue",
                    ),
                    aws.kinesis.FirehoseDeliveryStreamHttpEndpointConfigurationRequestConfigurationCommonAttributeArgs(
                        name="testname2",
                        value="testvalue2",
                    ),
                ],
            ),
        ))
    
    import * as pulumi from "@pulumi/pulumi";
    import * as aws from "@pulumi/aws";
    
    const testStream = new aws.kinesis.FirehoseDeliveryStream("testStream", {
        destination: "http_endpoint",
        httpEndpointConfiguration: {
            url: "https://aws-api.newrelic.com/firehose/v1",
            name: "New Relic",
            accessKey: "my-key",
            bufferingSize: 15,
            bufferingInterval: 600,
            roleArn: aws_iam_role.firehose.arn,
            s3BackupMode: "FailedDataOnly",
            s3Configuration: {
                roleArn: aws_iam_role.firehose.arn,
                bucketArn: aws_s3_bucket.bucket.arn,
                bufferingSize: 10,
                bufferingInterval: 400,
                compressionFormat: "GZIP",
            },
            requestConfiguration: {
                contentEncoding: "GZIP",
                commonAttributes: [
                    {
                        name: "testname",
                        value: "testvalue",
                    },
                    {
                        name: "testname2",
                        value: "testvalue2",
                    },
                ],
            },
        },
    });
    
    resources:
      testStream:
        type: aws:kinesis:FirehoseDeliveryStream
        properties:
          destination: http_endpoint
          httpEndpointConfiguration:
            url: https://aws-api.newrelic.com/firehose/v1
            name: New Relic
            accessKey: my-key
            bufferingSize: 15
            bufferingInterval: 600
            roleArn: ${aws_iam_role.firehose.arn}
            s3BackupMode: FailedDataOnly
            s3Configuration:
              roleArn: ${aws_iam_role.firehose.arn}
              bucketArn: ${aws_s3_bucket.bucket.arn}
              bufferingSize: 10
              bufferingInterval: 400
              compressionFormat: GZIP
            requestConfiguration:
              contentEncoding: GZIP
              commonAttributes:
                - name: testname
                  value: testvalue
                - name: testname2
                  value: testvalue2
    

    Create FirehoseDeliveryStream Resource

    new FirehoseDeliveryStream(name: string, args: FirehoseDeliveryStreamArgs, opts?: CustomResourceOptions);
    @overload
    def FirehoseDeliveryStream(resource_name: str,
                               opts: Optional[ResourceOptions] = None,
                               arn: Optional[str] = None,
                               destination: Optional[str] = None,
                               destination_id: Optional[str] = None,
                               elasticsearch_configuration: Optional[FirehoseDeliveryStreamElasticsearchConfigurationArgs] = None,
                               extended_s3_configuration: Optional[FirehoseDeliveryStreamExtendedS3ConfigurationArgs] = None,
                               http_endpoint_configuration: Optional[FirehoseDeliveryStreamHttpEndpointConfigurationArgs] = None,
                               kinesis_source_configuration: Optional[FirehoseDeliveryStreamKinesisSourceConfigurationArgs] = None,
                               name: Optional[str] = None,
                               opensearch_configuration: Optional[FirehoseDeliveryStreamOpensearchConfigurationArgs] = None,
                               redshift_configuration: Optional[FirehoseDeliveryStreamRedshiftConfigurationArgs] = None,
                               server_side_encryption: Optional[FirehoseDeliveryStreamServerSideEncryptionArgs] = None,
                               splunk_configuration: Optional[FirehoseDeliveryStreamSplunkConfigurationArgs] = None,
                               tags: Optional[Mapping[str, str]] = None,
                               version_id: Optional[str] = None)
    @overload
    def FirehoseDeliveryStream(resource_name: str,
                               args: FirehoseDeliveryStreamArgs,
                               opts: Optional[ResourceOptions] = None)
    func NewFirehoseDeliveryStream(ctx *Context, name string, args FirehoseDeliveryStreamArgs, opts ...ResourceOption) (*FirehoseDeliveryStream, error)
    public FirehoseDeliveryStream(string name, FirehoseDeliveryStreamArgs args, CustomResourceOptions? opts = null)
    public FirehoseDeliveryStream(String name, FirehoseDeliveryStreamArgs args)
    public FirehoseDeliveryStream(String name, FirehoseDeliveryStreamArgs args, CustomResourceOptions options)
    
    type: aws:kinesis:FirehoseDeliveryStream
    properties: # The arguments to resource properties.
    options: # Bag of options to control resource's behavior.
    
    
    name string
    The unique name of the resource.
    args FirehoseDeliveryStreamArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    resource_name str
    The unique name of the resource.
    args FirehoseDeliveryStreamArgs
    The arguments to resource properties.
    opts ResourceOptions
    Bag of options to control resource's behavior.
    ctx Context
    Context object for the current deployment.
    name string
    The unique name of the resource.
    args FirehoseDeliveryStreamArgs
    The arguments to resource properties.
    opts ResourceOption
    Bag of options to control resource's behavior.
    name string
    The unique name of the resource.
    args FirehoseDeliveryStreamArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    name String
    The unique name of the resource.
    args FirehoseDeliveryStreamArgs
    The arguments to resource properties.
    options CustomResourceOptions
    Bag of options to control resource's behavior.
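
    As a quick illustration of the constructor shape above, here is a minimal TypeScript sketch. The bucket and role are declared inline so the program is self-contained; all names are illustrative, not part of this page's examples:

    import * as aws from "@pulumi/aws";
    
    // Minimal sketch: destination plus its matching configuration block is the
    // smallest viable argument set for the constructor.
    const bucket = new aws.s3.BucketV2("sketch-bucket");
    const role = new aws.iam.Role("sketch-role", {
        assumeRolePolicy: JSON.stringify({
            Version: "2012-10-17",
            Statement: [{
                Effect: "Allow",
                Principal: { Service: "firehose.amazonaws.com" },
                Action: "sts:AssumeRole",
            }],
        }),
    });
    const minimal = new aws.kinesis.FirehoseDeliveryStream("minimal", {
        destination: "extended_s3",
        extendedS3Configuration: {
            roleArn: role.arn,     // role Firehose assumes to write to S3
            bucketArn: bucket.arn, // bucket receiving the delivered data
        },
    });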

    FirehoseDeliveryStream Resource Properties

    To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

    Inputs

    The FirehoseDeliveryStream resource accepts the following input properties:

    Destination string

    This is the destination where the data is delivered. The only options are s3 (Deprecated, use extended_s3 instead), extended_s3, redshift, elasticsearch, splunk, http_endpoint, and opensearch. More details are given below.

    Arn string

    The Amazon Resource Name (ARN) specifying the Stream

    DestinationId string
    ElasticsearchConfiguration FirehoseDeliveryStreamElasticsearchConfiguration

    Configuration options if elasticsearch is the destination. More details are given below.

    ExtendedS3Configuration FirehoseDeliveryStreamExtendedS3Configuration

    Enhanced configuration options for the s3 destination. More details are given below.

    HttpEndpointConfiguration FirehoseDeliveryStreamHttpEndpointConfiguration

    Configuration options if http_endpoint is the destination. Using http_endpoint_configuration requires the user to also specify an s3_configuration block. More details are given below.

    KinesisSourceConfiguration FirehoseDeliveryStreamKinesisSourceConfiguration

    Specifies the kinesis stream to use as the source of the firehose delivery stream.

    Name string

    A name to identify the stream. This is unique to the AWS account and region the Stream is created in. When using for WAF logging, name must be prefixed with aws-waf-logs-. See AWS Documentation for more details.

    OpensearchConfiguration FirehoseDeliveryStreamOpensearchConfiguration

    Configuration options if opensearch is the destination. More details are given below.

    RedshiftConfiguration FirehoseDeliveryStreamRedshiftConfiguration

    Configuration options if redshift is the destination. Using redshift_configuration requires the user to also specify an s3_configuration block. More details are given below.

    ServerSideEncryption FirehoseDeliveryStreamServerSideEncryption

    Encryption at rest options. Server-side encryption should not be enabled when a kinesis stream is configured as the source of the firehose delivery stream.

    SplunkConfiguration FirehoseDeliveryStreamSplunkConfiguration

    Configuration options if splunk is the destination. More details are given below.

    Tags Dictionary<string, string>

    A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.

    VersionId string

    Specifies the table version for the output data schema. Defaults to LATEST.

    Destination string

    This is the destination where the data is delivered. The only options are s3 (Deprecated, use extended_s3 instead), extended_s3, redshift, elasticsearch, splunk, http_endpoint, and opensearch. More details are given below.

    Arn string

    The Amazon Resource Name (ARN) specifying the Stream

    DestinationId string
    ElasticsearchConfiguration FirehoseDeliveryStreamElasticsearchConfigurationArgs

    Configuration options if elasticsearch is the destination. More details are given below.

    ExtendedS3Configuration FirehoseDeliveryStreamExtendedS3ConfigurationArgs

    Enhanced configuration options for the s3 destination. More details are given below.

    HttpEndpointConfiguration FirehoseDeliveryStreamHttpEndpointConfigurationArgs

    Configuration options if http_endpoint is the destination. Using http_endpoint_configuration requires the user to also specify an s3_configuration block. More details are given below.

    KinesisSourceConfiguration FirehoseDeliveryStreamKinesisSourceConfigurationArgs

    Specifies the kinesis stream to use as the source of the firehose delivery stream.

    Name string

    A name to identify the stream. This is unique to the AWS account and region the Stream is created in. When using for WAF logging, name must be prefixed with aws-waf-logs-. See AWS Documentation for more details.

    OpensearchConfiguration FirehoseDeliveryStreamOpensearchConfigurationArgs

    Configuration options if opensearch is the destination. More details are given below.

    RedshiftConfiguration FirehoseDeliveryStreamRedshiftConfigurationArgs

    Configuration options if redshift is the destination. Using redshift_configuration requires the user to also specify an s3_configuration block. More details are given below.

    ServerSideEncryption FirehoseDeliveryStreamServerSideEncryptionArgs

    Encryption at rest options. Server-side encryption should not be enabled when a kinesis stream is configured as the source of the firehose delivery stream.

    SplunkConfiguration FirehoseDeliveryStreamSplunkConfigurationArgs

    Configuration options if splunk is the destination. More details are given below.

    Tags map[string]string

    A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.

    VersionId string

    Specifies the table version for the output data schema. Defaults to LATEST.

    destination String

    This is the destination where the data is delivered. The only options are s3 (Deprecated, use extended_s3 instead), extended_s3, redshift, elasticsearch, splunk, http_endpoint, and opensearch. More details are given below.

    arn String

    The Amazon Resource Name (ARN) specifying the Stream

    destinationId String
    elasticsearchConfiguration FirehoseDeliveryStreamElasticsearchConfiguration

    Configuration options if elasticsearch is the destination. More details are given below.

    extendedS3Configuration FirehoseDeliveryStreamExtendedS3Configuration

    Enhanced configuration options for the s3 destination. More details are given below.

    httpEndpointConfiguration FirehoseDeliveryStreamHttpEndpointConfiguration

    Configuration options if http_endpoint is the destination. Using http_endpoint_configuration requires the user to also specify an s3_configuration block. More details are given below.

    kinesisSourceConfiguration FirehoseDeliveryStreamKinesisSourceConfiguration

    Specifies the kinesis stream to use as the source of the firehose delivery stream.

    name String

    A name to identify the stream. This is unique to the AWS account and region the Stream is created in. When using for WAF logging, name must be prefixed with aws-waf-logs-. See AWS Documentation for more details.

    opensearchConfiguration FirehoseDeliveryStreamOpensearchConfiguration

    Configuration options if opensearch is the destination. More details are given below.

    redshiftConfiguration FirehoseDeliveryStreamRedshiftConfiguration

    Configuration options if redshift is the destination. Using redshift_configuration requires the user to also specify an s3_configuration block. More details are given below.

    serverSideEncryption FirehoseDeliveryStreamServerSideEncryption

    Encryption at rest options. Server-side encryption should not be enabled when a kinesis stream is configured as the source of the firehose delivery stream.

    splunkConfiguration FirehoseDeliveryStreamSplunkConfiguration

    Configuration options if splunk is the destination. More details are given below.

    tags Map<String,String>

    A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.

    versionId String

    Specifies the table version for the output data schema. Defaults to LATEST.

    destination string

    This is the destination where the data is delivered. The only options are s3 (Deprecated, use extended_s3 instead), extended_s3, redshift, elasticsearch, splunk, http_endpoint, and opensearch. More details are given below.

    arn string

    The Amazon Resource Name (ARN) specifying the Stream

    destinationId string
    elasticsearchConfiguration FirehoseDeliveryStreamElasticsearchConfiguration

    Configuration options if elasticsearch is the destination. More details are given below.

    extendedS3Configuration FirehoseDeliveryStreamExtendedS3Configuration

    Enhanced configuration options for the s3 destination. More details are given below.

    httpEndpointConfiguration FirehoseDeliveryStreamHttpEndpointConfiguration

    Configuration options if http_endpoint is the destination. Using http_endpoint_configuration requires the user to also specify an s3_configuration block. More details are given below.

    kinesisSourceConfiguration FirehoseDeliveryStreamKinesisSourceConfiguration

    Specifies the kinesis stream to use as the source of the firehose delivery stream.

    name string

    A name to identify the stream. This is unique to the AWS account and region the Stream is created in. When using for WAF logging, name must be prefixed with aws-waf-logs-. See AWS Documentation for more details.

    opensearchConfiguration FirehoseDeliveryStreamOpensearchConfiguration

    Configuration options if opensearch is the destination. More details are given below.

    redshiftConfiguration FirehoseDeliveryStreamRedshiftConfiguration

    Configuration options if redshift is the destination. Using redshift_configuration requires the user to also specify an s3_configuration block. More details are given below.

    serverSideEncryption FirehoseDeliveryStreamServerSideEncryption

    Encryption at rest options. Server-side encryption should not be enabled when a kinesis stream is configured as the source of the firehose delivery stream.

    splunkConfiguration FirehoseDeliveryStreamSplunkConfiguration

    Configuration options if splunk is the destination. More details are given below.

    tags {[key: string]: string}

    A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.

    versionId string

    Specifies the table version for the output data schema. Defaults to LATEST.

    destination str

    This is the destination where the data is delivered. The only options are s3 (Deprecated, use extended_s3 instead), extended_s3, redshift, elasticsearch, splunk, http_endpoint, and opensearch. More details are given below.

    arn str

    The Amazon Resource Name (ARN) specifying the Stream

    destination_id str
    elasticsearch_configuration FirehoseDeliveryStreamElasticsearchConfigurationArgs

    Configuration options if elasticsearch is the destination. More details are given below.

    extended_s3_configuration FirehoseDeliveryStreamExtendedS3ConfigurationArgs

    Enhanced configuration options for the s3 destination. More details are given below.

    http_endpoint_configuration FirehoseDeliveryStreamHttpEndpointConfigurationArgs

    Configuration options if http_endpoint is the destination. Using http_endpoint_configuration requires the user to also specify an s3_configuration block. More details are given below.

    kinesis_source_configuration FirehoseDeliveryStreamKinesisSourceConfigurationArgs

    Specifies the kinesis stream to use as the source of the firehose delivery stream.

    name str

    A name to identify the stream. This is unique to the AWS account and region the Stream is created in. When using for WAF logging, name must be prefixed with aws-waf-logs-. See AWS Documentation for more details.

    opensearch_configuration FirehoseDeliveryStreamOpensearchConfigurationArgs

    Configuration options if opensearch is the destination. More details are given below.

    redshift_configuration FirehoseDeliveryStreamRedshiftConfigurationArgs

    Configuration options if redshift is the destination. Using redshift_configuration requires the user to also specify an s3_configuration block. More details are given below.

    server_side_encryption FirehoseDeliveryStreamServerSideEncryptionArgs

    Encryption at rest options. Server-side encryption should not be enabled when a kinesis stream is configured as the source of the firehose delivery stream.

    splunk_configuration FirehoseDeliveryStreamSplunkConfigurationArgs

    Configuration options if splunk is the destination. More details are given below.

    tags Mapping[str, str]

    A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.

    version_id str

    Specifies the table version for the output data schema. Defaults to LATEST.

    destination String

    This is the destination where the data is delivered. The only options are s3 (Deprecated, use extended_s3 instead), extended_s3, redshift, elasticsearch, splunk, http_endpoint, and opensearch. More details are given below.

    arn String

    The Amazon Resource Name (ARN) specifying the Stream

    destinationId String
    elasticsearchConfiguration Property Map

    Configuration options if elasticsearch is the destination. More details are given below.

    extendedS3Configuration Property Map

    Enhanced configuration options for the s3 destination. More details are given below.

    httpEndpointConfiguration Property Map

    Configuration options if http_endpoint is the destination. Using http_endpoint_configuration requires the user to also specify an s3_configuration block. More details are given below.

    kinesisSourceConfiguration Property Map

    Specifies the kinesis stream to use as the source of the firehose delivery stream.

    name String

    A name to identify the stream. This is unique to the AWS account and region the Stream is created in. When using for WAF logging, name must be prefixed with aws-waf-logs-. See AWS Documentation for more details.

    opensearchConfiguration Property Map

    Configuration options if opensearch is the destination. More details are given below.

    redshiftConfiguration Property Map

    Configuration options if redshift is the destination. Using redshift_configuration requires the user to also specify an s3_configuration block. More details are given below.

    serverSideEncryption Property Map

    Encryption at rest options. Server-side encryption should not be enabled when a kinesis stream is configured as the source of the firehose delivery stream; a short sketch follows these property lists.

    splunkConfiguration Property Map

    Configuration options if splunk is the destination. More details are given below.

    tags Map<String>

    A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.

    versionId String

    Specifies the table version for the output data schema. Defaults to LATEST.
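
    As a hedged illustration of the server-side encryption input described above, the following TypeScript sketch enables encryption on a direct-put stream. The role and bucket are assumed to be an existing aws.iam.Role and aws.s3.BucketV2 defined elsewhere; they are not part of this page's examples:

    import * as aws from "@pulumi/aws";
    
    // Sketch: server-side encryption is only valid when the stream is NOT sourced
    // from a kinesis stream (i.e., no kinesisSourceConfiguration is set).
    const encrypted = new aws.kinesis.FirehoseDeliveryStream("encrypted", {
        destination: "extended_s3",
        extendedS3Configuration: {
            roleArn: role.arn,     // assumed existing IAM role
            bucketArn: bucket.arn, // assumed existing S3 bucket
        },
        serverSideEncryption: {
            enabled: true,
            keyType: "AWS_OWNED_CMK", // or "CUSTOMER_MANAGED_CMK" together with keyArn
        },
    });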

    Outputs

    All input properties are implicitly available as output properties. Additionally, the FirehoseDeliveryStream resource produces the following output properties:

    Id string

    The provider-assigned unique ID for this managed resource.

    TagsAll Dictionary<string, string>

    A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

    Deprecated:

    Please use tags instead.

    Id string

    The provider-assigned unique ID for this managed resource.

    TagsAll map[string]string

    A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

    Deprecated:

    Please use tags instead.

    id String

    The provider-assigned unique ID for this managed resource.

    tagsAll Map<String,String>

    A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

    Deprecated:

    Please use tags instead.

    id string

    The provider-assigned unique ID for this managed resource.

    tagsAll {[key: string]: string}

    A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

    Deprecated:

    Please use tags instead.

    id str

    The provider-assigned unique ID for this managed resource.

    tags_all Mapping[str, str]

    A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

    Deprecated:

    Please use tags instead.

    id String

    The provider-assigned unique ID for this managed resource.

    tagsAll Map<String>

    A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

    Deprecated:

    Please use tags instead.
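
    Because inputs are implicitly available as outputs, a program can export them directly. In this TypeScript sketch, testStream is assumed to be one of the delivery streams created in the examples above:

    // Sketch: exporting the implicit outputs of a stream defined in this program.
    export const streamArn = testStream.arn; // ARN of the delivery stream
    export const streamId = testStream.id;   // provider-assigned unique ID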

    Look up Existing FirehoseDeliveryStream Resource

    Get an existing FirehoseDeliveryStream resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

    public static get(name: string, id: Input<ID>, state?: FirehoseDeliveryStreamState, opts?: CustomResourceOptions): FirehoseDeliveryStream
    @staticmethod
    def get(resource_name: str,
            id: str,
            opts: Optional[ResourceOptions] = None,
            arn: Optional[str] = None,
            destination: Optional[str] = None,
            destination_id: Optional[str] = None,
            elasticsearch_configuration: Optional[FirehoseDeliveryStreamElasticsearchConfigurationArgs] = None,
            extended_s3_configuration: Optional[FirehoseDeliveryStreamExtendedS3ConfigurationArgs] = None,
            http_endpoint_configuration: Optional[FirehoseDeliveryStreamHttpEndpointConfigurationArgs] = None,
            kinesis_source_configuration: Optional[FirehoseDeliveryStreamKinesisSourceConfigurationArgs] = None,
            name: Optional[str] = None,
            opensearch_configuration: Optional[FirehoseDeliveryStreamOpensearchConfigurationArgs] = None,
            redshift_configuration: Optional[FirehoseDeliveryStreamRedshiftConfigurationArgs] = None,
            server_side_encryption: Optional[FirehoseDeliveryStreamServerSideEncryptionArgs] = None,
            splunk_configuration: Optional[FirehoseDeliveryStreamSplunkConfigurationArgs] = None,
            tags: Optional[Mapping[str, str]] = None,
            tags_all: Optional[Mapping[str, str]] = None,
            version_id: Optional[str] = None) -> FirehoseDeliveryStream
    func GetFirehoseDeliveryStream(ctx *Context, name string, id IDInput, state *FirehoseDeliveryStreamState, opts ...ResourceOption) (*FirehoseDeliveryStream, error)
    public static FirehoseDeliveryStream Get(string name, Input<string> id, FirehoseDeliveryStreamState? state, CustomResourceOptions? opts = null)
    public static FirehoseDeliveryStream get(String name, Output<String> id, FirehoseDeliveryStreamState state, CustomResourceOptions options)
    Resource lookup is not supported in YAML
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    resource_name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
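
    As a hedged TypeScript example of the lookup signatures above (the ARN used as the resource ID here is a placeholder, not a real resource):

    import * as aws from "@pulumi/aws";
    
    // Sketch: adopt the state of an already-provisioned delivery stream by its ID
    // (for this resource the ID is the stream ARN; the ARN below is fictitious).
    const existing = aws.kinesis.FirehoseDeliveryStream.get(
        "existing-stream",
        "arn:aws:firehose:us-east-1:111122223333:deliverystream/example-stream");
    export const existingDestination = existing.destination;
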
    The following state arguments are supported:
    Arn string

    The Amazon Resource Name (ARN) specifying the Stream

    Destination string

    This is the destination where the data is delivered. The only options are s3 (Deprecated, use extended_s3 instead), extended_s3, redshift, elasticsearch, splunk, http_endpoint, and opensearch. More details are given below.

    DestinationId string
    ElasticsearchConfiguration FirehoseDeliveryStreamElasticsearchConfiguration

    Configuration options if elasticsearch is the destination. More details are given below.

    ExtendedS3Configuration FirehoseDeliveryStreamExtendedS3Configuration

    Enhanced configuration options for the s3 destination. More details are given below.

    HttpEndpointConfiguration FirehoseDeliveryStreamHttpEndpointConfiguration

    Configuration options if http_endpoint is the destination. Using http_endpoint_configuration requires the user to also specify an s3_configuration block. More details are given below.

    KinesisSourceConfiguration FirehoseDeliveryStreamKinesisSourceConfiguration

    Specifies the kinesis stream to use as the source of the firehose delivery stream.

    Name string

    A name to identify the stream. This is unique to the AWS account and region the Stream is created in. When using for WAF logging, name must be prefixed with aws-waf-logs-. See AWS Documentation for more details.

    OpensearchConfiguration FirehoseDeliveryStreamOpensearchConfiguration

    Configuration options if opensearch is the destination. More details are given below.

    RedshiftConfiguration FirehoseDeliveryStreamRedshiftConfiguration

    Configuration options if redshift is the destination. Using redshift_configuration requires the user to also specify an s3_configuration block. More details are given below.

    ServerSideEncryption FirehoseDeliveryStreamServerSideEncryption

    Encryption at rest options. Server-side encryption should not be enabled when a kinesis stream is configured as the source of the firehose delivery stream.

    SplunkConfiguration FirehoseDeliveryStreamSplunkConfiguration

    Configuration options if splunk is the destination. More details are given below.

    Tags Dictionary<string, string>

    A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.

    TagsAll Dictionary<string, string>

    A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

    Deprecated:

    Please use tags instead.

    VersionId string

    Specifies the table version for the output data schema. Defaults to LATEST.

    Arn string

    The Amazon Resource Name (ARN) specifying the Stream

    Destination string

    This is the destination where the data is delivered. The only options are s3 (Deprecated, use extended_s3 instead), extended_s3, redshift, elasticsearch, splunk, http_endpoint, and opensearch. More details are given below.

    DestinationId string
    ElasticsearchConfiguration FirehoseDeliveryStreamElasticsearchConfigurationArgs

    Configuration options if elasticsearch is the destination. More details are given below.

    ExtendedS3Configuration FirehoseDeliveryStreamExtendedS3ConfigurationArgs

    Enhanced configuration options for the s3 destination. More details are given below.

    HttpEndpointConfiguration FirehoseDeliveryStreamHttpEndpointConfigurationArgs

    Configuration options if http_endpoint is the destination. Using http_endpoint_configuration requires the user to also specify an s3_configuration block. More details are given below.

    KinesisSourceConfiguration FirehoseDeliveryStreamKinesisSourceConfigurationArgs

    Specifies the kinesis stream to use as the source of the firehose delivery stream.

    Name string

    A name to identify the stream. This is unique to the AWS account and region the Stream is created in. When using for WAF logging, name must be prefixed with aws-waf-logs-. See AWS Documentation for more details.

    OpensearchConfiguration FirehoseDeliveryStreamOpensearchConfigurationArgs

    Configuration options if opensearch is the destination. More details are given below.

    RedshiftConfiguration FirehoseDeliveryStreamRedshiftConfigurationArgs

    Configuration options if redshift is the destination. Using redshift_configuration requires the user to also specify an s3_configuration block. More details are given below.

    ServerSideEncryption FirehoseDeliveryStreamServerSideEncryptionArgs

    Encryption at rest options. Server-side encryption should not be enabled when a kinesis stream is configured as the source of the firehose delivery stream.

    SplunkConfiguration FirehoseDeliveryStreamSplunkConfigurationArgs

    Configuration options if splunk is the destination. More details are given below.

    Tags map[string]string

    A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.

    TagsAll map[string]string

    A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

    Deprecated:

    Please use tags instead.

    VersionId string

    Specifies the table version for the output data schema. Defaults to LATEST.

    arn String

    The Amazon Resource Name (ARN) specifying the Stream

    destination String

    This is the destination to which the data is delivered. The only options are s3 (Deprecated, use extended_s3 instead), extended_s3, redshift, elasticsearch, splunk, http_endpoint and opensearch. More details are given below.

    destinationId String
    elasticsearchConfiguration FirehoseDeliveryStreamElasticsearchConfiguration

    Configuration options if elasticsearch is the destination. More details are given below.

    extendedS3Configuration FirehoseDeliveryStreamExtendedS3Configuration

    Enhanced configuration options for the s3 destination. More details are given below.

    httpEndpointConfiguration FirehoseDeliveryStreamHttpEndpointConfiguration

    Configuration options if http_endpoint is the destination. Using http_endpoint_configuration requires the user to also specify an s3_configuration block. More details are given below.

    kinesisSourceConfiguration FirehoseDeliveryStreamKinesisSourceConfiguration

    Specifies the Kinesis stream used as the source of the Firehose delivery stream.

    name String

    A name to identify the stream. This is unique to the AWS account and region the Stream is created in. When using for WAF logging, name must be prefixed with aws-waf-logs-. See AWS Documentation for more details.

    opensearchConfiguration FirehoseDeliveryStreamOpensearchConfiguration

    Configuration options if opensearch is the destination. More details are given below.

    redshiftConfiguration FirehoseDeliveryStreamRedshiftConfiguration

    Configuration options if redshift is the destination. Using redshift_configuration requires the user to also specify an s3_configuration block. More details are given below.

    serverSideEncryption FirehoseDeliveryStreamServerSideEncryption

    Encryption at rest options. Server-side encryption should not be enabled when a kinesis stream is configured as the source of the firehose delivery stream.

    splunkConfiguration FirehoseDeliveryStreamSplunkConfiguration

    Configuration options if splunk is the destination. More details are given below.

    tags Map<String,String>

    A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.

    tagsAll Map<String,String>

    A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

    Deprecated:

    Please use tags instead.

    versionId String

    Specifies the table version for the output data schema. Defaults to LATEST.

    arn string

    The Amazon Resource Name (ARN) specifying the Stream

    destination string

    This is the destination to which the data is delivered. The only options are s3 (Deprecated, use extended_s3 instead), extended_s3, redshift, elasticsearch, splunk, http_endpoint and opensearch. More details are given below.

    destinationId string
    elasticsearchConfiguration FirehoseDeliveryStreamElasticsearchConfiguration

    Configuration options if elasticsearch is the destination. More details are given below.

    extendedS3Configuration FirehoseDeliveryStreamExtendedS3Configuration

    Enhanced configuration options for the s3 destination. More details are given below.

    httpEndpointConfiguration FirehoseDeliveryStreamHttpEndpointConfiguration

    Configuration options if http_endpoint is the destination. Using http_endpoint_configuration requires the user to also specify an s3_configuration block. More details are given below.

    kinesisSourceConfiguration FirehoseDeliveryStreamKinesisSourceConfiguration

    Specifies the Kinesis stream used as the source of the Firehose delivery stream.

    name string

    A name to identify the stream. This is unique to the AWS account and region the Stream is created in. When using for WAF logging, name must be prefixed with aws-waf-logs-. See AWS Documentation for more details.

    opensearchConfiguration FirehoseDeliveryStreamOpensearchConfiguration

    Configuration options if opensearch is the destination. More details are given below.

    redshiftConfiguration FirehoseDeliveryStreamRedshiftConfiguration

    Configuration options if redshift is the destination. Using redshift_configuration requires the user to also specify an s3_configuration block. More details are given below.

    serverSideEncryption FirehoseDeliveryStreamServerSideEncryption

    Encryption at rest options. Server-side encryption should not be enabled when a kinesis stream is configured as the source of the firehose delivery stream.

    splunkConfiguration FirehoseDeliveryStreamSplunkConfiguration

    Configuration options if splunk is the destination. More details are given below.

    tags {[key: string]: string}

    A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.

    tagsAll {[key: string]: string}

    A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

    Deprecated:

    Please use tags instead.

    versionId string

    Specifies the table version for the output data schema. Defaults to LATEST.

    arn str

    The Amazon Resource Name (ARN) specifying the Stream

    destination str

    This is the destination to which the data is delivered. The only options are s3 (Deprecated, use extended_s3 instead), extended_s3, redshift, elasticsearch, splunk, http_endpoint and opensearch. More details are given below.

    destination_id str
    elasticsearch_configuration FirehoseDeliveryStreamElasticsearchConfigurationArgs

    Configuration options if elasticsearch is the destination. More details are given below.

    extended_s3_configuration FirehoseDeliveryStreamExtendedS3ConfigurationArgs

    Enhanced configuration options for the s3 destination. More details are given below.

    http_endpoint_configuration FirehoseDeliveryStreamHttpEndpointConfigurationArgs

    Configuration options if http_endpoint is the destination. Using http_endpoint_configuration requires the user to also specify an s3_configuration block. More details are given below.

    kinesis_source_configuration FirehoseDeliveryStreamKinesisSourceConfigurationArgs

    Specifies the Kinesis stream used as the source of the Firehose delivery stream.

    name str

    A name to identify the stream. This is unique to the AWS account and region the Stream is created in. When using for WAF logging, name must be prefixed with aws-waf-logs-. See AWS Documentation for more details.

    opensearch_configuration FirehoseDeliveryStreamOpensearchConfigurationArgs

    Configuration options if opensearch is the destination. More details are given below.

    redshift_configuration FirehoseDeliveryStreamRedshiftConfigurationArgs

    Configuration options if redshift is the destination. Using redshift_configuration requires the user to also specify an s3_configuration block. More details are given below.

    server_side_encryption FirehoseDeliveryStreamServerSideEncryptionArgs

    Encryption at rest options. Server-side encryption should not be enabled when a kinesis stream is configured as the source of the firehose delivery stream.

    splunk_configuration FirehoseDeliveryStreamSplunkConfigurationArgs

    Configuration options if splunk is the destination. More details are given below.

    tags Mapping[str, str]

    A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.

    tags_all Mapping[str, str]

    A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

    Deprecated:

    Please use tags instead.

    version_id str

    Specifies the table version for the output data schema. Defaults to LATEST.

    arn String

    The Amazon Resource Name (ARN) specifying the Stream

    destination String

    This is the destination to which the data is delivered. The only options are s3 (Deprecated, use extended_s3 instead), extended_s3, redshift, elasticsearch, splunk, http_endpoint and opensearch. More details are given below.

    destinationId String
    elasticsearchConfiguration Property Map

    Configuration options if elasticsearch is the destination. More details are given below.

    extendedS3Configuration Property Map

    Enhanced configuration options for the s3 destination. More details are given below.

    httpEndpointConfiguration Property Map

    Configuration options if http_endpoint is the destination. Using http_endpoint_configuration requires the user to also specify an s3_configuration block. More details are given below.

    kinesisSourceConfiguration Property Map

    Specifies the Kinesis stream used as the source of the Firehose delivery stream.

    name String

    A name to identify the stream. This is unique to the AWS account and region the Stream is created in. When using for WAF logging, name must be prefixed with aws-waf-logs-. See AWS Documentation for more details.

    opensearchConfiguration Property Map

    Configuration options if opensearch is the destination. More details are given below.

    redshiftConfiguration Property Map

    Configuration options if redshift is the destination. Using redshift_configuration requires the user to also specify an s3_configuration block. More details are given below.

    serverSideEncryption Property Map

    Encryption at rest options. Server-side encryption should not be enabled when a kinesis stream is configured as the source of the firehose delivery stream.

    splunkConfiguration Property Map

    Configuration options if splunk is the destination. More details are given below.

    tags Map<String>

    A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.

    tagsAll Map<String>

    A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

    Deprecated:

    Please use tags instead.

    versionId String

    Specifies the table version for the output data schema. Defaults to LATEST.
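
    As a quick illustration of how these top-level arguments fit together, the following is a minimal sketch (not an official example) of a delivery stream that reads from an existing Kinesis stream; the stream, role, and bucket ARNs are placeholders, and kinesis_source_configuration is assumed to take the stream ARN and role ARN as shown. Server-side encryption is deliberately omitted, since it must not be enabled when a Kinesis stream is the source.

    using System.Collections.Generic;
    using Pulumi;
    using Aws = Pulumi.Aws;

    return await Deployment.RunAsync(() =>
    {
        var kinesisSourcedStream = new Aws.Kinesis.FirehoseDeliveryStream("kinesisSourcedStream", new()
        {
            Destination = "extended_s3",
            KinesisSourceConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamKinesisSourceConfigurationArgs
            {
                KinesisStreamArn = "arn:aws:kinesis:us-east-1:123456789012:stream/source-stream", // placeholder
                RoleArn = "arn:aws:iam::123456789012:role/firehose-source-role",                  // placeholder
            },
            ExtendedS3Configuration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationArgs
            {
                RoleArn = "arn:aws:iam::123456789012:role/firehose-delivery-role", // placeholder
                BucketArn = "arn:aws:s3:::my-firehose-bucket",                     // placeholder
            },
            Tags = new Dictionary<string, string>
            {
                { "Environment", "dev" },
            },
        });
    });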

    Supporting Types

    FirehoseDeliveryStreamElasticsearchConfiguration, FirehoseDeliveryStreamElasticsearchConfigurationArgs

    IndexName string

    The Elasticsearch index name.

    RoleArn string

    The ARN of the IAM role to be assumed by Firehose for calling the Amazon ES Configuration API and for indexing documents. The IAM role must have permission for DescribeElasticsearchDomain, DescribeElasticsearchDomains, and DescribeElasticsearchDomainConfig. The pattern needs to be arn:.*.

    S3Configuration FirehoseDeliveryStreamElasticsearchConfigurationS3Configuration

    The S3 Configuration. See s3_configuration for more details.

    BufferingInterval int

    Buffer incoming data for the specified period of time, in seconds, between 60 and 900, before delivering it to the destination. The default value is 300s.

    BufferingSize int

    Buffer incoming data to the specified size, in MBs, between 1 and 100, before delivering it to the destination. The default value is 5MB.

    CloudwatchLoggingOptions FirehoseDeliveryStreamElasticsearchConfigurationCloudwatchLoggingOptions

    The CloudWatch Logging Options for the delivery stream. More details are given below

    ClusterEndpoint string

    The endpoint to use when communicating with the cluster. Conflicts with domain_arn.

    DomainArn string

    The ARN of the Amazon ES domain. The pattern needs to be arn:.*. Conflicts with cluster_endpoint.

    IndexRotationPeriod string

    The Elasticsearch index rotation period. Index rotation appends a timestamp to the IndexName to facilitate expiration of old data. Valid values are NoRotation, OneHour, OneDay, OneWeek, and OneMonth. The default value is OneDay.

    ProcessingConfiguration FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfiguration

    The data processing configuration. More details are given below.

    RetryDuration int

    After an initial failure to deliver to Amazon Elasticsearch, the total amount of time, in seconds, between 0 and 7200, during which Firehose re-attempts delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. The default value is 300s. There will be no retry if the value is 0.

    S3BackupMode string

    Defines how documents should be delivered to Amazon S3. Valid values are FailedDocumentsOnly and AllDocuments. Default value is FailedDocumentsOnly.

    TypeName string

    The Elasticsearch type name with maximum length of 100 characters.

    VpcConfig FirehoseDeliveryStreamElasticsearchConfigurationVpcConfig

    The VPC configuration for the delivery stream to connect to Elasticsearch associated with the VPC. More details are given below

    IndexName string

    The Elasticsearch index name.

    RoleArn string

    The ARN of the IAM role to be assumed by Firehose for calling the Amazon ES Configuration API and for indexing documents. The IAM role must have permission for DescribeElasticsearchDomain, DescribeElasticsearchDomains, and DescribeElasticsearchDomainConfig. The pattern needs to be arn:.*.

    S3Configuration FirehoseDeliveryStreamElasticsearchConfigurationS3Configuration

    The S3 Configuration. See s3_configuration for more details.

    BufferingInterval int

    Buffer incoming data for the specified period of time, in seconds, between 60 and 900, before delivering it to the destination. The default value is 300s.

    BufferingSize int

    Buffer incoming data to the specified size, in MBs, between 1 and 100, before delivering it to the destination. The default value is 5MB.

    CloudwatchLoggingOptions FirehoseDeliveryStreamElasticsearchConfigurationCloudwatchLoggingOptions

    The CloudWatch Logging Options for the delivery stream. More details are given below

    ClusterEndpoint string

    The endpoint to use when communicating with the cluster. Conflicts with domain_arn.

    DomainArn string

    The ARN of the Amazon ES domain. The pattern needs to be arn:.*. Conflicts with cluster_endpoint.

    IndexRotationPeriod string

    The Elasticsearch index rotation period. Index rotation appends a timestamp to the IndexName to facilitate expiration of old data. Valid values are NoRotation, OneHour, OneDay, OneWeek, and OneMonth. The default value is OneDay.

    ProcessingConfiguration FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfiguration

    The data processing configuration. More details are given below.

    RetryDuration int

    After an initial failure to deliver to Amazon Elasticsearch, the total amount of time, in seconds, between 0 and 7200, during which Firehose re-attempts delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. The default value is 300s. There will be no retry if the value is 0.

    S3BackupMode string

    Defines how documents should be delivered to Amazon S3. Valid values are FailedDocumentsOnly and AllDocuments. Default value is FailedDocumentsOnly.

    TypeName string

    The Elasticsearch type name with maximum length of 100 characters.

    VpcConfig FirehoseDeliveryStreamElasticsearchConfigurationVpcConfig

    The VPC configuration for the delivery stream to connect to Elasticsearch associated with the VPC. More details are given below

    indexName String

    The Elasticsearch index name.

    roleArn String

    The ARN of the IAM role to be assumed by Firehose for calling the Amazon ES Configuration API and for indexing documents. The IAM role must have permission for DescribeElasticsearchDomain, DescribeElasticsearchDomains, and DescribeElasticsearchDomainConfig. The pattern needs to be arn:.*.

    s3Configuration FirehoseDeliveryStreamElasticsearchConfigurationS3Configuration

    The S3 Configuration. See s3_configuration for more details.

    bufferingInterval Integer

    Buffer incoming data for the specified period of time, in seconds, between 60 and 900, before delivering it to the destination. The default value is 300s.

    bufferingSize Integer

    Buffer incoming data to the specified size, in MBs, between 1 and 100, before delivering it to the destination. The default value is 5MB.

    cloudwatchLoggingOptions FirehoseDeliveryStreamElasticsearchConfigurationCloudwatchLoggingOptions

    The CloudWatch Logging Options for the delivery stream. More details are given below

    clusterEndpoint String

    The endpoint to use when communicating with the cluster. Conflicts with domain_arn.

    domainArn String

    The ARN of the Amazon ES domain. The pattern needs to be arn:.*. Conflicts with cluster_endpoint.

    indexRotationPeriod String

    The Elasticsearch index rotation period. Index rotation appends a timestamp to the IndexName to facilitate expiration of old data. Valid values are NoRotation, OneHour, OneDay, OneWeek, and OneMonth. The default value is OneDay.

    processingConfiguration FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfiguration

    The data processing configuration. More details are given below.

    retryDuration Integer

    After an initial failure to deliver to Amazon Elasticsearch, the total amount of time, in seconds, between 0 and 7200, during which Firehose re-attempts delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. The default value is 300s. There will be no retry if the value is 0.

    s3BackupMode String

    Defines how documents should be delivered to Amazon S3. Valid values are FailedDocumentsOnly and AllDocuments. Default value is FailedDocumentsOnly.

    typeName String

    The Elasticsearch type name with maximum length of 100 characters.

    vpcConfig FirehoseDeliveryStreamElasticsearchConfigurationVpcConfig

    The VPC configuration for the delivery stream to connect to Elasticsearch associated with the VPC. More details are given below

    indexName string

    The Elasticsearch index name.

    roleArn string

    The ARN of the IAM role to be assumed by Firehose for calling the Amazon ES Configuration API and for indexing documents. The IAM role must have permission for DescribeElasticsearchDomain, DescribeElasticsearchDomains, and DescribeElasticsearchDomainConfig. The pattern needs to be arn:.*.

    s3Configuration FirehoseDeliveryStreamElasticsearchConfigurationS3Configuration

    The S3 Configuration. See s3_configuration for more details.

    bufferingInterval number

    Buffer incoming data for the specified period of time, in seconds, between 60 and 900, before delivering it to the destination. The default value is 300s.

    bufferingSize number

    Buffer incoming data to the specified size, in MBs, between 1 and 100, before delivering it to the destination. The default value is 5MB.

    cloudwatchLoggingOptions FirehoseDeliveryStreamElasticsearchConfigurationCloudwatchLoggingOptions

    The CloudWatch Logging Options for the delivery stream. More details are given below

    clusterEndpoint string

    The endpoint to use when communicating with the cluster. Conflicts with domain_arn.

    domainArn string

    The ARN of the Amazon ES domain. The pattern needs to be arn:.*. Conflicts with cluster_endpoint.

    indexRotationPeriod string

    The Elasticsearch index rotation period. Index rotation appends a timestamp to the IndexName to facilitate expiration of old data. Valid values are NoRotation, OneHour, OneDay, OneWeek, and OneMonth. The default value is OneDay.

    processingConfiguration FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfiguration

    The data processing configuration. More details are given below.

    retryDuration number

    After an initial failure to deliver to Amazon Elasticsearch, the total amount of time, in seconds, between 0 and 7200, during which Firehose re-attempts delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. The default value is 300s. There will be no retry if the value is 0.

    s3BackupMode string

    Defines how documents should be delivered to Amazon S3. Valid values are FailedDocumentsOnly and AllDocuments. Default value is FailedDocumentsOnly.

    typeName string

    The Elasticsearch type name with maximum length of 100 characters.

    vpcConfig FirehoseDeliveryStreamElasticsearchConfigurationVpcConfig

    The VPC configuration for the delivery stream to connect to Elasticsearch associated with the VPC. More details are given below

    index_name str

    The Elasticsearch index name.

    role_arn str

    The ARN of the IAM role to be assumed by Firehose for calling the Amazon ES Configuration API and for indexing documents. The IAM role must have permission for DescribeElasticsearchDomain, DescribeElasticsearchDomains, and DescribeElasticsearchDomainConfig. The pattern needs to be arn:.*.

    s3_configuration FirehoseDeliveryStreamElasticsearchConfigurationS3Configuration

    The S3 Configuration. See s3_configuration for more details.

    buffering_interval int

    Buffer incoming data for the specified period of time, in seconds, between 60 and 900, before delivering it to the destination. The default value is 300s.

    buffering_size int

    Buffer incoming data to the specified size, in MBs, between 1 and 100, before delivering it to the destination. The default value is 5MB.

    cloudwatch_logging_options FirehoseDeliveryStreamElasticsearchConfigurationCloudwatchLoggingOptions

    The CloudWatch Logging Options for the delivery stream. More details are given below

    cluster_endpoint str

    The endpoint to use when communicating with the cluster. Conflicts with domain_arn.

    domain_arn str

    The ARN of the Amazon ES domain. The pattern needs to be arn:.*. Conflicts with cluster_endpoint.

    index_rotation_period str

    The Elasticsearch index rotation period. Index rotation appends a timestamp to the IndexName to facilitate expiration of old data. Valid values are NoRotation, OneHour, OneDay, OneWeek, and OneMonth. The default value is OneDay.

    processing_configuration FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfiguration

    The data processing configuration. More details are given below.

    retry_duration int

    After an initial failure to deliver to Amazon Elasticsearch, the total amount of time, in seconds, between 0 and 7200, during which Firehose re-attempts delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. The default value is 300s. There will be no retry if the value is 0.

    s3_backup_mode str

    Defines how documents should be delivered to Amazon S3. Valid values are FailedDocumentsOnly and AllDocuments. Default value is FailedDocumentsOnly.

    type_name str

    The Elasticsearch type name with maximum length of 100 characters.

    vpc_config FirehoseDeliveryStreamElasticsearchConfigurationVpcConfig

    The VPC configuration for the delivery stream to connect to Elasticsearch associated with the VPC. More details are given below

    indexName String

    The Elasticsearch index name.

    roleArn String

    The ARN of the IAM role to be assumed by Firehose for calling the Amazon ES Configuration API and for indexing documents. The IAM role must have permission for DescribeElasticsearchDomain, DescribeElasticsearchDomains, and DescribeElasticsearchDomainConfig. The pattern needs to be arn:.*.

    s3Configuration Property Map

    The S3 Configuration. See s3_configuration for more details.

    bufferingInterval Number

    Buffer incoming data for the specified period of time, in seconds, between 60 and 900, before delivering it to the destination. The default value is 300s.

    bufferingSize Number

    Buffer incoming data to the specified size, in MBs, between 1 and 100, before delivering it to the destination. The default value is 5MB.

    cloudwatchLoggingOptions Property Map

    The CloudWatch Logging Options for the delivery stream. More details are given below

    clusterEndpoint String

    The endpoint to use when communicating with the cluster. Conflicts with domain_arn.

    domainArn String

    The ARN of the Amazon ES domain. The pattern needs to be arn:.*. Conflicts with cluster_endpoint.

    indexRotationPeriod String

    The Elasticsearch index rotation period. Index rotation appends a timestamp to the IndexName to facilitate expiration of old data. Valid values are NoRotation, OneHour, OneDay, OneWeek, and OneMonth. The default value is OneDay.

    processingConfiguration Property Map

    The data processing configuration. More details are given below.

    retryDuration Number

    After an initial failure to deliver to Amazon Elasticsearch, the total amount of time, in seconds, between 0 and 7200, during which Firehose re-attempts delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. The default value is 300s. There will be no retry if the value is 0.

    s3BackupMode String

    Defines how documents should be delivered to Amazon S3. Valid values are FailedDocumentsOnly and AllDocuments. Default value is FailedDocumentsOnly.

    typeName String

    The Elasticsearch type name with maximum length of 100 characters.

    vpcConfig Property Map

    The VPC configuration for the delivery stream to connect to Elasticsearch associated with the VPC. More details are given below
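
    To make the shape of this block concrete, here is a minimal sketch (assumed values throughout; the domain, role, and bucket ARNs are placeholders) of an elasticsearch destination together with its required s3_configuration:

    using Pulumi;
    using Aws = Pulumi.Aws;

    return await Deployment.RunAsync(() =>
    {
        var esStream = new Aws.Kinesis.FirehoseDeliveryStream("esStream", new()
        {
            Destination = "elasticsearch",
            ElasticsearchConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamElasticsearchConfigurationArgs
            {
                DomainArn = "arn:aws:es:us-east-1:123456789012:domain/example", // placeholder
                RoleArn = "arn:aws:iam::123456789012:role/firehose-es-role",    // placeholder
                IndexName = "logs",
                IndexRotationPeriod = "OneDay",       // the default, shown for clarity
                BufferingInterval = 60,               // seconds, between 60 and 900
                BufferingSize = 10,                   // MBs, between 1 and 100
                S3BackupMode = "FailedDocumentsOnly", // the default, shown for clarity
                S3Configuration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamElasticsearchConfigurationS3ConfigurationArgs
                {
                    RoleArn = "arn:aws:iam::123456789012:role/firehose-es-role", // placeholder
                    BucketArn = "arn:aws:s3:::my-failed-docs-bucket",            // placeholder
                    CompressionFormat = "GZIP",
                },
            },
        });
    });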

    FirehoseDeliveryStreamElasticsearchConfigurationCloudwatchLoggingOptions, FirehoseDeliveryStreamElasticsearchConfigurationCloudwatchLoggingOptionsArgs

    Enabled bool

    Enables or disables the logging. Defaults to false.

    LogGroupName string

    The CloudWatch group name for logging. This value is required if enabled is true.

    LogStreamName string

    The CloudWatch log stream name for logging. This value is required if enabled is true.

    Enabled bool

    Enables or disables the logging. Defaults to false.

    LogGroupName string

    The CloudWatch group name for logging. This value is required if enabled is true.

    LogStreamName string

    The CloudWatch log stream name for logging. This value is required if enabled is true.

    enabled Boolean

    Enables or disables the logging. Defaults to false.

    logGroupName String

    The CloudWatch group name for logging. This value is required if enabled is true.

    logStreamName String

    The CloudWatch log stream name for logging. This value is required if enabled is true.

    enabled boolean

    Enables or disables the logging. Defaults to false.

    logGroupName string

    The CloudWatch group name for logging. This value is required if enabled is true.

    logStreamName string

    The CloudWatch log stream name for logging. This value is required if enabled is true.

    enabled bool

    Enables or disables the logging. Defaults to false.

    log_group_name str

    The CloudWatch group name for logging. This value is required if enabled is true.

    log_stream_name str

    The CloudWatch log stream name for logging. This value is required if enabled is true.

    enabled Boolean

    Enables or disables the logging. Defaults to false.

    logGroupName String

    The CloudWatch group name for logging. This value is required if enabled is true.

    logStreamName String

    The CloudWatch log stream name for logging. This value is required if enabled is true.
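
    As a small illustrative fragment (not a complete program), the rule that the group and stream names are required once enabled is true looks like this inside any of the configuration blocks above; the log group and stream names are assumptions:

    CloudwatchLoggingOptions = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamElasticsearchConfigurationCloudwatchLoggingOptionsArgs
    {
        Enabled = true,
        // Both names are required because Enabled is true; values are placeholders.
        LogGroupName = "/aws/kinesisfirehose/example-stream",
        LogStreamName = "DestinationDelivery",
    },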

    FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfiguration, FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationArgs

    Enabled bool

    Enables or disables data processing.

    Processors List<FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessor>

    Array of data processors. More details are given below

    Enabled bool

    Enables or disables data processing.

    Processors []FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessor

    Array of data processors. More details are given below

    enabled Boolean

    Enables or disables data processing.

    processors List<FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessor>

    Array of data processors. More details are given below

    enabled boolean

    Enables or disables data processing.

    processors FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessor[]

    Array of data processors. More details are given below

    enabled bool

    Enables or disables data processing.

    processors Sequence[FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessor]

    Array of data processors. More details are given below

    enabled Boolean

    Enables or disables data processing.

    processors List<Property Map>

    Array of data processors. More details are given below

    FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessor, FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorArgs

    Type string

    The type of processor. Valid Values: RecordDeAggregation, Lambda, MetadataExtraction, AppendDelimiterToRecord. Validation is done against AWS SDK constants, so values not explicitly listed may also work.

    Parameters List<FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorParameter>

    Array of processor parameters. More details are given below

    Type string

    The type of processor. Valid Values: RecordDeAggregation, Lambda, MetadataExtraction, AppendDelimiterToRecord. Validation is done against AWS SDK constants, so values not explicitly listed may also work.

    Parameters []FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorParameter

    Array of processor parameters. More details are given below

    type String

    The type of processor. Valid Values: RecordDeAggregation, Lambda, MetadataExtraction, AppendDelimiterToRecord. Validation is done against AWS SDK constants, so values not explicitly listed may also work.

    parameters List<FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorParameter>

    Array of processor parameters. More details are given below

    type string

    The type of processor. Valid Values: RecordDeAggregation, Lambda, MetadataExtraction, AppendDelimiterToRecord. Validation is done against AWS SDK constants, so values not explicitly listed may also work.

    parameters FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorParameter[]

    Array of processor parameters. More details are given below

    type str

    The type of processor. Valid Values: RecordDeAggregation, Lambda, MetadataExtraction, AppendDelimiterToRecord. Validation is done against AWS SDK constants, so values not explicitly listed may also work.

    parameters Sequence[FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorParameter]

    Array of processor parameters. More details are given below

    type String

    The type of processor. Valid Values: RecordDeAggregation, Lambda, MetadataExtraction, AppendDelimiterToRecord. Validation is done against AWS SDK constants, so values not explicitly listed may also work.

    parameters List<Property Map>

    Array of processor parameters. More details are given below

    FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorParameter, FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorParameterArgs

    ParameterName string

    Parameter name. Valid Values: LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter. Validation is done against AWS SDK constants, so values not explicitly listed may also work.

    ParameterValue string

    Parameter value. Must be between 1 and 512 characters in length (inclusive). When providing a Lambda ARN, you should specify the resource version as well.

    NOTE: Parameters with default values, including NumberOfRetries (default: 3), RoleArn (default: firehose role ARN), BufferSizeInMBs (default: 3), and BufferIntervalInSeconds (default: 60), are not stored in state. To prevent perpetual differences, it is therefore recommended to only include parameters with non-default values.

    ParameterName string

    Parameter name. Valid Values: LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter. Validation is done against AWS SDK constants, so values not explicitly listed may also work.

    ParameterValue string

    Parameter value. Must be between 1 and 512 characters in length (inclusive). When providing a Lambda ARN, you should specify the resource version as well.

    NOTE: Parameters with default values, including NumberOfRetries (default: 3), RoleArn (default: firehose role ARN), BufferSizeInMBs (default: 3), and BufferIntervalInSeconds (default: 60), are not stored in state. To prevent perpetual differences, it is therefore recommended to only include parameters with non-default values.

    parameterName String

    Parameter name. Valid Values: LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter. Validation is done against AWS SDK constants, so values not explicitly listed may also work.

    parameterValue String

    Parameter value. Must be between 1 and 512 characters in length (inclusive). When providing a Lambda ARN, you should specify the resource version as well.

    NOTE: Parameters with default values, including NumberOfRetries (default: 3), RoleArn (default: firehose role ARN), BufferSizeInMBs (default: 3), and BufferIntervalInSeconds (default: 60), are not stored in state. To prevent perpetual differences, it is therefore recommended to only include parameters with non-default values.

    parameterName string

    Parameter name. Valid Values: LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter. Validation is done against AWS SDK constants, so values not explicitly listed may also work.

    parameterValue string

    Parameter value. Must be between 1 and 512 characters in length (inclusive). When providing a Lambda ARN, you should specify the resource version as well.

    NOTE: Parameters with default values, including NumberOfRetries (default: 3), RoleArn (default: firehose role ARN), BufferSizeInMBs (default: 3), and BufferIntervalInSeconds (default: 60), are not stored in state. To prevent perpetual differences, it is therefore recommended to only include parameters with non-default values.

    parameter_name str

    Parameter name. Valid Values: LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter. Validation is done against AWS SDK constants, so values not explicitly listed may also work.

    parameter_value str

    Parameter value. Must be between 1 and 512 characters in length (inclusive). When providing a Lambda ARN, you should specify the resource version as well.

    NOTE: Parameters with default values, including NumberOfRetries (default: 3), RoleArn (default: firehose role ARN), BufferSizeInMBs (default: 3), and BufferIntervalInSeconds (default: 60), are not stored in state. To prevent perpetual differences, it is therefore recommended to only include parameters with non-default values.

    parameterName String

    Parameter name. Valid Values: LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter. Validation is done against AWS SDK constants, so values not explicitly listed may also work.

    parameterValue String

    Parameter value. Must be between 1 and 512 characters in length (inclusive). When providing a Lambda ARN, you should specify the resource version as well.

    NOTE: Parameters with default values, including NumberOfRetries (default: 3), RoleArn (default: firehose role ARN), BufferSizeInMBs (default: 3), and BufferIntervalInSeconds (default: 60), are not stored in state. To prevent perpetual differences, it is therefore recommended to only include parameters with non-default values.
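
    Putting the processing configuration, processor, and parameter types together, here is a sketch of a Lambda processor that sets only a non-default parameter, per the note above; lambdaProcessor is assumed to be an Aws.Lambda.Function defined elsewhere, and this fragment belongs inside a destination configuration block:

    ProcessingConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationArgs
    {
        Enabled = true,
        Processors = new[]
        {
            new Aws.Kinesis.Inputs.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorArgs
            {
                Type = "Lambda",
                Parameters = new[]
                {
                    new Aws.Kinesis.Inputs.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorParameterArgs
                    {
                        ParameterName = "LambdaArn",
                        // Include the function version qualifier, as recommended above.
                        ParameterValue = lambdaProcessor.Arn.Apply(arn => $"{arn}:$LATEST"),
                    },
                },
            },
        },
    },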

    FirehoseDeliveryStreamElasticsearchConfigurationS3Configuration, FirehoseDeliveryStreamElasticsearchConfigurationS3ConfigurationArgs

    BucketArn string

    The ARN of the S3 bucket

    RoleArn string

    The ARN of the role that provides access to the source Kinesis stream.

    BufferingInterval int

    Buffer incoming data for the specified period of time, in seconds, between 60 and 900, before delivering it to the destination. The default value is 300s.

    BufferingSize int

    Buffer incoming data to the specified size, in MBs, between 1 and 100, before delivering it to the destination. The default value is 5MB. We recommend setting SizeInMBs to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, set SizeInMBs to 10 MB or higher.

    CloudwatchLoggingOptions FirehoseDeliveryStreamElasticsearchConfigurationS3ConfigurationCloudwatchLoggingOptions

    The CloudWatch Logging Options for the delivery stream. More details are given below

    CompressionFormat string

    The compression format. If no value is specified, the default is UNCOMPRESSED. Other supported values are GZIP, ZIP, Snappy, & HADOOP_SNAPPY.

    ErrorOutputPrefix string

    Prefix added to failed records before writing them to S3. Not currently supported for redshift destination. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.

    KmsKeyArn string

    Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will be used.

    Prefix string

    The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket

    BucketArn string

    The ARN of the S3 bucket

    RoleArn string

    The ARN of the role that provides access to the source Kinesis stream.

    BufferingInterval int

    Buffer incoming data for the specified period of time, in seconds, between 60 and 900, before delivering it to the destination. The default value is 300s.

    BufferingSize int

    Buffer incoming data to the specified size, in MBs, between 1 and 100, before delivering it to the destination. The default value is 5MB. We recommend setting SizeInMBs to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, set SizeInMBs to 10 MB or higher.

    CloudwatchLoggingOptions FirehoseDeliveryStreamElasticsearchConfigurationS3ConfigurationCloudwatchLoggingOptions

    The CloudWatch Logging Options for the delivery stream. More details are given below

    CompressionFormat string

    The compression format. If no value is specified, the default is UNCOMPRESSED. Other supported values are GZIP, ZIP, Snappy, & HADOOP_SNAPPY.

    ErrorOutputPrefix string

    Prefix added to failed records before writing them to S3. Not currently supported for redshift destination. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.

    KmsKeyArn string

    Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will be used.

    Prefix string

    The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket

    bucketArn String

    The ARN of the S3 bucket

    roleArn String

    The ARN of the role that provides access to the source Kinesis stream.

    bufferingInterval Integer

    Buffer incoming data for the specified period of time, in seconds, between 60 and 900, before delivering it to the destination. The default value is 300s.

    bufferingSize Integer

    Buffer incoming data to the specified size, in MBs, between 1 and 100, before delivering it to the destination. The default value is 5MB. We recommend setting SizeInMBs to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, set SizeInMBs to 10 MB or higher.

    cloudwatchLoggingOptions FirehoseDeliveryStreamElasticsearchConfigurationS3ConfigurationCloudwatchLoggingOptions

    The CloudWatch Logging Options for the delivery stream. More details are given below

    compressionFormat String

    The compression format. If no value is specified, the default is UNCOMPRESSED. Other supported values are GZIP, ZIP, Snappy, & HADOOP_SNAPPY.

    errorOutputPrefix String

    Prefix added to failed records before writing them to S3. Not currently supported for redshift destination. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.

    kmsKeyArn String

    Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will be used.

    prefix String

    The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket

    bucketArn string

    The ARN of the S3 bucket

    roleArn string

    The ARN of the role that provides access to the source Kinesis stream.

    bufferingInterval number

    Buffer incoming data for the specified period of time, in seconds, between 60 and 900, before delivering it to the destination. The default value is 300s.

    bufferingSize number

    Buffer incoming data to the specified size, in MBs, between 1 and 100, before delivering it to the destination. The default value is 5MB. We recommend setting SizeInMBs to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, set SizeInMBs to 10 MB or higher.

    cloudwatchLoggingOptions FirehoseDeliveryStreamElasticsearchConfigurationS3ConfigurationCloudwatchLoggingOptions

    The CloudWatch Logging Options for the delivery stream. More details are given below

    compressionFormat string

    The compression format. If no value is specified, the default is UNCOMPRESSED. Other supported values are GZIP, ZIP, Snappy, & HADOOP_SNAPPY.

    errorOutputPrefix string

    Prefix added to failed records before writing them to S3. Not currently supported for redshift destination. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.

    kmsKeyArn string

    Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will be used.

    prefix string

    The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket

    bucket_arn str

    The ARN of the S3 bucket

    role_arn str

    The ARN of the role that provides access to the source Kinesis stream.

    buffering_interval int

    Buffer incoming data for the specified period of time, in seconds, between 60 and 900, before delivering it to the destination. The default value is 300s.

    buffering_size int

    Buffer incoming data to the specified size, in MBs, between 1 and 100, before delivering it to the destination. The default value is 5MB. We recommend setting SizeInMBs to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, set SizeInMBs to 10 MB or higher.

    cloudwatch_logging_options FirehoseDeliveryStreamElasticsearchConfigurationS3ConfigurationCloudwatchLoggingOptions

    The CloudWatch Logging Options for the delivery stream. More details are given below

    compression_format str

    The compression format. If no value is specified, the default is UNCOMPRESSED. Other supported values are GZIP, ZIP, Snappy, & HADOOP_SNAPPY.

    error_output_prefix str

    Prefix added to failed records before writing them to S3. Not currently supported for redshift destination. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.

    kms_key_arn str

    Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will be used.

    prefix str

    The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket

    bucketArn String

    The ARN of the S3 bucket

    roleArn String

    The ARN of the role that provides access to the source Kinesis stream.

    bufferingInterval Number

    Buffer incoming data for the specified period of time, in seconds, between 60 and 900, before delivering it to the destination. The default value is 300s.

    bufferingSize Number

    Buffer incoming data to the specified size, in MBs, between 1 and 100, before delivering it to the destination. The default value is 5MB. We recommend setting SizeInMBs to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, set SizeInMBs to 10 MB or higher.

    cloudwatchLoggingOptions Property Map

    The CloudWatch Logging Options for the delivery stream. More details are given below

    compressionFormat String

    The compression format. If no value is specified, the default is UNCOMPRESSED. Other supported values are GZIP, ZIP, Snappy, & HADOOP_SNAPPY.

    errorOutputPrefix String

    Prefix added to failed records before writing them to S3. Not currently supported for redshift destination. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.

    kmsKeyArn String

    Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will be used.

    prefix String

    The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket

    FirehoseDeliveryStreamElasticsearchConfigurationS3ConfigurationCloudwatchLoggingOptions, FirehoseDeliveryStreamElasticsearchConfigurationS3ConfigurationCloudwatchLoggingOptionsArgs

    Enabled bool

    Enables or disables the logging. Defaults to false.

    LogGroupName string

    The CloudWatch group name for logging. This value is required if enabled is true.

    LogStreamName string

    The CloudWatch log stream name for logging. This value is required if enabled is true.

    Enabled bool

    Enables or disables the logging. Defaults to false.

    LogGroupName string

    The CloudWatch group name for logging. This value is required if enabled is true.

    LogStreamName string

    The CloudWatch log stream name for logging. This value is required if enabled is true.

    enabled Boolean

    Enables or disables the logging. Defaults to false.

    logGroupName String

    The CloudWatch group name for logging. This value is required if enabled is true.

    logStreamName String

    The CloudWatch log stream name for logging. This value is required if enabled is true.

    enabled boolean

    Enables or disables the logging. Defaults to false.

    logGroupName string

    The CloudWatch group name for logging. This value is required if enabled is true.

    logStreamName string

    The CloudWatch log stream name for logging. This value is required if enabled is true.

    enabled bool

    Enables or disables the logging. Defaults to false.

    log_group_name str

    The CloudWatch group name for logging. This value is required if enabled is true.

    log_stream_name str

    The CloudWatch log stream name for logging. This value is required if enabled is true.

    enabled Boolean

    Enables or disables the logging. Defaults to false.

    logGroupName String

    The CloudWatch group name for logging. This value is required if enabled is true.

    logStreamName String

    The CloudWatch log stream name for logging. This value is required if enabled is true.

    FirehoseDeliveryStreamElasticsearchConfigurationVpcConfig, FirehoseDeliveryStreamElasticsearchConfigurationVpcConfigArgs

    RoleArn string

    The ARN of the IAM role to be assumed by Firehose for calling the Amazon EC2 configuration API and for creating network interfaces. Make sure the role has the necessary IAM permissions.

    SecurityGroupIds List<string>

    A list of security group IDs to associate with Kinesis Firehose.

    SubnetIds List<string>

    A list of subnet IDs to associate with Kinesis Firehose.

    VpcId string
    RoleArn string

    The ARN of the IAM role to be assumed by Firehose for calling the Amazon EC2 configuration API and for creating network interfaces. Make sure the role has the necessary IAM permissions.

    SecurityGroupIds []string

    A list of security group IDs to associate with Kinesis Firehose.

    SubnetIds []string

    A list of subnet IDs to associate with Kinesis Firehose.

    VpcId string
    roleArn String

    The ARN of the IAM role to be assumed by Firehose for calling the Amazon EC2 configuration API and for creating network interfaces. Make sure the role has the necessary IAM permissions.

    securityGroupIds List<String>

    A list of security group IDs to associate with Kinesis Firehose.

    subnetIds List<String>

    A list of subnet IDs to associate with Kinesis Firehose.

    vpcId String
    roleArn string

    The ARN of the IAM role to be assumed by Firehose for calling the Amazon EC2 configuration API and for creating network interfaces. Make sure the role has the necessary IAM permissions.

    securityGroupIds string[]

    A list of security group IDs to associate with Kinesis Firehose.

    subnetIds string[]

    A list of subnet IDs to associate with Kinesis Firehose.

    vpcId string
    role_arn str

    The ARN of the IAM role to be assumed by Firehose for calling the Amazon EC2 configuration API and for creating network interfaces. Make sure the role has the necessary IAM permissions.

    security_group_ids Sequence[str]

    A list of security group IDs to associate with Kinesis Firehose.

    subnet_ids Sequence[str]

    A list of subnet IDs to associate with Kinesis Firehose.

    vpc_id str
    roleArn String

    The ARN of the IAM role to be assumed by Firehose for calling the Amazon EC2 configuration API and for creating network interfaces. Make sure the role has the necessary IAM permissions.

    securityGroupIds List<String>

    A list of security group IDs to associate with Kinesis Firehose.

    subnetIds List<String>

    A list of subnet IDs to associate with Kinesis Firehose.

    vpcId String
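
    A sketch of this block; the subnet, security group, and role identifiers are placeholders, and the role is assumed to carry the EC2 network-interface permissions mentioned above:

    VpcConfig = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamElasticsearchConfigurationVpcConfigArgs
    {
        RoleArn = "arn:aws:iam::123456789012:role/firehose-vpc-role",
        SubnetIds = new[] { "subnet-0123456789abcdef0" },
        SecurityGroupIds = new[] { "sg-0123456789abcdef0" },
    },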

    FirehoseDeliveryStreamExtendedS3Configuration, FirehoseDeliveryStreamExtendedS3ConfigurationArgs

    BucketArn string

    The ARN of the S3 bucket

    RoleArn string

    The ARN of the role that provides access to the source Kinesis stream.

    BufferingInterval int

    Buffer incoming data for the specified period of time, in seconds, between 60 and 900, before delivering it to the destination. The default value is 300s.

    BufferingSize int

    Buffer incoming data to the specified size, in MBs, between 1 and 100, before delivering it to the destination. The default value is 5MB. We recommend setting SizeInMBs to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, set SizeInMBs to 10 MB or higher.

    CloudwatchLoggingOptions FirehoseDeliveryStreamExtendedS3ConfigurationCloudwatchLoggingOptions

    The CloudWatch Logging Options for the delivery stream. More details are given below

    CompressionFormat string

    The compression format. If no value is specified, the default is UNCOMPRESSED. Other supported values are GZIP, ZIP, Snappy, & HADOOP_SNAPPY.

    DataFormatConversionConfiguration FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfiguration

    Nested argument for the serializer, deserializer, and schema for converting data from the JSON format to the Parquet or ORC format before writing it to Amazon S3. More details are given below.

    DynamicPartitioningConfiguration FirehoseDeliveryStreamExtendedS3ConfigurationDynamicPartitioningConfiguration

    The configuration for dynamic partitioning. See Dynamic Partitioning Configuration below for more details. Required when using dynamic partitioning.

    ErrorOutputPrefix string

    Prefix added to failed records before writing them to S3. Not currently supported for redshift destination. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.

    KmsKeyArn string

    Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will be used.

    Prefix string

    The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket

    ProcessingConfiguration FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfiguration

    The data processing configuration. More details are given below.

    S3BackupConfiguration FirehoseDeliveryStreamExtendedS3ConfigurationS3BackupConfiguration

    The configuration for backup in Amazon S3. Required if s3_backup_mode is Enabled. Supports the same fields as the s3_configuration object.

    S3BackupMode string

    The Amazon S3 backup mode. Valid values are Disabled and Enabled. Default value is Disabled.

    BucketArn string

    The ARN of the S3 bucket

    RoleArn string

    The ARN of the role that provides access to the source Kinesis stream.

    BufferingInterval int

    Buffer incoming data for the specified period of time, in seconds, between 60 and 900, before delivering it to the destination. The default value is 300s.

    BufferingSize int

    Buffer incoming data to the specified size, in MBs, between 1 and 100, before delivering it to the destination. The default value is 5MB. We recommend setting SizeInMBs to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, set SizeInMBs to 10 MB or higher.

    CloudwatchLoggingOptions FirehoseDeliveryStreamExtendedS3ConfigurationCloudwatchLoggingOptions

    The CloudWatch Logging Options for the delivery stream. More details are given below

    CompressionFormat string

    The compression format. If no value is specified, the default is UNCOMPRESSED. Other supported values are GZIP, ZIP, Snappy, & HADOOP_SNAPPY.

    DataFormatConversionConfiguration FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfiguration

    Nested argument for the serializer, deserializer, and schema for converting data from the JSON format to the Parquet or ORC format before writing it to Amazon S3. More details are given below.

    DynamicPartitioningConfiguration FirehoseDeliveryStreamExtendedS3ConfigurationDynamicPartitioningConfiguration

    The configuration for dynamic partitioning. See Dynamic Partitioning Configuration below for more details. Required when using dynamic partitioning.

    ErrorOutputPrefix string

    Prefix added to failed records before writing them to S3. Not currently supported for the Redshift destination. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.

    KmsKeyArn string

    Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will be used.

    Prefix string

    The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket

    ProcessingConfiguration FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfiguration

    The data processing configuration. More details are given below.

    S3BackupConfiguration FirehoseDeliveryStreamExtendedS3ConfigurationS3BackupConfiguration

    The configuration for backup in Amazon S3. Required if s3_backup_mode is Enabled. Supports the same fields as the s3_configuration object.

    S3BackupMode string

    The Amazon S3 backup mode. Valid values are Disabled and Enabled. Default value is Disabled.

    bucketArn String

    The ARN of the S3 bucket.

    roleArn String

    The ARN of the role that provides access to the source Kinesis stream.

    bufferingInterval Integer

    Buffer incoming data for the specified period of time, in seconds, between 60 and 900, before delivering it to the destination. The default value is 300 seconds.

    bufferingSize Integer

    Buffer incoming data to the specified size, in MBs, between 1 and 100, before delivering it to the destination. The default value is 5 MB. We recommend setting SizeInMBs to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, set SizeInMBs to 10 MB or higher.

    cloudwatchLoggingOptions FirehoseDeliveryStreamExtendedS3ConfigurationCloudwatchLoggingOptions

    The CloudWatch Logging Options for the delivery stream. More details are given below.

    compressionFormat String

    The compression format. If no value is specified, the default is UNCOMPRESSED. Other supported values are GZIP, ZIP, Snappy, and HADOOP_SNAPPY.

    dataFormatConversionConfiguration FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfiguration

    Nested argument for the serializer, deserializer, and schema for converting data from the JSON format to the Parquet or ORC format before writing it to Amazon S3. More details are given below.

    dynamicPartitioningConfiguration FirehoseDeliveryStreamExtendedS3ConfigurationDynamicPartitioningConfiguration

    The configuration for dynamic partitioning. See Dynamic Partitioning Configuration below for more details. Required when using dynamic partitioning.

    errorOutputPrefix String

    Prefix added to failed records before writing them to S3. Not currently supported for the Redshift destination. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.

    kmsKeyArn String

    Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will be used.

    prefix String

    The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket

    processingConfiguration FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfiguration

    The data processing configuration. More details are given below.

    s3BackupConfiguration FirehoseDeliveryStreamExtendedS3ConfigurationS3BackupConfiguration

    The configuration for backup in Amazon S3. Required if s3_backup_mode is Enabled. Supports the same fields as the s3_configuration object.

    s3BackupMode String

    The Amazon S3 backup mode. Valid values are Disabled and Enabled. Default value is Disabled.

    bucketArn string

    The ARN of the S3 bucket.

    roleArn string

    The ARN of the role that provides access to the source Kinesis stream.

    bufferingInterval number

    Buffer incoming data for the specified period of time, in seconds, between 60 and 900, before delivering it to the destination. The default value is 300 seconds.

    bufferingSize number

    Buffer incoming data to the specified size, in MBs, between 1 and 100, before delivering it to the destination. The default value is 5 MB. We recommend setting SizeInMBs to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, set SizeInMBs to 10 MB or higher.

    cloudwatchLoggingOptions FirehoseDeliveryStreamExtendedS3ConfigurationCloudwatchLoggingOptions

    The CloudWatch Logging Options for the delivery stream. More details are given below.

    compressionFormat string

    The compression format. If no value is specified, the default is UNCOMPRESSED. Other supported values are GZIP, ZIP, Snappy, and HADOOP_SNAPPY.

    dataFormatConversionConfiguration FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfiguration

    Nested argument for the serializer, deserializer, and schema for converting data from the JSON format to the Parquet or ORC format before writing it to Amazon S3. More details are given below.

    dynamicPartitioningConfiguration FirehoseDeliveryStreamExtendedS3ConfigurationDynamicPartitioningConfiguration

    The configuration for dynamic partitioning. See Dynamic Partitioning Configuration below for more details. Required when using dynamic partitioning.

    errorOutputPrefix string

    Prefix added to failed records before writing them to S3. Not currently supported for the Redshift destination. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.

    kmsKeyArn string

    Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will be used.

    prefix string

    The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket

    processingConfiguration FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfiguration

    The data processing configuration. More details are given below.

    s3BackupConfiguration FirehoseDeliveryStreamExtendedS3ConfigurationS3BackupConfiguration

    The configuration for backup in Amazon S3. Required if s3_backup_mode is Enabled. Supports the same fields as the s3_configuration object.

    s3BackupMode string

    The Amazon S3 backup mode. Valid values are Disabled and Enabled. Default value is Disabled.

    bucket_arn str

    The ARN of the S3 bucket.

    role_arn str

    The ARN of the role that provides access to the source Kinesis stream.

    buffering_interval int

    Buffer incoming data for the specified period of time, in seconds, between 60 and 900, before delivering it to the destination. The default value is 300 seconds.

    buffering_size int

    Buffer incoming data to the specified size, in MBs, between 1 and 100, before delivering it to the destination. The default value is 5 MB. We recommend setting SizeInMBs to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, set SizeInMBs to 10 MB or higher.

    cloudwatch_logging_options FirehoseDeliveryStreamExtendedS3ConfigurationCloudwatchLoggingOptions

    The CloudWatch Logging Options for the delivery stream. More details are given below.

    compression_format str

    The compression format. If no value is specified, the default is UNCOMPRESSED. Other supported values are GZIP, ZIP, Snappy, and HADOOP_SNAPPY.

    data_format_conversion_configuration FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfiguration

    Nested argument for the serializer, deserializer, and schema for converting data from the JSON format to the Parquet or ORC format before writing it to Amazon S3. More details are given below.

    dynamic_partitioning_configuration FirehoseDeliveryStreamExtendedS3ConfigurationDynamicPartitioningConfiguration

    The configuration for dynamic partitioning. See Dynamic Partitioning Configuration below for more details. Required when using dynamic partitioning.

    error_output_prefix str

    Prefix added to failed records before writing them to S3. Not currently supported for the Redshift destination. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.

    kms_key_arn str

    Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will be used.

    prefix str

    The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket

    processing_configuration FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfiguration

    The data processing configuration. More details are given below.

    s3_backup_configuration FirehoseDeliveryStreamExtendedS3ConfigurationS3BackupConfiguration

    The configuration for backup in Amazon S3. Required if s3_backup_mode is Enabled. Supports the same fields as the s3_configuration object.

    s3_backup_mode str

    The Amazon S3 backup mode. Valid values are Disabled and Enabled. Default value is Disabled.

    bucketArn String

    The ARN of the S3 bucket.

    roleArn String

    The ARN of the role that provides access to the source Kinesis stream.

    bufferingInterval Number

    Buffer incoming data for the specified period of time, in seconds, between 60 and 900, before delivering it to the destination. The default value is 300 seconds.

    bufferingSize Number

    Buffer incoming data to the specified size, in MBs, between 1 and 100, before delivering it to the destination. The default value is 5 MB. We recommend setting SizeInMBs to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, set SizeInMBs to 10 MB or higher.

    cloudwatchLoggingOptions Property Map

    The CloudWatch Logging Options for the delivery stream. More details are given below.

    compressionFormat String

    The compression format. If no value is specified, the default is UNCOMPRESSED. Other supported values are GZIP, ZIP, Snappy, and HADOOP_SNAPPY.

    dataFormatConversionConfiguration Property Map

    Nested argument for the serializer, deserializer, and schema for converting data from the JSON format to the Parquet or ORC format before writing it to Amazon S3. More details are given below.

    dynamicPartitioningConfiguration Property Map

    The configuration for dynamic partitioning. See Dynamic Partitioning Configuration below for more details. Required when using dynamic partitioning.

    errorOutputPrefix String

    Prefix added to failed records before writing them to S3. Not currently supported for the Redshift destination. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.

    kmsKeyArn String

    Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will be used.

    prefix String

    The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket

    processingConfiguration Property Map

    The data processing configuration. More details are given below.

    s3BackupConfiguration Property Map

    The configuration for backup in Amazon S3. Required if s3_backup_mode is Enabled. Supports the same fields as the s3_configuration object.

    s3BackupMode String

    The Amazon S3 backup mode. Valid values are Disabled and Enabled. Default value is Disabled.
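
    To give the buffering arguments above a concrete feel, here is a minimal C# sketch that sizes the buffer for roughly 1 MB/sec of ingest, per the recommendation above. The bucket and role ARNs are illustrative placeholders; a real program would take them from Bucket and Role resources.

    using Pulumi;
    using Aws = Pulumi.Aws;

    return await Deployment.RunAsync(() =>
    {
        var stream = new Aws.Kinesis.FirehoseDeliveryStream("bufferedStream", new()
        {
            Destination = "extended_s3",
            ExtendedS3Configuration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationArgs
            {
                BucketArn = "arn:aws:s3:::my-bucket",                     // placeholder ARN
                RoleArn = "arn:aws:iam::123456789012:role/firehose-role", // placeholder ARN
                BufferingSize = 10,      // MB; covers ~10 seconds of a 1 MB/sec ingest rate
                BufferingInterval = 300, // seconds; the default
                CompressionFormat = "GZIP",
            },
        });
    });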

    FirehoseDeliveryStreamExtendedS3ConfigurationCloudwatchLoggingOptions, FirehoseDeliveryStreamExtendedS3ConfigurationCloudwatchLoggingOptionsArgs

    Enabled bool

    Enables or disables the logging. Defaults to false.

    LogGroupName string

    The CloudWatch log group name for logging. This value is required if enabled is true.

    LogStreamName string

    The CloudWatch log stream name for logging. This value is required if enabled is true.

    Enabled bool

    Enables or disables the logging. Defaults to false.

    LogGroupName string

    The CloudWatch log group name for logging. This value is required if enabled is true.

    LogStreamName string

    The CloudWatch log stream name for logging. This value is required if enabled is true.

    enabled Boolean

    Enables or disables the logging. Defaults to false.

    logGroupName String

    The CloudWatch log group name for logging. This value is required if enabled is true.

    logStreamName String

    The CloudWatch log stream name for logging. This value is required if enabled is true.

    enabled boolean

    Enables or disables the logging. Defaults to false.

    logGroupName string

    The CloudWatch log group name for logging. This value is required if enabled is true.

    logStreamName string

    The CloudWatch log stream name for logging. This value is required if enabled is true.

    enabled bool

    Enables or disables the logging. Defaults to false.

    log_group_name str

    The CloudWatch log group name for logging. This value is required if enabled is true.

    log_stream_name str

    The CloudWatch log stream name for logging. This value is required if enabled is true.

    enabled Boolean

    Enables or disables the logging. Defaults to false.

    logGroupName String

    The CloudWatch log group name for logging. This value is required if enabled is true.

    logStreamName String

    The CloudWatch log stream name for logging. This value is required if enabled is true.
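
    Putting the logging options together, the following is a minimal C# sketch in which the log group and log stream are managed alongside the delivery stream; the resource names are illustrative placeholders.

    var logGroup = new Aws.CloudWatch.LogGroup("firehoseLogGroup");

    var logStream = new Aws.CloudWatch.LogStream("firehoseLogStream", new()
    {
        LogGroupName = logGroup.Name,
    });

    var loggingOptions = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationCloudwatchLoggingOptionsArgs
    {
        Enabled = true,                 // logging defaults to false
        LogGroupName = logGroup.Name,   // required when enabled is true
        LogStreamName = logStream.Name, // required when enabled is true
    };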

    FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfiguration, FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationArgs

    InputFormatConfiguration FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfiguration

    Nested argument that specifies the deserializer that you want Kinesis Data Firehose to use to convert the format of your data from JSON. More details below.

    OutputFormatConfiguration FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfiguration

    Nested argument that specifies the serializer that you want Kinesis Data Firehose to use to convert the format of your data to the Parquet or ORC format. More details below.

    SchemaConfiguration FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationSchemaConfiguration

    Nested argument that specifies the AWS Glue Data Catalog table that contains the column information. More details below.

    Enabled bool

    Defaults to true. Set it to false if you want to disable format conversion while preserving the configuration details.

    InputFormatConfiguration FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfiguration

    Nested argument that specifies the deserializer that you want Kinesis Data Firehose to use to convert the format of your data from JSON. More details below.

    OutputFormatConfiguration FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfiguration

    Nested argument that specifies the serializer that you want Kinesis Data Firehose to use to convert the format of your data to the Parquet or ORC format. More details below.

    SchemaConfiguration FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationSchemaConfiguration

    Nested argument that specifies the AWS Glue Data Catalog table that contains the column information. More details below.

    Enabled bool

    Defaults to true. Set it to false if you want to disable format conversion while preserving the configuration details.

    inputFormatConfiguration FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfiguration

    Nested argument that specifies the deserializer that you want Kinesis Data Firehose to use to convert the format of your data from JSON. More details below.

    outputFormatConfiguration FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfiguration

    Nested argument that specifies the serializer that you want Kinesis Data Firehose to use to convert the format of your data to the Parquet or ORC format. More details below.

    schemaConfiguration FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationSchemaConfiguration

    Nested argument that specifies the AWS Glue Data Catalog table that contains the column information. More details below.

    enabled Boolean

    Defaults to true. Set it to false if you want to disable format conversion while preserving the configuration details.

    inputFormatConfiguration FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfiguration

    Nested argument that specifies the deserializer that you want Kinesis Data Firehose to use to convert the format of your data from JSON. More details below.

    outputFormatConfiguration FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfiguration

    Nested argument that specifies the serializer that you want Kinesis Data Firehose to use to convert the format of your data to the Parquet or ORC format. More details below.

    schemaConfiguration FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationSchemaConfiguration

    Nested argument that specifies the AWS Glue Data Catalog table that contains the column information. More details below.

    enabled boolean

    Defaults to true. Set it to false if you want to disable format conversion while preserving the configuration details.

    input_format_configuration FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfiguration

    Nested argument that specifies the deserializer that you want Kinesis Data Firehose to use to convert the format of your data from JSON. More details below.

    output_format_configuration FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfiguration

    Nested argument that specifies the serializer that you want Kinesis Data Firehose to use to convert the format of your data to the Parquet or ORC format. More details below.

    schema_configuration FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationSchemaConfiguration

    Nested argument that specifies the AWS Glue Data Catalog table that contains the column information. More details below.

    enabled bool

    Defaults to true. Set it to false if you want to disable format conversion while preserving the configuration details.

    inputFormatConfiguration Property Map

    Nested argument that specifies the deserializer that you want Kinesis Data Firehose to use to convert the format of your data from JSON. More details below.

    outputFormatConfiguration Property Map

    Nested argument that specifies the serializer that you want Kinesis Data Firehose to use to convert the format of your data to the Parquet or ORC format. More details below.

    schemaConfiguration Property Map

    Nested argument that specifies the AWS Glue Data Catalog table that contains the column information. More details below.

    enabled Boolean

    Defaults to true. Set it to false if you want to disable format conversion while preserving the configuration details.
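
    As a sketch of how the three nested arguments fit together, the C# fragment below converts incoming JSON to Parquet, using the OpenX deserializer and an AWS Glue table for the schema. The Glue database, table, and role values are placeholders, and the SchemaConfiguration field names are assumed from the schema documented further below.

    var conversion = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationArgs
    {
        InputFormatConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationArgs
        {
            Deserializer = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializerArgs
            {
                // Choose exactly one deserializer: the Hive JSON SerDe or the OpenX JSON SerDe.
                OpenXJsonSerDe = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializerOpenXJsonSerDeArgs(),
            },
        },
        OutputFormatConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationArgs
        {
            Serializer = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializerArgs
            {
                // Choose exactly one serializer: the ORC SerDe or the Parquet SerDe.
                ParquetSerDe = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializerParquetSerDeArgs(),
            },
        },
        SchemaConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationSchemaConfigurationArgs
        {
            DatabaseName = "example_db",                                 // placeholder Glue database
            TableName = "example_table",                                 // placeholder Glue table
            RoleArn = "arn:aws:iam::123456789012:role/glue-read-access", // placeholder ARN
        },
    };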

    FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfiguration, FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationArgs

    Deserializer FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializer

    Nested argument that specifies which deserializer to use. You can choose either the Apache Hive JSON SerDe or the OpenX JSON SerDe. More details below.

    Deserializer FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializer

    Nested argument that specifies which deserializer to use. You can choose either the Apache Hive JSON SerDe or the OpenX JSON SerDe. More details below.

    deserializer FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializer

    Nested argument that specifies which deserializer to use. You can choose either the Apache Hive JSON SerDe or the OpenX JSON SerDe. More details below.

    deserializer FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializer

    Nested argument that specifies which deserializer to use. You can choose either the Apache Hive JSON SerDe or the OpenX JSON SerDe. More details below.

    deserializer FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializer

    Nested argument that specifies which deserializer to use. You can choose either the Apache Hive JSON SerDe or the OpenX JSON SerDe. More details below.

    deserializer Property Map

    Nested argument that specifies which deserializer to use. You can choose either the Apache Hive JSON SerDe or the OpenX JSON SerDe. More details below.

    FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializer, FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializerArgs

    hiveJsonSerDe Property Map

    Nested argument that specifies the native Hive / HCatalog JsonSerDe. More details below.

    openXJsonSerDe Property Map

    Nested argument that specifies the OpenX SerDe. More details below.

    FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializerHiveJsonSerDe, FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializerHiveJsonSerDeArgs

    TimestampFormats List<string>

    A list of patterns that specify how you want Kinesis Data Firehose to parse the date and time stamps that may be present in your input data JSON. To specify these format strings, follow the pattern syntax of Joda-Time's DateTimeFormat. For more information, see Class DateTimeFormat. You can also use the special value millis to parse time stamps in epoch milliseconds. If you don't specify a format, Kinesis Data Firehose uses java.sql.Timestamp::valueOf by default.

    TimestampFormats []string

    A list of patterns that specify how you want Kinesis Data Firehose to parse the date and time stamps that may be present in your input data JSON. To specify these format strings, follow the pattern syntax of Joda-Time's DateTimeFormat. For more information, see Class DateTimeFormat. You can also use the special value millis to parse time stamps in epoch milliseconds. If you don't specify a format, Kinesis Data Firehose uses java.sql.Timestamp::valueOf by default.

    timestampFormats List<String>

    A list of patterns that specify how you want Kinesis Data Firehose to parse the date and time stamps that may be present in your input data JSON. To specify these format strings, follow the pattern syntax of Joda-Time's DateTimeFormat. For more information, see Class DateTimeFormat. You can also use the special value millis to parse time stamps in epoch milliseconds. If you don't specify a format, Kinesis Data Firehose uses java.sql.Timestamp::valueOf by default.

    timestampFormats string[]

    A list of patterns that specify how you want Kinesis Data Firehose to parse the date and time stamps that may be present in your input data JSON. To specify these format strings, follow the pattern syntax of Joda-Time's DateTimeFormat. For more information, see Class DateTimeFormat. You can also use the special value millis to parse time stamps in epoch milliseconds. If you don't specify a format, Kinesis Data Firehose uses java.sql.Timestamp::valueOf by default.

    timestamp_formats Sequence[str]

    A list of patterns that specify how you want Kinesis Data Firehose to parse the date and time stamps that may be present in your input data JSON. To specify these format strings, follow the pattern syntax of Joda-Time's DateTimeFormat. For more information, see Class DateTimeFormat. You can also use the special value millis to parse time stamps in epoch milliseconds. If you don't specify a format, Kinesis Data Firehose uses java.sql.Timestamp::valueOf by default.

    timestampFormats List<String>

    A list of patterns that specify how you want Kinesis Data Firehose to parse the date and time stamps that may be present in your input data JSON. To specify these format strings, follow the pattern syntax of Joda-Time's DateTimeFormat. For more information, see Class DateTimeFormat. You can also use the special value millis to parse time stamps in epoch milliseconds. If you don't specify a format, Kinesis Data Firehose uses java.sql.Timestamp::valueOf by default.
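
    For example, a minimal C# sketch of the Hive JSON SerDe, assuming the input records carry both an ISO-8601-style timestamp and epoch milliseconds:

    var hiveSerDe = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializerHiveJsonSerDeArgs
    {
        TimestampFormats = new[]
        {
            "yyyy-MM-dd'T'HH:mm:ss.SSS'Z'", // a Joda-Time DateTimeFormat pattern
            "millis",                       // special value for epoch milliseconds
        },
    };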

    FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializerOpenXJsonSerDe, FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializerOpenXJsonSerDeArgs

    CaseInsensitive bool

    When set to true, which is the default, Kinesis Data Firehose converts JSON keys to lowercase before deserializing them.

    ColumnToJsonKeyMappings Dictionary<string, string>

    A map of column names to JSON keys that aren't identical to the column names. This is useful when the JSON contains keys that are Hive keywords. For example, timestamp is a Hive keyword. If you have a JSON key named timestamp, set this parameter to { ts = "timestamp" } to map this key to a column named ts.

    ConvertDotsInJsonKeysToUnderscores bool

    When set to true, specifies that the names of the keys include dots and that you want Kinesis Data Firehose to replace them with underscores. This is useful because Apache Hive does not allow dots in column names. For example, if the JSON contains a key whose name is "a.b", you can define the column name to be "a_b" when using this option. Defaults to false.

    CaseInsensitive bool

    When set to true, which is the default, Kinesis Data Firehose converts JSON keys to lowercase before deserializing them.

    ColumnToJsonKeyMappings map[string]string

    A map of column names to JSON keys that aren't identical to the column names. This is useful when the JSON contains keys that are Hive keywords. For example, timestamp is a Hive keyword. If you have a JSON key named timestamp, set this parameter to { ts = "timestamp" } to map this key to a column named ts.

    ConvertDotsInJsonKeysToUnderscores bool

    When set to true, specifies that the names of the keys include dots and that you want Kinesis Data Firehose to replace them with underscores. This is useful because Apache Hive does not allow dots in column names. For example, if the JSON contains a key whose name is "a.b", you can define the column name to be "a_b" when using this option. Defaults to false.

    caseInsensitive Boolean

    When set to true, which is the default, Kinesis Data Firehose converts JSON keys to lowercase before deserializing them.

    columnToJsonKeyMappings Map<String,String>

    A map of column names to JSON keys that aren't identical to the column names. This is useful when the JSON contains keys that are Hive keywords. For example, timestamp is a Hive keyword. If you have a JSON key named timestamp, set this parameter to { ts = "timestamp" } to map this key to a column named ts.

    convertDotsInJsonKeysToUnderscores Boolean

    When set to true, specifies that the names of the keys include dots and that you want Kinesis Data Firehose to replace them with underscores. This is useful because Apache Hive does not allow dots in column names. For example, if the JSON contains a key whose name is "a.b", you can define the column name to be "a_b" when using this option. Defaults to false.

    caseInsensitive boolean

    When set to true, which is the default, Kinesis Data Firehose converts JSON keys to lowercase before deserializing them.

    columnToJsonKeyMappings {[key: string]: string}

    A map of column names to JSON keys that aren't identical to the column names. This is useful when the JSON contains keys that are Hive keywords. For example, timestamp is a Hive keyword. If you have a JSON key named timestamp, set this parameter to { ts = "timestamp" } to map this key to a column named ts.

    convertDotsInJsonKeysToUnderscores boolean

    When set to true, specifies that the names of the keys include dots and that you want Kinesis Data Firehose to replace them with underscores. This is useful because Apache Hive does not allow dots in column names. For example, if the JSON contains a key whose name is "a.b", you can define the column name to be "a_b" when using this option. Defaults to false.

    case_insensitive bool

    When set to true, which is the default, Kinesis Data Firehose converts JSON keys to lowercase before deserializing them.

    column_to_json_key_mappings Mapping[str, str]

    A map of column names to JSON keys that aren't identical to the column names. This is useful when the JSON contains keys that are Hive keywords. For example, timestamp is a Hive keyword. If you have a JSON key named timestamp, set this parameter to { ts = "timestamp" } to map this key to a column named ts.

    convert_dots_in_json_keys_to_underscores bool

    When set to true, specifies that the names of the keys include dots and that you want Kinesis Data Firehose to replace them with underscores. This is useful because Apache Hive does not allow dots in column names. For example, if the JSON contains a key whose name is "a.b", you can define the column name to be "a_b" when using this option. Defaults to false.

    caseInsensitive Boolean

    When set to true, which is the default, Kinesis Data Firehose converts JSON keys to lowercase before deserializing them.

    columnToJsonKeyMappings Map<String>

    A map of column names to JSON keys that aren't identical to the column names. This is useful when the JSON contains keys that are Hive keywords. For example, timestamp is a Hive keyword. If you have a JSON key named timestamp, set this parameter to { ts = "timestamp" } to map this key to a column named ts.

    convertDotsInJsonKeysToUnderscores Boolean

    When set to true, specifies that the names of the keys include dots and that you want Kinesis Data Firehose to replace them with underscores. This is useful because Apache Hive does not allow dots in column names. For example, if the JSON contains a key whose name is "a.b", you can define the column name to be "a_b" when using this option. Defaults to false.
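
    Mirroring the examples above, here is a minimal C# sketch of the OpenX JSON SerDe that maps the Hive keyword timestamp to a column named ts and replaces dots in key names:

    var openXSerDe = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializerOpenXJsonSerDeArgs
    {
        CaseInsensitive = true, // the default: JSON keys are lowercased first
        ColumnToJsonKeyMappings =
        {
            { "ts", "timestamp" }, // column "ts" reads the JSON key "timestamp"
        },
        ConvertDotsInJsonKeysToUnderscores = true, // key "a.b" maps to column "a_b"
    };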

    FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfiguration, FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationArgs

    Serializer FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializer

    Nested argument that specifies which serializer to use. You can choose either the ORC SerDe or the Parquet SerDe. More details below.

    Serializer FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializer

    Nested argument that specifies which serializer to use. You can choose either the ORC SerDe or the Parquet SerDe. More details below.

    serializer FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializer

    Nested argument that specifies which serializer to use. You can choose either the ORC SerDe or the Parquet SerDe. More details below.

    serializer FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializer

    Nested argument that specifies which serializer to use. You can choose either the ORC SerDe or the Parquet SerDe. More details below.

    serializer FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializer

    Nested argument that specifies which serializer to use. You can choose either the ORC SerDe or the Parquet SerDe. More details below.

    serializer Property Map

    Nested argument that specifies which serializer to use. You can choose either the ORC SerDe or the Parquet SerDe. More details below.

    FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializer, FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializerArgs

    OrcSerDe FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializerOrcSerDe

    Nested argument that specifies converting data to the ORC format before storing it in Amazon S3. For more information, see Apache ORC. More details below.

    ParquetSerDe FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializerParquetSerDe

    Nested argument that specifies converting data to the Parquet format before storing it in Amazon S3. For more information, see Apache Parquet. More details below.

    OrcSerDe FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializerOrcSerDe

    Nested argument that specifies converting data to the ORC format before storing it in Amazon S3. For more information, see Apache ORC. More details below.

    ParquetSerDe FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializerParquetSerDe

    Nested argument that specifies converting data to the Parquet format before storing it in Amazon S3. For more information, see Apache Parquet. More details below.

    orcSerDe FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializerOrcSerDe

    Nested argument that specifies converting data to the ORC format before storing it in Amazon S3. For more information, see Apache ORC. More details below.

    parquetSerDe FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializerParquetSerDe

    Nested argument that specifies converting data to the Parquet format before storing it in Amazon S3. For more information, see Apache Parquet. More details below.

    orcSerDe FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializerOrcSerDe

    Nested argument that specifies converting data to the ORC format before storing it in Amazon S3. For more information, see Apache ORC. More details below.

    parquetSerDe FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializerParquetSerDe

    Nested argument that specifies converting data to the Parquet format before storing it in Amazon S3. For more information, see Apache Parquet. More details below.

    orc_ser_de FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializerOrcSerDe

    Nested argument that specifies converting data to the ORC format before storing it in Amazon S3. For more information, see Apache ORC. More details below.

    parquet_ser_de FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializerParquetSerDe

    Nested argument that specifies converting data to the Parquet format before storing it in Amazon S3. For more information, see Apache Parquet. More details below.

    orcSerDe Property Map

    Nested argument that specifies converting data to the ORC format before storing it in Amazon S3. For more information, see Apache ORC. More details below.

    parquetSerDe Property Map

    Nested argument that specifies converting data to the Parquet format before storing it in Amazon S3. For more information, see Apache Parquet. More details below.

    FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializerOrcSerDe, FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializerOrcSerDeArgs

    BlockSizeBytes int

    The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.

    BloomFilterColumns List<string>

    A list of column names for which you want Kinesis Data Firehose to create Bloom filters.

    BloomFilterFalsePositiveProbability double

    The Bloom filter false positive probability (FPP). The lower the FPP, the bigger the Bloom filter. The default value is 0.05, the minimum is 0, and the maximum is 1.

    Compression string

    The compression code to use over data blocks. The default is SNAPPY.

    DictionaryKeyThreshold double

    A float that represents the fraction of the total number of non-null rows. To turn off dictionary encoding, set this fraction to a number that is less than the number of distinct keys in a dictionary. To always use dictionary encoding, set this threshold to 1.

    EnablePadding bool

    Set this to true to indicate that you want stripes to be padded to the HDFS block boundaries. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is false.

    FormatVersion string

    The version of the file to write. The possible values are V0_11 and V0_12. The default is V0_12.

    PaddingTolerance double

    A float between 0 and 1 that defines the tolerance for block padding as a decimal fraction of stripe size. The default value is 0.05, which means 5 percent of stripe size. For the default values of 64 MiB ORC stripes and 256 MiB HDFS blocks, the default block padding tolerance of 5 percent reserves a maximum of 3.2 MiB for padding within the 256 MiB block. In such a case, if the available size within the block is more than 3.2 MiB, a new, smaller stripe is inserted to fit within that space. This ensures that no stripe crosses block boundaries and causes remote reads within a node-local task. Kinesis Data Firehose ignores this parameter when enable_padding is false.

    RowIndexStride int

    The number of rows between index entries. The default is 10000 and the minimum is 1000.

    StripeSizeBytes int

    The number of bytes in each stripe. The default is 64 MiB and the minimum is 8 MiB.

    BlockSizeBytes int

    The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.

    BloomFilterColumns []string

    A list of column names for which you want Kinesis Data Firehose to create Bloom filters.

    BloomFilterFalsePositiveProbability float64

    The Bloom filter false positive probability (FPP). The lower the FPP, the bigger the Bloom filter. The default value is 0.05, the minimum is 0, and the maximum is 1.

    Compression string

    The compression code to use over data blocks. The default is SNAPPY.

    DictionaryKeyThreshold float64

    A float that represents the fraction of the total number of non-null rows. To turn off dictionary encoding, set this fraction to a number that is less than the number of distinct keys in a dictionary. To always use dictionary encoding, set this threshold to 1.

    EnablePadding bool

    Set this to true to indicate that you want stripes to be padded to the HDFS block boundaries. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is false.

    FormatVersion string

    The version of the file to write. The possible values are V0_11 and V0_12. The default is V0_12.

    PaddingTolerance float64

    A float between 0 and 1 that defines the tolerance for block padding as a decimal fraction of stripe size. The default value is 0.05, which means 5 percent of stripe size. For the default values of 64 MiB ORC stripes and 256 MiB HDFS blocks, the default block padding tolerance of 5 percent reserves a maximum of 3.2 MiB for padding within the 256 MiB block. In such a case, if the available size within the block is more than 3.2 MiB, a new, smaller stripe is inserted to fit within that space. This ensures that no stripe crosses block boundaries and causes remote reads within a node-local task. Kinesis Data Firehose ignores this parameter when enable_padding is false.

    RowIndexStride int

    The number of rows between index entries. The default is 10000 and the minimum is 1000.

    StripeSizeBytes int

    The number of bytes in each stripe. The default is 64 MiB and the minimum is 8 MiB.

    blockSizeBytes Integer

    The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.

    bloomFilterColumns List<String>

    A list of column names for which you want Kinesis Data Firehose to create Bloom filters.

    bloomFilterFalsePositiveProbability Double

    The Bloom filter false positive probability (FPP). The lower the FPP, the bigger the Bloom filter. The default value is 0.05, the minimum is 0, and the maximum is 1.

    compression String

    The compression code to use over data blocks. The default is SNAPPY.

    dictionaryKeyThreshold Double

    A float that represents the fraction of the total number of non-null rows. To turn off dictionary encoding, set this fraction to a number that is less than the number of distinct keys in a dictionary. To always use dictionary encoding, set this threshold to 1.

    enablePadding Boolean

    Set this to true to indicate that you want stripes to be padded to the HDFS block boundaries. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is false.

    formatVersion String

    The version of the file to write. The possible values are V0_11 and V0_12. The default is V0_12.

    paddingTolerance Double

    A float between 0 and 1 that defines the tolerance for block padding as a decimal fraction of stripe size. The default value is 0.05, which means 5 percent of stripe size. For the default values of 64 MiB ORC stripes and 256 MiB HDFS blocks, the default block padding tolerance of 5 percent reserves a maximum of 3.2 MiB for padding within the 256 MiB block. In such a case, if the available size within the block is more than 3.2 MiB, a new, smaller stripe is inserted to fit within that space. This ensures that no stripe crosses block boundaries and causes remote reads within a node-local task. Kinesis Data Firehose ignores this parameter when enable_padding is false.

    rowIndexStride Integer

    The number of rows between index entries. The default is 10000 and the minimum is 1000.

    stripeSizeBytes Integer

    The number of bytes in each stripe. The default is 64 MiB and the minimum is 8 MiB.

    blockSizeBytes number

    The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.

    bloomFilterColumns string[]

    A list of column names for which you want Kinesis Data Firehose to create Bloom filters.

    bloomFilterFalsePositiveProbability number

    The Bloom filter false positive probability (FPP). The lower the FPP, the bigger the Bloom filter. The default value is 0.05, the minimum is 0, and the maximum is 1.

    compression string

    The compression code to use over data blocks. The default is SNAPPY.

    dictionaryKeyThreshold number

    A float that represents the fraction of the total number of non-null rows. To turn off dictionary encoding, set this fraction to a number that is less than the number of distinct keys in a dictionary. To always use dictionary encoding, set this threshold to 1.

    enablePadding boolean

    Set this to true to indicate that you want stripes to be padded to the HDFS block boundaries. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is false.

    formatVersion string

    The version of the file to write. The possible values are V0_11 and V0_12. The default is V0_12.

    paddingTolerance number

    A float between 0 and 1 that defines the tolerance for block padding as a decimal fraction of stripe size. The default value is 0.05, which means 5 percent of stripe size. For the default values of 64 MiB ORC stripes and 256 MiB HDFS blocks, the default block padding tolerance of 5 percent reserves a maximum of 3.2 MiB for padding within the 256 MiB block. In such a case, if the available size within the block is more than 3.2 MiB, a new, smaller stripe is inserted to fit within that space. This ensures that no stripe crosses block boundaries and causes remote reads within a node-local task. Kinesis Data Firehose ignores this parameter when enable_padding is false.

    rowIndexStride number

    The number of rows between index entries. The default is 10000 and the minimum is 1000.

    stripeSizeBytes number

    The number of bytes in each stripe. The default is 64 MiB and the minimum is 8 MiB.

    block_size_bytes int

    The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.

    bloom_filter_columns Sequence[str]

    A list of column names for which you want Kinesis Data Firehose to create Bloom filters.

    bloom_filter_false_positive_probability float

    The Bloom filter false positive probability (FPP). The lower the FPP, the bigger the Bloom filter. The default value is 0.05, the minimum is 0, and the maximum is 1.

    compression str

    The compression code to use over data blocks. The default is SNAPPY.

    dictionary_key_threshold float

    A float that represents the fraction of the total number of non-null rows. To turn off dictionary encoding, set this fraction to a number that is less than the number of distinct keys in a dictionary. To always use dictionary encoding, set this threshold to 1.

    enable_padding bool

    Set this to true to indicate that you want stripes to be padded to the HDFS block boundaries. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is false.

    format_version str

    The version of the file to write. The possible values are V0_11 and V0_12. The default is V0_12.

    padding_tolerance float

    A float between 0 and 1 that defines the tolerance for block padding as a decimal fraction of stripe size. The default value is 0.05, which means 5 percent of stripe size. For the default values of 64 MiB ORC stripes and 256 MiB HDFS blocks, the default block padding tolerance of 5 percent reserves a maximum of 3.2 MiB for padding within the 256 MiB block. In such a case, if the available size within the block is more than 3.2 MiB, a new, smaller stripe is inserted to fit within that space. This ensures that no stripe crosses block boundaries and causes remote reads within a node-local task. Kinesis Data Firehose ignores this parameter when enable_padding is false.

    row_index_stride int

    The number of rows between index entries. The default is 10000 and the minimum is 1000.

    stripe_size_bytes int

    The number of bytes in each stripe. The default is 64 MiB and the minimum is 8 MiB.

    blockSizeBytes Number

    The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.

    bloomFilterColumns List<String>

    A list of column names for which you want Kinesis Data Firehose to create Bloom filters.

    bloomFilterFalsePositiveProbability Number

    The Bloom filter false positive probability (FPP). The lower the FPP, the bigger the Bloom filter. The default value is 0.05, the minimum is 0, and the maximum is 1.

    compression String

    The compression code to use over data blocks. The default is SNAPPY.

    dictionaryKeyThreshold Number

    A float that represents the fraction of the total number of non-null rows. To turn off dictionary encoding, set this fraction to a number that is less than the number of distinct keys in a dictionary. To always use dictionary encoding, set this threshold to 1.

    enablePadding Boolean

    Set this to true to indicate that you want stripes to be padded to the HDFS block boundaries. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is false.

    formatVersion String

    The version of the file to write. The possible values are V0_11 and V0_12. The default is V0_12.

    paddingTolerance Number

    A float between 0 and 1 that defines the tolerance for block padding as a decimal fraction of stripe size. The default value is 0.05, which means 5 percent of stripe size. For the default values of 64 MiB ORC stripes and 256 MiB HDFS blocks, the default block padding tolerance of 5 percent reserves a maximum of 3.2 MiB for padding within the 256 MiB block. In such a case, if the available size within the block is more than 3.2 MiB, a new, smaller stripe is inserted to fit within that space. This ensures that no stripe crosses block boundaries and causes remote reads within a node-local task. Kinesis Data Firehose ignores this parameter when enable_padding is false.

    rowIndexStride Number

    The number of rows between index entries. The default is 10000 and the minimum is 1000.

    stripeSizeBytes Number

    The number of bytes in each stripe. The default is 64 MiB and the minimum is 8 MiB.
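
    Tying the padding arguments together, a minimal C# sketch of an ORC serializer that enables stripe padding and spells out the related sizes; the block size, stripe size, and tolerance shown are the documented defaults.

    var orcSerDe = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializerOrcSerDeArgs
    {
        EnablePadding = true,        // pad stripes to HDFS block boundaries (defaults to false)
        PaddingTolerance = 0.05,     // 5% of stripe size; ignored when EnablePadding is false
        BlockSizeBytes = 268435456,  // 256 MiB HDFS block size (the default)
        StripeSizeBytes = 67108864,  // 64 MiB stripe size (the default)
    };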

    FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializerParquetSerDe, FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializerParquetSerDeArgs

    BlockSizeBytes int

    The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.

    Compression string

    The compression code to use over data blocks. The possible values are UNCOMPRESSED, SNAPPY, and GZIP, with the default being SNAPPY. Use SNAPPY for higher decompression speed. Use GZIP if the compression ratio is more important than speed.

    EnableDictionaryCompression bool

    Indicates whether to enable dictionary compression.

    MaxPaddingBytes int

    The maximum amount of padding to apply. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 0.

    PageSizeBytes int

    The Parquet page size. Column chunks are divided into pages. A page is conceptually an indivisible unit (in terms of compression and encoding). The minimum value is 64 KiB and the default is 1 MiB.

    WriterVersion string

    Indicates the version of row format to output. The possible values are V1 and V2. The default is V1.

    BlockSizeBytes int

    The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.

    Compression string

    The compression code to use over data blocks. The possible values are UNCOMPRESSED, SNAPPY, and GZIP, with the default being SNAPPY. Use SNAPPY for higher decompression speed. Use GZIP if the compression ratio is more important than speed.

    EnableDictionaryCompression bool

    Indicates whether to enable dictionary compression.

    MaxPaddingBytes int

    The maximum amount of padding to apply. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 0.