AWS Classic v5.41.0, May 15, 2023

aws.kinesis.FirehoseDeliveryStream


Provides a Kinesis Firehose Delivery Stream resource. Amazon Kinesis Firehose is a fully managed, elastic service for delivering real-time data streams to destinations such as Amazon S3 and Amazon Redshift.

For more details, see the Amazon Kinesis Firehose Documentation.

Example Usage

Extended S3 Destination

using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aws = Pulumi.Aws;

return await Deployment.RunAsync(() => 
{
    var bucket = new Aws.S3.BucketV2("bucket");

    var firehoseAssumeRole = Aws.Iam.GetPolicyDocument.Invoke(new()
    {
        Statements = new[]
        {
            new Aws.Iam.Inputs.GetPolicyDocumentStatementInputArgs
            {
                Effect = "Allow",
                Principals = new[]
                {
                    new Aws.Iam.Inputs.GetPolicyDocumentStatementPrincipalInputArgs
                    {
                        Type = "Service",
                        Identifiers = new[]
                        {
                            "firehose.amazonaws.com",
                        },
                    },
                },
                Actions = new[]
                {
                    "sts:AssumeRole",
                },
            },
        },
    });

    var firehoseRole = new Aws.Iam.Role("firehoseRole", new()
    {
        AssumeRolePolicy = firehoseAssumeRole.Apply(getPolicyDocumentResult => getPolicyDocumentResult.Json),
    });

    var lambdaAssumeRole = Aws.Iam.GetPolicyDocument.Invoke(new()
    {
        Statements = new[]
        {
            new Aws.Iam.Inputs.GetPolicyDocumentStatementInputArgs
            {
                Effect = "Allow",
                Principals = new[]
                {
                    new Aws.Iam.Inputs.GetPolicyDocumentStatementPrincipalInputArgs
                    {
                        Type = "Service",
                        Identifiers = new[]
                        {
                            "lambda.amazonaws.com",
                        },
                    },
                },
                Actions = new[]
                {
                    "sts:AssumeRole",
                },
            },
        },
    });

    var lambdaIam = new Aws.Iam.Role("lambdaIam", new()
    {
        AssumeRolePolicy = lambdaAssumeRole.Apply(getPolicyDocumentResult => getPolicyDocumentResult.Json),
    });

    var lambdaProcessor = new Aws.Lambda.Function("lambdaProcessor", new()
    {
        Code = new FileArchive("lambda.zip"),
        Role = lambdaIam.Arn,
        Handler = "exports.handler",
        Runtime = "nodejs16.x",
    });

    var extendedS3Stream = new Aws.Kinesis.FirehoseDeliveryStream("extendedS3Stream", new()
    {
        Destination = "extended_s3",
        ExtendedS3Configuration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationArgs
        {
            RoleArn = firehoseRole.Arn,
            BucketArn = bucket.Arn,
            ProcessingConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationArgs
            {
                Enabled = true,
                Processors = new[]
                {
                    new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs
                    {
                        Type = "Lambda",
                        Parameters = new[]
                        {
                            new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs
                            {
                                ParameterName = "LambdaArn",
                                ParameterValue = lambdaProcessor.Arn.Apply(arn => $"{arn}:$LATEST"),
                            },
                        },
                    },
                },
            },
        },
    });

    var bucketAcl = new Aws.S3.BucketAclV2("bucketAcl", new()
    {
        Bucket = bucket.Id,
        Acl = "private",
    });

});
package main

import (
	"fmt"

	"github.com/pulumi/pulumi-aws/sdk/v5/go/aws/iam"
	"github.com/pulumi/pulumi-aws/sdk/v5/go/aws/kinesis"
	"github.com/pulumi/pulumi-aws/sdk/v5/go/aws/lambda"
	"github.com/pulumi/pulumi-aws/sdk/v5/go/aws/s3"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		bucket, err := s3.NewBucketV2(ctx, "bucket", nil)
		if err != nil {
			return err
		}
		firehoseAssumeRole, err := iam.GetPolicyDocument(ctx, &iam.GetPolicyDocumentArgs{
			Statements: []iam.GetPolicyDocumentStatement{
				{
					Effect: pulumi.StringRef("Allow"),
					Principals: []iam.GetPolicyDocumentStatementPrincipal{
						{
							Type: "Service",
							Identifiers: []string{
								"firehose.amazonaws.com",
							},
						},
					},
					Actions: []string{
						"sts:AssumeRole",
					},
				},
			},
		}, nil)
		if err != nil {
			return err
		}
		firehoseRole, err := iam.NewRole(ctx, "firehoseRole", &iam.RoleArgs{
			AssumeRolePolicy: pulumi.String(firehoseAssumeRole.Json),
		})
		if err != nil {
			return err
		}
		lambdaAssumeRole, err := iam.GetPolicyDocument(ctx, &iam.GetPolicyDocumentArgs{
			Statements: []iam.GetPolicyDocumentStatement{
				{
					Effect: pulumi.StringRef("Allow"),
					Principals: []iam.GetPolicyDocumentStatementPrincipal{
						{
							Type: "Service",
							Identifiers: []string{
								"lambda.amazonaws.com",
							},
						},
					},
					Actions: []string{
						"sts:AssumeRole",
					},
				},
			},
		}, nil)
		if err != nil {
			return err
		}
		lambdaIam, err := iam.NewRole(ctx, "lambdaIam", &iam.RoleArgs{
			AssumeRolePolicy: pulumi.String(lambdaAssumeRole.Json),
		})
		if err != nil {
			return err
		}
		lambdaProcessor, err := lambda.NewFunction(ctx, "lambdaProcessor", &lambda.FunctionArgs{
			Code:    pulumi.NewFileArchive("lambda.zip"),
			Role:    lambdaIam.Arn,
			Handler: pulumi.String("exports.handler"),
			Runtime: pulumi.String("nodejs16.x"),
		})
		if err != nil {
			return err
		}
		_, err = kinesis.NewFirehoseDeliveryStream(ctx, "extendedS3Stream", &kinesis.FirehoseDeliveryStreamArgs{
			Destination: pulumi.String("extended_s3"),
			ExtendedS3Configuration: &kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationArgs{
				RoleArn:   firehoseRole.Arn,
				BucketArn: bucket.Arn,
				ProcessingConfiguration: &kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationArgs{
					Enabled: pulumi.Bool(true),
					Processors: kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArray{
						&kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs{
							Type: pulumi.String("Lambda"),
							Parameters: kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArray{
								&kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs{
									ParameterName: pulumi.String("LambdaArn"),
									ParameterValue: lambdaProcessor.Arn.ApplyT(func(arn string) (string, error) {
										return fmt.Sprintf("%v:$LATEST", arn), nil
									}).(pulumi.StringOutput),
								},
							},
						},
					},
				},
			},
		})
		if err != nil {
			return err
		}
		_, err = s3.NewBucketAclV2(ctx, "bucketAcl", &s3.BucketAclV2Args{
			Bucket: bucket.ID(),
			Acl:    pulumi.String("private"),
		})
		if err != nil {
			return err
		}
		return nil
	})
}
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.s3.BucketV2;
import com.pulumi.aws.iam.IamFunctions;
import com.pulumi.aws.iam.inputs.GetPolicyDocumentArgs;
import com.pulumi.aws.iam.inputs.GetPolicyDocumentStatementArgs;
import com.pulumi.aws.iam.inputs.GetPolicyDocumentStatementPrincipalArgs;
import com.pulumi.aws.iam.Role;
import com.pulumi.aws.iam.RoleArgs;
import com.pulumi.aws.lambda.Function;
import com.pulumi.aws.lambda.FunctionArgs;
import com.pulumi.aws.kinesis.FirehoseDeliveryStream;
import com.pulumi.aws.kinesis.FirehoseDeliveryStreamArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamExtendedS3ConfigurationArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs;
import com.pulumi.aws.s3.BucketAclV2;
import com.pulumi.aws.s3.BucketAclV2Args;
import com.pulumi.asset.FileArchive;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var bucket = new BucketV2("bucket");

        final var firehoseAssumeRole = IamFunctions.getPolicyDocument(GetPolicyDocumentArgs.builder()
            .statements(GetPolicyDocumentStatementArgs.builder()
                .effect("Allow")
                .principals(GetPolicyDocumentStatementPrincipalArgs.builder()
                    .type("Service")
                    .identifiers("firehose.amazonaws.com")
                    .build())
                .actions("sts:AssumeRole")
                .build())
            .build());

        var firehoseRole = new Role("firehoseRole", RoleArgs.builder()        
            .assumeRolePolicy(firehoseAssumeRole.applyValue(getPolicyDocumentResult -> getPolicyDocumentResult.json()))
            .build());

        final var lambdaAssumeRole = IamFunctions.getPolicyDocument(GetPolicyDocumentArgs.builder()
            .statements(GetPolicyDocumentStatementArgs.builder()
                .effect("Allow")
                .principals(GetPolicyDocumentStatementPrincipalArgs.builder()
                    .type("Service")
                    .identifiers("lambda.amazonaws.com")
                    .build())
                .actions("sts:AssumeRole")
                .build())
            .build());

        var lambdaIam = new Role("lambdaIam", RoleArgs.builder()        
            .assumeRolePolicy(lambdaAssumeRole.applyValue(getPolicyDocumentResult -> getPolicyDocumentResult.json()))
            .build());

        var lambdaProcessor = new Function("lambdaProcessor", FunctionArgs.builder()        
            .code(new FileArchive("lambda.zip"))
            .role(lambdaIam.arn())
            .handler("exports.handler")
            .runtime("nodejs16.x")
            .build());

        var extendedS3Stream = new FirehoseDeliveryStream("extendedS3Stream", FirehoseDeliveryStreamArgs.builder()        
            .destination("extended_s3")
            .extendedS3Configuration(FirehoseDeliveryStreamExtendedS3ConfigurationArgs.builder()
                .roleArn(firehoseRole.arn())
                .bucketArn(bucket.arn())
                .processingConfiguration(FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationArgs.builder()
                    .enabled("true")
                    .processors(FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs.builder()
                        .type("Lambda")
                        .parameters(FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs.builder()
                            .parameterName("LambdaArn")
                            .parameterValue(lambdaProcessor.arn().applyValue(arn -> String.format("%s:$LATEST", arn)))
                            .build())
                        .build())
                    .build())
                .build())
            .build());

        var bucketAcl = new BucketAclV2("bucketAcl", BucketAclV2Args.builder()        
            .bucket(bucket.id())
            .acl("private")
            .build());

    }
}
import pulumi
import pulumi_aws as aws

bucket = aws.s3.BucketV2("bucket")
firehose_assume_role = aws.iam.get_policy_document(statements=[aws.iam.GetPolicyDocumentStatementArgs(
    effect="Allow",
    principals=[aws.iam.GetPolicyDocumentStatementPrincipalArgs(
        type="Service",
        identifiers=["firehose.amazonaws.com"],
    )],
    actions=["sts:AssumeRole"],
)])
firehose_role = aws.iam.Role("firehoseRole", assume_role_policy=firehose_assume_role.json)
lambda_assume_role = aws.iam.get_policy_document(statements=[aws.iam.GetPolicyDocumentStatementArgs(
    effect="Allow",
    principals=[aws.iam.GetPolicyDocumentStatementPrincipalArgs(
        type="Service",
        identifiers=["lambda.amazonaws.com"],
    )],
    actions=["sts:AssumeRole"],
)])
lambda_iam = aws.iam.Role("lambdaIam", assume_role_policy=lambda_assume_role.json)
lambda_processor = aws.lambda_.Function("lambdaProcessor",
    code=pulumi.FileArchive("lambda.zip"),
    role=lambda_iam.arn,
    handler="exports.handler",
    runtime="nodejs16.x")
extended_s3_stream = aws.kinesis.FirehoseDeliveryStream("extendedS3Stream",
    destination="extended_s3",
    extended_s3_configuration=aws.kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationArgs(
        role_arn=firehose_role.arn,
        bucket_arn=bucket.arn,
        processing_configuration=aws.kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationArgs(
            enabled=True,
            processors=[aws.kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs(
                type="Lambda",
                parameters=[aws.kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs(
                    parameter_name="LambdaArn",
                    parameter_value=lambda_processor.arn.apply(lambda arn: f"{arn}:$LATEST"),
                )],
            )],
        ),
    ))
bucket_acl = aws.s3.BucketAclV2("bucketAcl",
    bucket=bucket.id,
    acl="private")
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";

const bucket = new aws.s3.BucketV2("bucket", {});
const firehoseAssumeRole = aws.iam.getPolicyDocument({
    statements: [{
        effect: "Allow",
        principals: [{
            type: "Service",
            identifiers: ["firehose.amazonaws.com"],
        }],
        actions: ["sts:AssumeRole"],
    }],
});
const firehoseRole = new aws.iam.Role("firehoseRole", {assumeRolePolicy: firehoseAssumeRole.then(firehoseAssumeRole => firehoseAssumeRole.json)});
const lambdaAssumeRole = aws.iam.getPolicyDocument({
    statements: [{
        effect: "Allow",
        principals: [{
            type: "Service",
            identifiers: ["lambda.amazonaws.com"],
        }],
        actions: ["sts:AssumeRole"],
    }],
});
const lambdaIam = new aws.iam.Role("lambdaIam", {assumeRolePolicy: lambdaAssumeRole.then(lambdaAssumeRole => lambdaAssumeRole.json)});
const lambdaProcessor = new aws.lambda.Function("lambdaProcessor", {
    code: new pulumi.asset.FileArchive("lambda.zip"),
    role: lambdaIam.arn,
    handler: "exports.handler",
    runtime: "nodejs16.x",
});
const extendedS3Stream = new aws.kinesis.FirehoseDeliveryStream("extendedS3Stream", {
    destination: "extended_s3",
    extendedS3Configuration: {
        roleArn: firehoseRole.arn,
        bucketArn: bucket.arn,
        processingConfiguration: {
            enabled: true,
            processors: [{
                type: "Lambda",
                parameters: [{
                    parameterName: "LambdaArn",
                    parameterValue: pulumi.interpolate`${lambdaProcessor.arn}:$LATEST`,
                }],
            }],
        },
    },
});
const bucketAcl = new aws.s3.BucketAclV2("bucketAcl", {
    bucket: bucket.id,
    acl: "private",
});
resources:
  extendedS3Stream:
    type: aws:kinesis:FirehoseDeliveryStream
    properties:
      destination: extended_s3
      extendedS3Configuration:
        roleArn: ${firehoseRole.arn}
        bucketArn: ${bucket.arn}
        processingConfiguration:
          enabled: true
          processors:
            - type: Lambda
              parameters:
                - parameterName: LambdaArn
                  parameterValue: ${lambdaProcessor.arn}:$LATEST
  bucket:
    type: aws:s3:BucketV2
  bucketAcl:
    type: aws:s3:BucketAclV2
    properties:
      bucket: ${bucket.id}
      acl: private
  firehoseRole:
    type: aws:iam:Role
    properties:
      assumeRolePolicy: ${firehoseAssumeRole.json}
  lambdaIam:
    type: aws:iam:Role
    properties:
      assumeRolePolicy: ${lambdaAssumeRole.json}
  lambdaProcessor:
    type: aws:lambda:Function
    properties:
      code:
        fn::FileArchive: lambda.zip
      role: ${lambdaIam.arn}
      handler: exports.handler
      runtime: nodejs16.x
variables:
  firehoseAssumeRole:
    fn::invoke:
      Function: aws:iam:getPolicyDocument
      Arguments:
        statements:
          - effect: Allow
            principals:
              - type: Service
                identifiers:
                  - firehose.amazonaws.com
            actions:
              - sts:AssumeRole
  lambdaAssumeRole:
    fn::invoke:
      Function: aws:iam:getPolicyDocument
      Arguments:
        statements:
          - effect: Allow
            principals:
              - type: Service
                identifiers:
                  - lambda.amazonaws.com
            actions:
              - sts:AssumeRole
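
Each example above packages the transformation Lambda as lambda.zip with handler exports.handler, but the function itself is not shown. The sketch below (TypeScript, compiled to an exports.js entry point) is a minimal passthrough illustrating the event shape Firehose sends to a transformation Lambda and the response it expects; the transformation step itself is a placeholder.

// Minimal Firehose transformation handler: decode each record, leave it
// unchanged, and mark it "Ok" so Firehose delivers it as-is.
export const handler = async (event: {
    records: { recordId: string; data: string }[];
}) => ({
    records: event.records.map((r) => {
        // data arrives base64-encoded; decode, (optionally) transform, re-encode.
        const payload = Buffer.from(r.data, "base64").toString("utf8");
        return {
            recordId: r.recordId,
            result: "Ok",
            data: Buffer.from(payload).toString("base64"),
        };
    }),
});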

Extended S3 Destination with dynamic partitioning

using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aws = Pulumi.Aws;

return await Deployment.RunAsync(() => 
{
    var extendedS3Stream = new Aws.Kinesis.FirehoseDeliveryStream("extendedS3Stream", new()
    {
        Destination = "extended_s3",
        ExtendedS3Configuration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationArgs
        {
            RoleArn = aws_iam_role.Firehose_role.Arn,
            BucketArn = aws_s3_bucket.Bucket.Arn,
            BufferSize = 64,
            DynamicPartitioningConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationDynamicPartitioningConfigurationArgs
            {
                Enabled = true,
            },
            Prefix = "data/customer_id=!{partitionKeyFromQuery:customer_id}/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/",
            ErrorOutputPrefix = "errors/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/!{firehose:error-output-type}/",
            ProcessingConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationArgs
            {
                Enabled = true,
                Processors = new[]
                {
                    new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs
                    {
                        Type = "RecordDeAggregation",
                        Parameters = new[]
                        {
                            new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs
                            {
                                ParameterName = "SubRecordType",
                                ParameterValue = "JSON",
                            },
                        },
                    },
                    new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs
                    {
                        Type = "AppendDelimiterToRecord",
                    },
                    new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs
                    {
                        Type = "MetadataExtraction",
                        Parameters = new[]
                        {
                            new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs
                            {
                                ParameterName = "JsonParsingEngine",
                                ParameterValue = "JQ-1.6",
                            },
                            new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs
                            {
                                ParameterName = "MetadataExtractionQuery",
                                ParameterValue = "{customer_id:.customer_id}",
                            },
                        },
                    },
                },
            },
        },
    });

});
package main

import (
	"github.com/pulumi/pulumi-aws/sdk/v5/go/aws/kinesis"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := kinesis.NewFirehoseDeliveryStream(ctx, "extendedS3Stream", &kinesis.FirehoseDeliveryStreamArgs{
			Destination: pulumi.String("extended_s3"),
			ExtendedS3Configuration: &kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationArgs{
				RoleArn:    pulumi.Any(aws_iam_role.Firehose_role.Arn),
				BucketArn:  pulumi.Any(aws_s3_bucket.Bucket.Arn),
				BufferSize: pulumi.Int(64),
				DynamicPartitioningConfiguration: &kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationDynamicPartitioningConfigurationArgs{
					Enabled: pulumi.Bool(true),
				},
				Prefix:            pulumi.String("data/customer_id=!{partitionKeyFromQuery:customer_id}/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/"),
				ErrorOutputPrefix: pulumi.String("errors/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/!{firehose:error-output-type}/"),
				ProcessingConfiguration: &kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationArgs{
					Enabled: pulumi.Bool(true),
					Processors: kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArray{
						&kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs{
							Type: pulumi.String("RecordDeAggregation"),
							Parameters: kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArray{
								&kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs{
									ParameterName:  pulumi.String("SubRecordType"),
									ParameterValue: pulumi.String("JSON"),
								},
							},
						},
						&kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs{
							Type: pulumi.String("AppendDelimiterToRecord"),
						},
						&kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs{
							Type: pulumi.String("MetadataExtraction"),
							Parameters: kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArray{
								&kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs{
									ParameterName:  pulumi.String("JsonParsingEngine"),
									ParameterValue: pulumi.String("JQ-1.6"),
								},
								&kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs{
									ParameterName:  pulumi.String("MetadataExtractionQuery"),
									ParameterValue: pulumi.String("{customer_id:.customer_id}"),
								},
							},
						},
					},
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.kinesis.FirehoseDeliveryStream;
import com.pulumi.aws.kinesis.FirehoseDeliveryStreamArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamExtendedS3ConfigurationArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamExtendedS3ConfigurationDynamicPartitioningConfigurationArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var extendedS3Stream = new FirehoseDeliveryStream("extendedS3Stream", FirehoseDeliveryStreamArgs.builder()        
            .destination("extended_s3")
            .extendedS3Configuration(FirehoseDeliveryStreamExtendedS3ConfigurationArgs.builder()
                .roleArn(aws_iam_role.firehose_role().arn())
                .bucketArn(aws_s3_bucket.bucket().arn())
                .bufferSize(64)
                .dynamicPartitioningConfiguration(FirehoseDeliveryStreamExtendedS3ConfigurationDynamicPartitioningConfigurationArgs.builder()
                    .enabled("true")
                    .build())
                .prefix("data/customer_id=!{partitionKeyFromQuery:customer_id}/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/")
                .errorOutputPrefix("errors/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/!{firehose:error-output-type}/")
                .processingConfiguration(FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationArgs.builder()
                    .enabled("true")
                    .processors(                    
                        FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs.builder()
                            .type("RecordDeAggregation")
                            .parameters(FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs.builder()
                                .parameterName("SubRecordType")
                                .parameterValue("JSON")
                                .build())
                            .build(),
                        FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs.builder()
                            .type("AppendDelimiterToRecord")
                            .build(),
                        FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs.builder()
                            .type("MetadataExtraction")
                            .parameters(                            
                                FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs.builder()
                                    .parameterName("JsonParsingEngine")
                                    .parameterValue("JQ-1.6")
                                    .build(),
                                FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs.builder()
                                    .parameterName("MetadataExtractionQuery")
                                    .parameterValue("{customer_id:.customer_id}")
                                    .build())
                            .build())
                    .build())
                .build())
            .build());

    }
}
import pulumi
import pulumi_aws as aws

extended_s3_stream = aws.kinesis.FirehoseDeliveryStream("extendedS3Stream",
    destination="extended_s3",
    extended_s3_configuration=aws.kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationArgs(
        role_arn=aws_iam_role["firehose_role"]["arn"],
        bucket_arn=aws_s3_bucket["bucket"]["arn"],
        buffer_size=64,
        dynamic_partitioning_configuration=aws.kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationDynamicPartitioningConfigurationArgs(
            enabled=True,
        ),
        prefix="data/customer_id=!{partitionKeyFromQuery:customer_id}/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/",
        error_output_prefix="errors/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/!{firehose:error-output-type}/",
        processing_configuration=aws.kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationArgs(
            enabled=True,
            processors=[
                aws.kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs(
                    type="RecordDeAggregation",
                    parameters=[aws.kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs(
                        parameter_name="SubRecordType",
                        parameter_value="JSON",
                    )],
                ),
                aws.kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs(
                    type="AppendDelimiterToRecord",
                ),
                aws.kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs(
                    type="MetadataExtraction",
                    parameters=[
                        aws.kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs(
                            parameter_name="JsonParsingEngine",
                            parameter_value="JQ-1.6",
                        ),
                        aws.kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs(
                            parameter_name="MetadataExtractionQuery",
                            parameter_value="{customer_id:.customer_id}",
                        ),
                    ],
                ),
            ],
        ),
    ))
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";

const extendedS3Stream = new aws.kinesis.FirehoseDeliveryStream("extendedS3Stream", {
    destination: "extended_s3",
    extendedS3Configuration: {
        roleArn: aws_iam_role.firehose_role.arn,
        bucketArn: aws_s3_bucket.bucket.arn,
        bufferSize: 64,
        dynamicPartitioningConfiguration: {
            enabled: true,
        },
        prefix: "data/customer_id=!{partitionKeyFromQuery:customer_id}/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/",
        errorOutputPrefix: "errors/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/!{firehose:error-output-type}/",
        processingConfiguration: {
            enabled: true,
            processors: [
                {
                    type: "RecordDeAggregation",
                    parameters: [{
                        parameterName: "SubRecordType",
                        parameterValue: "JSON",
                    }],
                },
                {
                    type: "AppendDelimiterToRecord",
                },
                {
                    type: "MetadataExtraction",
                    parameters: [
                        {
                            parameterName: "JsonParsingEngine",
                            parameterValue: "JQ-1.6",
                        },
                        {
                            parameterName: "MetadataExtractionQuery",
                            parameterValue: "{customer_id:.customer_id}",
                        },
                    ],
                },
            ],
        },
    },
});
resources:
  extendedS3Stream:
    type: aws:kinesis:FirehoseDeliveryStream
    properties:
      destination: extended_s3
      extendedS3Configuration:
        roleArn: ${aws_iam_role.firehose_role.arn}
        bucketArn: ${aws_s3_bucket.bucket.arn}
        bufferSize: 64
        dynamicPartitioningConfiguration:
          enabled: true
        prefix: data/customer_id=!{partitionKeyFromQuery:customer_id}/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/
        errorOutputPrefix: errors/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/!{firehose:error-output-type}/
        processingConfiguration:
          enabled: true
          processors:
            - type: RecordDeAggregation
              parameters:
                - parameterName: SubRecordType
                  parameterValue: JSON
            - type: AppendDelimiterToRecord
            - type: MetadataExtraction
              parameters:
                - parameterName: JsonParsingEngine
                  parameterValue: JQ-1.6
                - parameterName: MetadataExtractionQuery
                  parameterValue: '{customer_id:.customer_id}'
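
To make the prefix concrete: the MetadataExtraction processor evaluates the jq expression {customer_id:.customer_id} against each de-aggregated JSON record, and the extracted value fills the partitionKeyFromQuery placeholder in the prefix. The following illustration (a hypothetical helper; Firehose performs this substitution server-side) shows the object key one record resolves to:

// Illustrative only: mirrors what the configured prefix resolves to for a
// record, with the partition key taken from the jq query result.
function resolvePrefix(record: { customer_id: string }, ts: Date): string {
    const pad = (n: number) => String(n).padStart(2, "0");
    return `data/customer_id=${record.customer_id}/year=${ts.getUTCFullYear()}` +
        `/month=${pad(ts.getUTCMonth() + 1)}/day=${pad(ts.getUTCDate())}` +
        `/hour=${pad(ts.getUTCHours())}/`;
}

// resolvePrefix({ customer_id: "abc123" }, new Date("2023-05-15T09:00:00Z"))
// => "data/customer_id=abc123/year=2023/month=05/day=15/hour=09/"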

S3 Destination (deprecated)
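
The s3 destination is deprecated in favor of extended_s3, which accepts the same options and additionally supports processing and dynamic partitioning configuration; the original form is shown here for completeness.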

using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aws = Pulumi.Aws;

return await Deployment.RunAsync(() => 
{
    var bucket = new Aws.S3.BucketV2("bucket");

    var bucketAcl = new Aws.S3.BucketAclV2("bucketAcl", new()
    {
        Bucket = bucket.Id,
        Acl = "private",
    });

    var assumeRole = Aws.Iam.GetPolicyDocument.Invoke(new()
    {
        Statements = new[]
        {
            new Aws.Iam.Inputs.GetPolicyDocumentStatementInputArgs
            {
                Effect = "Allow",
                Principals = new[]
                {
                    new Aws.Iam.Inputs.GetPolicyDocumentStatementPrincipalInputArgs
                    {
                        Type = "Service",
                        Identifiers = new[]
                        {
                            "firehose.amazonaws.com",
                        },
                    },
                },
                Actions = new[]
                {
                    "sts:AssumeRole",
                },
            },
        },
    });

    var firehoseRole = new Aws.Iam.Role("firehoseRole", new()
    {
        AssumeRolePolicy = assumeRole.Apply(getPolicyDocumentResult => getPolicyDocumentResult.Json),
    });

    var testStream = new Aws.Kinesis.FirehoseDeliveryStream("testStream", new()
    {
        Destination = "s3",
        S3Configuration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamS3ConfigurationArgs
        {
            RoleArn = firehoseRole.Arn,
            BucketArn = bucket.Arn,
        },
    });

});
package main

import (
	"github.com/pulumi/pulumi-aws/sdk/v5/go/aws/iam"
	"github.com/pulumi/pulumi-aws/sdk/v5/go/aws/kinesis"
	"github.com/pulumi/pulumi-aws/sdk/v5/go/aws/s3"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		bucket, err := s3.NewBucketV2(ctx, "bucket", nil)
		if err != nil {
			return err
		}
		_, err = s3.NewBucketAclV2(ctx, "bucketAcl", &s3.BucketAclV2Args{
			Bucket: bucket.ID(),
			Acl:    pulumi.String("private"),
		})
		if err != nil {
			return err
		}
		assumeRole, err := iam.GetPolicyDocument(ctx, &iam.GetPolicyDocumentArgs{
			Statements: []iam.GetPolicyDocumentStatement{
				{
					Effect: pulumi.StringRef("Allow"),
					Principals: []iam.GetPolicyDocumentStatementPrincipal{
						{
							Type: "Service",
							Identifiers: []string{
								"firehose.amazonaws.com",
							},
						},
					},
					Actions: []string{
						"sts:AssumeRole",
					},
				},
			},
		}, nil)
		if err != nil {
			return err
		}
		firehoseRole, err := iam.NewRole(ctx, "firehoseRole", &iam.RoleArgs{
			AssumeRolePolicy: pulumi.String(assumeRole.Json),
		})
		if err != nil {
			return err
		}
		_, err = kinesis.NewFirehoseDeliveryStream(ctx, "testStream", &kinesis.FirehoseDeliveryStreamArgs{
			Destination: pulumi.String("s3"),
			S3Configuration: &kinesis.FirehoseDeliveryStreamS3ConfigurationArgs{
				RoleArn:   firehoseRole.Arn,
				BucketArn: bucket.Arn,
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.s3.BucketV2;
import com.pulumi.aws.s3.BucketAclV2;
import com.pulumi.aws.s3.BucketAclV2Args;
import com.pulumi.aws.iam.IamFunctions;
import com.pulumi.aws.iam.inputs.GetPolicyDocumentArgs;
import com.pulumi.aws.iam.inputs.GetPolicyDocumentStatementArgs;
import com.pulumi.aws.iam.inputs.GetPolicyDocumentStatementPrincipalArgs;
import com.pulumi.aws.iam.Role;
import com.pulumi.aws.iam.RoleArgs;
import com.pulumi.aws.kinesis.FirehoseDeliveryStream;
import com.pulumi.aws.kinesis.FirehoseDeliveryStreamArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamS3ConfigurationArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var bucket = new BucketV2("bucket");

        var bucketAcl = new BucketAclV2("bucketAcl", BucketAclV2Args.builder()        
            .bucket(bucket.id())
            .acl("private")
            .build());

        final var assumeRole = IamFunctions.getPolicyDocument(GetPolicyDocumentArgs.builder()
            .statements(GetPolicyDocumentStatementArgs.builder()
                .effect("Allow")
                .principals(GetPolicyDocumentStatementPrincipalArgs.builder()
                    .type("Service")
                    .identifiers("firehose.amazonaws.com")
                    .build())
                .actions("sts:AssumeRole")
                .build())
            .build());

        var firehoseRole = new Role("firehoseRole", RoleArgs.builder()        
            .assumeRolePolicy(assumeRole.applyValue(getPolicyDocumentResult -> getPolicyDocumentResult.json()))
            .build());

        var testStream = new FirehoseDeliveryStream("testStream", FirehoseDeliveryStreamArgs.builder()        
            .destination("s3")
            .s3Configuration(FirehoseDeliveryStreamS3ConfigurationArgs.builder()
                .roleArn(firehoseRole.arn())
                .bucketArn(bucket.arn())
                .build())
            .build());

    }
}
import pulumi
import pulumi_aws as aws

bucket = aws.s3.BucketV2("bucket")
bucket_acl = aws.s3.BucketAclV2("bucketAcl",
    bucket=bucket.id,
    acl="private")
assume_role = aws.iam.get_policy_document(statements=[aws.iam.GetPolicyDocumentStatementArgs(
    effect="Allow",
    principals=[aws.iam.GetPolicyDocumentStatementPrincipalArgs(
        type="Service",
        identifiers=["firehose.amazonaws.com"],
    )],
    actions=["sts:AssumeRole"],
)])
firehose_role = aws.iam.Role("firehoseRole", assume_role_policy=assume_role.json)
test_stream = aws.kinesis.FirehoseDeliveryStream("testStream",
    destination="s3",
    s3_configuration=aws.kinesis.FirehoseDeliveryStreamS3ConfigurationArgs(
        role_arn=firehose_role.arn,
        bucket_arn=bucket.arn,
    ))
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";

const bucket = new aws.s3.BucketV2("bucket", {});
const bucketAcl = new aws.s3.BucketAclV2("bucketAcl", {
    bucket: bucket.id,
    acl: "private",
});
const assumeRole = aws.iam.getPolicyDocument({
    statements: [{
        effect: "Allow",
        principals: [{
            type: "Service",
            identifiers: ["firehose.amazonaws.com"],
        }],
        actions: ["sts:AssumeRole"],
    }],
});
const firehoseRole = new aws.iam.Role("firehoseRole", {assumeRolePolicy: assumeRole.then(assumeRole => assumeRole.json)});
const testStream = new aws.kinesis.FirehoseDeliveryStream("testStream", {
    destination: "s3",
    s3Configuration: {
        roleArn: firehoseRole.arn,
        bucketArn: bucket.arn,
    },
});
resources:
  bucket:
    type: aws:s3:BucketV2
  bucketAcl:
    type: aws:s3:BucketAclV2
    properties:
      bucket: ${bucket.id}
      acl: private
  firehoseRole:
    type: aws:iam:Role
    properties:
      assumeRolePolicy: ${assumeRole.json}
  testStream:
    type: aws:kinesis:FirehoseDeliveryStream
    properties:
      destination: s3
      s3Configuration:
        roleArn: ${firehoseRole.arn}
        bucketArn: ${bucket.arn}
variables:
  assumeRole:
    fn::invoke:
      Function: aws:iam:getPolicyDocument
      Arguments:
        statements:
          - effect: Allow
            principals:
              - type: Service
                identifiers:
                  - firehose.amazonaws.com
            actions:
              - sts:AssumeRole
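
The roles in these examples only establish Firehose's trust relationship; delivery additionally requires permissions on the destination bucket. A minimal sketch of attaching such a policy in TypeScript, using the action list from the AWS Firehose documentation, and assuming the firehoseRole and bucket resources from the example above are in scope:

import * as aws from "@pulumi/aws";

// Allow the Firehose role to write delivery batches into the bucket.
const firehoseS3Policy = new aws.iam.RolePolicy("firehoseS3Policy", {
    role: firehoseRole.id,
    policy: bucket.arn.apply((arn) => JSON.stringify({
        Version: "2012-10-17",
        Statement: [{
            Effect: "Allow",
            Action: [
                "s3:AbortMultipartUpload",
                "s3:GetBucketLocation",
                "s3:GetObject",
                "s3:ListBucket",
                "s3:ListBucketMultipartUploads",
                "s3:PutObject",
            ],
            Resource: [arn, `${arn}/*`],
        }],
    })),
});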

Redshift Destination

using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aws = Pulumi.Aws;

return await Deployment.RunAsync(() => 
{
    var testCluster = new Aws.RedShift.Cluster("testCluster", new()
    {
        ClusterIdentifier = "tf-redshift-cluster",
        DatabaseName = "test",
        MasterUsername = "testuser",
        MasterPassword = "T3stPass",
        NodeType = "dc1.large",
        ClusterType = "single-node",
    });

    var testStream = new Aws.Kinesis.FirehoseDeliveryStream("testStream", new()
    {
        Destination = "redshift",
        S3Configuration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamS3ConfigurationArgs
        {
            RoleArn = aws_iam_role.Firehose_role.Arn,
            BucketArn = aws_s3_bucket.Bucket.Arn,
            BufferSize = 10,
            BufferInterval = 400,
            CompressionFormat = "GZIP",
        },
        RedshiftConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamRedshiftConfigurationArgs
        {
            RoleArn = aws_iam_role.Firehose_role.Arn,
            ClusterJdbcurl = Output.Tuple(testCluster.Endpoint, testCluster.DatabaseName).Apply(values =>
            {
                var endpoint = values.Item1;
                var databaseName = values.Item2;
                return $"jdbc:redshift://{endpoint}/{databaseName}";
            }),
            Username = "testuser",
            Password = "T3stPass",
            DataTableName = "test-table",
            CopyOptions = "delimiter '|'",
            DataTableColumns = "test-col",
            S3BackupMode = "Enabled",
            S3BackupConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamRedshiftConfigurationS3BackupConfigurationArgs
            {
                RoleArn = aws_iam_role.Firehose_role.Arn,
                BucketArn = aws_s3_bucket.Bucket.Arn,
                BufferSize = 15,
                BufferInterval = 300,
                CompressionFormat = "GZIP",
            },
        },
    });

});
package main

import (
	"fmt"

	"github.com/pulumi/pulumi-aws/sdk/v5/go/aws/kinesis"
	"github.com/pulumi/pulumi-aws/sdk/v5/go/aws/redshift"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		testCluster, err := redshift.NewCluster(ctx, "testCluster", &redshift.ClusterArgs{
			ClusterIdentifier: pulumi.String("tf-redshift-cluster"),
			DatabaseName:      pulumi.String("test"),
			MasterUsername:    pulumi.String("testuser"),
			MasterPassword:    pulumi.String("T3stPass"),
			NodeType:          pulumi.String("dc1.large"),
			ClusterType:       pulumi.String("single-node"),
		})
		if err != nil {
			return err
		}
		_, err = kinesis.NewFirehoseDeliveryStream(ctx, "testStream", &kinesis.FirehoseDeliveryStreamArgs{
			Destination: pulumi.String("redshift"),
			S3Configuration: &kinesis.FirehoseDeliveryStreamS3ConfigurationArgs{
				RoleArn:           pulumi.Any(aws_iam_role.Firehose_role.Arn),
				BucketArn:         pulumi.Any(aws_s3_bucket.Bucket.Arn),
				BufferSize:        pulumi.Int(10),
				BufferInterval:    pulumi.Int(400),
				CompressionFormat: pulumi.String("GZIP"),
			},
			RedshiftConfiguration: &kinesis.FirehoseDeliveryStreamRedshiftConfigurationArgs{
				RoleArn: pulumi.Any(aws_iam_role.Firehose_role.Arn),
				ClusterJdbcurl: pulumi.All(testCluster.Endpoint, testCluster.DatabaseName).ApplyT(func(_args []interface{}) (string, error) {
					endpoint := _args[0].(string)
					databaseName := _args[1].(string)
					return fmt.Sprintf("jdbc:redshift://%v/%v", endpoint, databaseName), nil
				}).(pulumi.StringOutput),
				Username:         pulumi.String("testuser"),
				Password:         pulumi.String("T3stPass"),
				DataTableName:    pulumi.String("test-table"),
				CopyOptions:      pulumi.String("delimiter '|'"),
				DataTableColumns: pulumi.String("test-col"),
				S3BackupMode:     pulumi.String("Enabled"),
				S3BackupConfiguration: &kinesis.FirehoseDeliveryStreamRedshiftConfigurationS3BackupConfigurationArgs{
					RoleArn:           pulumi.Any(aws_iam_role.Firehose_role.Arn),
					BucketArn:         pulumi.Any(aws_s3_bucket.Bucket.Arn),
					BufferSize:        pulumi.Int(15),
					BufferInterval:    pulumi.Int(300),
					CompressionFormat: pulumi.String("GZIP"),
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.redshift.Cluster;
import com.pulumi.aws.redshift.ClusterArgs;
import com.pulumi.aws.kinesis.FirehoseDeliveryStream;
import com.pulumi.aws.kinesis.FirehoseDeliveryStreamArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamS3ConfigurationArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamRedshiftConfigurationArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamRedshiftConfigurationS3BackupConfigurationArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var testCluster = new Cluster("testCluster", ClusterArgs.builder()        
            .clusterIdentifier("tf-redshift-cluster")
            .databaseName("test")
            .masterUsername("testuser")
            .masterPassword("T3stPass")
            .nodeType("dc1.large")
            .clusterType("single-node")
            .build());

        var testStream = new FirehoseDeliveryStream("testStream", FirehoseDeliveryStreamArgs.builder()        
            .destination("redshift")
            .s3Configuration(FirehoseDeliveryStreamS3ConfigurationArgs.builder()
                .roleArn(aws_iam_role.firehose_role().arn())
                .bucketArn(aws_s3_bucket.bucket().arn())
                .bufferSize(10)
                .bufferInterval(400)
                .compressionFormat("GZIP")
                .build())
            .redshiftConfiguration(FirehoseDeliveryStreamRedshiftConfigurationArgs.builder()
                .roleArn(aws_iam_role.firehose_role().arn())
                .clusterJdbcurl(Output.tuple(testCluster.endpoint(), testCluster.databaseName()).applyValue(values -> {
                    var endpoint = values.t1;
                    var databaseName = values.t2;
                    return String.format("jdbc:redshift://%s/%s", endpoint, databaseName);
                }))
                .username("testuser")
                .password("T3stPass")
                .dataTableName("test-table")
                .copyOptions("delimiter '|'")
                .dataTableColumns("test-col")
                .s3BackupMode("Enabled")
                .s3BackupConfiguration(FirehoseDeliveryStreamRedshiftConfigurationS3BackupConfigurationArgs.builder()
                    .roleArn(aws_iam_role.firehose_role().arn())
                    .bucketArn(aws_s3_bucket.bucket().arn())
                    .bufferSize(15)
                    .bufferInterval(300)
                    .compressionFormat("GZIP")
                    .build())
                .build())
            .build());

    }
}
import pulumi
import pulumi_aws as aws

test_cluster = aws.redshift.Cluster("testCluster",
    cluster_identifier="tf-redshift-cluster",
    database_name="test",
    master_username="testuser",
    master_password="T3stPass",
    node_type="dc1.large",
    cluster_type="single-node")
test_stream = aws.kinesis.FirehoseDeliveryStream("testStream",
    destination="redshift",
    s3_configuration=aws.kinesis.FirehoseDeliveryStreamS3ConfigurationArgs(
        role_arn=aws_iam_role["firehose_role"]["arn"],
        bucket_arn=aws_s3_bucket["bucket"]["arn"],
        buffer_size=10,
        buffer_interval=400,
        compression_format="GZIP",
    ),
    redshift_configuration=aws.kinesis.FirehoseDeliveryStreamRedshiftConfigurationArgs(
        role_arn=aws_iam_role["firehose_role"]["arn"],
        cluster_jdbcurl=pulumi.Output.all(test_cluster.endpoint, test_cluster.database_name).apply(lambda args: f"jdbc:redshift://{args[0]}/{args[1]}"),
        username="testuser",
        password="T3stPass",
        data_table_name="test-table",
        copy_options="delimiter '|'",
        data_table_columns="test-col",
        s3_backup_mode="Enabled",
        s3_backup_configuration=aws.kinesis.FirehoseDeliveryStreamRedshiftConfigurationS3BackupConfigurationArgs(
            role_arn=aws_iam_role["firehose_role"]["arn"],
            bucket_arn=aws_s3_bucket["bucket"]["arn"],
            buffer_size=15,
            buffer_interval=300,
            compression_format="GZIP",
        ),
    ))
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";

const testCluster = new aws.redshift.Cluster("testCluster", {
    clusterIdentifier: "tf-redshift-cluster",
    databaseName: "test",
    masterUsername: "testuser",
    masterPassword: "T3stPass",
    nodeType: "dc1.large",
    clusterType: "single-node",
});
const testStream = new aws.kinesis.FirehoseDeliveryStream("testStream", {
    destination: "redshift",
    s3Configuration: {
        roleArn: aws_iam_role.firehose_role.arn,
        bucketArn: aws_s3_bucket.bucket.arn,
        bufferSize: 10,
        bufferInterval: 400,
        compressionFormat: "GZIP",
    },
    redshiftConfiguration: {
        roleArn: aws_iam_role.firehose_role.arn,
        clusterJdbcurl: pulumi.interpolate`jdbc:redshift://${testCluster.endpoint}/${testCluster.databaseName}`,
        username: "testuser",
        password: "T3stPass",
        dataTableName: "test-table",
        copyOptions: "delimiter '|'",
        dataTableColumns: "test-col",
        s3BackupMode: "Enabled",
        s3BackupConfiguration: {
            roleArn: aws_iam_role.firehose_role.arn,
            bucketArn: aws_s3_bucket.bucket.arn,
            bufferSize: 15,
            bufferInterval: 300,
            compressionFormat: "GZIP",
        },
    },
});
resources:
  testCluster:
    type: aws:redshift:Cluster
    properties:
      clusterIdentifier: tf-redshift-cluster
      databaseName: test
      masterUsername: testuser
      masterPassword: T3stPass
      nodeType: dc1.large
      clusterType: single-node
  testStream:
    type: aws:kinesis:FirehoseDeliveryStream
    properties:
      destination: redshift
      s3Configuration:
        roleArn: ${aws_iam_role.firehose_role.arn}
        bucketArn: ${aws_s3_bucket.bucket.arn}
        bufferSize: 10
        bufferInterval: 400
        compressionFormat: GZIP
      redshiftConfiguration:
        roleArn: ${aws_iam_role.firehose_role.arn}
        clusterJdbcurl: jdbc:redshift://${testCluster.endpoint}/${testCluster.databaseName}
        username: testuser
        password: T3stPass
        dataTableName: test-table
        copyOptions: delimiter '|'
        dataTableColumns: test-col
        s3BackupMode: Enabled
        s3BackupConfiguration:
          roleArn: ${aws_iam_role.firehose_role.arn}
          bucketArn: ${aws_s3_bucket.bucket.arn}
          bufferSize: 15
          bufferInterval: 300
          compressionFormat: GZIP
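
The Redshift example hard-codes the master password for brevity; in practice you would typically read it from stack configuration as a secret. A minimal sketch, assuming a redshiftPassword config value set with pulumi config set --secret redshiftPassword:

import * as pulumi from "@pulumi/pulumi";

const config = new pulumi.Config();
// A secret Output<string>; pass it as masterPassword on the cluster and as
// password in redshiftConfiguration instead of a literal value.
const redshiftPassword = config.requireSecret("redshiftPassword");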

Elasticsearch Destination

using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aws = Pulumi.Aws;

return await Deployment.RunAsync(() => 
{
    var testCluster = new Aws.ElasticSearch.Domain("testCluster");

    var testStream = new Aws.Kinesis.FirehoseDeliveryStream("testStream", new()
    {
        Destination = "elasticsearch",
        S3Configuration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamS3ConfigurationArgs
        {
            RoleArn = aws_iam_role.Firehose_role.Arn,
            BucketArn = aws_s3_bucket.Bucket.Arn,
            BufferSize = 10,
            BufferInterval = 400,
            CompressionFormat = "GZIP",
        },
        ElasticsearchConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamElasticsearchConfigurationArgs
        {
            DomainArn = testCluster.Arn,
            RoleArn = aws_iam_role.Firehose_role.Arn,
            IndexName = "test",
            TypeName = "test",
            ProcessingConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationArgs
            {
                Enabled = true,
                Processors = new[]
                {
                    new Aws.Kinesis.Inputs.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorArgs
                    {
                        Type = "Lambda",
                        Parameters = new[]
                        {
                            new Aws.Kinesis.Inputs.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorParameterArgs
                            {
                                ParameterName = "LambdaArn",
                                ParameterValue = $"{aws_lambda_function.Lambda_processor.Arn}:$LATEST",
                            },
                        },
                    },
                },
            },
        },
    });

});
package main

import (
	"fmt"

	"github.com/pulumi/pulumi-aws/sdk/v5/go/aws/elasticsearch"
	"github.com/pulumi/pulumi-aws/sdk/v5/go/aws/kinesis"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		testCluster, err := elasticsearch.NewDomain(ctx, "testCluster", nil)
		if err != nil {
			return err
		}
		_, err = kinesis.NewFirehoseDeliveryStream(ctx, "testStream", &kinesis.FirehoseDeliveryStreamArgs{
			Destination: pulumi.String("elasticsearch"),
			S3Configuration: &kinesis.FirehoseDeliveryStreamS3ConfigurationArgs{
				RoleArn:           pulumi.Any(aws_iam_role.Firehose_role.Arn),
				BucketArn:         pulumi.Any(aws_s3_bucket.Bucket.Arn),
				BufferSize:        pulumi.Int(10),
				BufferInterval:    pulumi.Int(400),
				CompressionFormat: pulumi.String("GZIP"),
			},
			ElasticsearchConfiguration: &kinesis.FirehoseDeliveryStreamElasticsearchConfigurationArgs{
				DomainArn: testCluster.Arn,
				RoleArn:   pulumi.Any(aws_iam_role.Firehose_role.Arn),
				IndexName: pulumi.String("test"),
				TypeName:  pulumi.String("test"),
				ProcessingConfiguration: &kinesis.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationArgs{
					Enabled: pulumi.Bool(true),
					Processors: kinesis.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorArray{
						&kinesis.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorArgs{
							Type: pulumi.String("Lambda"),
							Parameters: kinesis.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorParameterArray{
								&kinesis.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorParameterArgs{
									ParameterName:  pulumi.String("LambdaArn"),
									ParameterValue: pulumi.String(fmt.Sprintf("%v:$LATEST", aws_lambda_function.Lambda_processor.Arn)),
								},
							},
						},
					},
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.elasticsearch.Domain;
import com.pulumi.aws.kinesis.FirehoseDeliveryStream;
import com.pulumi.aws.kinesis.FirehoseDeliveryStreamArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamS3ConfigurationArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamElasticsearchConfigurationArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorParameterArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var testCluster = new Domain("testCluster");

        var testStream = new FirehoseDeliveryStream("testStream", FirehoseDeliveryStreamArgs.builder()        
            .destination("elasticsearch")
            .s3Configuration(FirehoseDeliveryStreamS3ConfigurationArgs.builder()
                .roleArn(aws_iam_role.firehose_role().arn())
                .bucketArn(aws_s3_bucket.bucket().arn())
                .bufferSize(10)
                .bufferInterval(400)
                .compressionFormat("GZIP")
                .build())
            .elasticsearchConfiguration(FirehoseDeliveryStreamElasticsearchConfigurationArgs.builder()
                .domainArn(testCluster.arn())
                .roleArn(aws_iam_role.firehose_role().arn())
                .indexName("test")
                .typeName("test")
                .processingConfiguration(FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationArgs.builder()
                    .enabled(true)
                    .processors(FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorArgs.builder()
                        .type("Lambda")
                        .parameters(FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorParameterArgs.builder()
                            .parameterName("LambdaArn")
                            .parameterValue(String.format("%s:$LATEST", aws_lambda_function.lambda_processor().arn()))
                            .build())
                        .build())
                    .build())
                .build())
            .build());

    }
}
import pulumi
import pulumi_aws as aws

test_cluster = aws.elasticsearch.Domain("testCluster")
test_stream = aws.kinesis.FirehoseDeliveryStream("testStream",
    destination="elasticsearch",
    s3_configuration=aws.kinesis.FirehoseDeliveryStreamS3ConfigurationArgs(
        role_arn=aws_iam_role["firehose_role"]["arn"],
        bucket_arn=aws_s3_bucket["bucket"]["arn"],
        buffer_size=10,
        buffer_interval=400,
        compression_format="GZIP",
    ),
    elasticsearch_configuration=aws.kinesis.FirehoseDeliveryStreamElasticsearchConfigurationArgs(
        domain_arn=test_cluster.arn,
        role_arn=aws_iam_role["firehose_role"]["arn"],
        index_name="test",
        type_name="test",
        processing_configuration=aws.kinesis.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationArgs(
            enabled=True,
            processors=[aws.kinesis.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorArgs(
                type="Lambda",
                parameters=[aws.kinesis.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorParameterArgs(
                    parameter_name="LambdaArn",
                    parameter_value=f"{aws_lambda_function['lambda_processor']['arn']}:$LATEST",
                )],
            )],
        ),
    ))
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";

const testCluster = new aws.elasticsearch.Domain("testCluster", {});
const testStream = new aws.kinesis.FirehoseDeliveryStream("testStream", {
    destination: "elasticsearch",
    s3Configuration: {
        roleArn: aws_iam_role.firehose_role.arn,
        bucketArn: aws_s3_bucket.bucket.arn,
        bufferSize: 10,
        bufferInterval: 400,
        compressionFormat: "GZIP",
    },
    elasticsearchConfiguration: {
        domainArn: testCluster.arn,
        roleArn: aws_iam_role.firehose_role.arn,
        indexName: "test",
        typeName: "test",
        processingConfiguration: {
            enabled: true,
            processors: [{
                type: "Lambda",
                parameters: [{
                    parameterName: "LambdaArn",
                    parameterValue: `${aws_lambda_function.lambda_processor.arn}:$LATEST`,
                }],
            }],
        },
    },
});
resources:
  testCluster:
    type: aws:elasticsearch:Domain
  testStream:
    type: aws:kinesis:FirehoseDeliveryStream
    properties:
      destination: elasticsearch
      s3Configuration:
        roleArn: ${aws_iam_role.firehose_role.arn}
        bucketArn: ${aws_s3_bucket.bucket.arn}
        bufferSize: 10
        bufferInterval: 400
        compressionFormat: GZIP
      elasticsearchConfiguration:
        domainArn: ${testCluster.arn}
        roleArn: ${aws_iam_role.firehose_role.arn}
        indexName: test
        typeName: test
        processingConfiguration:
          enabled: true
          processors:
            - type: Lambda
              parameters:
                - parameterName: LambdaArn
                  parameterValue: ${aws_lambda_function.lambda_processor.arn}:$LATEST
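
Note that the LambdaArn processor parameter expects a qualified function ARN, which is why the examples append :$LATEST. When the processing function is managed in the same program rather than referenced as aws_lambda_function.lambda_processor, the value can be derived from the resource's output. A minimal TypeScript sketch, where lambdaRole and lambdaProcessor are hypothetical names introduced for illustration:

import * as aws from "@pulumi/aws";
import * as pulumi from "@pulumi/pulumi";

// Hypothetical execution role for the processor function.
const lambdaRole = new aws.iam.Role("lambdaRole", {
    assumeRolePolicy: JSON.stringify({
        Version: "2012-10-17",
        Statement: [{
            Effect: "Allow",
            Principal: { Service: "lambda.amazonaws.com" },
            Action: "sts:AssumeRole",
        }],
    }),
});

// Hypothetical processing function; any existing aws.lambda.Function works here.
const lambdaProcessor = new aws.lambda.Function("lambdaProcessor", {
    code: new pulumi.asset.FileArchive("lambda.zip"),
    role: lambdaRole.arn,
    handler: "exports.handler",
    runtime: "nodejs16.x",
});

// Qualified ARN suitable for the LambdaArn processor parameter above.
const lambdaArnParameter = pulumi.interpolate`${lambdaProcessor.arn}:$LATEST`;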

Elasticsearch Destination With VPC

using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aws = Pulumi.Aws;

return await Deployment.RunAsync(() => 
{
    var testCluster = new Aws.ElasticSearch.Domain("testCluster", new()
    {
        ClusterConfig = new Aws.ElasticSearch.Inputs.DomainClusterConfigArgs
        {
            InstanceCount = 2,
            ZoneAwarenessEnabled = true,
            InstanceType = "t2.small.elasticsearch",
        },
        EbsOptions = new Aws.ElasticSearch.Inputs.DomainEbsOptionsArgs
        {
            EbsEnabled = true,
            VolumeSize = 10,
        },
        VpcOptions = new Aws.ElasticSearch.Inputs.DomainVpcOptionsArgs
        {
            SecurityGroupIds = new[]
            {
                aws_security_group.First.Id,
            },
            SubnetIds = new[]
            {
                aws_subnet.First.Id,
                aws_subnet.Second.Id,
            },
        },
    });

    var firehose_elasticsearchPolicyDocument = Aws.Iam.GetPolicyDocument.Invoke(new()
    {
        Statements = new[]
        {
            new Aws.Iam.Inputs.GetPolicyDocumentStatementInputArgs
            {
                Effect = "Allow",
                Actions = new[]
                {
                    "es:*",
                },
                Resources = new[]
                {
                    testCluster.Arn,
                    $"{testCluster.Arn}/*",
                },
            },
            new Aws.Iam.Inputs.GetPolicyDocumentStatementInputArgs
            {
                Effect = "Allow",
                Actions = new[]
                {
                    "ec2:DescribeVpcs",
                    "ec2:DescribeVpcAttribute",
                    "ec2:DescribeSubnets",
                    "ec2:DescribeSecurityGroups",
                    "ec2:DescribeNetworkInterfaces",
                    "ec2:CreateNetworkInterface",
                    "ec2:CreateNetworkInterfacePermission",
                    "ec2:DeleteNetworkInterface",
                },
                Resources = new[]
                {
                    "*",
                },
            },
        },
    });

    var firehose_elasticsearchRolePolicy = new Aws.Iam.RolePolicy("firehose-elasticsearchRolePolicy", new()
    {
        Role = aws_iam_role.Firehose.Id,
        Policy = firehose_elasticsearchPolicyDocument.Apply(getPolicyDocumentResult => getPolicyDocumentResult.Json),
    });

    var test = new Aws.Kinesis.FirehoseDeliveryStream("test", new()
    {
        Destination = "elasticsearch",
        S3Configuration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamS3ConfigurationArgs
        {
            RoleArn = aws_iam_role.Firehose.Arn,
            BucketArn = aws_s3_bucket.Bucket.Arn,
        },
        ElasticsearchConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamElasticsearchConfigurationArgs
        {
            DomainArn = testCluster.Arn,
            RoleArn = aws_iam_role.Firehose.Arn,
            IndexName = "test",
            TypeName = "test",
            VpcConfig = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamElasticsearchConfigurationVpcConfigArgs
            {
                SubnetIds = new[]
                {
                    aws_subnet.First.Id,
                    aws_subnet.Second.Id,
                },
                SecurityGroupIds = new[]
                {
                    aws_security_group.First.Id,
                },
                RoleArn = aws_iam_role.Firehose.Arn,
            },
        },
    }, new CustomResourceOptions
    {
        DependsOn = new[]
        {
            firehose_elasticsearchRolePolicy,
        },
    });

});
package main

import (
	"fmt"

	"github.com/pulumi/pulumi-aws/sdk/v5/go/aws/elasticsearch"
	"github.com/pulumi/pulumi-aws/sdk/v5/go/aws/iam"
	"github.com/pulumi/pulumi-aws/sdk/v5/go/aws/kinesis"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		testCluster, err := elasticsearch.NewDomain(ctx, "testCluster", &elasticsearch.DomainArgs{
			ClusterConfig: &elasticsearch.DomainClusterConfigArgs{
				InstanceCount:        pulumi.Int(2),
				ZoneAwarenessEnabled: pulumi.Bool(true),
				InstanceType:         pulumi.String("t2.small.elasticsearch"),
			},
			EbsOptions: &elasticsearch.DomainEbsOptionsArgs{
				EbsEnabled: pulumi.Bool(true),
				VolumeSize: pulumi.Int(10),
			},
			VpcOptions: &elasticsearch.DomainVpcOptionsArgs{
				SecurityGroupIds: pulumi.StringArray{
					aws_security_group.First.Id,
				},
				SubnetIds: pulumi.StringArray{
					aws_subnet.First.Id,
					aws_subnet.Second.Id,
				},
			},
		})
		if err != nil {
			return err
		}
		firehose_elasticsearchPolicyDocument := iam.GetPolicyDocumentOutput(ctx, iam.GetPolicyDocumentOutputArgs{
			Statements: iam.GetPolicyDocumentStatementArray{
				&iam.GetPolicyDocumentStatementArgs{
					Effect: pulumi.String("Allow"),
					Actions: pulumi.StringArray{
						pulumi.String("es:*"),
					},
					Resources: pulumi.StringArray{
						testCluster.Arn,
						testCluster.Arn.ApplyT(func(arn string) (string, error) {
							return fmt.Sprintf("%v/*", arn), nil
						}).(pulumi.StringOutput),
					},
				},
				&iam.GetPolicyDocumentStatementArgs{
					Effect: pulumi.String("Allow"),
					Actions: pulumi.StringArray{
						pulumi.String("ec2:DescribeVpcs"),
						pulumi.String("ec2:DescribeVpcAttribute"),
						pulumi.String("ec2:DescribeSubnets"),
						pulumi.String("ec2:DescribeSecurityGroups"),
						pulumi.String("ec2:DescribeNetworkInterfaces"),
						pulumi.String("ec2:CreateNetworkInterface"),
						pulumi.String("ec2:CreateNetworkInterfacePermission"),
						pulumi.String("ec2:DeleteNetworkInterface"),
					},
					Resources: pulumi.StringArray{
						pulumi.String("*"),
					},
				},
			},
		}, nil)
		firehose_elasticsearchRolePolicy, err := iam.NewRolePolicy(ctx, "firehose-elasticsearchRolePolicy", &iam.RolePolicyArgs{
			Role: pulumi.Any(aws_iam_role.Firehose.Id),
			Policy: firehose_elasticsearchPolicyDocument.ApplyT(func(firehose_elasticsearchPolicyDocument iam.GetPolicyDocumentResult) (*string, error) {
				return &firehose_elasticsearchPolicyDocument.Json, nil
			}).(pulumi.StringPtrOutput),
		})
		if err != nil {
			return err
		}
		_, err = kinesis.NewFirehoseDeliveryStream(ctx, "test", &kinesis.FirehoseDeliveryStreamArgs{
			Destination: pulumi.String("elasticsearch"),
			S3Configuration: &kinesis.FirehoseDeliveryStreamS3ConfigurationArgs{
				RoleArn:   pulumi.Any(aws_iam_role.Firehose.Arn),
				BucketArn: pulumi.Any(aws_s3_bucket.Bucket.Arn),
			},
			ElasticsearchConfiguration: &kinesis.FirehoseDeliveryStreamElasticsearchConfigurationArgs{
				DomainArn: testCluster.Arn,
				RoleArn:   pulumi.Any(aws_iam_role.Firehose.Arn),
				IndexName: pulumi.String("test"),
				TypeName:  pulumi.String("test"),
				VpcConfig: &kinesis.FirehoseDeliveryStreamElasticsearchConfigurationVpcConfigArgs{
					SubnetIds: pulumi.StringArray{
						aws_subnet.First.Id,
						aws_subnet.Second.Id,
					},
					SecurityGroupIds: pulumi.StringArray{
						aws_security_group.First.Id,
					},
					RoleArn: pulumi.Any(aws_iam_role.Firehose.Arn),
				},
			},
		}, pulumi.DependsOn([]pulumi.Resource{
			firehose_elasticsearchRolePolicy,
		}))
		if err != nil {
			return err
		}
		return nil
	})
}
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.elasticsearch.Domain;
import com.pulumi.aws.elasticsearch.DomainArgs;
import com.pulumi.aws.elasticsearch.inputs.DomainClusterConfigArgs;
import com.pulumi.aws.elasticsearch.inputs.DomainEbsOptionsArgs;
import com.pulumi.aws.elasticsearch.inputs.DomainVpcOptionsArgs;
import com.pulumi.aws.iam.IamFunctions;
import com.pulumi.aws.iam.inputs.GetPolicyDocumentArgs;
import com.pulumi.aws.iam.inputs.GetPolicyDocumentStatementArgs;
import com.pulumi.aws.iam.RolePolicy;
import com.pulumi.aws.iam.RolePolicyArgs;
import com.pulumi.aws.kinesis.FirehoseDeliveryStream;
import com.pulumi.aws.kinesis.FirehoseDeliveryStreamArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamS3ConfigurationArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamElasticsearchConfigurationArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamElasticsearchConfigurationVpcConfigArgs;
import com.pulumi.resources.CustomResourceOptions;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var testCluster = new Domain("testCluster", DomainArgs.builder()        
            .clusterConfig(DomainClusterConfigArgs.builder()
                .instanceCount(2)
                .zoneAwarenessEnabled(true)
                .instanceType("t2.small.elasticsearch")
                .build())
            .ebsOptions(DomainEbsOptionsArgs.builder()
                .ebsEnabled(true)
                .volumeSize(10)
                .build())
            .vpcOptions(DomainVpcOptionsArgs.builder()
                .securityGroupIds(aws_security_group.first().id())
                .subnetIds(                
                    aws_subnet.first().id(),
                    aws_subnet.second().id())
                .build())
            .build());

        final var firehose_elasticsearchPolicyDocument = IamFunctions.getPolicyDocument(GetPolicyDocumentArgs.builder()
            .statements(            
                GetPolicyDocumentStatementArgs.builder()
                    .effect("Allow")
                    .actions("es:*")
                    .resources(Output.all(
                        testCluster.arn(),
                        testCluster.arn().applyValue(arn -> String.format("%s/*", arn))))
                    .build(),
                GetPolicyDocumentStatementArgs.builder()
                    .effect("Allow")
                    .actions(                    
                        "ec2:DescribeVpcs",
                        "ec2:DescribeVpcAttribute",
                        "ec2:DescribeSubnets",
                        "ec2:DescribeSecurityGroups",
                        "ec2:DescribeNetworkInterfaces",
                        "ec2:CreateNetworkInterface",
                        "ec2:CreateNetworkInterfacePermission",
                        "ec2:DeleteNetworkInterface")
                    .resources("*")
                    .build())
            .build());

        var firehose_elasticsearchRolePolicy = new RolePolicy("firehose-elasticsearchRolePolicy", RolePolicyArgs.builder()        
            .role(aws_iam_role.firehose().id())
            .policy(firehose_elasticsearchPolicyDocument.applyValue(policyDocumentResult -> policyDocumentResult.json()))
            .build());

        var test = new FirehoseDeliveryStream("test", FirehoseDeliveryStreamArgs.builder()        
            .destination("elasticsearch")
            .s3Configuration(FirehoseDeliveryStreamS3ConfigurationArgs.builder()
                .roleArn(aws_iam_role.firehose().arn())
                .bucketArn(aws_s3_bucket.bucket().arn())
                .build())
            .elasticsearchConfiguration(FirehoseDeliveryStreamElasticsearchConfigurationArgs.builder()
                .domainArn(testCluster.arn())
                .roleArn(aws_iam_role.firehose().arn())
                .indexName("test")
                .typeName("test")
                .vpcConfig(FirehoseDeliveryStreamElasticsearchConfigurationVpcConfigArgs.builder()
                    .subnetIds(                    
                        aws_subnet.first().id(),
                        aws_subnet.second().id())
                    .securityGroupIds(aws_security_group.first().id())
                    .roleArn(aws_iam_role.firehose().arn())
                    .build())
                .build())
            .build(), CustomResourceOptions.builder()
                .dependsOn(firehose_elasticsearchRolePolicy)
                .build());

    }
}
import pulumi
import pulumi_aws as aws

test_cluster = aws.elasticsearch.Domain("testCluster",
    cluster_config=aws.elasticsearch.DomainClusterConfigArgs(
        instance_count=2,
        zone_awareness_enabled=True,
        instance_type="t2.small.elasticsearch",
    ),
    ebs_options=aws.elasticsearch.DomainEbsOptionsArgs(
        ebs_enabled=True,
        volume_size=10,
    ),
    vpc_options=aws.elasticsearch.DomainVpcOptionsArgs(
        security_group_ids=[aws_security_group["first"]["id"]],
        subnet_ids=[
            aws_subnet["first"]["id"],
            aws_subnet["second"]["id"],
        ],
    ))
firehose_elasticsearch_policy_document = aws.iam.get_policy_document_output(statements=[
    aws.iam.GetPolicyDocumentStatementArgs(
        effect="Allow",
        actions=["es:*"],
        resources=[
            test_cluster.arn,
            test_cluster.arn.apply(lambda arn: f"{arn}/*"),
        ],
    ),
    aws.iam.GetPolicyDocumentStatementArgs(
        effect="Allow",
        actions=[
            "ec2:DescribeVpcs",
            "ec2:DescribeVpcAttribute",
            "ec2:DescribeSubnets",
            "ec2:DescribeSecurityGroups",
            "ec2:DescribeNetworkInterfaces",
            "ec2:CreateNetworkInterface",
            "ec2:CreateNetworkInterfacePermission",
            "ec2:DeleteNetworkInterface",
        ],
        resources=["*"],
    ),
])
firehose_elasticsearch_role_policy = aws.iam.RolePolicy("firehose-elasticsearchRolePolicy",
    role=aws_iam_role["firehose"]["id"],
    policy=firehose_elasticsearch_policy_document.json)
test = aws.kinesis.FirehoseDeliveryStream("test",
    destination="elasticsearch",
    s3_configuration=aws.kinesis.FirehoseDeliveryStreamS3ConfigurationArgs(
        role_arn=aws_iam_role["firehose"]["arn"],
        bucket_arn=aws_s3_bucket["bucket"]["arn"],
    ),
    elasticsearch_configuration=aws.kinesis.FirehoseDeliveryStreamElasticsearchConfigurationArgs(
        domain_arn=test_cluster.arn,
        role_arn=aws_iam_role["firehose"]["arn"],
        index_name="test",
        type_name="test",
        vpc_config=aws.kinesis.FirehoseDeliveryStreamElasticsearchConfigurationVpcConfigArgs(
            subnet_ids=[
                aws_subnet["first"]["id"],
                aws_subnet["second"]["id"],
            ],
            security_group_ids=[aws_security_group["first"]["id"]],
            role_arn=aws_iam_role["firehose"]["arn"],
        ),
    ),
    opts=pulumi.ResourceOptions(depends_on=[firehose_elasticsearch_role_policy]))
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";

const testCluster = new aws.elasticsearch.Domain("testCluster", {
    clusterConfig: {
        instanceCount: 2,
        zoneAwarenessEnabled: true,
        instanceType: "t2.small.elasticsearch",
    },
    ebsOptions: {
        ebsEnabled: true,
        volumeSize: 10,
    },
    vpcOptions: {
        securityGroupIds: [aws_security_group.first.id],
        subnetIds: [
            aws_subnet.first.id,
            aws_subnet.second.id,
        ],
    },
});
const firehose_elasticsearchPolicyDocument = aws.iam.getPolicyDocumentOutput({
    statements: [
        {
            effect: "Allow",
            actions: ["es:*"],
            resources: [
                testCluster.arn,
                pulumi.interpolate`${testCluster.arn}/*`,
            ],
        },
        {
            effect: "Allow",
            actions: [
                "ec2:DescribeVpcs",
                "ec2:DescribeVpcAttribute",
                "ec2:DescribeSubnets",
                "ec2:DescribeSecurityGroups",
                "ec2:DescribeNetworkInterfaces",
                "ec2:CreateNetworkInterface",
                "ec2:CreateNetworkInterfacePermission",
                "ec2:DeleteNetworkInterface",
            ],
            resources: ["*"],
        },
    ],
});
const firehose_elasticsearchRolePolicy = new aws.iam.RolePolicy("firehose-elasticsearchRolePolicy", {
    role: aws_iam_role.firehose.id,
    policy: firehose_elasticsearchPolicyDocument.apply(firehose_elasticsearchPolicyDocument => firehose_elasticsearchPolicyDocument.json),
});
const test = new aws.kinesis.FirehoseDeliveryStream("test", {
    destination: "elasticsearch",
    s3Configuration: {
        roleArn: aws_iam_role.firehose.arn,
        bucketArn: aws_s3_bucket.bucket.arn,
    },
    elasticsearchConfiguration: {
        domainArn: testCluster.arn,
        roleArn: aws_iam_role.firehose.arn,
        indexName: "test",
        typeName: "test",
        vpcConfig: {
            subnetIds: [
                aws_subnet.first.id,
                aws_subnet.second.id,
            ],
            securityGroupIds: [aws_security_group.first.id],
            roleArn: aws_iam_role.firehose.arn,
        },
    },
}, {
    dependsOn: [firehose_elasticsearchRolePolicy],
});
resources:
  testCluster:
    type: aws:elasticsearch:Domain
    properties:
      clusterConfig:
        instanceCount: 2
        zoneAwarenessEnabled: true
        instanceType: t2.small.elasticsearch
      ebsOptions:
        ebsEnabled: true
        volumeSize: 10
      vpcOptions:
        securityGroupIds:
          - ${aws_security_group.first.id}
        subnetIds:
          - ${aws_subnet.first.id}
          - ${aws_subnet.second.id}
  firehose-elasticsearchRolePolicy:
    type: aws:iam:RolePolicy
    properties:
      role: ${aws_iam_role.firehose.id}
      policy: ${["firehose-elasticsearchPolicyDocument"].json}
  test:
    type: aws:kinesis:FirehoseDeliveryStream
    properties:
      destination: elasticsearch
      s3Configuration:
        roleArn: ${aws_iam_role.firehose.arn}
        bucketArn: ${aws_s3_bucket.bucket.arn}
      elasticsearchConfiguration:
        domainArn: ${testCluster.arn}
        roleArn: ${aws_iam_role.firehose.arn}
        indexName: test
        typeName: test
        vpcConfig:
          subnetIds:
            - ${aws_subnet.first.id}
            - ${aws_subnet.second.id}
          securityGroupIds:
            - ${aws_security_group.first.id}
          roleArn: ${aws_iam_role.firehose.arn}
    options:
      dependsOn:
        - ${["firehose-elasticsearchRolePolicy"]}
variables:
  firehose-elasticsearchPolicyDocument:
    fn::invoke:
      Function: aws:iam:getPolicyDocument
      Arguments:
        statements:
          - effect: Allow
            actions:
              - es:*
            resources:
              - ${testCluster.arn}
              - ${testCluster.arn}/*
          - effect: Allow
            actions:
              - ec2:DescribeVpcs
              - ec2:DescribeVpcAttribute
              - ec2:DescribeSubnets
              - ec2:DescribeSecurityGroups
              - ec2:DescribeNetworkInterfaces
              - ec2:CreateNetworkInterface
              - ec2:CreateNetworkInterfacePermission
              - ec2:DeleteNetworkInterface
            resources:
              - '*'
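
The VPC examples reference subnets and a security group managed outside the program (aws_subnet.first, aws_security_group.first). When such resources already exist in the account, their IDs can instead be resolved with data-source lookups. A minimal TypeScript sketch, assuming hypothetical Name tags on the existing resources:

import * as aws from "@pulumi/aws";

// Hypothetical lookups standing in for the aws_subnet/aws_security_group references.
const firstSubnet = aws.ec2.getSubnetOutput({
    filters: [{
        name: "tag:Name",
        values: ["firehose-subnet-first"],
    }],
});
const firstSecurityGroup = aws.ec2.getSecurityGroupOutput({
    filters: [{
        name: "tag:Name",
        values: ["firehose-sg"],
    }],
});

// firstSubnet.id and firstSecurityGroup.id can then be passed to
// elasticsearchConfiguration.vpcConfig.subnetIds and securityGroupIds.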

Opensearch Destination

using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aws = Pulumi.Aws;

return await Deployment.RunAsync(() => 
{
    var testCluster = new Aws.OpenSearch.Domain("testCluster");

    var testStream = new Aws.Kinesis.FirehoseDeliveryStream("testStream", new()
    {
        Destination = "opensearch",
        S3Configuration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamS3ConfigurationArgs
        {
            RoleArn = aws_iam_role.Firehose_role.Arn,
            BucketArn = aws_s3_bucket.Bucket.Arn,
            BufferSize = 10,
            BufferInterval = 400,
            CompressionFormat = "GZIP",
        },
        OpensearchConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamOpensearchConfigurationArgs
        {
            DomainArn = testCluster.Arn,
            RoleArn = aws_iam_role.Firehose_role.Arn,
            IndexName = "test",
            ProcessingConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationArgs
            {
                Enabled = true,
                Processors = new[]
                {
                    new Aws.Kinesis.Inputs.FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationProcessorArgs
                    {
                        Type = "Lambda",
                        Parameters = new[]
                        {
                            new Aws.Kinesis.Inputs.FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationProcessorParameterArgs
                            {
                                ParameterName = "LambdaArn",
                                ParameterValue = $"{aws_lambda_function.Lambda_processor.Arn}:$LATEST",
                            },
                        },
                    },
                },
            },
        },
    });

});
package main

import (
	"fmt"

	"github.com/pulumi/pulumi-aws/sdk/v5/go/aws/kinesis"
	"github.com/pulumi/pulumi-aws/sdk/v5/go/aws/opensearch"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		testCluster, err := opensearch.NewDomain(ctx, "testCluster", nil)
		if err != nil {
			return err
		}
		_, err = kinesis.NewFirehoseDeliveryStream(ctx, "testStream", &kinesis.FirehoseDeliveryStreamArgs{
			Destination: pulumi.String("opensearch"),
			S3Configuration: &kinesis.FirehoseDeliveryStreamS3ConfigurationArgs{
				RoleArn:           pulumi.Any(aws_iam_role.Firehose_role.Arn),
				BucketArn:         pulumi.Any(aws_s3_bucket.Bucket.Arn),
				BufferSize:        pulumi.Int(10),
				BufferInterval:    pulumi.Int(400),
				CompressionFormat: pulumi.String("GZIP"),
			},
			OpensearchConfiguration: &kinesis.FirehoseDeliveryStreamOpensearchConfigurationArgs{
				DomainArn: testCluster.Arn,
				RoleArn:   pulumi.Any(aws_iam_role.Firehose_role.Arn),
				IndexName: pulumi.String("test"),
				ProcessingConfiguration: &kinesis.FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationArgs{
					Enabled: pulumi.Bool(true),
					Processors: kinesis.FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationProcessorArray{
						&kinesis.FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationProcessorArgs{
							Type: pulumi.String("Lambda"),
							Parameters: kinesis.FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationProcessorParameterArray{
								&kinesis.FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationProcessorParameterArgs{
									ParameterName:  pulumi.String("LambdaArn"),
									ParameterValue: pulumi.String(fmt.Sprintf("%v:$LATEST", aws_lambda_function.Lambda_processor.Arn)),
								},
							},
						},
					},
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.opensearch.Domain;
import com.pulumi.aws.kinesis.FirehoseDeliveryStream;
import com.pulumi.aws.kinesis.FirehoseDeliveryStreamArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamS3ConfigurationArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamOpensearchConfigurationArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationProcessorArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationProcessorParameterArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var testCluster = new Domain("testCluster");

        var testStream = new FirehoseDeliveryStream("testStream", FirehoseDeliveryStreamArgs.builder()        
            .destination("opensearch")
            .s3Configuration(FirehoseDeliveryStreamS3ConfigurationArgs.builder()
                .roleArn(aws_iam_role.firehose_role().arn())
                .bucketArn(aws_s3_bucket.bucket().arn())
                .bufferSize(10)
                .bufferInterval(400)
                .compressionFormat("GZIP")
                .build())
            .opensearchConfiguration(FirehoseDeliveryStreamOpensearchConfigurationArgs.builder()
                .domainArn(testCluster.arn())
                .roleArn(aws_iam_role.firehose_role().arn())
                .indexName("test")
                .processingConfiguration(FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationArgs.builder()
                    .enabled(true)
                    .processors(FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationProcessorArgs.builder()
                        .type("Lambda")
                        .parameters(FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationProcessorParameterArgs.builder()
                            .parameterName("LambdaArn")
                            .parameterValue(String.format("%s:$LATEST", aws_lambda_function.lambda_processor().arn()))
                            .build())
                        .build())
                    .build())
                .build())
            .build());

    }
}
import pulumi
import pulumi_aws as aws

test_cluster = aws.opensearch.Domain("testCluster")
test_stream = aws.kinesis.FirehoseDeliveryStream("testStream",
    destination="opensearch",
    s3_configuration=aws.kinesis.FirehoseDeliveryStreamS3ConfigurationArgs(
        role_arn=aws_iam_role["firehose_role"]["arn"],
        bucket_arn=aws_s3_bucket["bucket"]["arn"],
        buffer_size=10,
        buffer_interval=400,
        compression_format="GZIP",
    ),
    opensearch_configuration=aws.kinesis.FirehoseDeliveryStreamOpensearchConfigurationArgs(
        domain_arn=test_cluster.arn,
        role_arn=aws_iam_role["firehose_role"]["arn"],
        index_name="test",
        processing_configuration=aws.kinesis.FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationArgs(
            enabled=True,
            processors=[aws.kinesis.FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationProcessorArgs(
                type="Lambda",
                parameters=[aws.kinesis.FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationProcessorParameterArgs(
                    parameter_name="LambdaArn",
                    parameter_value=f"{aws_lambda_function['lambda_processor']['arn']}:$LATEST",
                )],
            )],
        ),
    ))
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";

const testCluster = new aws.opensearch.Domain("testCluster", {});
const testStream = new aws.kinesis.FirehoseDeliveryStream("testStream", {
    destination: "opensearch",
    s3Configuration: {
        roleArn: aws_iam_role.firehose_role.arn,
        bucketArn: aws_s3_bucket.bucket.arn,
        bufferSize: 10,
        bufferInterval: 400,
        compressionFormat: "GZIP",
    },
    opensearchConfiguration: {
        domainArn: testCluster.arn,
        roleArn: aws_iam_role.firehose_role.arn,
        indexName: "test",
        processingConfiguration: {
            enabled: true,
            processors: [{
                type: "Lambda",
                parameters: [{
                    parameterName: "LambdaArn",
                    parameterValue: `${aws_lambda_function.lambda_processor.arn}:$LATEST`,
                }],
            }],
        },
    },
});
resources:
  testCluster:
    type: aws:opensearch:Domain
  testStream:
    type: aws:kinesis:FirehoseDeliveryStream
    properties:
      destination: opensearch
      s3Configuration:
        roleArn: ${aws_iam_role.firehose_role.arn}
        bucketArn: ${aws_s3_bucket.bucket.arn}
        bufferSize: 10
        bufferInterval: 400
        compressionFormat: GZIP
      opensearchConfiguration:
        domainArn: ${testCluster.arn}
        roleArn: ${aws_iam_role.firehose_role.arn}
        indexName: test
        processingConfiguration:
          enabled: true
          processors:
            - type: Lambda
              parameters:
                - parameterName: LambdaArn
                  parameterValue: ${aws_lambda_function.lambda_processor.arn}:$LATEST

Opensearch Destination With VPC

using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aws = Pulumi.Aws;

return await Deployment.RunAsync(() => 
{
    var testCluster = new Aws.OpenSearch.Domain("testCluster", new()
    {
        ClusterConfig = new Aws.OpenSearch.Inputs.DomainClusterConfigArgs
        {
            InstanceCount = 2,
            ZoneAwarenessEnabled = true,
            InstanceType = "m4.large.search",
        },
        EbsOptions = new Aws.OpenSearch.Inputs.DomainEbsOptionsArgs
        {
            EbsEnabled = true,
            VolumeSize = 10,
        },
        VpcOptions = new Aws.OpenSearch.Inputs.DomainVpcOptionsArgs
        {
            SecurityGroupIds = new[]
            {
                aws_security_group.First.Id,
            },
            SubnetIds = new[]
            {
                aws_subnet.First.Id,
                aws_subnet.Second.Id,
            },
        },
    });

    var firehose_opensearch = new Aws.Iam.RolePolicy("firehose-opensearch", new()
    {
        Role = aws_iam_role.Firehose.Id,
        Policy = Output.Tuple(testCluster.Arn, testCluster.Arn).Apply(values =>
        {
            var testClusterArn = values.Item1;
            var testClusterArn1 = values.Item2;
            return @$"{{
  ""Version"": ""2012-10-17"",
  ""Statement"": [
    {{
      ""Effect"": ""Allow"",
      ""Action"": [
        ""es:*""
      ],
      ""Resource"": [
        ""{testClusterArn}"",
        ""{testClusterArn1}/*""
      ]
        }},
        {{
          ""Effect"": ""Allow"",
          ""Action"": [
            ""ec2:DescribeVpcs"",
            ""ec2:DescribeVpcAttribute"",
            ""ec2:DescribeSubnets"",
            ""ec2:DescribeSecurityGroups"",
            ""ec2:DescribeNetworkInterfaces"",
            ""ec2:CreateNetworkInterface"",
            ""ec2:CreateNetworkInterfacePermission"",
            ""ec2:DeleteNetworkInterface""
          ],
          ""Resource"": [
            ""*""
          ]
        }}
  ]
}}
";
        }),
    });

    var test = new Aws.Kinesis.FirehoseDeliveryStream("test", new()
    {
        Destination = "opensearch",
        S3Configuration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamS3ConfigurationArgs
        {
            RoleArn = aws_iam_role.Firehose.Arn,
            BucketArn = aws_s3_bucket.Bucket.Arn,
        },
        OpensearchConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamOpensearchConfigurationArgs
        {
            DomainArn = testCluster.Arn,
            RoleArn = aws_iam_role.Firehose.Arn,
            IndexName = "test",
            VpcConfig = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamOpensearchConfigurationVpcConfigArgs
            {
                SubnetIds = new[]
                {
                    aws_subnet.First.Id,
                    aws_subnet.Second.Id,
                },
                SecurityGroupIds = new[]
                {
                    aws_security_group.First.Id,
                },
                RoleArn = aws_iam_role.Firehose.Arn,
            },
        },
    }, new CustomResourceOptions
    {
        DependsOn = new[]
        {
            firehose_opensearch,
        },
    });

});
package main

import (
	"fmt"

	"github.com/pulumi/pulumi-aws/sdk/v5/go/aws/iam"
	"github.com/pulumi/pulumi-aws/sdk/v5/go/aws/kinesis"
	"github.com/pulumi/pulumi-aws/sdk/v5/go/aws/opensearch"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		testCluster, err := opensearch.NewDomain(ctx, "testCluster", &opensearch.DomainArgs{
			ClusterConfig: &opensearch.DomainClusterConfigArgs{
				InstanceCount:        pulumi.Int(2),
				ZoneAwarenessEnabled: pulumi.Bool(true),
				InstanceType:         pulumi.String("m4.large.search"),
			},
			EbsOptions: &opensearch.DomainEbsOptionsArgs{
				EbsEnabled: pulumi.Bool(true),
				VolumeSize: pulumi.Int(10),
			},
			VpcOptions: &opensearch.DomainVpcOptionsArgs{
				SecurityGroupIds: pulumi.StringArray{
					aws_security_group.First.Id,
				},
				SubnetIds: pulumi.StringArray{
					aws_subnet.First.Id,
					aws_subnet.Second.Id,
				},
			},
		})
		if err != nil {
			return err
		}
		firehose_opensearch, err := iam.NewRolePolicy(ctx, "firehose-opensearch", &iam.RolePolicyArgs{
			Role: pulumi.Any(aws_iam_role.Firehose.Id),
			Policy: pulumi.All(testCluster.Arn, testCluster.Arn).ApplyT(func(_args []interface{}) (string, error) {
				testClusterArn := _args[0].(string)
				testClusterArn1 := _args[1].(string)
				return fmt.Sprintf(`{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": [
        "es:*"
      ],
      "Resource": [
        "%v",
        "%v/*"
      ]
        },
        {
          "Effect": "Allow",
          "Action": [
            "ec2:DescribeVpcs",
            "ec2:DescribeVpcAttribute",
            "ec2:DescribeSubnets",
            "ec2:DescribeSecurityGroups",
            "ec2:DescribeNetworkInterfaces",
            "ec2:CreateNetworkInterface",
            "ec2:CreateNetworkInterfacePermission",
            "ec2:DeleteNetworkInterface"
          ],
          "Resource": [
            "*"
          ]
        }
  ]
}
`, testClusterArn, testClusterArn1), nil
			}).(pulumi.StringOutput),
		})
		if err != nil {
			return err
		}
		_, err = kinesis.NewFirehoseDeliveryStream(ctx, "test", &kinesis.FirehoseDeliveryStreamArgs{
			Destination: pulumi.String("opensearch"),
			S3Configuration: &kinesis.FirehoseDeliveryStreamS3ConfigurationArgs{
				RoleArn:   pulumi.Any(aws_iam_role.Firehose.Arn),
				BucketArn: pulumi.Any(aws_s3_bucket.Bucket.Arn),
			},
			OpensearchConfiguration: &kinesis.FirehoseDeliveryStreamOpensearchConfigurationArgs{
				DomainArn: testCluster.Arn,
				RoleArn:   pulumi.Any(aws_iam_role.Firehose.Arn),
				IndexName: pulumi.String("test"),
				VpcConfig: &kinesis.FirehoseDeliveryStreamOpensearchConfigurationVpcConfigArgs{
					SubnetIds: pulumi.StringArray{
						aws_subnet.First.Id,
						aws_subnet.Second.Id,
					},
					SecurityGroupIds: pulumi.StringArray{
						aws_security_group.First.Id,
					},
					RoleArn: pulumi.Any(aws_iam_role.Firehose.Arn),
				},
			},
		}, pulumi.DependsOn([]pulumi.Resource{
			firehose_opensearch,
		}))
		if err != nil {
			return err
		}
		return nil
	})
}
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.opensearch.Domain;
import com.pulumi.aws.opensearch.DomainArgs;
import com.pulumi.aws.opensearch.inputs.DomainClusterConfigArgs;
import com.pulumi.aws.opensearch.inputs.DomainEbsOptionsArgs;
import com.pulumi.aws.opensearch.inputs.DomainVpcOptionsArgs;
import com.pulumi.aws.iam.RolePolicy;
import com.pulumi.aws.iam.RolePolicyArgs;
import com.pulumi.aws.kinesis.FirehoseDeliveryStream;
import com.pulumi.aws.kinesis.FirehoseDeliveryStreamArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamS3ConfigurationArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamOpensearchConfigurationArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamOpensearchConfigurationVpcConfigArgs;
import com.pulumi.resources.CustomResourceOptions;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var testCluster = new Domain("testCluster", DomainArgs.builder()        
            .clusterConfig(DomainClusterConfigArgs.builder()
                .instanceCount(2)
                .zoneAwarenessEnabled(true)
                .instanceType("m4.large.search")
                .build())
            .ebsOptions(DomainEbsOptionsArgs.builder()
                .ebsEnabled(true)
                .volumeSize(10)
                .build())
            .vpcOptions(DomainVpcOptionsArgs.builder()
                .securityGroupIds(aws_security_group.first().id())
                .subnetIds(                
                    aws_subnet.first().id(),
                    aws_subnet.second().id())
                .build())
            .build());

        var firehose_opensearch = new RolePolicy("firehose-opensearch", RolePolicyArgs.builder()        
            .role(aws_iam_role.firehose().id())
            .policy(Output.tuple(testCluster.arn(), testCluster.arn()).applyValue(values -> {
                var testClusterArn = values.t1;
                var testClusterArn1 = values.t2;
                return String.format("""
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": [
        "es:*"
      ],
      "Resource": [
        "%s",
        "%s/*"
      ]
        },
        {
          "Effect": "Allow",
          "Action": [
            "ec2:DescribeVpcs",
            "ec2:DescribeVpcAttribute",
            "ec2:DescribeSubnets",
            "ec2:DescribeSecurityGroups",
            "ec2:DescribeNetworkInterfaces",
            "ec2:CreateNetworkInterface",
            "ec2:CreateNetworkInterfacePermission",
            "ec2:DeleteNetworkInterface"
          ],
          "Resource": [
            "*"
          ]
        }
  ]
}
""", testClusterArn, testClusterArn1);
            }))
            .build());

        var test = new FirehoseDeliveryStream("test", FirehoseDeliveryStreamArgs.builder()        
            .destination("opensearch")
            .s3Configuration(FirehoseDeliveryStreamS3ConfigurationArgs.builder()
                .roleArn(aws_iam_role.firehose().arn())
                .bucketArn(aws_s3_bucket.bucket().arn())
                .build())
            .opensearchConfiguration(FirehoseDeliveryStreamOpensearchConfigurationArgs.builder()
                .domainArn(testCluster.arn())
                .roleArn(aws_iam_role.firehose().arn())
                .indexName("test")
                .vpcConfig(FirehoseDeliveryStreamOpensearchConfigurationVpcConfigArgs.builder()
                    .subnetIds(                    
                        aws_subnet.first().id(),
                        aws_subnet.second().id())
                    .securityGroupIds(aws_security_group.first().id())
                    .roleArn(aws_iam_role.firehose().arn())
                    .build())
                .build())
            .build(), CustomResourceOptions.builder()
                .dependsOn(firehose_opensearch)
                .build());

    }
}
import pulumi
import pulumi_aws as aws

test_cluster = aws.opensearch.Domain("testCluster",
    cluster_config=aws.opensearch.DomainClusterConfigArgs(
        instance_count=2,
        zone_awareness_enabled=True,
        instance_type="m4.large.search",
    ),
    ebs_options=aws.opensearch.DomainEbsOptionsArgs(
        ebs_enabled=True,
        volume_size=10,
    ),
    vpc_options=aws.opensearch.DomainVpcOptionsArgs(
        security_group_ids=[aws_security_group["first"]["id"]],
        subnet_ids=[
            aws_subnet["first"]["id"],
            aws_subnet["second"]["id"],
        ],
    ))
firehose_opensearch = aws.iam.RolePolicy("firehose-opensearch",
    role=aws_iam_role["firehose"]["id"],
    policy=pulumi.Output.all(test_cluster.arn, test_cluster.arn).apply(lambda args: f"""{{
  "Version": "2012-10-17",
  "Statement": [
    {{
      "Effect": "Allow",
      "Action": [
        "es:*"
      ],
      "Resource": [
        "{args[0]}",
        "{args[1]}/*"
      ]
        }},
        {{
          "Effect": "Allow",
          "Action": [
            "ec2:DescribeVpcs",
            "ec2:DescribeVpcAttribute",
            "ec2:DescribeSubnets",
            "ec2:DescribeSecurityGroups",
            "ec2:DescribeNetworkInterfaces",
            "ec2:CreateNetworkInterface",
            "ec2:CreateNetworkInterfacePermission",
            "ec2:DeleteNetworkInterface"
          ],
          "Resource": [
            "*"
          ]
        }}
  ]
}}
"""))
test = aws.kinesis.FirehoseDeliveryStream("test",
    destination="opensearch",
    s3_configuration=aws.kinesis.FirehoseDeliveryStreamS3ConfigurationArgs(
        role_arn=aws_iam_role["firehose"]["arn"],
        bucket_arn=aws_s3_bucket["bucket"]["arn"],
    ),
    opensearch_configuration=aws.kinesis.FirehoseDeliveryStreamOpensearchConfigurationArgs(
        domain_arn=test_cluster.arn,
        role_arn=aws_iam_role["firehose"]["arn"],
        index_name="test",
        vpc_config=aws.kinesis.FirehoseDeliveryStreamOpensearchConfigurationVpcConfigArgs(
            subnet_ids=[
                aws_subnet["first"]["id"],
                aws_subnet["second"]["id"],
            ],
            security_group_ids=[aws_security_group["first"]["id"]],
            role_arn=aws_iam_role["firehose"]["arn"],
        ),
    ),
    opts=pulumi.ResourceOptions(depends_on=[firehose_opensearch]))
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";

const testCluster = new aws.opensearch.Domain("testCluster", {
    clusterConfig: {
        instanceCount: 2,
        zoneAwarenessEnabled: true,
        instanceType: "m4.large.search",
    },
    ebsOptions: {
        ebsEnabled: true,
        volumeSize: 10,
    },
    vpcOptions: {
        securityGroupIds: [aws_security_group.first.id],
        subnetIds: [
            aws_subnet.first.id,
            aws_subnet.second.id,
        ],
    },
});
const firehose_opensearch = new aws.iam.RolePolicy("firehose-opensearch", {
    role: aws_iam_role.firehose.id,
    policy: pulumi.interpolate`{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": [
        "es:*"
      ],
      "Resource": [
        "${testCluster.arn}",
        "${testCluster.arn}/*"
      ]
        },
        {
          "Effect": "Allow",
          "Action": [
            "ec2:DescribeVpcs",
            "ec2:DescribeVpcAttribute",
            "ec2:DescribeSubnets",
            "ec2:DescribeSecurityGroups",
            "ec2:DescribeNetworkInterfaces",
            "ec2:CreateNetworkInterface",
            "ec2:CreateNetworkInterfacePermission",
            "ec2:DeleteNetworkInterface"
          ],
          "Resource": [
            "*"
          ]
        }
  ]
}
`,
});
const test = new aws.kinesis.FirehoseDeliveryStream("test", {
    destination: "opensearch",
    s3Configuration: {
        roleArn: aws_iam_role.firehose.arn,
        bucketArn: aws_s3_bucket.bucket.arn,
    },
    opensearchConfiguration: {
        domainArn: testCluster.arn,
        roleArn: aws_iam_role.firehose.arn,
        indexName: "test",
        vpcConfig: {
            subnetIds: [
                aws_subnet.first.id,
                aws_subnet.second.id,
            ],
            securityGroupIds: [aws_security_group.first.id],
            roleArn: aws_iam_role.firehose.arn,
        },
    },
}, {
    dependsOn: [firehose_opensearch],
});
resources:
  testCluster:
    type: aws:opensearch:Domain
    properties:
      clusterConfig:
        instanceCount: 2
        zoneAwarenessEnabled: true
        instanceType: m4.large.search
      ebsOptions:
        ebsEnabled: true
        volumeSize: 10
      vpcOptions:
        securityGroupIds:
          - ${aws_security_group.first.id}
        subnetIds:
          - ${aws_subnet.first.id}
          - ${aws_subnet.second.id}
  firehose-opensearch:
    type: aws:iam:RolePolicy
    properties:
      role: ${aws_iam_role.firehose.id}
      policy: |
        {
          "Version": "2012-10-17",
          "Statement": [
            {
              "Effect": "Allow",
              "Action": [
                "es:*"
              ],
              "Resource": [
                "${testCluster.arn}",
                "${testCluster.arn}/*"
              ]
                },
                {
                  "Effect": "Allow",
                  "Action": [
                    "ec2:DescribeVpcs",
                    "ec2:DescribeVpcAttribute",
                    "ec2:DescribeSubnets",
                    "ec2:DescribeSecurityGroups",
                    "ec2:DescribeNetworkInterfaces",
                    "ec2:CreateNetworkInterface",
                    "ec2:CreateNetworkInterfacePermission",
                    "ec2:DeleteNetworkInterface"
                  ],
                  "Resource": [
                    "*"
                  ]
                }
          ]
        }        
  test:
    type: aws:kinesis:FirehoseDeliveryStream
    properties:
      destination: opensearch
      s3Configuration:
        roleArn: ${aws_iam_role.firehose.arn}
        bucketArn: ${aws_s3_bucket.bucket.arn}
      opensearchConfiguration:
        domainArn: ${testCluster.arn}
        roleArn: ${aws_iam_role.firehose.arn}
        indexName: test
        vpcConfig:
          subnetIds:
            - ${aws_subnet.first.id}
            - ${aws_subnet.second.id}
          securityGroupIds:
            - ${aws_security_group.first.id}
          roleArn: ${aws_iam_role.firehose.arn}
    options:
      dependsOn:
        - ${["firehose-opensearch"]}
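
The role policy in this example is assembled by string interpolation, which means the embedded JSON must be kept well formed by hand. An alternative is to build the document as a plain object and serialize it once the domain ARN resolves. A minimal TypeScript sketch of the es:* statement, assuming the same testCluster domain as above:

import * as aws from "@pulumi/aws";

const testCluster = new aws.opensearch.Domain("testCluster", {});

// Serialize the policy after the domain ARN is known; JSON.stringify
// guarantees well-formed output.
const policyJson = testCluster.arn.apply(arn => JSON.stringify({
    Version: "2012-10-17",
    Statement: [{
        Effect: "Allow",
        Action: ["es:*"],
        Resource: [arn, `${arn}/*`],
    }],
}));

The EC2 network-interface statement from the example can be appended to the Statement array in the same way.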

Splunk Destination

using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aws = Pulumi.Aws;

return await Deployment.RunAsync(() => 
{
    var testStream = new Aws.Kinesis.FirehoseDeliveryStream("testStream", new()
    {
        Destination = "splunk",
        S3Configuration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamS3ConfigurationArgs
        {
            RoleArn = aws_iam_role.Firehose.Arn,
            BucketArn = aws_s3_bucket.Bucket.Arn,
            BufferSize = 10,
            BufferInterval = 400,
            CompressionFormat = "GZIP",
        },
        SplunkConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamSplunkConfigurationArgs
        {
            HecEndpoint = "https://http-inputs-mydomain.splunkcloud.com:443",
            HecToken = "51D4DA16-C61B-4F5F-8EC7-ED4301342A4A",
            HecAcknowledgmentTimeout = 600,
            HecEndpointType = "Event",
            S3BackupMode = "FailedEventsOnly",
        },
    });

});
package main

import (
	"github.com/pulumi/pulumi-aws/sdk/v5/go/aws/kinesis"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := kinesis.NewFirehoseDeliveryStream(ctx, "testStream", &kinesis.FirehoseDeliveryStreamArgs{
			Destination: pulumi.String("splunk"),
			S3Configuration: &kinesis.FirehoseDeliveryStreamS3ConfigurationArgs{
				RoleArn:           pulumi.Any(aws_iam_role.Firehose.Arn),
				BucketArn:         pulumi.Any(aws_s3_bucket.Bucket.Arn),
				BufferSize:        pulumi.Int(10),
				BufferInterval:    pulumi.Int(400),
				CompressionFormat: pulumi.String("GZIP"),
			},
			SplunkConfiguration: &kinesis.FirehoseDeliveryStreamSplunkConfigurationArgs{
				HecEndpoint:              pulumi.String("https://http-inputs-mydomain.splunkcloud.com:443"),
				HecToken:                 pulumi.String("51D4DA16-C61B-4F5F-8EC7-ED4301342A4A"),
				HecAcknowledgmentTimeout: pulumi.Int(600),
				HecEndpointType:          pulumi.String("Event"),
				S3BackupMode:             pulumi.String("FailedEventsOnly"),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.kinesis.FirehoseDeliveryStream;
import com.pulumi.aws.kinesis.FirehoseDeliveryStreamArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamS3ConfigurationArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamSplunkConfigurationArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var testStream = new FirehoseDeliveryStream("testStream", FirehoseDeliveryStreamArgs.builder()        
            .destination("splunk")
            .s3Configuration(FirehoseDeliveryStreamS3ConfigurationArgs.builder()
                .roleArn(aws_iam_role.firehose().arn())
                .bucketArn(aws_s3_bucket.bucket().arn())
                .bufferSize(10)
                .bufferInterval(400)
                .compressionFormat("GZIP")
                .build())
            .splunkConfiguration(FirehoseDeliveryStreamSplunkConfigurationArgs.builder()
                .hecEndpoint("https://http-inputs-mydomain.splunkcloud.com:443")
                .hecToken("51D4DA16-C61B-4F5F-8EC7-ED4301342A4A")
                .hecAcknowledgmentTimeout(600)
                .hecEndpointType("Event")
                .s3BackupMode("FailedEventsOnly")
                .build())
            .build());

    }
}
import pulumi
import pulumi_aws as aws

test_stream = aws.kinesis.FirehoseDeliveryStream("testStream",
    destination="splunk",
    s3_configuration=aws.kinesis.FirehoseDeliveryStreamS3ConfigurationArgs(
        role_arn=aws_iam_role["firehose"]["arn"],
        bucket_arn=aws_s3_bucket["bucket"]["arn"],
        buffer_size=10,
        buffer_interval=400,
        compression_format="GZIP",
    ),
    splunk_configuration=aws.kinesis.FirehoseDeliveryStreamSplunkConfigurationArgs(
        hec_endpoint="https://http-inputs-mydomain.splunkcloud.com:443",
        hec_token="51D4DA16-C61B-4F5F-8EC7-ED4301342A4A",
        hec_acknowledgment_timeout=600,
        hec_endpoint_type="Event",
        s3_backup_mode="FailedEventsOnly",
    ))
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";

const testStream = new aws.kinesis.FirehoseDeliveryStream("testStream", {
    destination: "splunk",
    s3Configuration: {
        roleArn: aws_iam_role.firehose.arn,
        bucketArn: aws_s3_bucket.bucket.arn,
        bufferSize: 10,
        bufferInterval: 400,
        compressionFormat: "GZIP",
    },
    splunkConfiguration: {
        hecEndpoint: "https://http-inputs-mydomain.splunkcloud.com:443",
        hecToken: "51D4DA16-C61B-4F5F-8EC7-ED4301342A4A",
        hecAcknowledgmentTimeout: 600,
        hecEndpointType: "Event",
        s3BackupMode: "FailedEventsOnly",
    },
});
resources:
  testStream:
    type: aws:kinesis:FirehoseDeliveryStream
    properties:
      destination: splunk
      s3Configuration:
        roleArn: ${aws_iam_role.firehose.arn}
        bucketArn: ${aws_s3_bucket.bucket.arn}
        bufferSize: 10
        bufferInterval: 400
        compressionFormat: GZIP
      splunkConfiguration:
        hecEndpoint: https://http-inputs-mydomain.splunkcloud.com:443
        hecToken: 51D4DA16-C61B-4F5F-8EC7-ED4301342A4A
        hecAcknowledgmentTimeout: 600
        hecEndpointType: Event
        s3BackupMode: FailedEventsOnly
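
The hecToken shown above is a placeholder. Because the token is a credential, it is better read from configuration as a Pulumi secret than hard-coded, so that it is encrypted in state. A minimal TypeScript sketch, assuming a hypothetical config key named splunkHecToken set with pulumi config set --secret splunkHecToken <token>:

import * as pulumi from "@pulumi/pulumi";

const config = new pulumi.Config();
// requireSecret marks the value as a secret so it is encrypted in the state file.
const hecToken = config.requireSecret("splunkHecToken");

// hecToken can then be passed as splunkConfiguration.hecToken in the examples above.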

HTTP Endpoint (e.g., New Relic) Destination

using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aws = Pulumi.Aws;

return await Deployment.RunAsync(() => 
{
    var testStream = new Aws.Kinesis.FirehoseDeliveryStream("testStream", new()
    {
        Destination = "http_endpoint",
        S3Configuration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamS3ConfigurationArgs
        {
            RoleArn = aws_iam_role.Firehose.Arn,
            BucketArn = aws_s3_bucket.Bucket.Arn,
            BufferSize = 10,
            BufferInterval = 400,
            CompressionFormat = "GZIP",
        },
        HttpEndpointConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamHttpEndpointConfigurationArgs
        {
            Url = "https://aws-api.newrelic.com/firehose/v1",
            Name = "New Relic",
            AccessKey = "my-key",
            BufferingSize = 15,
            BufferingInterval = 600,
            RoleArn = aws_iam_role.Firehose.Arn,
            S3BackupMode = "FailedDataOnly",
            RequestConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamHttpEndpointConfigurationRequestConfigurationArgs
            {
                ContentEncoding = "GZIP",
                CommonAttributes = new[]
                {
                    new Aws.Kinesis.Inputs.FirehoseDeliveryStreamHttpEndpointConfigurationRequestConfigurationCommonAttributeArgs
                    {
                        Name = "testname",
                        Value = "testvalue",
                    },
                    new Aws.Kinesis.Inputs.FirehoseDeliveryStreamHttpEndpointConfigurationRequestConfigurationCommonAttributeArgs
                    {
                        Name = "testname2",
                        Value = "testvalue2",
                    },
                },
            },
        },
    });

});

package main

import (
	"github.com/pulumi/pulumi-aws/sdk/v5/go/aws/kinesis"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := kinesis.NewFirehoseDeliveryStream(ctx, "testStream", &kinesis.FirehoseDeliveryStreamArgs{
			Destination: pulumi.String("http_endpoint"),
			S3Configuration: &kinesis.FirehoseDeliveryStreamS3ConfigurationArgs{
				RoleArn:           pulumi.Any(aws_iam_role.Firehose.Arn),
				BucketArn:         pulumi.Any(aws_s3_bucket.Bucket.Arn),
				BufferSize:        pulumi.Int(10),
				BufferInterval:    pulumi.Int(400),
				CompressionFormat: pulumi.String("GZIP"),
			},
			HttpEndpointConfiguration: &kinesis.FirehoseDeliveryStreamHttpEndpointConfigurationArgs{
				Url:               pulumi.String("https://aws-api.newrelic.com/firehose/v1"),
				Name:              pulumi.String("New Relic"),
				AccessKey:         pulumi.String("my-key"),
				BufferingSize:     pulumi.Int(15),
				BufferingInterval: pulumi.Int(600),
				RoleArn:           pulumi.Any(aws_iam_role.Firehose.Arn),
				S3BackupMode:      pulumi.String("FailedDataOnly"),
				RequestConfiguration: &kinesis.FirehoseDeliveryStreamHttpEndpointConfigurationRequestConfigurationArgs{
					ContentEncoding: pulumi.String("GZIP"),
					CommonAttributes: kinesis.FirehoseDeliveryStreamHttpEndpointConfigurationRequestConfigurationCommonAttributeArray{
						&kinesis.FirehoseDeliveryStreamHttpEndpointConfigurationRequestConfigurationCommonAttributeArgs{
							Name:  pulumi.String("testname"),
							Value: pulumi.String("testvalue"),
						},
						&kinesis.FirehoseDeliveryStreamHttpEndpointConfigurationRequestConfigurationCommonAttributeArgs{
							Name:  pulumi.String("testname2"),
							Value: pulumi.String("testvalue2"),
						},
					},
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}

package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.kinesis.FirehoseDeliveryStream;
import com.pulumi.aws.kinesis.FirehoseDeliveryStreamArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamS3ConfigurationArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamHttpEndpointConfigurationArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamHttpEndpointConfigurationRequestConfigurationArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var testStream = new FirehoseDeliveryStream("testStream", FirehoseDeliveryStreamArgs.builder()        
            .destination("http_endpoint")
            .s3Configuration(FirehoseDeliveryStreamS3ConfigurationArgs.builder()
                .roleArn(aws_iam_role.firehose().arn())
                .bucketArn(aws_s3_bucket.bucket().arn())
                .bufferSize(10)
                .bufferInterval(400)
                .compressionFormat("GZIP")
                .build())
            .httpEndpointConfiguration(FirehoseDeliveryStreamHttpEndpointConfigurationArgs.builder()
                .url("https://aws-api.newrelic.com/firehose/v1")
                .name("New Relic")
                .accessKey("my-key")
                .bufferingSize(15)
                .bufferingInterval(600)
                .roleArn(aws_iam_role.firehose().arn())
                .s3BackupMode("FailedDataOnly")
                .requestConfiguration(FirehoseDeliveryStreamHttpEndpointConfigurationRequestConfigurationArgs.builder()
                    .contentEncoding("GZIP")
                    .commonAttributes(                    
                        FirehoseDeliveryStreamHttpEndpointConfigurationRequestConfigurationCommonAttributeArgs.builder()
                            .name("testname")
                            .value("testvalue")
                            .build(),
                        FirehoseDeliveryStreamHttpEndpointConfigurationRequestConfigurationCommonAttributeArgs.builder()
                            .name("testname2")
                            .value("testvalue2")
                            .build())
                    .build())
                .build())
            .build());

    }
}

import pulumi
import pulumi_aws as aws

test_stream = aws.kinesis.FirehoseDeliveryStream("testStream",
    destination="http_endpoint",
    s3_configuration=aws.kinesis.FirehoseDeliveryStreamS3ConfigurationArgs(
        role_arn=aws_iam_role["firehose"]["arn"],
        bucket_arn=aws_s3_bucket["bucket"]["arn"],
        buffer_size=10,
        buffer_interval=400,
        compression_format="GZIP",
    ),
    http_endpoint_configuration=aws.kinesis.FirehoseDeliveryStreamHttpEndpointConfigurationArgs(
        url="https://aws-api.newrelic.com/firehose/v1",
        name="New Relic",
        access_key="my-key",
        buffering_size=15,
        buffering_interval=600,
        role_arn=aws_iam_role["firehose"]["arn"],
        s3_backup_mode="FailedDataOnly",
        request_configuration=aws.kinesis.FirehoseDeliveryStreamHttpEndpointConfigurationRequestConfigurationArgs(
            content_encoding="GZIP",
            common_attributes=[
                aws.kinesis.FirehoseDeliveryStreamHttpEndpointConfigurationRequestConfigurationCommonAttributeArgs(
                    name="testname",
                    value="testvalue",
                ),
                aws.kinesis.FirehoseDeliveryStreamHttpEndpointConfigurationRequestConfigurationCommonAttributeArgs(
                    name="testname2",
                    value="testvalue2",
                ),
            ],
        ),
    ))

import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";

const testStream = new aws.kinesis.FirehoseDeliveryStream("testStream", {
    destination: "http_endpoint",
    s3Configuration: {
        roleArn: aws_iam_role.firehose.arn,
        bucketArn: aws_s3_bucket.bucket.arn,
        bufferSize: 10,
        bufferInterval: 400,
        compressionFormat: "GZIP",
    },
    httpEndpointConfiguration: {
        url: "https://aws-api.newrelic.com/firehose/v1",
        name: "New Relic",
        accessKey: "my-key",
        bufferingSize: 15,
        bufferingInterval: 600,
        roleArn: aws_iam_role.firehose.arn,
        s3BackupMode: "FailedDataOnly",
        requestConfiguration: {
            contentEncoding: "GZIP",
            commonAttributes: [
                {
                    name: "testname",
                    value: "testvalue",
                },
                {
                    name: "testname2",
                    value: "testvalue2",
                },
            ],
        },
    },
});

resources:
  testStream:
    type: aws:kinesis:FirehoseDeliveryStream
    properties:
      destination: http_endpoint
      s3Configuration:
        roleArn: ${aws_iam_role.firehose.arn}
        bucketArn: ${aws_s3_bucket.bucket.arn}
        bufferSize: 10
        bufferInterval: 400
        compressionFormat: GZIP
      httpEndpointConfiguration:
        url: https://aws-api.newrelic.com/firehose/v1
        name: New Relic
        accessKey: my-key
        bufferingSize: 15
        bufferingInterval: 600
        roleArn: ${aws_iam_role.firehose.arn}
        s3BackupMode: FailedDataOnly
        requestConfiguration:
          contentEncoding: GZIP
          commonAttributes:
            - name: testname
              value: testvalue
            - name: testname2
              value: testvalue2

Create FirehoseDeliveryStream Resource

new FirehoseDeliveryStream(name: string, args: FirehoseDeliveryStreamArgs, opts?: CustomResourceOptions);
@overload
def FirehoseDeliveryStream(resource_name: str,
                           opts: Optional[ResourceOptions] = None,
                           arn: Optional[str] = None,
                           destination: Optional[str] = None,
                           destination_id: Optional[str] = None,
                           elasticsearch_configuration: Optional[FirehoseDeliveryStreamElasticsearchConfigurationArgs] = None,
                           extended_s3_configuration: Optional[FirehoseDeliveryStreamExtendedS3ConfigurationArgs] = None,
                           http_endpoint_configuration: Optional[FirehoseDeliveryStreamHttpEndpointConfigurationArgs] = None,
                           kinesis_source_configuration: Optional[FirehoseDeliveryStreamKinesisSourceConfigurationArgs] = None,
                           name: Optional[str] = None,
                           opensearch_configuration: Optional[FirehoseDeliveryStreamOpensearchConfigurationArgs] = None,
                           redshift_configuration: Optional[FirehoseDeliveryStreamRedshiftConfigurationArgs] = None,
                           s3_configuration: Optional[FirehoseDeliveryStreamS3ConfigurationArgs] = None,
                           server_side_encryption: Optional[FirehoseDeliveryStreamServerSideEncryptionArgs] = None,
                           splunk_configuration: Optional[FirehoseDeliveryStreamSplunkConfigurationArgs] = None,
                           tags: Optional[Mapping[str, str]] = None,
                           version_id: Optional[str] = None)
@overload
def FirehoseDeliveryStream(resource_name: str,
                           args: FirehoseDeliveryStreamArgs,
                           opts: Optional[ResourceOptions] = None)
func NewFirehoseDeliveryStream(ctx *Context, name string, args FirehoseDeliveryStreamArgs, opts ...ResourceOption) (*FirehoseDeliveryStream, error)
public FirehoseDeliveryStream(string name, FirehoseDeliveryStreamArgs args, CustomResourceOptions? opts = null)
public FirehoseDeliveryStream(String name, FirehoseDeliveryStreamArgs args)
public FirehoseDeliveryStream(String name, FirehoseDeliveryStreamArgs args, CustomResourceOptions options)
type: aws:kinesis:FirehoseDeliveryStream
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.

name string
The unique name of the resource.
args FirehoseDeliveryStreamArgs
The arguments to resource properties.
opts CustomResourceOptions
Bag of options to control resource's behavior.
resource_name str
The unique name of the resource.
args FirehoseDeliveryStreamArgs
The arguments to resource properties.
opts ResourceOptions
Bag of options to control resource's behavior.
ctx Context
Context object for the current deployment.
name string
The unique name of the resource.
args FirehoseDeliveryStreamArgs
The arguments to resource properties.
opts ResourceOption
Bag of options to control resource's behavior.
name string
The unique name of the resource.
args FirehoseDeliveryStreamArgs
The arguments to resource properties.
opts CustomResourceOptions
Bag of options to control resource's behavior.
name String
The unique name of the resource.
args FirehoseDeliveryStreamArgs
The arguments to resource properties.
options CustomResourceOptions
Bag of options to control resource's behavior.
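
As a concrete illustration of the constructor shapes above, here is a minimal TypeScript sketch. Both ARNs are hypothetical placeholders; only the required destination argument and its matching configuration block are set:

import * as aws from "@pulumi/aws";

// Minimal sketch: an extended_s3 delivery stream with only required arguments.
// Both ARNs below are hypothetical placeholders.
const minimalStream = new aws.kinesis.FirehoseDeliveryStream("minimalStream", {
    destination: "extended_s3",
    extendedS3Configuration: {
        roleArn: "arn:aws:iam::123456789012:role/firehose-role", // hypothetical
        bucketArn: "arn:aws:s3:::my-firehose-bucket",            // hypothetical
    },
});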

FirehoseDeliveryStream Resource Properties

To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

Inputs

The FirehoseDeliveryStream resource accepts the following input properties:

Destination string

The destination to which the data is delivered. The only options are s3 (deprecated; use extended_s3 instead), extended_s3, redshift, elasticsearch, splunk, http_endpoint, and opensearch.

Arn string

The Amazon Resource Name (ARN) specifying the stream.

DestinationId string
ElasticsearchConfiguration FirehoseDeliveryStreamElasticsearchConfigurationArgs

Configuration options if elasticsearch is the destination. More details are given below.

ExtendedS3Configuration FirehoseDeliveryStreamExtendedS3ConfigurationArgs

Enhanced configuration options for the s3 destination. More details are given below.

HttpEndpointConfiguration FirehoseDeliveryStreamHttpEndpointConfigurationArgs

Configuration options if http_endpoint is the destination. Using http_endpoint_configuration requires the user to also specify an s3_configuration block. More details are given below.

KinesisSourceConfiguration FirehoseDeliveryStreamKinesisSourceConfigurationArgs

Specifies the Kinesis stream to use as the source of the Firehose delivery stream.

Name string

A name to identify the stream. This is unique to the AWS account and region the stream is created in. When used for WAF logging, the name must be prefixed with aws-waf-logs-. See the AWS documentation for more details.

OpensearchConfiguration FirehoseDeliveryStreamOpensearchConfigurationArgs

Configuration options if opensearch is the destination. More details are given below.

RedshiftConfiguration FirehoseDeliveryStreamRedshiftConfigurationArgs

Configuration options if redshift is the destination. Using redshift_configuration requires the user to also specify an s3_configuration block. More details are given below.

S3Configuration FirehoseDeliveryStreamS3ConfigurationArgs

Required for non-S3 destinations. For the S3 destination, use extended_s3_configuration instead. Configuration options for the s3 destination (or the intermediate bucket if the destination is redshift). More details are given below.

ServerSideEncryption FirehoseDeliveryStreamServerSideEncryptionArgs

Encryption-at-rest options. Server-side encryption should not be enabled when a Kinesis stream is configured as the source of the Firehose delivery stream.

SplunkConfiguration FirehoseDeliveryStreamSplunkConfigurationArgs

Configuration options if splunk is the destination. More details are given below.

Tags Dictionary<string, string>

A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider level.

VersionId string

Specifies the table version for the output data schema. Defaults to LATEST.

Destination string

The destination to which the data is delivered. The only options are s3 (deprecated; use extended_s3 instead), extended_s3, redshift, elasticsearch, splunk, http_endpoint, and opensearch.

Arn string

The Amazon Resource Name (ARN) specifying the stream.

DestinationId string
ElasticsearchConfiguration FirehoseDeliveryStreamElasticsearchConfigurationArgs

Configuration options if elasticsearch is the destination. More details are given below.

ExtendedS3Configuration FirehoseDeliveryStreamExtendedS3ConfigurationArgs

Enhanced configuration options for the s3 destination. More details are given below.

HttpEndpointConfiguration FirehoseDeliveryStreamHttpEndpointConfigurationArgs

Configuration options if http_endpoint is the destination. Using http_endpoint_configuration requires the user to also specify an s3_configuration block. More details are given below.

KinesisSourceConfiguration FirehoseDeliveryStreamKinesisSourceConfigurationArgs

Specifies the Kinesis stream to use as the source of the Firehose delivery stream.

Name string

A name to identify the stream. This is unique to the AWS account and region the stream is created in. When used for WAF logging, the name must be prefixed with aws-waf-logs-. See the AWS documentation for more details.

OpensearchConfiguration FirehoseDeliveryStreamOpensearchConfigurationArgs

Configuration options if opensearch is the destination. More details are given below.

RedshiftConfiguration FirehoseDeliveryStreamRedshiftConfigurationArgs

Configuration options if redshift is the destination. Using redshift_configuration requires the user to also specify an s3_configuration block. More details are given below.

S3Configuration FirehoseDeliveryStreamS3ConfigurationArgs

Required for non-S3 destinations. For the S3 destination, use extended_s3_configuration instead. Configuration options for the s3 destination (or the intermediate bucket if the destination is redshift). More details are given below.

ServerSideEncryption FirehoseDeliveryStreamServerSideEncryptionArgs

Encryption-at-rest options. Server-side encryption should not be enabled when a Kinesis stream is configured as the source of the Firehose delivery stream.

SplunkConfiguration FirehoseDeliveryStreamSplunkConfigurationArgs

Configuration options if splunk is the destination. More details are given below.

Tags map[string]string

A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider level.

VersionId string

Specifies the table version for the output data schema. Defaults to LATEST.

destination String

The destination to which the data is delivered. The only options are s3 (deprecated; use extended_s3 instead), extended_s3, redshift, elasticsearch, splunk, http_endpoint, and opensearch.

arn String

The Amazon Resource Name (ARN) specifying the stream.

destinationId String
elasticsearchConfiguration FirehoseDeliveryStreamElasticsearchConfigurationArgs

Configuration options if elasticsearch is the destination. More details are given below.

extendedS3Configuration FirehoseDeliveryStreamExtendedS3ConfigurationArgs

Enhanced configuration options for the s3 destination. More details are given below.

httpEndpointConfiguration FirehoseDeliveryStreamHttpEndpointConfigurationArgs

Configuration options if http_endpoint is the destination. Using http_endpoint_configuration requires the user to also specify an s3_configuration block. More details are given below.

kinesisSourceConfiguration FirehoseDeliveryStreamKinesisSourceConfigurationArgs

Specifies the Kinesis stream to use as the source of the Firehose delivery stream.

name String

A name to identify the stream. This is unique to the AWS account and region the stream is created in. When used for WAF logging, the name must be prefixed with aws-waf-logs-. See the AWS documentation for more details.

opensearchConfiguration FirehoseDeliveryStreamOpensearchConfigurationArgs

Configuration options if opensearch is the destination. More details are given below.

redshiftConfiguration FirehoseDeliveryStreamRedshiftConfigurationArgs

Configuration options if redshift is the destination. Using redshift_configuration requires the user to also specify an s3_configuration block. More details are given below.

s3Configuration FirehoseDeliveryStreamS3ConfigurationArgs

Required for non-S3 destinations. For the S3 destination, use extended_s3_configuration instead. Configuration options for the s3 destination (or the intermediate bucket if the destination is redshift). More details are given below.

serverSideEncryption FirehoseDeliveryStreamServerSideEncryptionArgs

Encryption-at-rest options. Server-side encryption should not be enabled when a Kinesis stream is configured as the source of the Firehose delivery stream.

splunkConfiguration FirehoseDeliveryStreamSplunkConfigurationArgs

Configuration options if splunk is the destination. More details are given below.

tags Map<String,String>

A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider level.

versionId String

Specifies the table version for the output data schema. Defaults to LATEST.

destination string

The destination to which the data is delivered. The only options are s3 (deprecated; use extended_s3 instead), extended_s3, redshift, elasticsearch, splunk, http_endpoint, and opensearch.

arn string

The Amazon Resource Name (ARN) specifying the stream.

destinationId string
elasticsearchConfiguration FirehoseDeliveryStreamElasticsearchConfigurationArgs

Configuration options if elasticsearch is the destination. More details are given below.

extendedS3Configuration FirehoseDeliveryStreamExtendedS3ConfigurationArgs

Enhanced configuration options for the s3 destination. More details are given below.

httpEndpointConfiguration FirehoseDeliveryStreamHttpEndpointConfigurationArgs

Configuration options if http_endpoint is the destination. Using http_endpoint_configuration requires the user to also specify an s3_configuration block. More details are given below.

kinesisSourceConfiguration FirehoseDeliveryStreamKinesisSourceConfigurationArgs

Specifies the Kinesis stream to use as the source of the Firehose delivery stream.

name string

A name to identify the stream. This is unique to the AWS account and region the stream is created in. When used for WAF logging, the name must be prefixed with aws-waf-logs-. See the AWS documentation for more details.

opensearchConfiguration FirehoseDeliveryStreamOpensearchConfigurationArgs

Configuration options if opensearch is the destination. More details are given below.

redshiftConfiguration FirehoseDeliveryStreamRedshiftConfigurationArgs

Configuration options if redshift is the destination. Using redshift_configuration requires the user to also specify an s3_configuration block. More details are given below.

s3Configuration FirehoseDeliveryStreamS3ConfigurationArgs

Required for non-S3 destinations. For the S3 destination, use extended_s3_configuration instead. Configuration options for the s3 destination (or the intermediate bucket if the destination is redshift). More details are given below.

serverSideEncryption FirehoseDeliveryStreamServerSideEncryptionArgs

Encryption-at-rest options. Server-side encryption should not be enabled when a Kinesis stream is configured as the source of the Firehose delivery stream.

splunkConfiguration FirehoseDeliveryStreamSplunkConfigurationArgs

Configuration options if splunk is the destination. More details are given below.

tags {[key: string]: string}

A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider level.

versionId string

Specifies the table version for the output data schema. Defaults to LATEST.

destination str

The destination to which the data is delivered. The only options are s3 (deprecated; use extended_s3 instead), extended_s3, redshift, elasticsearch, splunk, http_endpoint, and opensearch.

arn str

The Amazon Resource Name (ARN) specifying the stream.

destination_id str
elasticsearch_configuration FirehoseDeliveryStreamElasticsearchConfigurationArgs

Configuration options if elasticsearch is the destination. More details are given below.

extended_s3_configuration FirehoseDeliveryStreamExtendedS3ConfigurationArgs

Enhanced configuration options for the s3 destination. More details are given below.

http_endpoint_configuration FirehoseDeliveryStreamHttpEndpointConfigurationArgs

Configuration options if http_endpoint is the destination. Using http_endpoint_configuration requires the user to also specify an s3_configuration block. More details are given below.

kinesis_source_configuration FirehoseDeliveryStreamKinesisSourceConfigurationArgs

Specifies the Kinesis stream to use as the source of the Firehose delivery stream.

name str

A name to identify the stream. This is unique to the AWS account and region the stream is created in. When used for WAF logging, the name must be prefixed with aws-waf-logs-. See the AWS documentation for more details.

opensearch_configuration FirehoseDeliveryStreamOpensearchConfigurationArgs

Configuration options if opensearch is the destination. More details are given below.

redshift_configuration FirehoseDeliveryStreamRedshiftConfigurationArgs

Configuration options if redshift is the destination. Using redshift_configuration requires the user to also specify an s3_configuration block. More details are given below.

s3_configuration FirehoseDeliveryStreamS3ConfigurationArgs

Required for non-S3 destinations. For the S3 destination, use extended_s3_configuration instead. Configuration options for the s3 destination (or the intermediate bucket if the destination is redshift). More details are given below.

server_side_encryption FirehoseDeliveryStreamServerSideEncryptionArgs

Encryption-at-rest options. Server-side encryption should not be enabled when a Kinesis stream is configured as the source of the Firehose delivery stream.

splunk_configuration FirehoseDeliveryStreamSplunkConfigurationArgs

Configuration options if splunk is the destination. More details are given below.

tags Mapping[str, str]

A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider level.

version_id str

Specifies the table version for the output data schema. Defaults to LATEST.

destination String

The destination to which the data is delivered. The only options are s3 (deprecated; use extended_s3 instead), extended_s3, redshift, elasticsearch, splunk, http_endpoint, and opensearch.

arn String

The Amazon Resource Name (ARN) specifying the stream.

destinationId String
elasticsearchConfiguration Property Map

Configuration options if elasticsearch is the destination. More details are given below.

extendedS3Configuration Property Map

Enhanced configuration options for the s3 destination. More details are given below.

httpEndpointConfiguration Property Map

Configuration options if http_endpoint is the destination. Using http_endpoint_configuration requires the user to also specify an s3_configuration block. More details are given below.

kinesisSourceConfiguration Property Map

Specifies the Kinesis stream to use as the source of the Firehose delivery stream.

name String

A name to identify the stream. This is unique to the AWS account and region the stream is created in. When used for WAF logging, the name must be prefixed with aws-waf-logs-. See the AWS documentation for more details.

opensearchConfiguration Property Map

Configuration options if opensearch is the destination. More details are given below.

redshiftConfiguration Property Map

Configuration options if redshift is the destination. Using redshift_configuration requires the user to also specify an s3_configuration block. More details are given below.

s3Configuration Property Map

Required for non-S3 destinations. For the S3 destination, use extended_s3_configuration instead. Configuration options for the s3 destination (or the intermediate bucket if the destination is redshift). More details are given below.

serverSideEncryption Property Map

Encryption-at-rest options. Server-side encryption should not be enabled when a Kinesis stream is configured as the source of the Firehose delivery stream.

splunkConfiguration Property Map

Configuration options if splunk is the destination. More details are given below.

tags Map<String>

A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider level.

versionId String

Specifies the table version for the output data schema. Defaults to LATEST.
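
The kinesis_source_configuration and server_side_encryption inputs above interact: server-side encryption should not be enabled when a Kinesis stream is the source. A hedged TypeScript sketch of a stream fed by a Kinesis source (the role and bucket ARNs are hypothetical placeholders):

import * as aws from "@pulumi/aws";

// A Kinesis stream to act as the source.
const source = new aws.kinesis.Stream("source", { shardCount: 1 });

// The delivery stream reads from the Kinesis stream above. Note that
// serverSideEncryption is deliberately omitted: it should not be enabled
// when kinesisSourceConfiguration is set.
const fromKinesis = new aws.kinesis.FirehoseDeliveryStream("fromKinesis", {
    destination: "extended_s3",
    kinesisSourceConfiguration: {
        kinesisStreamArn: source.arn,
        roleArn: "arn:aws:iam::123456789012:role/firehose-role", // hypothetical
    },
    extendedS3Configuration: {
        roleArn: "arn:aws:iam::123456789012:role/firehose-role", // hypothetical
        bucketArn: "arn:aws:s3:::my-firehose-bucket",            // hypothetical
    },
});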

Outputs

All input properties are implicitly available as output properties. Additionally, the FirehoseDeliveryStream resource produces the following output properties:

Id string

The provider-assigned unique ID for this managed resource.

TagsAll Dictionary<string, string>

A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

Id string

The provider-assigned unique ID for this managed resource.

TagsAll map[string]string

A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

id String

The provider-assigned unique ID for this managed resource.

tagsAll Map<String,String>

A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

id string

The provider-assigned unique ID for this managed resource.

tagsAll {[key: string]: string}

A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

id str

The provider-assigned unique ID for this managed resource.

tags_all Mapping[str, str]

A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

id String

The provider-assigned unique ID for this managed resource.

tagsAll Map<String>

A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

Look up Existing FirehoseDeliveryStream Resource

Get an existing FirehoseDeliveryStream resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

public static get(name: string, id: Input<ID>, state?: FirehoseDeliveryStreamState, opts?: CustomResourceOptions): FirehoseDeliveryStream
@staticmethod
def get(resource_name: str,
        id: str,
        opts: Optional[ResourceOptions] = None,
        arn: Optional[str] = None,
        destination: Optional[str] = None,
        destination_id: Optional[str] = None,
        elasticsearch_configuration: Optional[FirehoseDeliveryStreamElasticsearchConfigurationArgs] = None,
        extended_s3_configuration: Optional[FirehoseDeliveryStreamExtendedS3ConfigurationArgs] = None,
        http_endpoint_configuration: Optional[FirehoseDeliveryStreamHttpEndpointConfigurationArgs] = None,
        kinesis_source_configuration: Optional[FirehoseDeliveryStreamKinesisSourceConfigurationArgs] = None,
        name: Optional[str] = None,
        opensearch_configuration: Optional[FirehoseDeliveryStreamOpensearchConfigurationArgs] = None,
        redshift_configuration: Optional[FirehoseDeliveryStreamRedshiftConfigurationArgs] = None,
        s3_configuration: Optional[FirehoseDeliveryStreamS3ConfigurationArgs] = None,
        server_side_encryption: Optional[FirehoseDeliveryStreamServerSideEncryptionArgs] = None,
        splunk_configuration: Optional[FirehoseDeliveryStreamSplunkConfigurationArgs] = None,
        tags: Optional[Mapping[str, str]] = None,
        tags_all: Optional[Mapping[str, str]] = None,
        version_id: Optional[str] = None) -> FirehoseDeliveryStream
func GetFirehoseDeliveryStream(ctx *Context, name string, id IDInput, state *FirehoseDeliveryStreamState, opts ...ResourceOption) (*FirehoseDeliveryStream, error)
public static FirehoseDeliveryStream Get(string name, Input<string> id, FirehoseDeliveryStreamState? state, CustomResourceOptions? opts = null)
public static FirehoseDeliveryStream get(String name, Output<String> id, FirehoseDeliveryStreamState state, CustomResourceOptions options)
Resource lookup is not supported in YAML
name
The unique name of the resulting resource.
id
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
resource_name
The unique name of the resulting resource.
id
The unique provider ID of the resource to lookup.
name
The unique name of the resulting resource.
id
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
name
The unique name of the resulting resource.
id
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
name
The unique name of the resulting resource.
id
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
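
For example, a hedged TypeScript sketch of the lookup above, assuming (as the provider's import documentation suggests) that the delivery stream's ARN serves as the provider ID; the ARN is a hypothetical placeholder:

import * as aws from "@pulumi/aws";

// Adopt an existing delivery stream into this program's view of state.
// The ARN below is a hypothetical placeholder.
const existing = aws.kinesis.FirehoseDeliveryStream.get(
    "existing",
    "arn:aws:firehose:us-east-1:123456789012:deliverystream/example-stream",
);

// The looked-up resource exposes the same outputs as a created one.
export const existingDestination = existing.destination;
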
The following state arguments are supported:
Arn string

The Amazon Resource Name (ARN) specifying the stream.

Destination string

The destination to which the data is delivered. The only options are s3 (deprecated; use extended_s3 instead), extended_s3, redshift, elasticsearch, splunk, http_endpoint, and opensearch.

DestinationId string
ElasticsearchConfiguration FirehoseDeliveryStreamElasticsearchConfigurationArgs

Configuration options if elasticsearch is the destination. More details are given below.

ExtendedS3Configuration FirehoseDeliveryStreamExtendedS3ConfigurationArgs

Enhanced configuration options for the s3 destination. More details are given below.

HttpEndpointConfiguration FirehoseDeliveryStreamHttpEndpointConfigurationArgs

Configuration options if http_endpoint is the destination. Using http_endpoint_configuration requires the user to also specify an s3_configuration block. More details are given below.

KinesisSourceConfiguration FirehoseDeliveryStreamKinesisSourceConfigurationArgs

Specifies the Kinesis stream to use as the source of the Firehose delivery stream.

Name string

A name to identify the stream. This is unique to the AWS account and region the stream is created in. When used for WAF logging, the name must be prefixed with aws-waf-logs-. See the AWS documentation for more details.

OpensearchConfiguration FirehoseDeliveryStreamOpensearchConfigurationArgs

Configuration options if opensearch is the destination. More details are given below.

RedshiftConfiguration FirehoseDeliveryStreamRedshiftConfigurationArgs

Configuration options if redshift is the destination. Using redshift_configuration requires the user to also specify an s3_configuration block. More details are given below.

S3Configuration FirehoseDeliveryStreamS3ConfigurationArgs

Required for non-S3 destinations. For the S3 destination, use extended_s3_configuration instead. Configuration options for the s3 destination (or the intermediate bucket if the destination is redshift). More details are given below.

ServerSideEncryption FirehoseDeliveryStreamServerSideEncryptionArgs

Encryption-at-rest options. Server-side encryption should not be enabled when a Kinesis stream is configured as the source of the Firehose delivery stream.

SplunkConfiguration FirehoseDeliveryStreamSplunkConfigurationArgs

Configuration options if splunk is the destination. More details are given below.

Tags Dictionary<string, string>

A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider level.

TagsAll Dictionary<string, string>

A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

VersionId string

Specifies the table version for the output data schema. Defaults to LATEST.

Arn string

The Amazon Resource Name (ARN) specifying the stream.

Destination string

The destination to which the data is delivered. The only options are s3 (deprecated; use extended_s3 instead), extended_s3, redshift, elasticsearch, splunk, http_endpoint, and opensearch.

DestinationId string
ElasticsearchConfiguration FirehoseDeliveryStreamElasticsearchConfigurationArgs

Configuration options if elasticsearch is the destination. More details are given below.

ExtendedS3Configuration FirehoseDeliveryStreamExtendedS3ConfigurationArgs

Enhanced configuration options for the s3 destination. More details are given below.

HttpEndpointConfiguration FirehoseDeliveryStreamHttpEndpointConfigurationArgs

Configuration options if http_endpoint is the destination. Using http_endpoint_configuration requires the user to also specify an s3_configuration block. More details are given below.

KinesisSourceConfiguration FirehoseDeliveryStreamKinesisSourceConfigurationArgs

Specifies the Kinesis stream to use as the source of the Firehose delivery stream.

Name string

A name to identify the stream. This is unique to the AWS account and region the stream is created in. When used for WAF logging, the name must be prefixed with aws-waf-logs-. See the AWS documentation for more details.

OpensearchConfiguration FirehoseDeliveryStreamOpensearchConfigurationArgs

Configuration options if opensearch is the destination. More details are given below.

RedshiftConfiguration FirehoseDeliveryStreamRedshiftConfigurationArgs

Configuration options if redshift is the destination. Using redshift_configuration requires the user to also specify an s3_configuration block. More details are given below.

S3Configuration FirehoseDeliveryStreamS3ConfigurationArgs

Required for non-S3 destinations. For the S3 destination, use extended_s3_configuration instead. Configuration options for the s3 destination (or the intermediate bucket if the destination is redshift). More details are given below.

ServerSideEncryption FirehoseDeliveryStreamServerSideEncryptionArgs

Encryption-at-rest options. Server-side encryption should not be enabled when a Kinesis stream is configured as the source of the Firehose delivery stream.

SplunkConfiguration FirehoseDeliveryStreamSplunkConfigurationArgs

Configuration options if splunk is the destination. More details are given below.

Tags map[string]string

A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider level.

TagsAll map[string]string

A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

VersionId string

Specifies the table version for the output data schema. Defaults to LATEST.

arn String

The Amazon Resource Name (ARN) specifying the stream.

destination String

The destination to which the data is delivered. The only options are s3 (deprecated; use extended_s3 instead), extended_s3, redshift, elasticsearch, splunk, http_endpoint, and opensearch.

destinationId String
elasticsearchConfiguration FirehoseDeliveryStreamElasticsearchConfigurationArgs

Configuration options if elasticsearch is the destination. More details are given below.

extendedS3Configuration FirehoseDeliveryStreamExtendedS3ConfigurationArgs

Enhanced configuration options for the s3 destination. More details are given below.

httpEndpointConfiguration FirehoseDeliveryStreamHttpEndpointConfigurationArgs

Configuration options if http_endpoint is the destination. Using http_endpoint_configuration requires the user to also specify an s3_configuration block. More details are given below.

kinesisSourceConfiguration FirehoseDeliveryStreamKinesisSourceConfigurationArgs

Specifies the Kinesis stream to use as the source of the Firehose delivery stream.

name String

A name to identify the stream. This is unique to the AWS account and region the stream is created in. When used for WAF logging, the name must be prefixed with aws-waf-logs-. See the AWS documentation for more details.

opensearchConfiguration FirehoseDeliveryStreamOpensearchConfigurationArgs

Configuration options if opensearch is the destination. More details are given below.

redshiftConfiguration FirehoseDeliveryStreamRedshiftConfigurationArgs

Configuration options if redshift is the destination. Using redshift_configuration requires the user to also specify an s3_configuration block. More details are given below.

s3Configuration FirehoseDeliveryStreamS3ConfigurationArgs

Required for non-S3 destinations. For the S3 destination, use extended_s3_configuration instead. Configuration options for the s3 destination (or the intermediate bucket if the destination is redshift). More details are given below.

serverSideEncryption FirehoseDeliveryStreamServerSideEncryptionArgs

Encryption-at-rest options. Server-side encryption should not be enabled when a Kinesis stream is configured as the source of the Firehose delivery stream.

splunkConfiguration FirehoseDeliveryStreamSplunkConfigurationArgs

Configuration options if splunk is the destination. More details are given below.

tags Map<String,String>

A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider level.

tagsAll Map<String,String>

A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

versionId String

Specifies the table version for the output data schema. Defaults to LATEST.

arn string

The Amazon Resource Name (ARN) specifying the stream.

destination string

The destination to which the data is delivered. The only options are s3 (deprecated; use extended_s3 instead), extended_s3, redshift, elasticsearch, splunk, http_endpoint, and opensearch.

destinationId string
elasticsearchConfiguration FirehoseDeliveryStreamElasticsearchConfigurationArgs

Configuration options if elasticsearch is the destination. More details are given below.

extendedS3Configuration FirehoseDeliveryStreamExtendedS3ConfigurationArgs

Enhanced configuration options for the s3 destination. More details are given below.

httpEndpointConfiguration FirehoseDeliveryStreamHttpEndpointConfigurationArgs

Configuration options if http_endpoint is the destination. Using http_endpoint_configuration requires the user to also specify an s3_configuration block. More details are given below.

kinesisSourceConfiguration FirehoseDeliveryStreamKinesisSourceConfigurationArgs

Specifies the Kinesis stream to use as the source of the Firehose delivery stream.

name string

A name to identify the stream. This is unique to the AWS account and region the stream is created in. When used for WAF logging, the name must be prefixed with aws-waf-logs-. See the AWS documentation for more details.

opensearchConfiguration FirehoseDeliveryStreamOpensearchConfigurationArgs

Configuration options if opensearch is the destination. More details are given below.

redshiftConfiguration FirehoseDeliveryStreamRedshiftConfigurationArgs

Configuration options if redshift is the destination. Using redshift_configuration requires the user to also specify an s3_configuration block. More details are given below.

s3Configuration FirehoseDeliveryStreamS3ConfigurationArgs

Required for non-S3 destinations. For the S3 destination, use extended_s3_configuration instead. Configuration options for the s3 destination (or the intermediate bucket if the destination is redshift). More details are given below.

serverSideEncryption FirehoseDeliveryStreamServerSideEncryptionArgs

Encryption-at-rest options. Server-side encryption should not be enabled when a Kinesis stream is configured as the source of the Firehose delivery stream.

splunkConfiguration FirehoseDeliveryStreamSplunkConfigurationArgs

Configuration options if splunk is the destination. More details are given below.

tags {[key: string]: string}

A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider level.

tagsAll {[key: string]: string}

A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

versionId string

Specifies the table version for the output data schema. Defaults to LATEST.

arn str

The Amazon Resource Name (ARN) specifying the stream.

destination str

The destination to which the data is delivered. The only options are s3 (deprecated; use extended_s3 instead), extended_s3, redshift, elasticsearch, splunk, http_endpoint, and opensearch.

destination_id str
elasticsearch_configuration FirehoseDeliveryStreamElasticsearchConfigurationArgs

Configuration options if elasticsearch is the destination. More details are given below.

extended_s3_configuration FirehoseDeliveryStreamExtendedS3ConfigurationArgs

Enhanced configuration options for the s3 destination. More details are given below.

http_endpoint_configuration FirehoseDeliveryStreamHttpEndpointConfigurationArgs

Configuration options if http_endpoint is the destination. Using http_endpoint_configuration requires the user to also specify an s3_configuration block. More details are given below.

kinesis_source_configuration FirehoseDeliveryStreamKinesisSourceConfigurationArgs

Specifies the Kinesis stream to use as the source of the Firehose delivery stream.

name str

A name to identify the stream. This is unique to the AWS account and region the stream is created in. When used for WAF logging, the name must be prefixed with aws-waf-logs-. See the AWS documentation for more details.

opensearch_configuration FirehoseDeliveryStreamOpensearchConfigurationArgs

Configuration options if opensearch is the destination. More details are given below.

redshift_configuration FirehoseDeliveryStreamRedshiftConfigurationArgs

Configuration options if redshift is the destination. Using redshift_configuration requires the user to also specify an s3_configuration block. More details are given below.

s3_configuration FirehoseDeliveryStreamS3ConfigurationArgs

Required for non-S3 destinations. For the S3 destination, use extended_s3_configuration instead. Configuration options for the s3 destination (or the intermediate bucket if the destination is redshift). More details are given below.

server_side_encryption FirehoseDeliveryStreamServerSideEncryptionArgs

Encryption-at-rest options. Server-side encryption should not be enabled when a Kinesis stream is configured as the source of the Firehose delivery stream.

splunk_configuration FirehoseDeliveryStreamSplunkConfigurationArgs

Configuration options if splunk is the destination. More details are given below.

tags Mapping[str, str]

A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider level.

tags_all Mapping[str, str]

A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

version_id str

Specifies the table version for the output data schema. Defaults to LATEST.

arn String

The Amazon Resource Name (ARN) specifying the stream.

destination String

The destination to which the data is delivered. The only options are s3 (deprecated; use extended_s3 instead), extended_s3, redshift, elasticsearch, splunk, http_endpoint, and opensearch.

destinationId String
elasticsearchConfiguration Property Map

Configuration options if elasticsearch is the destination. More details are given below.

extendedS3Configuration Property Map

Enhanced configuration options for the s3 destination. More details are given below.

httpEndpointConfiguration Property Map

Configuration options if http_endpoint is the destination. Using http_endpoint_configuration requires the user to also specify an s3_configuration block. More details are given below.

kinesisSourceConfiguration Property Map

Specifies the Kinesis stream to use as the source of the Firehose delivery stream.

name String

A name to identify the stream. This is unique to the AWS account and region the stream is created in. When used for WAF logging, the name must be prefixed with aws-waf-logs-. See the AWS documentation for more details.

opensearchConfiguration Property Map

Configuration options if opensearch is the destination. More details are given below.

redshiftConfiguration Property Map

Configuration options if redshift is the destination. Using redshift_configuration requires the user to also specify an s3_configuration block. More details are given below.

s3Configuration Property Map

Required for non-S3 destinations. For the S3 destination, use extended_s3_configuration instead. Configuration options for the s3 destination (or the intermediate bucket if the destination is redshift). More details are given below.

serverSideEncryption Property Map

Encryption-at-rest options. Server-side encryption should not be enabled when a Kinesis stream is configured as the source of the Firehose delivery stream.

splunkConfiguration Property Map

Configuration options if splunk is the destination. More details are given below.

tags Map<String>

A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider level.

tagsAll Map<String>

A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

versionId String

Specifies the table version for the output data schema. Defaults to LATEST.

Supporting Types

FirehoseDeliveryStreamElasticsearchConfiguration
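
Before the per-language property listings, a hedged TypeScript sketch showing where this configuration block sits on the resource; the domain, role, and bucket ARNs are hypothetical placeholders:

import * as aws from "@pulumi/aws";

// Sketch of an elasticsearch destination. All ARNs are hypothetical.
const esStream = new aws.kinesis.FirehoseDeliveryStream("esStream", {
    destination: "elasticsearch",
    s3Configuration: {
        roleArn: "arn:aws:iam::123456789012:role/firehose-role", // hypothetical
        bucketArn: "arn:aws:s3:::my-backup-bucket",              // hypothetical
    },
    elasticsearchConfiguration: {
        domainArn: "arn:aws:es:us-east-1:123456789012:domain/example", // hypothetical
        roleArn: "arn:aws:iam::123456789012:role/firehose-role",       // hypothetical
        indexName: "example-index",
        indexRotationPeriod: "OneDay",       // documented default
        s3BackupMode: "FailedDocumentsOnly", // documented default
    },
});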

IndexName string

The Elasticsearch index name.

RoleArn string

The ARN of the IAM role to be assumed by Firehose for calling the Amazon ES Configuration API and for indexing documents. The IAM role must have permission for DescribeElasticsearchDomain, DescribeElasticsearchDomains, and DescribeElasticsearchDomainConfig. The pattern needs to be arn:.*.

BufferingInterval int

Buffer incoming data for the specified period of time, in seconds (between 60 and 900), before delivering it to the destination. The default value is 300s.

BufferingSize int

Buffer incoming data to the specified size, in MBs (between 1 and 100), before delivering it to the destination. The default value is 5MB.

CloudwatchLoggingOptions FirehoseDeliveryStreamElasticsearchConfigurationCloudwatchLoggingOptions

The CloudWatch Logging Options for the delivery stream. More details are given below.

ClusterEndpoint string

The endpoint to use when communicating with the cluster. Conflicts with domain_arn.

DomainArn string

The ARN of the Amazon ES domain. The pattern needs to be arn:.*. Conflicts with cluster_endpoint.

IndexRotationPeriod string

The Elasticsearch index rotation period. Index rotation appends a timestamp to the IndexName to facilitate expiration of old data. Valid values are NoRotation, OneHour, OneDay, OneWeek, and OneMonth. The default value is OneDay.

ProcessingConfiguration FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfiguration

The data processing configuration. More details are given below.

RetryDuration int

After an initial failure to deliver to Amazon Elasticsearch, the total amount of time, in seconds (between 0 and 7200), during which Firehose re-attempts delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. The default value is 300s. There will be no retry if the value is 0.

S3BackupMode string

Defines how documents should be delivered to Amazon S3. Valid values are FailedDocumentsOnly and AllDocuments. Default value is FailedDocumentsOnly.

TypeName string

The Elasticsearch type name, with a maximum length of 100 characters.

VpcConfig FirehoseDeliveryStreamElasticsearchConfigurationVpcConfig

The VPC configuration for the delivery stream to connect to an Elasticsearch domain in a VPC. More details are given below.

IndexName string

The Elasticsearch index name.

RoleArn string

The ARN of the IAM role to be assumed by Firehose for calling the Amazon ES Configuration API and for indexing documents. The IAM role must have permission for DescribeElasticsearchDomain, DescribeElasticsearchDomains, and DescribeElasticsearchDomainConfig. The pattern needs to be arn:.*.

BufferingInterval int

Buffer incoming data for the specified period of time, in seconds (between 60 and 900), before delivering it to the destination. The default value is 300s.

BufferingSize int

Buffer incoming data to the specified size, in MBs (between 1 and 100), before delivering it to the destination. The default value is 5MB.

CloudwatchLoggingOptions FirehoseDeliveryStreamElasticsearchConfigurationCloudwatchLoggingOptions

The CloudWatch Logging Options for the delivery stream. More details are given below.

ClusterEndpoint string

The endpoint to use when communicating with the cluster. Conflicts with domain_arn.

DomainArn string

The ARN of the Amazon ES domain. The pattern needs to be arn:.*. Conflicts with cluster_endpoint.

IndexRotationPeriod string

The Elasticsearch index rotation period. Index rotation appends a timestamp to the IndexName to facilitate expiration of old data. Valid values are NoRotation, OneHour, OneDay, OneWeek, and OneMonth. The default value is OneDay.

ProcessingConfiguration FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfiguration

The data processing configuration. More details are given below.

RetryDuration int

After an initial failure to deliver to Amazon Elasticsearch, the total amount of time, in seconds (between 0 and 7200), during which Firehose re-attempts delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. The default value is 300s. There will be no retry if the value is 0.

S3BackupMode string

Defines how documents should be delivered to Amazon S3. Valid values are FailedDocumentsOnly and AllDocuments. Default value is FailedDocumentsOnly.

TypeName string

The Elasticsearch type name, with a maximum length of 100 characters.

VpcConfig FirehoseDeliveryStreamElasticsearchConfigurationVpcConfig

The VPC configuration for the delivery stream to connect to an Elasticsearch domain in a VPC. More details are given below.

indexName String

The Elasticsearch index name.

roleArn String

The ARN of the IAM role to be assumed by Firehose for calling the Amazon ES Configuration API and for indexing documents. The IAM role must have permission for DescribeElasticsearchDomain, DescribeElasticsearchDomains, and DescribeElasticsearchDomainConfig. The pattern needs to be arn:.*.

bufferingInterval Integer

Buffer incoming data for the specified period of time, in seconds (between 60 and 900), before delivering it to the destination. The default value is 300s.

bufferingSize Integer

Buffer incoming data to the specified size, in MBs (between 1 and 100), before delivering it to the destination. The default value is 5MB.

cloudwatchLoggingOptions FirehoseDeliveryStreamElasticsearchConfigurationCloudwatchLoggingOptions

The CloudWatch Logging Options for the delivery stream. More details are given below.

clusterEndpoint String

The endpoint to use when communicating with the cluster. Conflicts with domain_arn.

domainArn String

The ARN of the Amazon ES domain. The pattern needs to be arn:.*. Conflicts with cluster_endpoint.

indexRotationPeriod String

The Elasticsearch index rotation period. Index rotation appends a timestamp to the IndexName to facilitate expiration of old data. Valid values are NoRotation, OneHour, OneDay, OneWeek, and OneMonth. The default value is OneDay.

processingConfiguration FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfiguration

The data processing configuration. More details are given below.

retryDuration Integer

After an initial failure to deliver to Amazon Elasticsearch, the total amount of time, in seconds (between 0 and 7200), during which Firehose re-attempts delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. The default value is 300s. There will be no retry if the value is 0.

s3BackupMode String

Defines how documents should be delivered to Amazon S3. Valid values are FailedDocumentsOnly and AllDocuments. Default value is FailedDocumentsOnly.

typeName String

The Elasticsearch type name, with a maximum length of 100 characters.

vpcConfig FirehoseDeliveryStreamElasticsearchConfigurationVpcConfig

The VPC configuration for the delivery stream to connect to an Elasticsearch domain in a VPC. More details are given below.

indexName string

The Elasticsearch index name.

roleArn string

The ARN of the IAM role to be assumed by Firehose for calling the Amazon ES Configuration API and for indexing documents. The IAM role must have permission for DescribeElasticsearchDomain, DescribeElasticsearchDomains, and DescribeElasticsearchDomainConfig. The pattern needs to be arn:.*.

bufferingInterval number

Buffer incoming data for the specified period of time, in seconds (between 60 and 900), before delivering it to the destination. The default value is 300s.

bufferingSize number

Buffer incoming data to the specified size, in MBs (between 1 and 100), before delivering it to the destination. The default value is 5MB.

cloudwatchLoggingOptions FirehoseDeliveryStreamElasticsearchConfigurationCloudwatchLoggingOptions

The CloudWatch Logging Options for the delivery stream. More details are given below.

clusterEndpoint string

The endpoint to use when communicating with the cluster. Conflicts with domain_arn.

domainArn string

The ARN of the Amazon ES domain. The pattern needs to be arn:.*. Conflicts with cluster_endpoint.

indexRotationPeriod string

The Elasticsearch index rotation period. Index rotation appends a timestamp to the IndexName to facilitate expiration of old data. Valid values are NoRotation, OneHour, OneDay, OneWeek, and OneMonth. The default value is OneDay.

processingConfiguration FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfiguration

The data processing configuration. More details are given below.

retryDuration number

After an initial failure to deliver to Amazon Elasticsearch, the total amount of time, in seconds, between 0 and 7200, during which Firehose re-attempts delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. The default value is 300s. There will be no retry if the value is 0.

s3BackupMode string

Defines how documents should be delivered to Amazon S3. Valid values are FailedDocumentsOnly and AllDocuments. Default value is FailedDocumentsOnly.

typeName string

The Elasticsearch type name, with a maximum length of 100 characters.

vpcConfig FirehoseDeliveryStreamElasticsearchConfigurationVpcConfig

The VPC configuration for the delivery stream to connect to the Elasticsearch domain associated with the VPC. More details are given below

index_name str

The Elasticsearch index name.

role_arn str

The ARN of the IAM role to be assumed by Firehose for calling the Amazon ES Configuration API and for indexing documents. The IAM role must have permission for DescribeElasticsearchDomain, DescribeElasticsearchDomains, and DescribeElasticsearchDomainConfig. The pattern needs to be arn:.*.

buffering_interval int

Buffer incoming data for the specified period of time, in seconds, between 60 and 900, before delivering it to the destination. The default value is 300s.

buffering_size int

Buffer incoming data to the specified size, in MBs, between 1 and 100, before delivering it to the destination. The default value is 5MB.

cloudwatch_logging_options FirehoseDeliveryStreamElasticsearchConfigurationCloudwatchLoggingOptions

The CloudWatch Logging Options for the delivery stream. More details are given below

cluster_endpoint str

The endpoint to use when communicating with the cluster. Conflicts with domain_arn.

domain_arn str

The ARN of the Amazon ES domain. The pattern needs to be arn:.*. Conflicts with cluster_endpoint.

index_rotation_period str

The Elasticsearch index rotation period. Index rotation appends a timestamp to the IndexName to facilitate expiration of old data. Valid values are NoRotation, OneHour, OneDay, OneWeek, and OneMonth. The default value is OneDay.

processing_configuration FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfiguration

The data processing configuration. More details are given below.

retry_duration int

After an initial failure to deliver to Amazon Elasticsearch, the total amount of time, in seconds, between 0 and 7200, during which Firehose re-attempts delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. The default value is 300s. There will be no retry if the value is 0.

s3_backup_mode str

Defines how documents should be delivered to Amazon S3. Valid values are FailedDocumentsOnly and AllDocuments. Default value is FailedDocumentsOnly.

type_name str

The Elasticsearch type name, with a maximum length of 100 characters.

vpc_config FirehoseDeliveryStreamElasticsearchConfigurationVpcConfig

The VPC configuration for the delivery stream to connect to the Elasticsearch domain associated with the VPC. More details are given below

indexName String

The Elasticsearch index name.

roleArn String

The ARN of the IAM role to be assumed by Firehose for calling the Amazon ES Configuration API and for indexing documents. The IAM role must have permission for DescribeElasticsearchDomain, DescribeElasticsearchDomains, and DescribeElasticsearchDomainConfig. The pattern needs to be arn:.*.

bufferingInterval Number

Buffer incoming data for the specified period of time, in seconds, between 60 and 900, before delivering it to the destination. The default value is 300s.

bufferingSize Number

Buffer incoming data to the specified size, in MBs, between 1 and 100, before delivering it to the destination. The default value is 5MB.

cloudwatchLoggingOptions Property Map

The CloudWatch Logging Options for the delivery stream. More details are given below

clusterEndpoint String

The endpoint to use when communicating with the cluster. Conflicts with domain_arn.

domainArn String

The ARN of the Amazon ES domain. The pattern needs to be arn:.*. Conflicts with cluster_endpoint.

indexRotationPeriod String

The Elasticsearch index rotation period. Index rotation appends a timestamp to the IndexName to facilitate expiration of old data. Valid values are NoRotation, OneHour, OneDay, OneWeek, and OneMonth. The default value is OneDay.

processingConfiguration Property Map

The data processing configuration. More details are given below.

retryDuration Number

After an initial failure to deliver to Amazon Elasticsearch, the total amount of time, in seconds, between 0 and 7200, during which Firehose re-attempts delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. The default value is 300s. There will be no retry if the value is 0.

s3BackupMode String

Defines how documents should be delivered to Amazon S3. Valid values are FailedDocumentsOnly and AllDocuments. Default value is FailedDocumentsOnly.

typeName String

The Elasticsearch type name, with a maximum length of 100 characters.

vpcConfig Property Map

The VPC configuration for the delivery stream to connect to the Elasticsearch domain associated with the VPC. More details are given below
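
To make the shape of this block concrete, the following C# sketch assembles the arguments above into an elasticsearchConfiguration; the role and domain ARNs and the index name are placeholders rather than values from this page.

using Pulumi;
using Aws = Pulumi.Aws;

return await Deployment.RunAsync(() =>
{
    // Placeholder ARNs; in a real program these would come from an
    // aws.iam.Role and an Elasticsearch domain defined in the same stack.
    var esConfig = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamElasticsearchConfigurationArgs
    {
        RoleArn = "arn:aws:iam::123456789012:role/firehose-es-role",
        DomainArn = "arn:aws:es:us-east-1:123456789012:domain/example",
        IndexName = "app-logs",
        IndexRotationPeriod = "OneDay",       // the default, shown for clarity
        BufferingInterval = 60,               // seconds, between 60 and 900
        BufferingSize = 10,                   // MBs, between 1 and 100
        RetryDuration = 300,                  // seconds, between 0 and 7200
        S3BackupMode = "FailedDocumentsOnly", // or "AllDocuments"
    };
    // Pass esConfig as ElasticsearchConfiguration on a FirehoseDeliveryStream
    // whose Destination is "elasticsearch".
});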

FirehoseDeliveryStreamElasticsearchConfigurationCloudwatchLoggingOptions

Enabled bool

Enables or disables the logging. Defaults to false.

LogGroupName string

The CloudWatch group name for logging. This value is required if enabled is true.

LogStreamName string

The CloudWatch log stream name for logging. This value is required if enabled is true.

Enabled bool

Enables or disables the logging. Defaults to false.

LogGroupName string

The CloudWatch group name for logging. This value is required if enabled is true.

LogStreamName string

The CloudWatch log stream name for logging. This value is required if enabled is true.

enabled Boolean

Enables or disables the logging. Defaults to false.

logGroupName String

The CloudWatch group name for logging. This value is required if enabled is true.

logStreamName String

The CloudWatch log stream name for logging. This value is required if enabled is true.

enabled boolean

Enables or disables the logging. Defaults to false.

logGroupName string

The CloudWatch group name for logging. This value is required if enabled is true.

logStreamName string

The CloudWatch log stream name for logging. This value is required if enabled is true.

enabled bool

Enables or disables the logging. Defaults to false.

log_group_name str

The CloudWatch group name for logging. This value is required if enabled is true.

log_stream_name str

The CloudWatch log stream name for logging. This value is required if enabled is true.

enabled Boolean

Enables or disables the logging. Defaults to false.

logGroupName String

The CloudWatch group name for logging. This value is required if enabled is true.

logStreamName String

The CloudWatch log stream name for logging. This value is required if enabled is true.
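
A minimal C# sketch of enabling these logging options, assuming the log group and stream are created in the same program (the resource names are illustrative):

using Pulumi;
using Aws = Pulumi.Aws;

return await Deployment.RunAsync(() =>
{
    var logGroup = new Aws.CloudWatch.LogGroup("firehoseLogGroup");
    var logStream = new Aws.CloudWatch.LogStream("firehoseLogStream", new()
    {
        LogGroupName = logGroup.Name,
    });

    var loggingOptions = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamElasticsearchConfigurationCloudwatchLoggingOptionsArgs
    {
        Enabled = true,                 // defaults to false
        LogGroupName = logGroup.Name,   // required when enabled is true
        LogStreamName = logStream.Name, // required when enabled is true
    };
    // loggingOptions slots into the CloudwatchLoggingOptions argument of the
    // elasticsearch configuration shown above.
});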

FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfiguration

Enabled bool

Enables or disables data processing.

Processors List<FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessor>

Array of data processors. More details are given below

Enabled bool

Enables or disables data processing.

Processors []FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessor

Array of data processors. More details are given below

enabled Boolean

Enables or disables data processing.

processors List<FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessor>

Array of data processors. More details are given below

enabled boolean

Enables or disables data processing.

processors FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessor[]

Array of data processors. More details are given below

enabled bool

Enables or disables data processing.

processors Sequence[FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessor]

Array of data processors. More details are given below

enabled Boolean

Enables or disables data processing.

processors List<Property Map>

Array of data processors. More details are given below

FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessor

Type string

The type of processor. Valid Values: RecordDeAggregation, Lambda, MetadataExtraction, AppendDelimiterToRecord. Validation is done against AWS SDK constants, so values not explicitly listed may also work.

Parameters List<FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorParameter>

Array of processor parameters. More details are given below

Type string

The type of processor. Valid Values: RecordDeAggregation, Lambda, MetadataExtraction, AppendDelimiterToRecord. Validation is done against AWS SDK constants, so values not explicitly listed may also work.

Parameters []FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorParameter

Array of processor parameters. More details are given below

type String

The type of processor. Valid Values: RecordDeAggregation, Lambda, MetadataExtraction, AppendDelimiterToRecord. Validation is done against AWS SDK constants, so values not explicitly listed may also work.

parameters List<FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorParameter>

Array of processor parameters. More details are given below

type string

The type of processor. Valid Values: RecordDeAggregation, Lambda, MetadataExtraction, AppendDelimiterToRecord. Validation is done against AWS SDK constants, so values not explicitly listed may also work.

parameters FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorParameter[]

Array of processor parameters. More details are given below

type str

The type of processor. Valid Values: RecordDeAggregation, Lambda, MetadataExtraction, AppendDelimiterToRecord. Validation is done against AWS SDK constants, so values not explicitly listed may also work.

parameters Sequence[FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorParameter]

Array of processor parameters. More details are given below

type String

The type of processor. Valid Values: RecordDeAggregation, Lambda, MetadataExtraction, AppendDelimiterToRecord. Validation is done against AWS SDK constants, so values not explicitly listed may also work.

parameters List<Property Map>

Array of processor parameters. More details are given below

FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorParameter

ParameterName string

Parameter name. Valid Values: LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter. Validation is done against AWS SDK constants, so values not explicitly listed may also work.

ParameterValue string

Parameter value. Must be between 1 and 512 characters in length (inclusive). When providing a Lambda ARN, you should specify the resource version as well.

ParameterName string

Parameter name. Valid Values: LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter. Validation is done against AWS SDK constants, so values not explicitly listed may also work.

ParameterValue string

Parameter value. Must be between 1 and 512 characters in length (inclusive). When providing a Lambda ARN, you should specify the resource version as well.

parameterName String

Parameter name. Valid Values: LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter. Validation is done against AWS SDK constants, so values not explicitly listed may also work.

parameterValue String

Parameter value. Must be between 1 and 512 characters in length (inclusive). When providing a Lambda ARN, you should specify the resource version as well.

parameterName string

Parameter name. Valid Values: LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter. Validation is done against AWS SDK constants, so values not explicitly listed may also work.

parameterValue string

Parameter value. Must be between 1 and 512 characters in length (inclusive). When providing a Lambda ARN, you should specify the resource version as well.

parameter_name str

Parameter name. Valid Values: LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter. Validation is done against AWS SDK constants, so values not explicitly listed may also work.

parameter_value str

Parameter value. Must be between 1 and 512 characters in length (inclusive). When providing a Lambda ARN, you should specify the resource version as well.

parameterName String

Parameter name. Valid Values: LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter. Validation is done against AWS SDK constants, so values not explicitly listed may also work.

parameterValue String

Parameter value. Must be between 1 and 512 characters in length (inclusive). When providing a Lambda ARN, you should specify the resource version as well.
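
The following C# sketch combines the processing configuration, processor, and processor parameter blocks above. The Lambda function ARN is a placeholder; it carries an explicit :$LATEST qualifier because a Lambda ARN parameter should include the resource version.

using Pulumi;
using Aws = Pulumi.Aws;

return await Deployment.RunAsync(() =>
{
    var processing = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationArgs
    {
        Enabled = true,
        Processors = new[]
        {
            new Aws.Kinesis.Inputs.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorArgs
            {
                Type = "Lambda",
                Parameters = new[]
                {
                    new Aws.Kinesis.Inputs.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorParameterArgs
                    {
                        ParameterName = "LambdaArn",
                        ParameterValue = "arn:aws:lambda:us-east-1:123456789012:function:transform:$LATEST", // placeholder
                    },
                    new Aws.Kinesis.Inputs.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorParameterArgs
                    {
                        ParameterName = "NumberOfRetries",
                        ParameterValue = "3",
                    },
                },
            },
        },
    };
});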

FirehoseDeliveryStreamElasticsearchConfigurationVpcConfig

RoleArn string

The ARN of the IAM role to be assumed by Firehose for calling the Amazon EC2 configuration API and for creating network interfaces. Make sure the role has the necessary IAM permissions.

SecurityGroupIds List<string>

A list of security group IDs to associate with Kinesis Firehose.

SubnetIds List<string>

A list of subnet IDs to associate with Kinesis Firehose.

VpcId string

RoleArn string

The ARN of the IAM role to be assumed by Firehose for calling the Amazon EC2 configuration API and for creating network interfaces. Make sure the role has the necessary IAM permissions.

SecurityGroupIds []string

A list of security group IDs to associate with Kinesis Firehose.

SubnetIds []string

A list of subnet IDs to associate with Kinesis Firehose.

VpcId string

roleArn String

The ARN of the IAM role to be assumed by Firehose for calling the Amazon EC2 configuration API and for creating network interfaces. Make sure the role has the necessary IAM permissions.

securityGroupIds List<String>

A list of security group IDs to associate with Kinesis Firehose.

subnetIds List<String>

A list of subnet IDs to associate with Kinesis Firehose.

vpcId String

roleArn string

The ARN of the IAM role to be assumed by Firehose for calling the Amazon EC2 configuration API and for creating network interfaces. Make sure the role has the necessary IAM permissions.

securityGroupIds string[]

A list of security group IDs to associate with Kinesis Firehose.

subnetIds string[]

A list of subnet IDs to associate with Kinesis Firehose.

vpcId string

role_arn str

The ARN of the IAM role to be assumed by Firehose for calling the Amazon EC2 configuration API and for creating network interfaces. Make sure the role has the necessary IAM permissions.

security_group_ids Sequence[str]

A list of security group IDs to associate with Kinesis Firehose.

subnet_ids Sequence[str]

A list of subnet IDs to associate with Kinesis Firehose.

vpc_id str

roleArn String

The ARN of the IAM role to be assumed by Firehose for calling the Amazon EC2 configuration API and for creating network interfaces. Make sure the role has the necessary IAM permissions.

securityGroupIds List<String>

A list of security group IDs to associate with Kinesis Firehose.

subnetIds List<String>

A list of subnet IDs to associate with Kinesis Firehose.

vpcId String
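
A C# sketch of the VPC configuration; the subnet, security group, and role identifiers are placeholders:

using Pulumi;
using Aws = Pulumi.Aws;

return await Deployment.RunAsync(() =>
{
    // The role must carry the EC2 network-interface permissions noted above.
    var vpcConfig = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamElasticsearchConfigurationVpcConfigArgs
    {
        RoleArn = "arn:aws:iam::123456789012:role/firehose-vpc-role",
        SubnetIds = new[] { "subnet-0123456789abcdef0" },
        SecurityGroupIds = new[] { "sg-0123456789abcdef0" },
    };
});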

FirehoseDeliveryStreamExtendedS3Configuration

BucketArn string

The ARN of the S3 bucket

RoleArn string

The ARN of the role that provides access to the source Kinesis stream.

BufferInterval int

Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300.

BufferSize int

Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5. We recommend setting SizeInMBs to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, set SizeInMBs to 10 MB or higher.

CloudwatchLoggingOptions FirehoseDeliveryStreamExtendedS3ConfigurationCloudwatchLoggingOptions

The CloudWatch Logging Options for the delivery stream. More details are given below

CompressionFormat string

The compression format. If no value is specified, the default is UNCOMPRESSED. Other supported values are GZIP, ZIP, Snappy, and HADOOP_SNAPPY.

DataFormatConversionConfiguration FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfiguration

Nested argument for the serializer, deserializer, and schema for converting data from the JSON format to the Parquet or ORC format before writing it to Amazon S3. More details given below.

DynamicPartitioningConfiguration FirehoseDeliveryStreamExtendedS3ConfigurationDynamicPartitioningConfiguration

The configuration for dynamic partitioning. See Dynamic Partitioning Configuration below for more details. Required when using dynamic partitioning.

ErrorOutputPrefix string

Prefix added to failed records before writing them to S3. Not currently supported for the Redshift destination. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.

KmsKeyArn string

Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will be used.

Prefix string

The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket

ProcessingConfiguration FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfiguration

The data processing configuration. More details are given below.

S3BackupConfiguration FirehoseDeliveryStreamExtendedS3ConfigurationS3BackupConfiguration

The configuration for backup in Amazon S3. Required if s3_backup_mode is Enabled. Supports the same fields as the s3_configuration object.

S3BackupMode string

The Amazon S3 backup mode. Valid values are Disabled and Enabled. Default value is Disabled.

BucketArn string

The ARN of the S3 bucket

RoleArn string

The ARN of the role that provides access to the source Kinesis stream.

BufferInterval int

Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300.

BufferSize int

Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5. We recommend setting SizeInMBs to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, set SizeInMBs to 10 MB or higher.

CloudwatchLoggingOptions FirehoseDeliveryStreamExtendedS3ConfigurationCloudwatchLoggingOptions

The CloudWatch Logging Options for the delivery stream. More details are given below

CompressionFormat string

The compression format. If no value is specified, the default is UNCOMPRESSED. Other supported values are GZIP, ZIP, Snappy, and HADOOP_SNAPPY.

DataFormatConversionConfiguration FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfiguration

Nested argument for the serializer, deserializer, and schema for converting data from the JSON format to the Parquet or ORC format before writing it to Amazon S3. More details given below.

DynamicPartitioningConfiguration FirehoseDeliveryStreamExtendedS3ConfigurationDynamicPartitioningConfiguration

The configuration for dynamic partitioning. See Dynamic Partitioning Configuration below for more details. Required when using dynamic partitioning.

ErrorOutputPrefix string

Prefix added to failed records before writing them to S3. Not currently supported for the Redshift destination. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.

KmsKeyArn string

Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will be used.

Prefix string

The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket

ProcessingConfiguration FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfiguration

The data processing configuration. More details are given below.

S3BackupConfiguration FirehoseDeliveryStreamExtendedS3ConfigurationS3BackupConfiguration

The configuration for backup in Amazon S3. Required if s3_backup_mode is Enabled. Supports the same fields as the s3_configuration object.

S3BackupMode string

The Amazon S3 backup mode. Valid values are Disabled and Enabled. Default value is Disabled.

bucketArn String

The ARN of the S3 bucket

roleArn String

The ARN of the role that provides access to the source Kinesis stream.

bufferInterval Integer

Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300.

bufferSize Integer

Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5. We recommend setting SizeInMBs to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, set SizeInMBs to 10 MB or higher.

cloudwatchLoggingOptions FirehoseDeliveryStreamExtendedS3ConfigurationCloudwatchLoggingOptions

The CloudWatch Logging Options for the delivery stream. More details are given below

compressionFormat String

The compression format. If no value is specified, the default is UNCOMPRESSED. Other supported values are GZIP, ZIP, Snappy, and HADOOP_SNAPPY.

dataFormatConversionConfiguration FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfiguration

Nested argument for the serializer, deserializer, and schema for converting data from the JSON format to the Parquet or ORC format before writing it to Amazon S3. More details given below.

dynamicPartitioningConfiguration FirehoseDeliveryStreamExtendedS3ConfigurationDynamicPartitioningConfiguration

The configuration for dynamic partitioning. See Dynamic Partitioning Configuration below for more details. Required when using dynamic partitioning.

errorOutputPrefix String

Prefix added to failed records before writing them to S3. Not currently supported for the Redshift destination. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.

kmsKeyArn String

Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will be used.

prefix String

The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket

processingConfiguration FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfiguration

The data processing configuration. More details are given below.

s3BackupConfiguration FirehoseDeliveryStreamExtendedS3ConfigurationS3BackupConfiguration

The configuration for backup in Amazon S3. Required if s3_backup_mode is Enabled. Supports the same fields as the s3_configuration object.

s3BackupMode String

The Amazon S3 backup mode. Valid values are Disabled and Enabled. Default value is Disabled.

bucketArn string

The ARN of the S3 bucket

roleArn string

The ARN of the role that provides access to the source Kinesis stream.

bufferInterval number

Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300.

bufferSize number

Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5. We recommend setting SizeInMBs to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, set SizeInMBs to 10 MB or higher.

cloudwatchLoggingOptions FirehoseDeliveryStreamExtendedS3ConfigurationCloudwatchLoggingOptions

The CloudWatch Logging Options for the delivery stream. More details are given below

compressionFormat string

The compression format. If no value is specified, the default is UNCOMPRESSED. Other supported values are GZIP, ZIP, Snappy, and HADOOP_SNAPPY.

dataFormatConversionConfiguration FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfiguration

Nested argument for the serializer, deserializer, and schema for converting data from the JSON format to the Parquet or ORC format before writing it to Amazon S3. More details given below.

dynamicPartitioningConfiguration FirehoseDeliveryStreamExtendedS3ConfigurationDynamicPartitioningConfiguration

The configuration for dynamic partitioning. See Dynamic Partitioning Configuration below for more details. Required when using dynamic partitioning.

errorOutputPrefix string

Prefix added to failed records before writing them to S3. Not currently supported for the Redshift destination. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.

kmsKeyArn string

Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will be used.

prefix string

The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket

processingConfiguration FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfiguration

The data processing configuration. More details are given below.

s3BackupConfiguration FirehoseDeliveryStreamExtendedS3ConfigurationS3BackupConfiguration

The configuration for backup in Amazon S3. Required if s3_backup_mode is Enabled. Supports the same fields as the s3_configuration object.

s3BackupMode string

The Amazon S3 backup mode. Valid values are Disabled and Enabled. Default value is Disabled.

bucket_arn str

The ARN of the S3 bucket

role_arn str

The ARN of the role that provides access to the source Kinesis stream.

buffer_interval int

Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300.

buffer_size int

Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5. We recommend setting SizeInMBs to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, set SizeInMBs to 10 MB or higher.

cloudwatch_logging_options FirehoseDeliveryStreamExtendedS3ConfigurationCloudwatchLoggingOptions

The CloudWatch Logging Options for the delivery stream. More details are given below

compression_format str

The compression format. If no value is specified, the default is UNCOMPRESSED. Other supported values are GZIP, ZIP, Snappy, and HADOOP_SNAPPY.

data_format_conversion_configuration FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfiguration

Nested argument for the serializer, deserializer, and schema for converting data from the JSON format to the Parquet or ORC format before writing it to Amazon S3. More details given below.

dynamic_partitioning_configuration FirehoseDeliveryStreamExtendedS3ConfigurationDynamicPartitioningConfiguration

The configuration for dynamic partitioning. See Dynamic Partitioning Configuration below for more details. Required when using dynamic partitioning.

error_output_prefix str

Prefix added to failed records before writing them to S3. Not currently supported for the Redshift destination. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.

kms_key_arn str

Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will be used.

prefix str

The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket

processing_configuration FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfiguration

The data processing configuration. More details are given below.

s3_backup_configuration FirehoseDeliveryStreamExtendedS3ConfigurationS3BackupConfiguration

The configuration for backup in Amazon S3. Required if s3_backup_mode is Enabled. Supports the same fields as the s3_configuration object.

s3_backup_mode str

The Amazon S3 backup mode. Valid values are Disabled and Enabled. Default value is Disabled.

bucketArn String

The ARN of the S3 bucket

roleArn String

The ARN of the role that provides access to the source Kinesis stream.

bufferInterval Number

Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300.

bufferSize Number

Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5. We recommend setting SizeInMBs to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, set SizeInMBs to 10 MB or higher.

cloudwatchLoggingOptions Property Map

The CloudWatch Logging Options for the delivery stream. More details are given below

compressionFormat String

The compression format. If no value is specified, the default is UNCOMPRESSED. Other supported values are GZIP, ZIP, Snappy, and HADOOP_SNAPPY.

dataFormatConversionConfiguration Property Map

Nested argument for the serializer, deserializer, and schema for converting data from the JSON format to the Parquet or ORC format before writing it to Amazon S3. More details given below.

dynamicPartitioningConfiguration Property Map

The configuration for dynamic partitioning. See Dynamic Partitioning Configuration below for more details. Required when using dynamic partitioning.

errorOutputPrefix String

Prefix added to failed records before writing them to S3. Not currently supported for the Redshift destination. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.

kmsKeyArn String

Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will be used.

prefix String

The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket

processingConfiguration Property Map

The data processing configuration. More details are given below.

s3BackupConfiguration Property Map

The configuration for backup in Amazon S3. Required if s3_backup_mode is Enabled. Supports the same fields as the s3_configuration object.

s3BackupMode String

The Amazon S3 backup mode. Valid values are Disabled and Enabled. Default value is Disabled.
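
A C# sketch pulling several of these arguments together; the role ARN is a placeholder, and the compression and prefix values are illustrative choices rather than defaults:

using Pulumi;
using Aws = Pulumi.Aws;

return await Deployment.RunAsync(() =>
{
    var bucket = new Aws.S3.BucketV2("deliveryBucket");

    var extendedS3 = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationArgs
    {
        RoleArn = "arn:aws:iam::123456789012:role/firehose-s3-role", // placeholder
        BucketArn = bucket.Arn,
        BufferInterval = 300,       // seconds
        BufferSize = 10,            // MBs; sized above the typical 10-second ingest volume
        CompressionFormat = "GZIP", // default is UNCOMPRESSED
        Prefix = "logs/",           // prepended to the time-format prefix
        ErrorOutputPrefix = "errors/",
        S3BackupMode = "Disabled",  // "Enabled" requires S3BackupConfiguration
    };
});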

FirehoseDeliveryStreamExtendedS3ConfigurationCloudwatchLoggingOptions

Enabled bool

Enables or disables the logging. Defaults to false.

LogGroupName string

The CloudWatch group name for logging. This value is required if enabled is true.

LogStreamName string

The CloudWatch log stream name for logging. This value is required if enabled is true.

Enabled bool

Enables or disables the logging. Defaults to false.

LogGroupName string

The CloudWatch group name for logging. This value is required if enabled is true.

LogStreamName string

The CloudWatch log stream name for logging. This value is required if enabled is true.

enabled Boolean

Enables or disables the logging. Defaults to false.

logGroupName String

The CloudWatch group name for logging. This value is required if enabled is true.

logStreamName String

The CloudWatch log stream name for logging. This value is required if enabled is true.

enabled boolean

Enables or disables the logging. Defaults to false.

logGroupName string

The CloudWatch group name for logging. This value is required if enabled is true.

logStreamName string

The CloudWatch log stream name for logging. This value is required if enabled is true.

enabled bool

Enables or disables the logging. Defaults to false.

log_group_name str

The CloudWatch group name for logging. This value is required if enabled is true.

log_stream_name str

The CloudWatch log stream name for logging. This value is required if enabled is true.

enabled Boolean

Enables or disables the logging. Defaults to false.

logGroupName String

The CloudWatch group name for logging. This value is required if enabled is true.

logStreamName String

The CloudWatch log stream name for logging. This value is required if enabled is true.

FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfiguration

InputFormatConfiguration FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfiguration

Nested argument that specifies the deserializer that you want Kinesis Data Firehose to use to convert the format of your data from JSON. More details below.

OutputFormatConfiguration FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfiguration

Nested argument that specifies the serializer that you want Kinesis Data Firehose to use to convert the format of your data to the Parquet or ORC format. More details below.

SchemaConfiguration FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationSchemaConfiguration

Nested argument that specifies the AWS Glue Data Catalog table that contains the column information. More details below.

Enabled bool

Defaults to true. Set it to false if you want to disable format conversion while preserving the configuration details.

InputFormatConfiguration FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfiguration

Nested argument that specifies the deserializer that you want Kinesis Data Firehose to use to convert the format of your data from JSON. More details below.

OutputFormatConfiguration FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfiguration

Nested argument that specifies the serializer that you want Kinesis Data Firehose to use to convert the format of your data to the Parquet or ORC format. More details below.

SchemaConfiguration FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationSchemaConfiguration

Nested argument that specifies the AWS Glue Data Catalog table that contains the column information. More details below.

Enabled bool

Defaults to true. Set it to false if you want to disable format conversion while preserving the configuration details.

inputFormatConfiguration FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfiguration

Nested argument that specifies the deserializer that you want Kinesis Data Firehose to use to convert the format of your data from JSON. More details below.

outputFormatConfiguration FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfiguration

Nested argument that specifies the serializer that you want Kinesis Data Firehose to use to convert the format of your data to the Parquet or ORC format. More details below.

schemaConfiguration FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationSchemaConfiguration

Nested argument that specifies the AWS Glue Data Catalog table that contains the column information. More details below.

enabled Boolean

Defaults to true. Set it to false if you want to disable format conversion while preserving the configuration details.

inputFormatConfiguration FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfiguration

Nested argument that specifies the deserializer that you want Kinesis Data Firehose to use to convert the format of your data from JSON. More details below.

outputFormatConfiguration FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfiguration

Nested argument that specifies the serializer that you want Kinesis Data Firehose to use to convert the format of your data to the Parquet or ORC format. More details below.

schemaConfiguration FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationSchemaConfiguration

Nested argument that specifies the AWS Glue Data Catalog table that contains the column information. More details below.

enabled boolean

Defaults to true. Set it to false if you want to disable format conversion while preserving the configuration details.

input_format_configuration FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfiguration

Nested argument that specifies the deserializer that you want Kinesis Data Firehose to use to convert the format of your data from JSON. More details below.

output_format_configuration FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfiguration

Nested argument that specifies the serializer that you want Kinesis Data Firehose to use to convert the format of your data to the Parquet or ORC format. More details below.

schema_configuration FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationSchemaConfiguration

Nested argument that specifies the AWS Glue Data Catalog table that contains the column information. More details below.

enabled bool

Defaults to true. Set it to false if you want to disable format conversion while preserving the configuration details.

inputFormatConfiguration Property Map

Nested argument that specifies the deserializer that you want Kinesis Data Firehose to use to convert the format of your data from JSON. More details below.

outputFormatConfiguration Property Map

Nested argument that specifies the serializer that you want Kinesis Data Firehose to use to convert the format of your data to the Parquet or ORC format. More details below.

schemaConfiguration Property Map

Nested argument that specifies the AWS Glue Data Catalog table that contains the column information. More details below.

enabled Boolean

Defaults to true. Set it to false if you want to disable format conversion while preserving the configuration details.
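
As a sketch, the following C# snippet wires the input format, output format, and schema configuration blocks into a JSON-to-Parquet conversion. The schema configuration fields shown (DatabaseName, TableName, RoleArn) and their values are placeholders referring to an assumed AWS Glue table:

using Pulumi;
using Aws = Pulumi.Aws;

return await Deployment.RunAsync(() =>
{
    var conversion = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationArgs
    {
        InputFormatConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationArgs
        {
            Deserializer = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializerArgs
            {
                // Read incoming records as JSON via the OpenX SerDe.
                OpenXJsonSerDe = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializerOpenXJsonSerDeArgs(),
            },
        },
        OutputFormatConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationArgs
        {
            Serializer = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializerArgs
            {
                // Write the converted records as Parquet.
                ParquetSerDe = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializerParquetSerDeArgs(),
            },
        },
        SchemaConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationSchemaConfigurationArgs
        {
            DatabaseName = "example_db",   // Glue database (placeholder)
            TableName = "example_table",   // Glue table (placeholder)
            RoleArn = "arn:aws:iam::123456789012:role/firehose-glue-role", // placeholder
        },
    };
});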

FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfiguration

Deserializer FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializer

Nested argument that specifies which deserializer to use. You can choose either the Apache Hive JSON SerDe or the OpenX JSON SerDe. More details below.

Deserializer FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializer

Nested argument that specifies which deserializer to use. You can choose either the Apache Hive JSON SerDe or the OpenX JSON SerDe. More details below.

deserializer FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializer

Nested argument that specifies which deserializer to use. You can choose either the Apache Hive JSON SerDe or the OpenX JSON SerDe. More details below.

deserializer FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializer

Nested argument that specifies which deserializer to use. You can choose either the Apache Hive JSON SerDe or the OpenX JSON SerDe. More details below.

deserializer FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializer

Nested argument that specifies which deserializer to use. You can choose either the Apache Hive JSON SerDe or the OpenX JSON SerDe. More details below.

deserializer Property Map

Nested argument that specifies which deserializer to use. You can choose either the Apache Hive JSON SerDe or the OpenX JSON SerDe. More details below.

FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializer

hiveJsonSerDe Property Map

Nested argument that specifies the native Hive / HCatalog JsonSerDe. More details below.

openXJsonSerDe Property Map

Nested argument that specifies the OpenX SerDe. More details below.

FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializerHiveJsonSerDe

TimestampFormats List<string>

A list of how you want Kinesis Data Firehose to parse the date and time stamps that may be present in your input data JSON. To specify these format strings, follow the pattern syntax of JodaTime's DateTimeFormat format strings. For more information, see Class DateTimeFormat. You can also use the special value millis to parse time stamps in epoch milliseconds. If you don't specify a format, Kinesis Data Firehose uses java.sql.Timestamp::valueOf by default.

TimestampFormats []string

A list of how you want Kinesis Data Firehose to parse the date and time stamps that may be present in your input data JSON. To specify these format strings, follow the pattern syntax of JodaTime's DateTimeFormat format strings. For more information, see Class DateTimeFormat. You can also use the special value millis to parse time stamps in epoch milliseconds. If you don't specify a format, Kinesis Data Firehose uses java.sql.Timestamp::valueOf by default.

timestampFormats List<String>

A list of how you want Kinesis Data Firehose to parse the date and time stamps that may be present in your input data JSON. To specify these format strings, follow the pattern syntax of JodaTime's DateTimeFormat format strings. For more information, see Class DateTimeFormat. You can also use the special value millis to parse time stamps in epoch milliseconds. If you don't specify a format, Kinesis Data Firehose uses java.sql.Timestamp::valueOf by default.

timestampFormats string[]

A list of how you want Kinesis Data Firehose to parse the date and time stamps that may be present in your input data JSON. To specify these format strings, follow the pattern syntax of JodaTime's DateTimeFormat format strings. For more information, see Class DateTimeFormat. You can also use the special value millis to parse time stamps in epoch milliseconds. If you don't specify a format, Kinesis Data Firehose uses java.sql.Timestamp::valueOf by default.

timestamp_formats Sequence[str]

A list of how you want Kinesis Data Firehose to parse the date and time stamps that may be present in your input data JSON. To specify these format strings, follow the pattern syntax of JodaTime's DateTimeFormat format strings. For more information, see Class DateTimeFormat. You can also use the special value millis to parse time stamps in epoch milliseconds. If you don't specify a format, Kinesis Data Firehose uses java.sql.Timestamp::valueOf by default.

timestampFormats List<String>

A list of how you want Kinesis Data Firehose to parse the date and time stamps that may be present in your input data JSON. To specify these format strings, follow the pattern syntax of JodaTime's DateTimeFormat format strings. For more information, see Class DateTimeFormat. You can also use the special value millis to parse time stamps in epoch milliseconds. If you don't specify a format, Kinesis Data Firehose uses java.sql.Timestamp::valueOf by default.
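
For example, a Hive JSON SerDe that accepts both an ISO-style Joda-Time pattern and epoch milliseconds might look like this C# sketch (the pattern is illustrative):

using Pulumi;
using Aws = Pulumi.Aws;

return await Deployment.RunAsync(() =>
{
    var hiveSerDe = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializerHiveJsonSerDeArgs
    {
        // Joda-Time patterns, plus the special value "millis" for epoch milliseconds.
        TimestampFormats = new[] { "yyyy-MM-dd'T'HH:mm:ssZ", "millis" },
    };
});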

FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializerOpenXJsonSerDe

CaseInsensitive bool

When set to true, which is the default, Kinesis Data Firehose converts JSON keys to lowercase before deserializing them.

ColumnToJsonKeyMappings Dictionary<string, string>

A map of column names to JSON keys that aren't identical to the column names. This is useful when the JSON contains keys that are Hive keywords. For example, timestamp is a Hive keyword. If you have a JSON key named timestamp, set this parameter to { ts = "timestamp" } to map this key to a column named ts.

ConvertDotsInJsonKeysToUnderscores bool

When set to true, specifies that the names of the keys include dots and that you want Kinesis Data Firehose to replace them with underscores. This is useful because Apache Hive does not allow dots in column names. For example, if the JSON contains a key whose name is "a.b", you can define the column name to be "a_b" when using this option. Defaults to false.

CaseInsensitive bool

When set to true, which is the default, Kinesis Data Firehose converts JSON keys to lowercase before deserializing them.

ColumnToJsonKeyMappings map[string]string

A map of column names to JSON keys that aren't identical to the column names. This is useful when the JSON contains keys that are Hive keywords. For example, timestamp is a Hive keyword. If you have a JSON key named timestamp, set this parameter to { ts = "timestamp" } to map this key to a column named ts.

ConvertDotsInJsonKeysToUnderscores bool

When set to true, specifies that the names of the keys include dots and that you want Kinesis Data Firehose to replace them with underscores. This is useful because Apache Hive does not allow dots in column names. For example, if the JSON contains a key whose name is "a.b", you can define the column name to be "a_b" when using this option. Defaults to false.

caseInsensitive Boolean

When set to true, which is the default, Kinesis Data Firehose converts JSON keys to lowercase before deserializing them.

columnToJsonKeyMappings Map<String,String>

A map of column names to JSON keys that aren't identical to the column names. This is useful when the JSON contains keys that are Hive keywords. For example, timestamp is a Hive keyword. If you have a JSON key named timestamp, set this parameter to { ts = "timestamp" } to map this key to a column named ts.

convertDotsInJsonKeysToUnderscores Boolean

When set to true, specifies that the names of the keys include dots and that you want Kinesis Data Firehose to replace them with underscores. This is useful because Apache Hive does not allow dots in column names. For example, if the JSON contains a key whose name is "a.b", you can define the column name to be "a_b" when using this option. Defaults to false.

caseInsensitive boolean

When set to true, which is the default, Kinesis Data Firehose converts JSON keys to lowercase before deserializing them.

columnToJsonKeyMappings {[key: string]: string}

A map of column names to JSON keys that aren't identical to the column names. This is useful when the JSON contains keys that are Hive keywords. For example, timestamp is a Hive keyword. If you have a JSON key named timestamp, set this parameter to { ts = "timestamp" } to map this key to a column named ts.

convertDotsInJsonKeysToUnderscores boolean

When set to true, specifies that the names of the keys include dots and that you want Kinesis Data Firehose to replace them with underscores. This is useful because Apache Hive does not allow dots in column names. For example, if the JSON contains a key whose name is "a.b", you can define the column name to be "a_b" when using this option. Defaults to false.

case_insensitive bool

When set to true, which is the default, Kinesis Data Firehose converts JSON keys to lowercase before deserializing them.

column_to_json_key_mappings Mapping[str, str]

A map of column names to JSON keys that aren't identical to the column names. This is useful when the JSON contains keys that are Hive keywords. For example, timestamp is a Hive keyword. If you have a JSON key named timestamp, set this parameter to { ts = "timestamp" } to map this key to a column named ts.

convert_dots_in_json_keys_to_underscores bool

When set to true, specifies that the names of the keys include dots and that you want Kinesis Data Firehose to replace them with underscores. This is useful because Apache Hive does not allow dots in column names. For example, if the JSON contains a key whose name is "a.b", you can define the column name to be "a_b" when using this option. Defaults to false.

caseInsensitive Boolean

When set to true, which is the default, Kinesis Data Firehose converts JSON keys to lowercase before deserializing them.

columnToJsonKeyMappings Map<String>

A map of column names to JSON keys that aren't identical to the column names. This is useful when the JSON contains keys that are Hive keywords. For example, timestamp is a Hive keyword. If you have a JSON key named timestamp, set this parameter to { ts = "timestamp" } to map this key to a column named ts.

convertDotsInJsonKeysToUnderscores Boolean

When set to true, specifies that the names of the keys include dots and that you want Kinesis Data Firehose to replace them with underscores. This is useful because Apache Hive does not allow dots in column names. For example, if the JSON contains a key whose name is "a.b", you can define the column name to be "a_b" when using this option. Defaults to false.
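
A C# sketch of these OpenX SerDe options, using the timestamp-to-ts mapping from the description above:

using Pulumi;
using Aws = Pulumi.Aws;

return await Deployment.RunAsync(() =>
{
    var openXSerDe = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializerOpenXJsonSerDeArgs
    {
        CaseInsensitive = true,                    // the default
        ConvertDotsInJsonKeysToUnderscores = true, // "a.b" becomes column "a_b"
        ColumnToJsonKeyMappings =
        {
            // Map the Hive keyword "timestamp" to a column named "ts".
            { "ts", "timestamp" },
        },
    };
});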

FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfiguration

Serializer FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializer

Nested argument that specifies which serializer to use. You can choose either the ORC SerDe or the Parquet SerDe. More details below.

Serializer FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializer

Nested argument that specifies which serializer to use. You can choose either the ORC SerDe or the Parquet SerDe. More details below.

serializer FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializer

Nested argument that specifies which serializer to use. You can choose either the ORC SerDe or the Parquet SerDe. More details below.

serializer FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializer

Nested argument that specifies which serializer to use. You can choose either the ORC SerDe or the Parquet SerDe. More details below.

serializer FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializer

Nested argument that specifies which serializer to use. You can choose either the ORC SerDe or the Parquet SerDe. More details below.

serializer Property Map

Nested argument that specifies which serializer to use. You can choose either the ORC SerDe or the Parquet SerDe. More details below.

FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializer

OrcSerDe FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializerOrcSerDe

Nested argument that specifies converting data to the ORC format before storing it in Amazon S3. For more information, see Apache ORC. More details below.

ParquetSerDe FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializerParquetSerDe

Nested argument that specifies converting data to the Parquet format before storing it in Amazon S3. For more information, see Apache Parquet. More details below.

OrcSerDe FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializerOrcSerDe

Nested argument that specifies converting data to the ORC format before storing it in Amazon S3. For more information, see Apache ORC. More details below.

ParquetSerDe FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializerParquetSerDe

Nested argument that specifies converting data to the Parquet format before storing it in Amazon S3. For more information, see Apache Parquet. More details below.

orcSerDe FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializerOrcSerDe

Nested argument that specifies converting data to the ORC format before storing it in Amazon S3. For more information, see Apache ORC. More details below.

parquetSerDe FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializerParquetSerDe

Nested argument that specifies converting data to the Parquet format before storing it in Amazon S3. For more information, see Apache Parquet. More details below.

orcSerDe FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializerOrcSerDe

Nested argument that specifies converting data to the ORC format before storing it in Amazon S3. For more information, see Apache ORC. More details below.

parquetSerDe FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializerParquetSerDe

Nested argument that specifies converting data to the Parquet format before storing it in Amazon S3. For more information, see Apache Parquet. More details below.

orc_ser_de FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializerOrcSerDe

Nested argument that specifies converting data to the ORC format before storing it in Amazon S3. For more information, see Apache ORC. More details below.

parquet_ser_de FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializerParquetSerDe

Nested argument that specifies converting data to the Parquet format before storing it in Amazon S3. For more information, see Apache Parquet. More details below.

orcSerDe Property Map

Nested argument that specifies converting data to the ORC format before storing it in Amazon S3. For more information, see Apache ORC. More details below.

parquetSerDe Property Map

Nested argument that specifies converting data to the Parquet format before storing it in Amazon S3. For more information, see Apache Parquet. More details below.
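
A C# sketch selecting the ORC SerDe (set exactly one of the two serializers); the values shown simply restate defaults described below:

using Pulumi;
using Aws = Pulumi.Aws;

return await Deployment.RunAsync(() =>
{
    var serializer = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializerArgs
    {
        OrcSerDe = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializerOrcSerDeArgs
        {
            Compression = "SNAPPY", // the default
            EnablePadding = false,  // the default
        },
    };
});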

FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializerOrcSerDe

BlockSizeBytes int

The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.

BloomFilterColumns List<string>

A list of column names for which you want Kinesis Data Firehose to create bloom filters.

BloomFilterFalsePositiveProbability double

The Bloom filter false positive probability (FPP). The lower the FPP, the bigger the Bloom filter. The default value is 0.05, the minimum is 0, and the maximum is 1.

Compression string

The compression code to use over data blocks. The default is SNAPPY.

DictionaryKeyThreshold double

A float that represents the fraction of the total number of non-null rows. To turn off dictionary encoding, set this fraction to a number that is less than the number of distinct keys in a dictionary. To always use dictionary encoding, set this threshold to 1.

EnablePadding bool

Set this to true to indicate that you want stripes to be padded to the HDFS block boundaries. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is false.

FormatVersion string

The version of the file to write. The possible values are V0_11 and V0_12. The default is V0_12.

PaddingTolerance double

A float between 0 and 1 that defines the tolerance for block padding as a decimal fraction of stripe size. The default value is 0.05, which means 5 percent of stripe size. For the default values of 64 MiB ORC stripes and 256 MiB HDFS blocks, the default block padding tolerance of 5 percent reserves a maximum of 3.2 MiB for padding within the 256 MiB block. In such a case, if the available size within the block is more than 3.2 MiB, a new, smaller stripe is inserted to fit within that space. This ensures that no stripe crosses block boundaries and causes remote reads within a node-local task. Kinesis Data Firehose ignores this parameter when enable_padding is false.

RowIndexStride int

The number of rows between index entries. The default is 10000 and the minimum is 1000.

StripeSizeBytes int

The number of bytes in each stripe. The default is 64 MiB and the minimum is 8 MiB.

BlockSizeBytes int

The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.

BloomFilterColumns []string

A list of column names for which you want Kinesis Data Firehose to create bloom filters.

BloomFilterFalsePositiveProbability float64

The Bloom filter false positive probability (FPP). The lower the FPP, the bigger the Bloom filter. The default value is 0.05, the minimum is 0, and the maximum is 1.

Compression string

The compression codec to use over data blocks. The default is SNAPPY.

DictionaryKeyThreshold float64

A float between 0 and 1 that represents the maximum fraction of the total number of non-null rows that may be distinct before dictionary encoding is turned off for a column. To turn off dictionary encoding entirely, set this threshold to 0. To always use dictionary encoding, set it to 1.

EnablePadding bool

Set this to true to indicate that you want stripes to be padded to the HDFS block boundaries. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is false.

FormatVersion string

The version of the file to write. The possible values are V0_11 and V0_12. The default is V0_12.

PaddingTolerance float64

A float between 0 and 1 that defines the tolerance for block padding as a decimal fraction of stripe size. The default value is 0.05, which means 5 percent of stripe size. For the default values of 64 MiB ORC stripes and 256 MiB HDFS blocks, the default block padding tolerance of 5 percent reserves a maximum of 3.2 MiB for padding within the 256 MiB block. In such a case, if the available size within the block is more than 3.2 MiB, a new, smaller stripe is inserted to fit within that space. This ensures that no stripe crosses block boundaries and causes remote reads within a node-local task. Kinesis Data Firehose ignores this parameter when enable_padding is false.

RowIndexStride int

The number of rows between index entries. The default is 10000 and the minimum is 1000.

StripeSizeBytes int

The number of bytes in each stripe. The default is 64 MiB and the minimum is 8 MiB.

blockSizeBytes Integer

The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.

bloomFilterColumns List<String>

A list of column names for which you want Kinesis Data Firehose to create bloom filters.

bloomFilterFalsePositiveProbability Double

The Bloom filter false positive probability (FPP). The lower the FPP, the bigger the Bloom filter. The default value is 0.05, the minimum is 0, and the maximum is 1.

compression String

The compression codec to use over data blocks. The default is SNAPPY.

dictionaryKeyThreshold Double

A float between 0 and 1 that represents the maximum fraction of the total number of non-null rows that may be distinct before dictionary encoding is turned off for a column. To turn off dictionary encoding entirely, set this threshold to 0. To always use dictionary encoding, set it to 1.

enablePadding Boolean

Set this to true to indicate that you want stripes to be padded to the HDFS block boundaries. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is false.

formatVersion String

The version of the file to write. The possible values are V0_11 and V0_12. The default is V0_12.

paddingTolerance Double

A float between 0 and 1 that defines the tolerance for block padding as a decimal fraction of stripe size. The default value is 0.05, which means 5 percent of stripe size. For the default values of 64 MiB ORC stripes and 256 MiB HDFS blocks, the default block padding tolerance of 5 percent reserves a maximum of 3.2 MiB for padding within the 256 MiB block. In such a case, if the available size within the block is more than 3.2 MiB, a new, smaller stripe is inserted to fit within that space. This ensures that no stripe crosses block boundaries and causes remote reads within a node-local task. Kinesis Data Firehose ignores this parameter when enable_padding is false.

rowIndexStride Integer

The number of rows between index entries. The default is 10000 and the minimum is 1000.

stripeSizeBytes Integer

The number of bytes in each stripe. The default is 64 MiB and the minimum is 8 MiB.

blockSizeBytes number

The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.

bloomFilterColumns string[]

A list of column names for which you want Kinesis Data Firehose to create bloom filters.

bloomFilterFalsePositiveProbability number

The Bloom filter false positive probability (FPP). The lower the FPP, the bigger the Bloom filter. The default value is 0.05, the minimum is 0, and the maximum is 1.

compression string

The compression codec to use over data blocks. The default is SNAPPY.

dictionaryKeyThreshold number

A float between 0 and 1 that represents the maximum fraction of the total number of non-null rows that may be distinct before dictionary encoding is turned off for a column. To turn off dictionary encoding entirely, set this threshold to 0. To always use dictionary encoding, set it to 1.

enablePadding boolean

Set this to true to indicate that you want stripes to be padded to the HDFS block boundaries. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is false.

formatVersion string

The version of the file to write. The possible values are V0_11 and V0_12. The default is V0_12.

paddingTolerance number

A float between 0 and 1 that defines the tolerance for block padding as a decimal fraction of stripe size. The default value is 0.05, which means 5 percent of stripe size. For the default values of 64 MiB ORC stripes and 256 MiB HDFS blocks, the default block padding tolerance of 5 percent reserves a maximum of 3.2 MiB for padding within the 256 MiB block. In such a case, if the available size within the block is more than 3.2 MiB, a new, smaller stripe is inserted to fit within that space. This ensures that no stripe crosses block boundaries and causes remote reads within a node-local task. Kinesis Data Firehose ignores this parameter when enable_padding is false.

rowIndexStride number

The number of rows between index entries. The default is 10000 and the minimum is 1000.

stripeSizeBytes number

The number of bytes in each stripe. The default is 64 MiB and the minimum is 8 MiB.

block_size_bytes int

The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.

bloom_filter_columns Sequence[str]

A list of column names for which you want Kinesis Data Firehose to create bloom filters.

bloom_filter_false_positive_probability float

The Bloom filter false positive probability (FPP). The lower the FPP, the bigger the Bloom filter. The default value is 0.05, the minimum is 0, and the maximum is 1.

compression str

The compression codec to use over data blocks. The default is SNAPPY.

dictionary_key_threshold float

A float between 0 and 1 that represents the maximum fraction of the total number of non-null rows that may be distinct before dictionary encoding is turned off for a column. To turn off dictionary encoding entirely, set this threshold to 0. To always use dictionary encoding, set it to 1.

enable_padding bool

Set this to true to indicate that you want stripes to be padded to the HDFS block boundaries. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is false.

format_version str

The version of the file to write. The possible values are V0_11 and V0_12. The default is V0_12.

padding_tolerance float

A float between 0 and 1 that defines the tolerance for block padding as a decimal fraction of stripe size. The default value is 0.05, which means 5 percent of stripe size. For the default values of 64 MiB ORC stripes and 256 MiB HDFS blocks, the default block padding tolerance of 5 percent reserves a maximum of 3.2 MiB for padding within the 256 MiB block. In such a case, if the available size within the block is more than 3.2 MiB, a new, smaller stripe is inserted to fit within that space. This ensures that no stripe crosses block boundaries and causes remote reads within a node-local task. Kinesis Data Firehose ignores this parameter when enable_padding is false.

row_index_stride int

The number of rows between index entries. The default is 10000 and the minimum is 1000.

stripe_size_bytes int

The number of bytes in each stripe. The default is 64 MiB and the minimum is 8 MiB.

blockSizeBytes Number

The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.

bloomFilterColumns List<String>

A list of column names for which you want Kinesis Data Firehose to create bloom filters.

bloomFilterFalsePositiveProbability Number

The Bloom filter false positive probability (FPP). The lower the FPP, the bigger the Bloom filter. The default value is 0.05, the minimum is 0, and the maximum is 1.

compression String

The compression codec to use over data blocks. The default is SNAPPY.

dictionaryKeyThreshold Number

A float between 0 and 1 that represents the maximum fraction of the total number of non-null rows that may be distinct before dictionary encoding is turned off for a column. To turn off dictionary encoding entirely, set this threshold to 0. To always use dictionary encoding, set it to 1.

enablePadding Boolean

Set this to true to indicate that you want stripes to be padded to the HDFS block boundaries. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is false.

formatVersion String

The version of the file to write. The possible values are V0_11 and V0_12. The default is V0_12.

paddingTolerance Number

A float between 0 and 1 that defines the tolerance for block padding as a decimal fraction of stripe size. The default value is 0.05, which means 5 percent of stripe size. For the default values of 64 MiB ORC stripes and 256 MiB HDFS blocks, the default block padding tolerance of 5 percent reserves a maximum of 3.2 MiB for padding within the 256 MiB block. In such a case, if the available size within the block is more than 3.2 MiB, a new, smaller stripe is inserted to fit within that space. This ensures that no stripe crosses block boundaries and causes remote reads within a node-local task. Kinesis Data Firehose ignores this parameter when enable_padding is false.

rowIndexStride Number

The number of rows between index entries. The default is 10000 and the minimum is 1000.

stripeSizeBytes Number

The number of bytes in each stripe. The default is 64 MiB and the minimum is 8 MiB.
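
A hedged C# sketch of an OrcSerDe block that overrides a few of the defaults above; the 128 MiB block size and the hypothetical customer_id bloom-filter column are illustrative values, not recommendations:

var orcSerDe = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializerOrcSerDeArgs
{
    BlockSizeBytes = 134217728,  // 128 MiB; the default is 256 MiB, the minimum 64 MiB
    BloomFilterColumns = new[] { "customer_id" },  // hypothetical column name
    Compression = "SNAPPY",  // the default codec
    EnablePadding = true,  // pad stripes to HDFS block boundaries
    PaddingTolerance = 0.05,  // ignored when EnablePadding is false
    StripeSizeBytes = 67108864,  // 64 MiB; the default (minimum is 8 MiB)
};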

FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializerParquetSerDe

BlockSizeBytes int

The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.

Compression string

The compression codec to use over data blocks. The possible values are UNCOMPRESSED, SNAPPY, and GZIP, with the default being SNAPPY. Use SNAPPY for higher decompression speed. Use GZIP if the compression ratio is more important than speed.

EnableDictionaryCompression bool

Indicates whether to enable dictionary compression.

MaxPaddingBytes int

The maximum amount of padding to apply. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 0.

PageSizeBytes int

The Parquet page size. Column chunks are divided into pages. A page is conceptually an indivisible unit (in terms of compression and encoding). The minimum value is 64 KiB and the default is 1 MiB.

WriterVersion string

Indicates the version of row format to output. The possible values are V1 and V2. The default is V1.

BlockSizeBytes int

The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.

Compression string

The compression codec to use over data blocks. The possible values are UNCOMPRESSED, SNAPPY, and GZIP, with the default being SNAPPY. Use SNAPPY for higher decompression speed. Use GZIP if the compression ratio is more important than speed.

EnableDictionaryCompression bool

Indicates whether to enable dictionary compression.

MaxPaddingBytes int

The maximum amount of padding to apply. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 0.

PageSizeBytes int

The Parquet page size. Column chunks are divided into pages. A page is conceptually an indivisible unit (in terms of compression and encoding). The minimum value is 64 KiB and the default is 1 MiB.

WriterVersion string

Indicates the version of row format to output. The possible values are V1 and V2. The default is V1.

blockSizeBytes Integer

The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.

compression String

The compression codec to use over data blocks. The possible values are UNCOMPRESSED, SNAPPY, and GZIP, with the default being SNAPPY. Use SNAPPY for higher decompression speed. Use GZIP if the compression ratio is more important than speed.

enableDictionaryCompression Boolean

Indicates whether to enable dictionary compression.

maxPaddingBytes Integer

The maximum amount of padding to apply. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 0.

pageSizeBytes Integer

The Parquet page size. Column chunks are divided into pages. A page is conceptually an indivisible unit (in terms of compression and encoding). The minimum value is 64 KiB and the default is 1 MiB.

writerVersion String

Indicates the version of row format to output. The possible values are V1 and V2. The default is V1.

blockSizeBytes number

The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.

compression string

The compression codec to use over data blocks. The possible values are UNCOMPRESSED, SNAPPY, and GZIP, with the default being SNAPPY. Use SNAPPY for higher decompression speed. Use GZIP if the compression ratio is more important than speed.

enableDictionaryCompression boolean

Indicates whether to enable dictionary compression.

maxPaddingBytes number

The maximum amount of padding to apply. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 0.

pageSizeBytes number

The Parquet page size. Column chunks are divided into pages. A page is conceptually an indivisible unit (in terms of compression and encoding). The minimum value is 64 KiB and the default is 1 MiB.

writerVersion string

Indicates the version of row format to output. The possible values are V1 and V2. The default is V1.

block_size_bytes int

The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.

compression str

The compression codec to use over data blocks. The possible values are UNCOMPRESSED, SNAPPY, and GZIP, with the default being SNAPPY. Use SNAPPY for higher decompression speed. Use GZIP if the compression ratio is more important than speed.

enable_dictionary_compression bool

Indicates whether to enable dictionary compression.

max_padding_bytes int

The maximum amount of padding to apply. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 0.

page_size_bytes int

The Parquet page size. Column chunks are divided into pages. A page is conceptually an indivisible unit (in terms of compression and encoding). The minimum value is 64 KiB and the default is 1 MiB.

writer_version str

Indicates the version of row format to output. The possible values are V1 and V2. The default is V1.

blockSizeBytes Number

The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.

compression String

The compression codec to use over data blocks. The possible values are UNCOMPRESSED, SNAPPY, and GZIP, with the default being SNAPPY. Use SNAPPY for higher decompression speed. Use GZIP if the compression ratio is more important than speed.

enableDictionaryCompression Boolean

Indicates whether to enable dictionary compression.

maxPaddingBytes Number

The maximum amount of padding to apply. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 0.

pageSizeBytes Number

The Parquet page size. Column chunks are divided into pages. A page is conceptually an indivisible unit (in terms of compression and encoding). The minimum value is 64 KiB and the default is 1 MiB.

writerVersion String

Indicates the version of row format to output. The possible values are V1 and V2. The default is V1.
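
Likewise, an illustrative C# sketch of a ParquetSerDe block; GZIP is chosen here purely to demonstrate trading decompression speed for compression ratio, as the compression description above suggests:

var parquetSerDe = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializerParquetSerDeArgs
{
    Compression = "GZIP",  // favor compression ratio over decompression speed
    EnableDictionaryCompression = true,
    PageSizeBytes = 1048576,  // 1 MiB pages; the default (minimum is 64 KiB)
    WriterVersion = "V1",  // the default row format version
};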

FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationSchemaConfiguration

DatabaseName string

Specifies the name of the AWS Glue database that contains the schema for the output data.

RoleArn string

The role that Kinesis Data Firehose can use to access AWS Glue. This role must be in the same account you use for Kinesis Data Firehose. Cross-account roles aren't allowed.

TableName string

Specifies the AWS Glue table that contains the column information that constitutes your data schema.

CatalogId string

The ID of the AWS Glue Data Catalog. If you don't supply this, the AWS account ID is used by default.

Region string

The AWS Region where the Glue table resides. If you don't specify an AWS Region, the default is the current region.

VersionId string

Specifies the table version for the output data schema. Defaults to LATEST.

DatabaseName string

Specifies the name of the AWS Glue database that contains the schema for the output data.

RoleArn string

The role that Kinesis Data Firehose can use to access AWS Glue. This role must be in the same account you use for Kinesis Data Firehose. Cross-account roles aren't allowed.

TableName string

Specifies the AWS Glue table that contains the column information that constitutes your data schema.

CatalogId string

The ID of the AWS Glue Data Catalog. If you don't supply this, the AWS account ID is used by default.

Region string

The AWS Region where the Glue table resides. If you don't specify an AWS Region, the default is the current region.

VersionId string

Specifies the table version for the output data schema. Defaults to LATEST.

databaseName String

Specifies the name of the AWS Glue database that contains the schema for the output data.

roleArn String

The role that Kinesis Data Firehose can use to access AWS Glue. This role must be in the same account you use for Kinesis Data Firehose. Cross-account roles aren't allowed.

tableName String

Specifies the AWS Glue table that contains the column information that constitutes your data schema.

catalogId String

The ID of the AWS Glue Data Catalog. If you don't supply this, the AWS account ID is used by default.

region String

The AWS Region where the Glue table resides. If you don't specify an AWS Region, the default is the current region.

versionId String

Specifies the table version for the output data schema. Defaults to LATEST.

databaseName string

Specifies the name of the AWS Glue database that contains the schema for the output data.

roleArn string

The role that Kinesis Data Firehose can use to access AWS Glue. This role must be in the same account you use for Kinesis Data Firehose. Cross-account roles aren't allowed.

tableName string

Specifies the AWS Glue table that contains the column information that constitutes your data schema.

catalogId string

The ID of the AWS Glue Data Catalog. If you don't supply this, the AWS account ID is used by default.

region string

The AWS Region where the Glue table resides. If you don't specify an AWS Region, the default is the current region.

versionId string

Specifies the table version for the output data schema. Defaults to LATEST.

database_name str

Specifies the name of the AWS Glue database that contains the schema for the output data.

role_arn str

The role that Kinesis Data Firehose can use to access AWS Glue. This role must be in the same account you use for Kinesis Data Firehose. Cross-account roles aren't allowed.

table_name str

Specifies the AWS Glue table that contains the column information that constitutes your data schema.

catalog_id str

The ID of the AWS Glue Data Catalog. If you don't supply this, the AWS account ID is used by default.

region str

The AWS Region where the Glue table resides. If you don't specify an AWS Region, the default is the current region.

version_id str

Specifies the table version for the output data schema. Defaults to LATEST.

databaseName String

Specifies the name of the AWS Glue database that contains the schema for the output data.

roleArn String

The role that Kinesis Data Firehose can use to access AWS Glue. This role must be in the same account you use for Kinesis Data Firehose. Cross-account roles aren't allowed.

tableName String

Specifies the AWS Glue table that contains the column information that constitutes your data schema.

catalogId String

The ID of the AWS Glue Data Catalog. If you don't supply this, the AWS account ID is used by default.

region String

The AWS Region where the Glue table resides. If you don't specify an AWS Region, the default is the current region.

versionId String

Specifies the table version for the output data schema. Defaults to LATEST.
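
An illustrative C# sketch of a schemaConfiguration block; glueDatabase, glueTable, and firehoseRole are assumed to be existing resources elsewhere in the program, and the optional arguments are omitted to take their defaults:

var schemaConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationSchemaConfigurationArgs
{
    DatabaseName = glueDatabase.Name,  // hypothetical aws.glue.CatalogDatabase resource
    TableName = glueTable.Name,        // hypothetical aws.glue.CatalogTable resource
    RoleArn = firehoseRole.Arn,        // must be in the same account; cross-account roles aren't allowed
    // CatalogId, Region, and VersionId default to the account ID, the current region, and LATEST.
};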

FirehoseDeliveryStreamExtendedS3ConfigurationDynamicPartitioningConfiguration

Enabled bool

Enables or disables dynamic partitioning. Defaults to false.

RetryDuration int

The total number of seconds Firehose spends on retries. Valid values are between 0 and 7200. The default is 300.

Enabled bool

Enables or disables dynamic partitioning. Defaults to false.

RetryDuration int

The total number of seconds Firehose spends on retries. Valid values are between 0 and 7200. The default is 300.

enabled Boolean

Enables or disables dynamic partitioning. Defaults to false.

retryDuration Integer

The total number of seconds Firehose spends on retries. Valid values are between 0 and 7200. The default is 300.

enabled boolean

Enables or disables dynamic partitioning. Defaults to false.

retryDuration number

The total number of seconds Firehose spends on retries. Valid values are between 0 and 7200. The default is 300.

enabled bool

Enables or disables dynamic partitioning. Defaults to false.

retry_duration int

The total number of seconds Firehose spends on retries. Valid values are between 0 and 7200. The default is 300.

enabled Boolean

Enables or disables dynamic partitioning. Defaults to false.

retryDuration Number

The total number of seconds Firehose spends on retries. Valid values are between 0 and 7200. The default is 300.
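
An illustrative C# sketch of how the dynamicPartitioningConfiguration block sits inside the extended S3 configuration; the prefix with a hypothetical customer_id partition key is an assumption, since dynamic partitioning only takes effect when the S3 prefix references partition keys:

var extendedS3WithPartitioning = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationArgs
{
    RoleArn = firehoseRole.Arn,
    BucketArn = bucket.Arn,
    DynamicPartitioningConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationDynamicPartitioningConfigurationArgs
    {
        Enabled = true,
        RetryDuration = 300,  // seconds; valid values are 0 through 7200
    },
    // The partition key is produced by a MetadataExtraction processor (see the processing configuration below).
    Prefix = "data/customer_id=!{partitionKeyFromQuery:customer_id}/",
    ErrorOutputPrefix = "errors/",
};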

FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfiguration

Enabled bool

Enables or disables data processing.

Processors List<FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessor>

Array of data processors. More details are given below.

Enabled bool

Enables or disables data processing.

Processors []FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessor

Array of data processors. More details are given below.

enabled Boolean

Enables or disables data processing.

processors List<FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessor>

Array of data processors. More details are given below.

enabled boolean

Enables or disables data processing.

processors FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessor[]

Array of data processors. More details are given below.

enabled bool

Enables or disables data processing.

processors Sequence[FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessor]

Array of data processors. More details are given below.

enabled Boolean

Enables or disables data processing.

processors List<Property Map>

Array of data processors. More details are given below.
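
An illustrative C# sketch of a processingConfiguration block using the MetadataExtraction processor type; the JQ query and the customer_id key are assumptions that pair with the dynamic partitioning prefix sketched above:

var processingConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationArgs
{
    Enabled = true,
    Processors = new[]
    {
        new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs
        {
            Type = "MetadataExtraction",
            Parameters = new[]
            {
                new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs
                {
                    ParameterName = "JsonParsingEngine",
                    ParameterValue = "JQ-1.6",
                },
                new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs
                {
                    ParameterName = "MetadataExtractionQuery",
                    ParameterValue = "{customer_id:.customer_id}",  // hypothetical JQ expression
                },
            },
        },
    },
};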

FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessor

Type string

The type of processor. Valid Values: RecordDeAggregation, Lambda, MetadataExtraction, AppendDelimiterToRecord. Validation is done against AWS SDK constants, so values not explicitly listed may also work.

Parameters List<FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameter>

Array of processor parameters. More details are given below.

Type string

The type of processor. Valid Values: RecordDeAggregation, Lambda, MetadataExtraction, AppendDelimiterToRecord. Validation is done against AWS SDK constants, so values not explicitly listed may also work.