aws.kinesis.FirehoseDeliveryStream
Provides a Kinesis Firehose Delivery Stream resource. Amazon Kinesis Firehose is a fully managed, elastic service for delivering real-time data streams to destinations such as Amazon S3 and Amazon Redshift.
For more details, see the Amazon Kinesis Firehose Documentation.
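At its core, a delivery stream pairs a destination type (for example extended_s3, redshift, or elasticsearch) with the matching configuration block, plus an IAM role that Firehose can assume. The following minimal TypeScript sketch illustrates that shape under those assumptions; the resource names are illustrative, and the complete, multi-language programs under Example Usage below cover processing, partitioning, and backup options in detail.
import * as aws from "@pulumi/aws";

// Minimal sketch: a bucket, a role Firehose can assume, and a stream that
// delivers straight to the bucket. In practice the role also needs a policy
// granting write access to the bucket (not shown here).
const bucket = new aws.s3.BucketV2("quickstart-bucket");

const assumeRole = aws.iam.getPolicyDocument({
    statements: [{
        effect: "Allow",
        principals: [{ type: "Service", identifiers: ["firehose.amazonaws.com"] }],
        actions: ["sts:AssumeRole"],
    }],
});

const firehoseRole = new aws.iam.Role("quickstart-firehose-role", {
    assumeRolePolicy: assumeRole.then(doc => doc.json),
});

const stream = new aws.kinesis.FirehoseDeliveryStream("quickstart-stream", {
    destination: "extended_s3",
    extendedS3Configuration: {
        roleArn: firehoseRole.arn,
        bucketArn: bucket.arn,
    },
});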
Example Usage
Extended S3 Destination
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aws = Pulumi.Aws;
return await Deployment.RunAsync(() =>
{
var bucket = new Aws.S3.BucketV2("bucket");
var firehoseAssumeRole = Aws.Iam.GetPolicyDocument.Invoke(new()
{
Statements = new[]
{
new Aws.Iam.Inputs.GetPolicyDocumentStatementInputArgs
{
Effect = "Allow",
Principals = new[]
{
new Aws.Iam.Inputs.GetPolicyDocumentStatementPrincipalInputArgs
{
Type = "Service",
Identifiers = new[]
{
"firehose.amazonaws.com",
},
},
},
Actions = new[]
{
"sts:AssumeRole",
},
},
},
});
var firehoseRole = new Aws.Iam.Role("firehoseRole", new()
{
AssumeRolePolicy = firehoseAssumeRole.Apply(getPolicyDocumentResult => getPolicyDocumentResult.Json),
});
var lambdaAssumeRole = Aws.Iam.GetPolicyDocument.Invoke(new()
{
Statements = new[]
{
new Aws.Iam.Inputs.GetPolicyDocumentStatementInputArgs
{
Effect = "Allow",
Principals = new[]
{
new Aws.Iam.Inputs.GetPolicyDocumentStatementPrincipalInputArgs
{
Type = "Service",
Identifiers = new[]
{
"lambda.amazonaws.com",
},
},
},
Actions = new[]
{
"sts:AssumeRole",
},
},
},
});
var lambdaIam = new Aws.Iam.Role("lambdaIam", new()
{
AssumeRolePolicy = lambdaAssumeRole.Apply(getPolicyDocumentResult => getPolicyDocumentResult.Json),
});
var lambdaProcessor = new Aws.Lambda.Function("lambdaProcessor", new()
{
Code = new FileArchive("lambda.zip"),
Role = lambdaIam.Arn,
Handler = "exports.handler",
Runtime = "nodejs16.x",
});
var extendedS3Stream = new Aws.Kinesis.FirehoseDeliveryStream("extendedS3Stream", new()
{
Destination = "extended_s3",
ExtendedS3Configuration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationArgs
{
RoleArn = firehoseRole.Arn,
BucketArn = bucket.Arn,
ProcessingConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationArgs
{
Enabled = true,
Processors = new[]
{
new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs
{
Type = "Lambda",
Parameters = new[]
{
new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs
{
ParameterName = "LambdaArn",
ParameterValue = lambdaProcessor.Arn.Apply(arn => $"{arn}:$LATEST"),
},
},
},
},
},
},
});
var bucketAcl = new Aws.S3.BucketAclV2("bucketAcl", new()
{
Bucket = bucket.Id,
Acl = "private",
});
});
package main
import (
"fmt"
"github.com/pulumi/pulumi-aws/sdk/v5/go/aws/iam"
"github.com/pulumi/pulumi-aws/sdk/v5/go/aws/kinesis"
"github.com/pulumi/pulumi-aws/sdk/v5/go/aws/lambda"
"github.com/pulumi/pulumi-aws/sdk/v5/go/aws/s3"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
bucket, err := s3.NewBucketV2(ctx, "bucket", nil)
if err != nil {
return err
}
firehoseAssumeRole, err := iam.GetPolicyDocument(ctx, &iam.GetPolicyDocumentArgs{
Statements: []iam.GetPolicyDocumentStatement{
{
Effect: pulumi.StringRef("Allow"),
Principals: []iam.GetPolicyDocumentStatementPrincipal{
{
Type: "Service",
Identifiers: []string{
"firehose.amazonaws.com",
},
},
},
Actions: []string{
"sts:AssumeRole",
},
},
},
}, nil)
if err != nil {
return err
}
firehoseRole, err := iam.NewRole(ctx, "firehoseRole", &iam.RoleArgs{
AssumeRolePolicy: pulumi.String(firehoseAssumeRole.Json),
})
if err != nil {
return err
}
lambdaAssumeRole, err := iam.GetPolicyDocument(ctx, &iam.GetPolicyDocumentArgs{
Statements: []iam.GetPolicyDocumentStatement{
{
Effect: pulumi.StringRef("Allow"),
Principals: []iam.GetPolicyDocumentStatementPrincipal{
{
Type: "Service",
Identifiers: []string{
"lambda.amazonaws.com",
},
},
},
Actions: []string{
"sts:AssumeRole",
},
},
},
}, nil)
if err != nil {
return err
}
lambdaIam, err := iam.NewRole(ctx, "lambdaIam", &iam.RoleArgs{
AssumeRolePolicy: pulumi.String(lambdaAssumeRole.Json),
})
if err != nil {
return err
}
lambdaProcessor, err := lambda.NewFunction(ctx, "lambdaProcessor", &lambda.FunctionArgs{
Code: pulumi.NewFileArchive("lambda.zip"),
Role: lambdaIam.Arn,
Handler: pulumi.String("exports.handler"),
Runtime: pulumi.String("nodejs16.x"),
})
if err != nil {
return err
}
_, err = kinesis.NewFirehoseDeliveryStream(ctx, "extendedS3Stream", &kinesis.FirehoseDeliveryStreamArgs{
Destination: pulumi.String("extended_s3"),
ExtendedS3Configuration: &kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationArgs{
RoleArn: firehoseRole.Arn,
BucketArn: bucket.Arn,
ProcessingConfiguration: &kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationArgs{
Enabled: pulumi.Bool(true),
Processors: kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArray{
&kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs{
Type: pulumi.String("Lambda"),
Parameters: kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArray{
&kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs{
ParameterName: pulumi.String("LambdaArn"),
ParameterValue: lambdaProcessor.Arn.ApplyT(func(arn string) (string, error) {
return fmt.Sprintf("%v:$LATEST", arn), nil
}).(pulumi.StringOutput),
},
},
},
},
},
},
})
if err != nil {
return err
}
_, err = s3.NewBucketAclV2(ctx, "bucketAcl", &s3.BucketAclV2Args{
Bucket: bucket.ID(),
Acl: pulumi.String("private"),
})
if err != nil {
return err
}
return nil
})
}
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.s3.BucketV2;
import com.pulumi.aws.iam.IamFunctions;
import com.pulumi.aws.iam.inputs.GetPolicyDocumentArgs;
import com.pulumi.aws.iam.inputs.GetPolicyDocumentStatementArgs;
import com.pulumi.aws.iam.inputs.GetPolicyDocumentStatementPrincipalArgs;
import com.pulumi.aws.iam.Role;
import com.pulumi.aws.iam.RoleArgs;
import com.pulumi.aws.lambda.Function;
import com.pulumi.aws.lambda.FunctionArgs;
import com.pulumi.aws.kinesis.FirehoseDeliveryStream;
import com.pulumi.aws.kinesis.FirehoseDeliveryStreamArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamExtendedS3ConfigurationArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs;
import com.pulumi.aws.s3.BucketAclV2;
import com.pulumi.aws.s3.BucketAclV2Args;
import com.pulumi.asset.FileArchive;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var bucket = new BucketV2("bucket");
final var firehoseAssumeRole = IamFunctions.getPolicyDocument(GetPolicyDocumentArgs.builder()
.statements(GetPolicyDocumentStatementArgs.builder()
.effect("Allow")
.principals(GetPolicyDocumentStatementPrincipalArgs.builder()
.type("Service")
.identifiers("firehose.amazonaws.com")
.build())
.actions("sts:AssumeRole")
.build())
.build());
var firehoseRole = new Role("firehoseRole", RoleArgs.builder()
.assumeRolePolicy(firehoseAssumeRole.applyValue(getPolicyDocumentResult -> getPolicyDocumentResult.json()))
.build());
final var lambdaAssumeRole = IamFunctions.getPolicyDocument(GetPolicyDocumentArgs.builder()
.statements(GetPolicyDocumentStatementArgs.builder()
.effect("Allow")
.principals(GetPolicyDocumentStatementPrincipalArgs.builder()
.type("Service")
.identifiers("lambda.amazonaws.com")
.build())
.actions("sts:AssumeRole")
.build())
.build());
var lambdaIam = new Role("lambdaIam", RoleArgs.builder()
.assumeRolePolicy(lambdaAssumeRole.applyValue(getPolicyDocumentResult -> getPolicyDocumentResult.json()))
.build());
var lambdaProcessor = new Function("lambdaProcessor", FunctionArgs.builder()
.code(new FileArchive("lambda.zip"))
.role(lambdaIam.arn())
.handler("exports.handler")
.runtime("nodejs16.x")
.build());
var extendedS3Stream = new FirehoseDeliveryStream("extendedS3Stream", FirehoseDeliveryStreamArgs.builder()
.destination("extended_s3")
.extendedS3Configuration(FirehoseDeliveryStreamExtendedS3ConfigurationArgs.builder()
.roleArn(firehoseRole.arn())
.bucketArn(bucket.arn())
.processingConfiguration(FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationArgs.builder()
.enabled(true)
.processors(FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs.builder()
.type("Lambda")
.parameters(FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs.builder()
.parameterName("LambdaArn")
.parameterValue(lambdaProcessor.arn().applyValue(arn -> String.format("%s:$LATEST", arn)))
.build())
.build())
.build())
.build())
.build());
var bucketAcl = new BucketAclV2("bucketAcl", BucketAclV2Args.builder()
.bucket(bucket.id())
.acl("private")
.build());
}
}
import pulumi
import pulumi_aws as aws
bucket = aws.s3.BucketV2("bucket")
firehose_assume_role = aws.iam.get_policy_document(statements=[aws.iam.GetPolicyDocumentStatementArgs(
effect="Allow",
principals=[aws.iam.GetPolicyDocumentStatementPrincipalArgs(
type="Service",
identifiers=["firehose.amazonaws.com"],
)],
actions=["sts:AssumeRole"],
)])
firehose_role = aws.iam.Role("firehoseRole", assume_role_policy=firehose_assume_role.json)
lambda_assume_role = aws.iam.get_policy_document(statements=[aws.iam.GetPolicyDocumentStatementArgs(
effect="Allow",
principals=[aws.iam.GetPolicyDocumentStatementPrincipalArgs(
type="Service",
identifiers=["lambda.amazonaws.com"],
)],
actions=["sts:AssumeRole"],
)])
lambda_iam = aws.iam.Role("lambdaIam", assume_role_policy=lambda_assume_role.json)
lambda_processor = aws.lambda_.Function("lambdaProcessor",
code=pulumi.FileArchive("lambda.zip"),
role=lambda_iam.arn,
handler="exports.handler",
runtime="nodejs16.x")
extended_s3_stream = aws.kinesis.FirehoseDeliveryStream("extendedS3Stream",
destination="extended_s3",
extended_s3_configuration=aws.kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationArgs(
role_arn=firehose_role.arn,
bucket_arn=bucket.arn,
processing_configuration=aws.kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationArgs(
enabled=True,
processors=[aws.kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs(
type="Lambda",
parameters=[aws.kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs(
parameter_name="LambdaArn",
parameter_value=lambda_processor.arn.apply(lambda arn: f"{arn}:$LATEST"),
)],
)],
),
))
bucket_acl = aws.s3.BucketAclV2("bucketAcl",
bucket=bucket.id,
acl="private")
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
const bucket = new aws.s3.BucketV2("bucket", {});
const firehoseAssumeRole = aws.iam.getPolicyDocument({
statements: [{
effect: "Allow",
principals: [{
type: "Service",
identifiers: ["firehose.amazonaws.com"],
}],
actions: ["sts:AssumeRole"],
}],
});
const firehoseRole = new aws.iam.Role("firehoseRole", {assumeRolePolicy: firehoseAssumeRole.then(firehoseAssumeRole => firehoseAssumeRole.json)});
const lambdaAssumeRole = aws.iam.getPolicyDocument({
statements: [{
effect: "Allow",
principals: [{
type: "Service",
identifiers: ["lambda.amazonaws.com"],
}],
actions: ["sts:AssumeRole"],
}],
});
const lambdaIam = new aws.iam.Role("lambdaIam", {assumeRolePolicy: lambdaAssumeRole.then(lambdaAssumeRole => lambdaAssumeRole.json)});
const lambdaProcessor = new aws.lambda.Function("lambdaProcessor", {
code: new pulumi.asset.FileArchive("lambda.zip"),
role: lambdaIam.arn,
handler: "exports.handler",
runtime: "nodejs16.x",
});
const extendedS3Stream = new aws.kinesis.FirehoseDeliveryStream("extendedS3Stream", {
destination: "extended_s3",
extendedS3Configuration: {
roleArn: firehoseRole.arn,
bucketArn: bucket.arn,
processingConfiguration: {
enabled: true,
processors: [{
type: "Lambda",
parameters: [{
parameterName: "LambdaArn",
parameterValue: pulumi.interpolate`${lambdaProcessor.arn}:$LATEST`,
}],
}],
},
},
});
const bucketAcl = new aws.s3.BucketAclV2("bucketAcl", {
bucket: bucket.id,
acl: "private",
});
resources:
extendedS3Stream:
type: aws:kinesis:FirehoseDeliveryStream
properties:
destination: extended_s3
extendedS3Configuration:
roleArn: ${firehoseRole.arn}
bucketArn: ${bucket.arn}
processingConfiguration:
enabled: 'true'
processors:
- type: Lambda
parameters:
- parameterName: LambdaArn
parameterValue: ${lambdaProcessor.arn}:$LATEST
bucket:
type: aws:s3:BucketV2
bucketAcl:
type: aws:s3:BucketAclV2
properties:
bucket: ${bucket.id}
acl: private
firehoseRole:
type: aws:iam:Role
properties:
assumeRolePolicy: ${firehoseAssumeRole.json}
lambdaIam:
type: aws:iam:Role
properties:
assumeRolePolicy: ${lambdaAssumeRole.json}
lambdaProcessor:
type: aws:lambda:Function
properties:
code:
fn::FileArchive: lambda.zip
role: ${lambdaIam.arn}
handler: exports.handler
runtime: nodejs16.x
variables:
firehoseAssumeRole:
fn::invoke:
Function: aws:iam:getPolicyDocument
Arguments:
statements:
- effect: Allow
principals:
- type: Service
identifiers:
- firehose.amazonaws.com
actions:
- sts:AssumeRole
lambdaAssumeRole:
fn::invoke:
Function: aws:iam:getPolicyDocument
Arguments:
statements:
- effect: Allow
principals:
- type: Service
identifiers:
- lambda.amazonaws.com
actions:
- sts:AssumeRole
Extended S3 Destination with dynamic partitioning
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aws = Pulumi.Aws;
return await Deployment.RunAsync(() =>
{
var extendedS3Stream = new Aws.Kinesis.FirehoseDeliveryStream("extendedS3Stream", new()
{
Destination = "extended_s3",
ExtendedS3Configuration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationArgs
{
RoleArn = aws_iam_role.Firehose_role.Arn,
BucketArn = aws_s3_bucket.Bucket.Arn,
BufferSize = 64,
DynamicPartitioningConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationDynamicPartitioningConfigurationArgs
{
Enabled = true,
},
Prefix = "data/customer_id=!{partitionKeyFromQuery:customer_id}/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/",
ErrorOutputPrefix = "errors/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/!{firehose:error-output-type}/",
ProcessingConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationArgs
{
Enabled = true,
Processors = new[]
{
new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs
{
Type = "RecordDeAggregation",
Parameters = new[]
{
new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs
{
ParameterName = "SubRecordType",
ParameterValue = "JSON",
},
},
},
new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs
{
Type = "AppendDelimiterToRecord",
},
new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs
{
Type = "MetadataExtraction",
Parameters = new[]
{
new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs
{
ParameterName = "JsonParsingEngine",
ParameterValue = "JQ-1.6",
},
new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs
{
ParameterName = "MetadataExtractionQuery",
ParameterValue = "{customer_id:.customer_id}",
},
},
},
},
},
},
});
});
package main
import (
"github.com/pulumi/pulumi-aws/sdk/v5/go/aws/kinesis"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := kinesis.NewFirehoseDeliveryStream(ctx, "extendedS3Stream", &kinesis.FirehoseDeliveryStreamArgs{
Destination: pulumi.String("extended_s3"),
ExtendedS3Configuration: &kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationArgs{
RoleArn: pulumi.Any(aws_iam_role.Firehose_role.Arn),
BucketArn: pulumi.Any(aws_s3_bucket.Bucket.Arn),
BufferSize: pulumi.Int(64),
DynamicPartitioningConfiguration: &kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationDynamicPartitioningConfigurationArgs{
Enabled: pulumi.Bool(true),
},
Prefix: pulumi.String("data/customer_id=!{partitionKeyFromQuery:customer_id}/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/"),
ErrorOutputPrefix: pulumi.String("errors/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/!{firehose:error-output-type}/"),
ProcessingConfiguration: &kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationArgs{
Enabled: pulumi.Bool(true),
Processors: kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArray{
&kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs{
Type: pulumi.String("RecordDeAggregation"),
Parameters: kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArray{
&kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs{
ParameterName: pulumi.String("SubRecordType"),
ParameterValue: pulumi.String("JSON"),
},
},
},
&kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs{
Type: pulumi.String("AppendDelimiterToRecord"),
},
&kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs{
Type: pulumi.String("MetadataExtraction"),
Parameters: kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArray{
&kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs{
ParameterName: pulumi.String("JsonParsingEngine"),
ParameterValue: pulumi.String("JQ-1.6"),
},
&kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs{
ParameterName: pulumi.String("MetadataExtractionQuery"),
ParameterValue: pulumi.String("{customer_id:.customer_id}"),
},
},
},
},
},
},
})
if err != nil {
return err
}
return nil
})
}
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.kinesis.FirehoseDeliveryStream;
import com.pulumi.aws.kinesis.FirehoseDeliveryStreamArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamExtendedS3ConfigurationArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamExtendedS3ConfigurationDynamicPartitioningConfigurationArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var extendedS3Stream = new FirehoseDeliveryStream("extendedS3Stream", FirehoseDeliveryStreamArgs.builder()
.destination("extended_s3")
.extendedS3Configuration(FirehoseDeliveryStreamExtendedS3ConfigurationArgs.builder()
.roleArn(aws_iam_role.firehose_role().arn())
.bucketArn(aws_s3_bucket.bucket().arn())
.bufferSize(64)
.dynamicPartitioningConfiguration(FirehoseDeliveryStreamExtendedS3ConfigurationDynamicPartitioningConfigurationArgs.builder()
.enabled(true)
.build())
.prefix("data/customer_id=!{partitionKeyFromQuery:customer_id}/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/")
.errorOutputPrefix("errors/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/!{firehose:error-output-type}/")
.processingConfiguration(FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationArgs.builder()
.enabled(true)
.processors(
FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs.builder()
.type("RecordDeAggregation")
.parameters(FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs.builder()
.parameterName("SubRecordType")
.parameterValue("JSON")
.build())
.build(),
FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs.builder()
.type("AppendDelimiterToRecord")
.build(),
FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs.builder()
.type("MetadataExtraction")
.parameters(
FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs.builder()
.parameterName("JsonParsingEngine")
.parameterValue("JQ-1.6")
.build(),
FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs.builder()
.parameterName("MetadataExtractionQuery")
.parameterValue("{customer_id:.customer_id}")
.build())
.build())
.build())
.build())
.build());
}
}
import pulumi
import pulumi_aws as aws
extended_s3_stream = aws.kinesis.FirehoseDeliveryStream("extendedS3Stream",
destination="extended_s3",
extended_s3_configuration=aws.kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationArgs(
role_arn=aws_iam_role["firehose_role"]["arn"],
bucket_arn=aws_s3_bucket["bucket"]["arn"],
buffer_size=64,
dynamic_partitioning_configuration=aws.kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationDynamicPartitioningConfigurationArgs(
enabled=True,
),
prefix="data/customer_id=!{partitionKeyFromQuery:customer_id}/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/",
error_output_prefix="errors/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/!{firehose:error-output-type}/",
processing_configuration=aws.kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationArgs(
enabled=True,
processors=[
aws.kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs(
type="RecordDeAggregation",
parameters=[aws.kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs(
parameter_name="SubRecordType",
parameter_value="JSON",
)],
),
aws.kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs(
type="AppendDelimiterToRecord",
),
aws.kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs(
type="MetadataExtraction",
parameters=[
aws.kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs(
parameter_name="JsonParsingEngine",
parameter_value="JQ-1.6",
),
aws.kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs(
parameter_name="MetadataExtractionQuery",
parameter_value="{customer_id:.customer_id}",
),
],
),
],
),
))
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
const extendedS3Stream = new aws.kinesis.FirehoseDeliveryStream("extendedS3Stream", {
destination: "extended_s3",
extendedS3Configuration: {
roleArn: aws_iam_role.firehose_role.arn,
bucketArn: aws_s3_bucket.bucket.arn,
bufferSize: 64,
dynamicPartitioningConfiguration: {
enabled: true,
},
prefix: "data/customer_id=!{partitionKeyFromQuery:customer_id}/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/",
errorOutputPrefix: "errors/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/!{firehose:error-output-type}/",
processingConfiguration: {
enabled: true,
processors: [
{
type: "RecordDeAggregation",
parameters: [{
parameterName: "SubRecordType",
parameterValue: "JSON",
}],
},
{
type: "AppendDelimiterToRecord",
},
{
type: "MetadataExtraction",
parameters: [
{
parameterName: "JsonParsingEngine",
parameterValue: "JQ-1.6",
},
{
parameterName: "MetadataExtractionQuery",
parameterValue: "{customer_id:.customer_id}",
},
],
},
],
},
},
});
resources:
extendedS3Stream:
type: aws:kinesis:FirehoseDeliveryStream
properties:
destination: extended_s3
extendedS3Configuration:
roleArn: ${aws_iam_role.firehose_role.arn}
bucketArn: ${aws_s3_bucket.bucket.arn}
bufferSize: 64
dynamicPartitioningConfiguration:
enabled: 'true'
prefix: data/customer_id=!{partitionKeyFromQuery:customer_id}/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/
errorOutputPrefix: errors/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/!{firehose:error-output-type}/
processingConfiguration:
enabled: 'true'
processors:
- type: RecordDeAggregation
parameters:
- parameterName: SubRecordType
parameterValue: JSON
- type: AppendDelimiterToRecord
- type: MetadataExtraction
parameters:
- parameterName: JsonParsingEngine
parameterValue: JQ-1.6
- parameterName: MetadataExtractionQuery
parameterValue: '{customer_id:.customer_id}'
S3 Destination (deprecated)
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aws = Pulumi.Aws;
return await Deployment.RunAsync(() =>
{
var bucket = new Aws.S3.BucketV2("bucket");
var bucketAcl = new Aws.S3.BucketAclV2("bucketAcl", new()
{
Bucket = bucket.Id,
Acl = "private",
});
var assumeRole = Aws.Iam.GetPolicyDocument.Invoke(new()
{
Statements = new[]
{
new Aws.Iam.Inputs.GetPolicyDocumentStatementInputArgs
{
Effect = "Allow",
Principals = new[]
{
new Aws.Iam.Inputs.GetPolicyDocumentStatementPrincipalInputArgs
{
Type = "Service",
Identifiers = new[]
{
"firehose.amazonaws.com",
},
},
},
Actions = new[]
{
"sts:AssumeRole",
},
},
},
});
var firehoseRole = new Aws.Iam.Role("firehoseRole", new()
{
AssumeRolePolicy = assumeRole.Apply(getPolicyDocumentResult => getPolicyDocumentResult.Json),
});
var testStream = new Aws.Kinesis.FirehoseDeliveryStream("testStream", new()
{
Destination = "s3",
S3Configuration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamS3ConfigurationArgs
{
RoleArn = firehoseRole.Arn,
BucketArn = bucket.Arn,
},
});
});
package main
import (
"github.com/pulumi/pulumi-aws/sdk/v5/go/aws/iam"
"github.com/pulumi/pulumi-aws/sdk/v5/go/aws/kinesis"
"github.com/pulumi/pulumi-aws/sdk/v5/go/aws/s3"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
bucket, err := s3.NewBucketV2(ctx, "bucket", nil)
if err != nil {
return err
}
_, err = s3.NewBucketAclV2(ctx, "bucketAcl", &s3.BucketAclV2Args{
Bucket: bucket.ID(),
Acl: pulumi.String("private"),
})
if err != nil {
return err
}
assumeRole, err := iam.GetPolicyDocument(ctx, &iam.GetPolicyDocumentArgs{
Statements: []iam.GetPolicyDocumentStatement{
{
Effect: pulumi.StringRef("Allow"),
Principals: []iam.GetPolicyDocumentStatementPrincipal{
{
Type: "Service",
Identifiers: []string{
"firehose.amazonaws.com",
},
},
},
Actions: []string{
"sts:AssumeRole",
},
},
},
}, nil)
if err != nil {
return err
}
firehoseRole, err := iam.NewRole(ctx, "firehoseRole", &iam.RoleArgs{
AssumeRolePolicy: pulumi.String(assumeRole.Json),
})
if err != nil {
return err
}
_, err = kinesis.NewFirehoseDeliveryStream(ctx, "testStream", &kinesis.FirehoseDeliveryStreamArgs{
Destination: pulumi.String("s3"),
S3Configuration: &kinesis.FirehoseDeliveryStreamS3ConfigurationArgs{
RoleArn: firehoseRole.Arn,
BucketArn: bucket.Arn,
},
})
if err != nil {
return err
}
return nil
})
}
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.s3.BucketV2;
import com.pulumi.aws.s3.BucketAclV2;
import com.pulumi.aws.s3.BucketAclV2Args;
import com.pulumi.aws.iam.IamFunctions;
import com.pulumi.aws.iam.inputs.GetPolicyDocumentArgs;
import com.pulumi.aws.iam.inputs.GetPolicyDocumentStatementArgs;
import com.pulumi.aws.iam.inputs.GetPolicyDocumentStatementPrincipalArgs;
import com.pulumi.aws.iam.Role;
import com.pulumi.aws.iam.RoleArgs;
import com.pulumi.aws.kinesis.FirehoseDeliveryStream;
import com.pulumi.aws.kinesis.FirehoseDeliveryStreamArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamS3ConfigurationArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var bucket = new BucketV2("bucket");
var bucketAcl = new BucketAclV2("bucketAcl", BucketAclV2Args.builder()
.bucket(bucket.id())
.acl("private")
.build());
final var assumeRole = IamFunctions.getPolicyDocument(GetPolicyDocumentArgs.builder()
.statements(GetPolicyDocumentStatementArgs.builder()
.effect("Allow")
.principals(GetPolicyDocumentStatementPrincipalArgs.builder()
.type("Service")
.identifiers("firehose.amazonaws.com")
.build())
.actions("sts:AssumeRole")
.build())
.build());
var firehoseRole = new Role("firehoseRole", RoleArgs.builder()
.assumeRolePolicy(assumeRole.applyValue(getPolicyDocumentResult -> getPolicyDocumentResult.json()))
.build());
var testStream = new FirehoseDeliveryStream("testStream", FirehoseDeliveryStreamArgs.builder()
.destination("s3")
.s3Configuration(FirehoseDeliveryStreamS3ConfigurationArgs.builder()
.roleArn(firehoseRole.arn())
.bucketArn(bucket.arn())
.build())
.build());
}
}
import pulumi
import pulumi_aws as aws
bucket = aws.s3.BucketV2("bucket")
bucket_acl = aws.s3.BucketAclV2("bucketAcl",
bucket=bucket.id,
acl="private")
assume_role = aws.iam.get_policy_document(statements=[aws.iam.GetPolicyDocumentStatementArgs(
effect="Allow",
principals=[aws.iam.GetPolicyDocumentStatementPrincipalArgs(
type="Service",
identifiers=["firehose.amazonaws.com"],
)],
actions=["sts:AssumeRole"],
)])
firehose_role = aws.iam.Role("firehoseRole", assume_role_policy=assume_role.json)
test_stream = aws.kinesis.FirehoseDeliveryStream("testStream",
destination="s3",
s3_configuration=aws.kinesis.FirehoseDeliveryStreamS3ConfigurationArgs(
role_arn=firehose_role.arn,
bucket_arn=bucket.arn,
))
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
const bucket = new aws.s3.BucketV2("bucket", {});
const bucketAcl = new aws.s3.BucketAclV2("bucketAcl", {
bucket: bucket.id,
acl: "private",
});
const assumeRole = aws.iam.getPolicyDocument({
statements: [{
effect: "Allow",
principals: [{
type: "Service",
identifiers: ["firehose.amazonaws.com"],
}],
actions: ["sts:AssumeRole"],
}],
});
const firehoseRole = new aws.iam.Role("firehoseRole", {assumeRolePolicy: assumeRole.then(assumeRole => assumeRole.json)});
const testStream = new aws.kinesis.FirehoseDeliveryStream("testStream", {
destination: "s3",
s3Configuration: {
roleArn: firehoseRole.arn,
bucketArn: bucket.arn,
},
});
resources:
bucket:
type: aws:s3:BucketV2
bucketAcl:
type: aws:s3:BucketAclV2
properties:
bucket: ${bucket.id}
acl: private
firehoseRole:
type: aws:iam:Role
properties:
assumeRolePolicy: ${assumeRole.json}
testStream:
type: aws:kinesis:FirehoseDeliveryStream
properties:
destination: s3
s3Configuration:
roleArn: ${firehoseRole.arn}
bucketArn: ${bucket.arn}
variables:
assumeRole:
fn::invoke:
Function: aws:iam:getPolicyDocument
Arguments:
statements:
- effect: Allow
principals:
- type: Service
identifiers:
- firehose.amazonaws.com
actions:
- sts:AssumeRole
Redshift Destination
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aws = Pulumi.Aws;
return await Deployment.RunAsync(() =>
{
var testCluster = new Aws.RedShift.Cluster("testCluster", new()
{
ClusterIdentifier = "tf-redshift-cluster",
DatabaseName = "test",
MasterUsername = "testuser",
MasterPassword = "T3stPass",
NodeType = "dc1.large",
ClusterType = "single-node",
});
var testStream = new Aws.Kinesis.FirehoseDeliveryStream("testStream", new()
{
Destination = "redshift",
S3Configuration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamS3ConfigurationArgs
{
RoleArn = aws_iam_role.Firehose_role.Arn,
BucketArn = aws_s3_bucket.Bucket.Arn,
BufferSize = 10,
BufferInterval = 400,
CompressionFormat = "GZIP",
},
RedshiftConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamRedshiftConfigurationArgs
{
RoleArn = aws_iam_role.Firehose_role.Arn,
ClusterJdbcurl = Output.Tuple(testCluster.Endpoint, testCluster.DatabaseName).Apply(values =>
{
var endpoint = values.Item1;
var databaseName = values.Item2;
return $"jdbc:redshift://{endpoint}/{databaseName}";
}),
Username = "testuser",
Password = "T3stPass",
DataTableName = "test-table",
CopyOptions = "delimiter '|'",
DataTableColumns = "test-col",
S3BackupMode = "Enabled",
S3BackupConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamRedshiftConfigurationS3BackupConfigurationArgs
{
RoleArn = aws_iam_role.Firehose_role.Arn,
BucketArn = aws_s3_bucket.Bucket.Arn,
BufferSize = 15,
BufferInterval = 300,
CompressionFormat = "GZIP",
},
},
});
});
package main
import (
"fmt"
"github.com/pulumi/pulumi-aws/sdk/v5/go/aws/kinesis"
"github.com/pulumi/pulumi-aws/sdk/v5/go/aws/redshift"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
testCluster, err := redshift.NewCluster(ctx, "testCluster", &redshift.ClusterArgs{
ClusterIdentifier: pulumi.String("tf-redshift-cluster"),
DatabaseName: pulumi.String("test"),
MasterUsername: pulumi.String("testuser"),
MasterPassword: pulumi.String("T3stPass"),
NodeType: pulumi.String("dc1.large"),
ClusterType: pulumi.String("single-node"),
})
if err != nil {
return err
}
_, err = kinesis.NewFirehoseDeliveryStream(ctx, "testStream", &kinesis.FirehoseDeliveryStreamArgs{
Destination: pulumi.String("redshift"),
S3Configuration: &kinesis.FirehoseDeliveryStreamS3ConfigurationArgs{
RoleArn: pulumi.Any(aws_iam_role.Firehose_role.Arn),
BucketArn: pulumi.Any(aws_s3_bucket.Bucket.Arn),
BufferSize: pulumi.Int(10),
BufferInterval: pulumi.Int(400),
CompressionFormat: pulumi.String("GZIP"),
},
RedshiftConfiguration: &kinesis.FirehoseDeliveryStreamRedshiftConfigurationArgs{
RoleArn: pulumi.Any(aws_iam_role.Firehose_role.Arn),
ClusterJdbcurl: pulumi.All(testCluster.Endpoint, testCluster.DatabaseName).ApplyT(func(_args []interface{}) (string, error) {
endpoint := _args[0].(string)
databaseName := _args[1].(string)
return fmt.Sprintf("jdbc:redshift://%v/%v", endpoint, databaseName), nil
}).(pulumi.StringOutput),
Username: pulumi.String("testuser"),
Password: pulumi.String("T3stPass"),
DataTableName: pulumi.String("test-table"),
CopyOptions: pulumi.String("delimiter '|'"),
DataTableColumns: pulumi.String("test-col"),
S3BackupMode: pulumi.String("Enabled"),
S3BackupConfiguration: &kinesis.FirehoseDeliveryStreamRedshiftConfigurationS3BackupConfigurationArgs{
RoleArn: pulumi.Any(aws_iam_role.Firehose_role.Arn),
BucketArn: pulumi.Any(aws_s3_bucket.Bucket.Arn),
BufferSize: pulumi.Int(15),
BufferInterval: pulumi.Int(300),
CompressionFormat: pulumi.String("GZIP"),
},
},
})
if err != nil {
return err
}
return nil
})
}
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.redshift.Cluster;
import com.pulumi.aws.redshift.ClusterArgs;
import com.pulumi.aws.kinesis.FirehoseDeliveryStream;
import com.pulumi.aws.kinesis.FirehoseDeliveryStreamArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamS3ConfigurationArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamRedshiftConfigurationArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamRedshiftConfigurationS3BackupConfigurationArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var testCluster = new Cluster("testCluster", ClusterArgs.builder()
.clusterIdentifier("tf-redshift-cluster")
.databaseName("test")
.masterUsername("testuser")
.masterPassword("T3stPass")
.nodeType("dc1.large")
.clusterType("single-node")
.build());
var testStream = new FirehoseDeliveryStream("testStream", FirehoseDeliveryStreamArgs.builder()
.destination("redshift")
.s3Configuration(FirehoseDeliveryStreamS3ConfigurationArgs.builder()
.roleArn(aws_iam_role.firehose_role().arn())
.bucketArn(aws_s3_bucket.bucket().arn())
.bufferSize(10)
.bufferInterval(400)
.compressionFormat("GZIP")
.build())
.redshiftConfiguration(FirehoseDeliveryStreamRedshiftConfigurationArgs.builder()
.roleArn(aws_iam_role.firehose_role().arn())
.clusterJdbcurl(Output.tuple(testCluster.endpoint(), testCluster.databaseName()).applyValue(values -> {
var endpoint = values.t1;
var databaseName = values.t2;
return String.format("jdbc:redshift://%s/%s", endpoint,databaseName);
}))
.username("testuser")
.password("T3stPass")
.dataTableName("test-table")
.copyOptions("delimiter '|'")
.dataTableColumns("test-col")
.s3BackupMode("Enabled")
.s3BackupConfiguration(FirehoseDeliveryStreamRedshiftConfigurationS3BackupConfigurationArgs.builder()
.roleArn(aws_iam_role.firehose_role().arn())
.bucketArn(aws_s3_bucket.bucket().arn())
.bufferSize(15)
.bufferInterval(300)
.compressionFormat("GZIP")
.build())
.build())
.build());
}
}
import pulumi
import pulumi_aws as aws
test_cluster = aws.redshift.Cluster("testCluster",
cluster_identifier="tf-redshift-cluster",
database_name="test",
master_username="testuser",
master_password="T3stPass",
node_type="dc1.large",
cluster_type="single-node")
test_stream = aws.kinesis.FirehoseDeliveryStream("testStream",
destination="redshift",
s3_configuration=aws.kinesis.FirehoseDeliveryStreamS3ConfigurationArgs(
role_arn=aws_iam_role["firehose_role"]["arn"],
bucket_arn=aws_s3_bucket["bucket"]["arn"],
buffer_size=10,
buffer_interval=400,
compression_format="GZIP",
),
redshift_configuration=aws.kinesis.FirehoseDeliveryStreamRedshiftConfigurationArgs(
role_arn=aws_iam_role["firehose_role"]["arn"],
cluster_jdbcurl=pulumi.Output.all(test_cluster.endpoint, test_cluster.database_name).apply(lambda args: f"jdbc:redshift://{args[0]}/{args[1]}"),
username="testuser",
password="T3stPass",
data_table_name="test-table",
copy_options="delimiter '|'",
data_table_columns="test-col",
s3_backup_mode="Enabled",
s3_backup_configuration=aws.kinesis.FirehoseDeliveryStreamRedshiftConfigurationS3BackupConfigurationArgs(
role_arn=aws_iam_role["firehose_role"]["arn"],
bucket_arn=aws_s3_bucket["bucket"]["arn"],
buffer_size=15,
buffer_interval=300,
compression_format="GZIP",
),
))
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
const testCluster = new aws.redshift.Cluster("testCluster", {
clusterIdentifier: "tf-redshift-cluster",
databaseName: "test",
masterUsername: "testuser",
masterPassword: "T3stPass",
nodeType: "dc1.large",
clusterType: "single-node",
});
const testStream = new aws.kinesis.FirehoseDeliveryStream("testStream", {
destination: "redshift",
s3Configuration: {
roleArn: aws_iam_role.firehose_role.arn,
bucketArn: aws_s3_bucket.bucket.arn,
bufferSize: 10,
bufferInterval: 400,
compressionFormat: "GZIP",
},
redshiftConfiguration: {
roleArn: aws_iam_role.firehose_role.arn,
clusterJdbcurl: pulumi.interpolate`jdbc:redshift://${testCluster.endpoint}/${testCluster.databaseName}`,
username: "testuser",
password: "T3stPass",
dataTableName: "test-table",
copyOptions: "delimiter '|'",
dataTableColumns: "test-col",
s3BackupMode: "Enabled",
s3BackupConfiguration: {
roleArn: aws_iam_role.firehose_role.arn,
bucketArn: aws_s3_bucket.bucket.arn,
bufferSize: 15,
bufferInterval: 300,
compressionFormat: "GZIP",
},
},
});
resources:
testCluster:
type: aws:redshift:Cluster
properties:
clusterIdentifier: tf-redshift-cluster
databaseName: test
masterUsername: testuser
masterPassword: T3stPass
nodeType: dc1.large
clusterType: single-node
testStream:
type: aws:kinesis:FirehoseDeliveryStream
properties:
destination: redshift
s3Configuration:
roleArn: ${aws_iam_role.firehose_role.arn}
bucketArn: ${aws_s3_bucket.bucket.arn}
bufferSize: 10
bufferInterval: 400
compressionFormat: GZIP
redshiftConfiguration:
roleArn: ${aws_iam_role.firehose_role.arn}
clusterJdbcurl: jdbc:redshift://${testCluster.endpoint}/${testCluster.databaseName}
username: testuser
password: T3stPass
dataTableName: test-table
copyOptions: delimiter '|'
dataTableColumns: test-col
s3BackupMode: Enabled
s3BackupConfiguration:
roleArn: ${aws_iam_role.firehose_role.arn}
bucketArn: ${aws_s3_bucket.bucket.arn}
bufferSize: 15
bufferInterval: 300
compressionFormat: GZIP
Elasticsearch Destination
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aws = Pulumi.Aws;
return await Deployment.RunAsync(() =>
{
var testCluster = new Aws.ElasticSearch.Domain("testCluster");
var testStream = new Aws.Kinesis.FirehoseDeliveryStream("testStream", new()
{
Destination = "elasticsearch",
S3Configuration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamS3ConfigurationArgs
{
RoleArn = aws_iam_role.Firehose_role.Arn,
BucketArn = aws_s3_bucket.Bucket.Arn,
BufferSize = 10,
BufferInterval = 400,
CompressionFormat = "GZIP",
},
ElasticsearchConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamElasticsearchConfigurationArgs
{
DomainArn = testCluster.Arn,
RoleArn = aws_iam_role.Firehose_role.Arn,
IndexName = "test",
TypeName = "test",
ProcessingConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationArgs
{
Enabled = true,
Processors = new[]
{
new Aws.Kinesis.Inputs.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorArgs
{
Type = "Lambda",
Parameters = new[]
{
new Aws.Kinesis.Inputs.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorParameterArgs
{
ParameterName = "LambdaArn",
ParameterValue = $"{aws_lambda_function.Lambda_processor.Arn}:$LATEST",
},
},
},
},
},
},
});
});
package main
import (
"fmt"
"github.com/pulumi/pulumi-aws/sdk/v5/go/aws/elasticsearch"
"github.com/pulumi/pulumi-aws/sdk/v5/go/aws/kinesis"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
testCluster, err := elasticsearch.NewDomain(ctx, "testCluster", nil)
if err != nil {
return err
}
_, err = kinesis.NewFirehoseDeliveryStream(ctx, "testStream", &kinesis.FirehoseDeliveryStreamArgs{
Destination: pulumi.String("elasticsearch"),
S3Configuration: &kinesis.FirehoseDeliveryStreamS3ConfigurationArgs{
RoleArn: pulumi.Any(aws_iam_role.Firehose_role.Arn),
BucketArn: pulumi.Any(aws_s3_bucket.Bucket.Arn),
BufferSize: pulumi.Int(10),
BufferInterval: pulumi.Int(400),
CompressionFormat: pulumi.String("GZIP"),
},
ElasticsearchConfiguration: &kinesis.FirehoseDeliveryStreamElasticsearchConfigurationArgs{
DomainArn: testCluster.Arn,
RoleArn: pulumi.Any(aws_iam_role.Firehose_role.Arn),
IndexName: pulumi.String("test"),
TypeName: pulumi.String("test"),
ProcessingConfiguration: &kinesis.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationArgs{
Enabled: pulumi.Bool(true),
Processors: kinesis.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorArray{
&kinesis.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorArgs{
Type: pulumi.String("Lambda"),
Parameters: kinesis.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorParameterArray{
&kinesis.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorParameterArgs{
ParameterName: pulumi.String("LambdaArn"),
ParameterValue: pulumi.String(fmt.Sprintf("%v:$LATEST", aws_lambda_function.Lambda_processor.Arn)),
},
},
},
},
},
},
})
if err != nil {
return err
}
return nil
})
}
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.elasticsearch.Domain;
import com.pulumi.aws.kinesis.FirehoseDeliveryStream;
import com.pulumi.aws.kinesis.FirehoseDeliveryStreamArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamS3ConfigurationArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamElasticsearchConfigurationArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorParameterArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var testCluster = new Domain("testCluster");
var testStream = new FirehoseDeliveryStream("testStream", FirehoseDeliveryStreamArgs.builder()
.destination("elasticsearch")
.s3Configuration(FirehoseDeliveryStreamS3ConfigurationArgs.builder()
.roleArn(aws_iam_role.firehose_role().arn())
.bucketArn(aws_s3_bucket.bucket().arn())
.bufferSize(10)
.bufferInterval(400)
.compressionFormat("GZIP")
.build())
.elasticsearchConfiguration(FirehoseDeliveryStreamElasticsearchConfigurationArgs.builder()
.domainArn(testCluster.arn())
.roleArn(aws_iam_role.firehose_role().arn())
.indexName("test")
.typeName("test")
.processingConfiguration(FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationArgs.builder()
.enabled(true)
.processors(FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorArgs.builder()
.type("Lambda")
.parameters(FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorParameterArgs.builder()
.parameterName("LambdaArn")
.parameterValue(String.format("%s:$LATEST", aws_lambda_function.lambda_processor().arn()))
.build())
.build())
.build())
.build())
.build());
}
}
import pulumi
import pulumi_aws as aws
test_cluster = aws.elasticsearch.Domain("testCluster")
test_stream = aws.kinesis.FirehoseDeliveryStream("testStream",
destination="elasticsearch",
s3_configuration=aws.kinesis.FirehoseDeliveryStreamS3ConfigurationArgs(
role_arn=aws_iam_role["firehose_role"]["arn"],
bucket_arn=aws_s3_bucket["bucket"]["arn"],
buffer_size=10,
buffer_interval=400,
compression_format="GZIP",
),
elasticsearch_configuration=aws.kinesis.FirehoseDeliveryStreamElasticsearchConfigurationArgs(
domain_arn=test_cluster.arn,
role_arn=aws_iam_role["firehose_role"]["arn"],
index_name="test",
type_name="test",
processing_configuration=aws.kinesis.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationArgs(
enabled=True,
processors=[aws.kinesis.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorArgs(
type="Lambda",
parameters=[aws.kinesis.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorParameterArgs(
parameter_name="LambdaArn",
parameter_value=f"{aws_lambda_function['lambda_processor']['arn']}:$LATEST",
)],
)],
),
))
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
const testCluster = new aws.elasticsearch.Domain("testCluster", {});
const testStream = new aws.kinesis.FirehoseDeliveryStream("testStream", {
destination: "elasticsearch",
s3Configuration: {
roleArn: aws_iam_role.firehose_role.arn,
bucketArn: aws_s3_bucket.bucket.arn,
bufferSize: 10,
bufferInterval: 400,
compressionFormat: "GZIP",
},
elasticsearchConfiguration: {
domainArn: testCluster.arn,
roleArn: aws_iam_role.firehose_role.arn,
indexName: "test",
typeName: "test",
processingConfiguration: {
enabled: true,
processors: [{
type: "Lambda",
parameters: [{
parameterName: "LambdaArn",
parameterValue: `${aws_lambda_function.lambda_processor.arn}:$LATEST`,
}],
}],
},
},
});
resources:
testCluster:
type: aws:elasticsearch:Domain
testStream:
type: aws:kinesis:FirehoseDeliveryStream
properties:
destination: elasticsearch
s3Configuration:
roleArn: ${aws_iam_role.firehose_role.arn}
bucketArn: ${aws_s3_bucket.bucket.arn}
bufferSize: 10
bufferInterval: 400
compressionFormat: GZIP
elasticsearchConfiguration:
domainArn: ${testCluster.arn}
roleArn: ${aws_iam_role.firehose_role.arn}
indexName: test
typeName: test
processingConfiguration:
enabled: 'true'
processors:
- type: Lambda
parameters:
- parameterName: LambdaArn
parameterValue: ${aws_lambda_function.lambda_processor.arn}:$LATEST
Elasticsearch Destination With VPC
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aws = Pulumi.Aws;
return await Deployment.RunAsync(() =>
{
var testCluster = new Aws.ElasticSearch.Domain("testCluster", new()
{
ClusterConfig = new Aws.ElasticSearch.Inputs.DomainClusterConfigArgs
{
InstanceCount = 2,
ZoneAwarenessEnabled = true,
InstanceType = "t2.small.elasticsearch",
},
EbsOptions = new Aws.ElasticSearch.Inputs.DomainEbsOptionsArgs
{
EbsEnabled = true,
VolumeSize = 10,
},
VpcOptions = new Aws.ElasticSearch.Inputs.DomainVpcOptionsArgs
{
SecurityGroupIds = new[]
{
aws_security_group.First.Id,
},
SubnetIds = new[]
{
aws_subnet.First.Id,
aws_subnet.Second.Id,
},
},
});
var firehose_elasticsearchPolicyDocument = Aws.Iam.GetPolicyDocument.Invoke(new()
{
Statements = new[]
{
new Aws.Iam.Inputs.GetPolicyDocumentStatementInputArgs
{
Effect = "Allow",
Actions = new[]
{
"es:*",
},
Resources = new[]
{
testCluster.Arn,
$"{testCluster.Arn}/*",
},
},
new Aws.Iam.Inputs.GetPolicyDocumentStatementInputArgs
{
Effect = "Allow",
Actions = new[]
{
"ec2:DescribeVpcs",
"ec2:DescribeVpcAttribute",
"ec2:DescribeSubnets",
"ec2:DescribeSecurityGroups",
"ec2:DescribeNetworkInterfaces",
"ec2:CreateNetworkInterface",
"ec2:CreateNetworkInterfacePermission",
"ec2:DeleteNetworkInterface",
},
Resources = new[]
{
"*",
},
},
},
});
var firehose_elasticsearchRolePolicy = new Aws.Iam.RolePolicy("firehose-elasticsearchRolePolicy", new()
{
Role = aws_iam_role.Firehose.Id,
Policy = firehose_elasticsearchPolicyDocument.Apply(getPolicyDocumentResult => getPolicyDocumentResult.Json),
});
var test = new Aws.Kinesis.FirehoseDeliveryStream("test", new()
{
Destination = "elasticsearch",
S3Configuration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamS3ConfigurationArgs
{
RoleArn = aws_iam_role.Firehose.Arn,
BucketArn = aws_s3_bucket.Bucket.Arn,
},
ElasticsearchConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamElasticsearchConfigurationArgs
{
DomainArn = testCluster.Arn,
RoleArn = aws_iam_role.Firehose.Arn,
IndexName = "test",
TypeName = "test",
VpcConfig = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamElasticsearchConfigurationVpcConfigArgs
{
SubnetIds = new[]
{
aws_subnet.First.Id,
aws_subnet.Second.Id,
},
SecurityGroupIds = new[]
{
aws_security_group.First.Id,
},
RoleArn = aws_iam_role.Firehose.Arn,
},
},
}, new CustomResourceOptions
{
DependsOn = new[]
{
firehose_elasticsearchRolePolicy,
},
});
});
package main
import (
"fmt"
"github.com/pulumi/pulumi-aws/sdk/v5/go/aws/elasticsearch"
"github.com/pulumi/pulumi-aws/sdk/v5/go/aws/iam"
"github.com/pulumi/pulumi-aws/sdk/v5/go/aws/kinesis"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
testCluster, err := elasticsearch.NewDomain(ctx, "testCluster", &elasticsearch.DomainArgs{
ClusterConfig: &elasticsearch.DomainClusterConfigArgs{
InstanceCount: pulumi.Int(2),
ZoneAwarenessEnabled: pulumi.Bool(true),
InstanceType: pulumi.String("t2.small.elasticsearch"),
},
EbsOptions: &elasticsearch.DomainEbsOptionsArgs{
EbsEnabled: pulumi.Bool(true),
VolumeSize: pulumi.Int(10),
},
VpcOptions: &elasticsearch.DomainVpcOptionsArgs{
SecurityGroupIds: pulumi.StringArray{
aws_security_group.First.Id,
},
SubnetIds: pulumi.StringArray{
aws_subnet.First.Id,
aws_subnet.Second.Id,
},
},
})
if err != nil {
return err
}
firehose_elasticsearchPolicyDocument := iam.GetPolicyDocumentOutput(ctx, iam.GetPolicyDocumentOutputArgs{
Statements: iam.GetPolicyDocumentStatementArray{
&iam.GetPolicyDocumentStatementArgs{
Effect: pulumi.String("Allow"),
Actions: pulumi.StringArray{
pulumi.String("es:*"),
},
Resources: pulumi.StringArray{
testCluster.Arn,
testCluster.Arn.ApplyT(func(arn string) (string, error) {
return fmt.Sprintf("%v/*", arn), nil
}).(pulumi.StringOutput),
},
},
&iam.GetPolicyDocumentStatementArgs{
Effect: pulumi.String("Allow"),
Actions: pulumi.StringArray{
pulumi.String("ec2:DescribeVpcs"),
pulumi.String("ec2:DescribeVpcAttribute"),
pulumi.String("ec2:DescribeSubnets"),
pulumi.String("ec2:DescribeSecurityGroups"),
pulumi.String("ec2:DescribeNetworkInterfaces"),
pulumi.String("ec2:CreateNetworkInterface"),
pulumi.String("ec2:CreateNetworkInterfacePermission"),
pulumi.String("ec2:DeleteNetworkInterface"),
},
Resources: pulumi.StringArray{
pulumi.String("*"),
},
},
},
}, nil)
firehose_elasticsearchRolePolicy, err := iam.NewRolePolicy(ctx, "firehose-elasticsearchRolePolicy", &iam.RolePolicyArgs{
Role: pulumi.Any(aws_iam_role.Firehose.Id),
Policy: firehose_elasticsearchPolicyDocument.ApplyT(func(firehose_elasticsearchPolicyDocument iam.GetPolicyDocumentResult) (*string, error) {
return &firehose_elasticsearchPolicyDocument.Json, nil
}).(pulumi.StringPtrOutput),
})
if err != nil {
return err
}
_, err = kinesis.NewFirehoseDeliveryStream(ctx, "test", &kinesis.FirehoseDeliveryStreamArgs{
Destination: pulumi.String("elasticsearch"),
S3Configuration: &kinesis.FirehoseDeliveryStreamS3ConfigurationArgs{
RoleArn: pulumi.Any(aws_iam_role.Firehose.Arn),
BucketArn: pulumi.Any(aws_s3_bucket.Bucket.Arn),
},
ElasticsearchConfiguration: &kinesis.FirehoseDeliveryStreamElasticsearchConfigurationArgs{
DomainArn: testCluster.Arn,
RoleArn: pulumi.Any(aws_iam_role.Firehose.Arn),
IndexName: pulumi.String("test"),
TypeName: pulumi.String("test"),
VpcConfig: &kinesis.FirehoseDeliveryStreamElasticsearchConfigurationVpcConfigArgs{
SubnetIds: pulumi.StringArray{
aws_subnet.First.Id,
aws_subnet.Second.Id,
},
SecurityGroupIds: pulumi.StringArray{
aws_security_group.First.Id,
},
RoleArn: pulumi.Any(aws_iam_role.Firehose.Arn),
},
},
}, pulumi.DependsOn([]pulumi.Resource{
firehose_elasticsearchRolePolicy,
}))
if err != nil {
return err
}
return nil
})
}
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.elasticsearch.Domain;
import com.pulumi.aws.elasticsearch.DomainArgs;
import com.pulumi.aws.elasticsearch.inputs.DomainClusterConfigArgs;
import com.pulumi.aws.elasticsearch.inputs.DomainEbsOptionsArgs;
import com.pulumi.aws.elasticsearch.inputs.DomainVpcOptionsArgs;
import com.pulumi.aws.iam.IamFunctions;
import com.pulumi.aws.iam.inputs.GetPolicyDocumentArgs;
import com.pulumi.aws.iam.inputs.GetPolicyDocumentStatementArgs;
import com.pulumi.aws.iam.RolePolicy;
import com.pulumi.aws.iam.RolePolicyArgs;
import com.pulumi.aws.kinesis.FirehoseDeliveryStream;
import com.pulumi.aws.kinesis.FirehoseDeliveryStreamArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamS3ConfigurationArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamElasticsearchConfigurationArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamElasticsearchConfigurationVpcConfigArgs;
import com.pulumi.resources.CustomResourceOptions;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var testCluster = new Domain("testCluster", DomainArgs.builder()
.clusterConfig(DomainClusterConfigArgs.builder()
.instanceCount(2)
.zoneAwarenessEnabled(true)
.instanceType("t2.small.elasticsearch")
.build())
.ebsOptions(DomainEbsOptionsArgs.builder()
.ebsEnabled(true)
.volumeSize(10)
.build())
.vpcOptions(DomainVpcOptionsArgs.builder()
.securityGroupIds(aws_security_group.first().id())
.subnetIds(
aws_subnet.first().id(),
aws_subnet.second().id())
.build())
.build());
final var firehose_elasticsearchPolicyDocument = IamFunctions.getPolicyDocument(GetPolicyDocumentArgs.builder()
.statements(
GetPolicyDocumentStatementArgs.builder()
.effect("Allow")
.actions("es:*")
.resources(
testCluster.arn(),
testCluster.arn().applyValue(arn -> String.format("%s/*", arn)))
.build(),
GetPolicyDocumentStatementArgs.builder()
.effect("Allow")
.actions(
"ec2:DescribeVpcs",
"ec2:DescribeVpcAttribute",
"ec2:DescribeSubnets",
"ec2:DescribeSecurityGroups",
"ec2:DescribeNetworkInterfaces",
"ec2:CreateNetworkInterface",
"ec2:CreateNetworkInterfacePermission",
"ec2:DeleteNetworkInterface")
.resources("*")
.build())
.build());
var firehose_elasticsearchRolePolicy = new RolePolicy("firehose-elasticsearchRolePolicy", RolePolicyArgs.builder()
.role(aws_iam_role.firehose().id())
.policy(firehose_elasticsearchPolicyDocument.applyValue(firehose_elasticsearchPolicyDocument -> firehose_elasticsearchPolicyDocument.json()))
.build());
var test = new FirehoseDeliveryStream("test", FirehoseDeliveryStreamArgs.builder()
.destination("elasticsearch")
.s3Configuration(FirehoseDeliveryStreamS3ConfigurationArgs.builder()
.roleArn(aws_iam_role.firehose().arn())
.bucketArn(aws_s3_bucket.bucket().arn())
.build())
.elasticsearchConfiguration(FirehoseDeliveryStreamElasticsearchConfigurationArgs.builder()
.domainArn(testCluster.arn())
.roleArn(aws_iam_role.firehose().arn())
.indexName("test")
.typeName("test")
.vpcConfig(FirehoseDeliveryStreamElasticsearchConfigurationVpcConfigArgs.builder()
.subnetIds(
aws_subnet.first().id(),
aws_subnet.second().id())
.securityGroupIds(aws_security_group.first().id())
.roleArn(aws_iam_role.firehose().arn())
.build())
.build())
.build(), CustomResourceOptions.builder()
.dependsOn(firehose_elasticsearchRolePolicy)
.build());
}
}
import pulumi
import pulumi_aws as aws
test_cluster = aws.elasticsearch.Domain("testCluster",
cluster_config=aws.elasticsearch.DomainClusterConfigArgs(
instance_count=2,
zone_awareness_enabled=True,
instance_type="t2.small.elasticsearch",
),
ebs_options=aws.elasticsearch.DomainEbsOptionsArgs(
ebs_enabled=True,
volume_size=10,
),
vpc_options=aws.elasticsearch.DomainVpcOptionsArgs(
security_group_ids=[aws_security_group["first"]["id"]],
subnet_ids=[
aws_subnet["first"]["id"],
aws_subnet["second"]["id"],
],
))
firehose_elasticsearch_policy_document = aws.iam.get_policy_document_output(statements=[
aws.iam.GetPolicyDocumentStatementArgs(
effect="Allow",
actions=["es:*"],
resources=[
test_cluster.arn,
test_cluster.arn.apply(lambda arn: f"{arn}/*"),
],
),
aws.iam.GetPolicyDocumentStatementArgs(
effect="Allow",
actions=[
"ec2:DescribeVpcs",
"ec2:DescribeVpcAttribute",
"ec2:DescribeSubnets",
"ec2:DescribeSecurityGroups",
"ec2:DescribeNetworkInterfaces",
"ec2:CreateNetworkInterface",
"ec2:CreateNetworkInterfacePermission",
"ec2:DeleteNetworkInterface",
],
resources=["*"],
),
])
firehose_elasticsearch_role_policy = aws.iam.RolePolicy("firehose-elasticsearchRolePolicy",
role=aws_iam_role["firehose"]["id"],
policy=firehose_elasticsearch_policy_document.json)
test = aws.kinesis.FirehoseDeliveryStream("test",
destination="elasticsearch",
s3_configuration=aws.kinesis.FirehoseDeliveryStreamS3ConfigurationArgs(
role_arn=aws_iam_role["firehose"]["arn"],
bucket_arn=aws_s3_bucket["bucket"]["arn"],
),
elasticsearch_configuration=aws.kinesis.FirehoseDeliveryStreamElasticsearchConfigurationArgs(
domain_arn=test_cluster.arn,
role_arn=aws_iam_role["firehose"]["arn"],
index_name="test",
type_name="test",
vpc_config=aws.kinesis.FirehoseDeliveryStreamElasticsearchConfigurationVpcConfigArgs(
subnet_ids=[
aws_subnet["first"]["id"],
aws_subnet["second"]["id"],
],
security_group_ids=[aws_security_group["first"]["id"]],
role_arn=aws_iam_role["firehose"]["arn"],
),
),
opts=pulumi.ResourceOptions(depends_on=[firehose_elasticsearch_role_policy]))
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
const testCluster = new aws.elasticsearch.Domain("testCluster", {
clusterConfig: {
instanceCount: 2,
zoneAwarenessEnabled: true,
instanceType: "t2.small.elasticsearch",
},
ebsOptions: {
ebsEnabled: true,
volumeSize: 10,
},
vpcOptions: {
securityGroupIds: [aws_security_group.first.id],
subnetIds: [
aws_subnet.first.id,
aws_subnet.second.id,
],
},
});
const firehose_elasticsearchPolicyDocument = aws.iam.getPolicyDocumentOutput({
statements: [
{
effect: "Allow",
actions: ["es:*"],
resources: [
testCluster.arn,
pulumi.interpolate`${testCluster.arn}/*`,
],
},
{
effect: "Allow",
actions: [
"ec2:DescribeVpcs",
"ec2:DescribeVpcAttribute",
"ec2:DescribeSubnets",
"ec2:DescribeSecurityGroups",
"ec2:DescribeNetworkInterfaces",
"ec2:CreateNetworkInterface",
"ec2:CreateNetworkInterfacePermission",
"ec2:DeleteNetworkInterface",
],
resources: ["*"],
},
],
});
const firehose_elasticsearchRolePolicy = new aws.iam.RolePolicy("firehose-elasticsearchRolePolicy", {
role: aws_iam_role.firehose.id,
policy: firehose_elasticsearchPolicyDocument.apply(firehose_elasticsearchPolicyDocument => firehose_elasticsearchPolicyDocument.json),
});
const test = new aws.kinesis.FirehoseDeliveryStream("test", {
destination: "elasticsearch",
s3Configuration: {
roleArn: aws_iam_role.firehose.arn,
bucketArn: aws_s3_bucket.bucket.arn,
},
elasticsearchConfiguration: {
domainArn: testCluster.arn,
roleArn: aws_iam_role.firehose.arn,
indexName: "test",
typeName: "test",
vpcConfig: {
subnetIds: [
aws_subnet.first.id,
aws_subnet.second.id,
],
securityGroupIds: [aws_security_group.first.id],
roleArn: aws_iam_role.firehose.arn,
},
},
}, {
dependsOn: [firehose_elasticsearchRolePolicy],
});
resources:
testCluster:
type: aws:elasticsearch:Domain
properties:
clusterConfig:
instanceCount: 2
zoneAwarenessEnabled: true
instanceType: t2.small.elasticsearch
ebsOptions:
ebsEnabled: true
volumeSize: 10
vpcOptions:
securityGroupIds:
- ${aws_security_group.first.id}
subnetIds:
- ${aws_subnet.first.id}
- ${aws_subnet.second.id}
firehose-elasticsearchRolePolicy:
type: aws:iam:RolePolicy
properties:
role: ${aws_iam_role.firehose.id}
policy: ${["firehose-elasticsearchPolicyDocument"].json}
test:
type: aws:kinesis:FirehoseDeliveryStream
properties:
destination: elasticsearch
s3Configuration:
roleArn: ${aws_iam_role.firehose.arn}
bucketArn: ${aws_s3_bucket.bucket.arn}
elasticsearchConfiguration:
domainArn: ${testCluster.arn}
roleArn: ${aws_iam_role.firehose.arn}
indexName: test
typeName: test
vpcConfig:
subnetIds:
- ${aws_subnet.first.id}
- ${aws_subnet.second.id}
securityGroupIds:
- ${aws_security_group.first.id}
roleArn: ${aws_iam_role.firehose.arn}
options:
dependson:
- ${["firehose-elasticsearchRolePolicy"]}
variables:
firehose-elasticsearchPolicyDocument:
fn::invoke:
Function: aws:iam:getPolicyDocument
Arguments:
statements:
- effect: Allow
actions:
- es:*
resources:
- ${testCluster.arn}
- ${testCluster.arn}/*
- effect: Allow
actions:
- ec2:DescribeVpcs
- ec2:DescribeVpcAttribute
- ec2:DescribeSubnets
- ec2:DescribeSecurityGroups
- ec2:DescribeNetworkInterfaces
- ec2:CreateNetworkInterface
- ec2:CreateNetworkInterfacePermission
- ec2:DeleteNetworkInterface
resources:
- '*'
Opensearch Destination
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aws = Pulumi.Aws;
return await Deployment.RunAsync(() =>
{
var testCluster = new Aws.OpenSearch.Domain("testCluster");
var testStream = new Aws.Kinesis.FirehoseDeliveryStream("testStream", new()
{
Destination = "opensearch",
S3Configuration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamS3ConfigurationArgs
{
RoleArn = aws_iam_role.Firehose_role.Arn,
BucketArn = aws_s3_bucket.Bucket.Arn,
BufferSize = 10,
BufferInterval = 400,
CompressionFormat = "GZIP",
},
OpensearchConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamOpensearchConfigurationArgs
{
DomainArn = testCluster.Arn,
RoleArn = aws_iam_role.Firehose_role.Arn,
IndexName = "test",
ProcessingConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationArgs
{
Enabled = true,
Processors = new[]
{
new Aws.Kinesis.Inputs.FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationProcessorArgs
{
Type = "Lambda",
Parameters = new[]
{
new Aws.Kinesis.Inputs.FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationProcessorParameterArgs
{
ParameterName = "LambdaArn",
ParameterValue = $"{aws_lambda_function.Lambda_processor.Arn}:$LATEST",
},
},
},
},
},
},
});
});
package main
import (
"fmt"
"github.com/pulumi/pulumi-aws/sdk/v5/go/aws/kinesis"
"github.com/pulumi/pulumi-aws/sdk/v5/go/aws/opensearch"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
testCluster, err := opensearch.NewDomain(ctx, "testCluster", nil)
if err != nil {
return err
}
_, err = kinesis.NewFirehoseDeliveryStream(ctx, "testStream", &kinesis.FirehoseDeliveryStreamArgs{
Destination: pulumi.String("opensearch"),
S3Configuration: &kinesis.FirehoseDeliveryStreamS3ConfigurationArgs{
RoleArn: pulumi.Any(aws_iam_role.Firehose_role.Arn),
BucketArn: pulumi.Any(aws_s3_bucket.Bucket.Arn),
BufferSize: pulumi.Int(10),
BufferInterval: pulumi.Int(400),
CompressionFormat: pulumi.String("GZIP"),
},
OpensearchConfiguration: &kinesis.FirehoseDeliveryStreamOpensearchConfigurationArgs{
DomainArn: testCluster.Arn,
RoleArn: pulumi.Any(aws_iam_role.Firehose_role.Arn),
IndexName: pulumi.String("test"),
ProcessingConfiguration: &kinesis.FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationArgs{
Enabled: pulumi.Bool(true),
Processors: kinesis.FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationProcessorArray{
&kinesis.FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationProcessorArgs{
Type: pulumi.String("Lambda"),
Parameters: kinesis.FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationProcessorParameterArray{
&kinesis.FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationProcessorParameterArgs{
ParameterName: pulumi.String("LambdaArn"),
ParameterValue: pulumi.String(fmt.Sprintf("%v:$LATEST", aws_lambda_function.Lambda_processor.Arn)),
},
},
},
},
},
},
})
if err != nil {
return err
}
return nil
})
}
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.opensearch.Domain;
import com.pulumi.aws.kinesis.FirehoseDeliveryStream;
import com.pulumi.aws.kinesis.FirehoseDeliveryStreamArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamS3ConfigurationArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamOpensearchConfigurationArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationProcessorArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationProcessorParameterArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var testCluster = new Domain("testCluster");
var testStream = new FirehoseDeliveryStream("testStream", FirehoseDeliveryStreamArgs.builder()
.destination("opensearch")
.s3Configuration(FirehoseDeliveryStreamS3ConfigurationArgs.builder()
.roleArn(aws_iam_role.firehose_role().arn())
.bucketArn(aws_s3_bucket.bucket().arn())
.bufferSize(10)
.bufferInterval(400)
.compressionFormat("GZIP")
.build())
.opensearchConfiguration(FirehoseDeliveryStreamOpensearchConfigurationArgs.builder()
.domainArn(testCluster.arn())
.roleArn(aws_iam_role.firehose_role().arn())
.indexName("test")
.processingConfiguration(FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationArgs.builder()
.enabled("true")
.processors(FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationProcessorArgs.builder()
.type("Lambda")
.parameters(FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationProcessorParameterArgs.builder()
.parameterName("LambdaArn")
.parameterValue(String.format("%s:$LATEST", aws_lambda_function.lambda_processor().arn()))
.build())
.build())
.build())
.build())
.build());
}
}
import pulumi
import pulumi_aws as aws
test_cluster = aws.opensearch.Domain("testCluster")
test_stream = aws.kinesis.FirehoseDeliveryStream("testStream",
destination="opensearch",
s3_configuration=aws.kinesis.FirehoseDeliveryStreamS3ConfigurationArgs(
role_arn=aws_iam_role["firehose_role"]["arn"],
bucket_arn=aws_s3_bucket["bucket"]["arn"],
buffer_size=10,
buffer_interval=400,
compression_format="GZIP",
),
opensearch_configuration=aws.kinesis.FirehoseDeliveryStreamOpensearchConfigurationArgs(
domain_arn=test_cluster.arn,
role_arn=aws_iam_role["firehose_role"]["arn"],
index_name="test",
processing_configuration=aws.kinesis.FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationArgs(
enabled=True,
processors=[aws.kinesis.FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationProcessorArgs(
type="Lambda",
parameters=[aws.kinesis.FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationProcessorParameterArgs(
parameter_name="LambdaArn",
parameter_value=f"{aws_lambda_function['lambda_processor']['arn']}:$LATEST",
)],
)],
),
))
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
const testCluster = new aws.opensearch.Domain("testCluster", {});
const testStream = new aws.kinesis.FirehoseDeliveryStream("testStream", {
destination: "opensearch",
s3Configuration: {
roleArn: aws_iam_role.firehose_role.arn,
bucketArn: aws_s3_bucket.bucket.arn,
bufferSize: 10,
bufferInterval: 400,
compressionFormat: "GZIP",
},
opensearchConfiguration: {
domainArn: testCluster.arn,
roleArn: aws_iam_role.firehose_role.arn,
indexName: "test",
processingConfiguration: {
enabled: true,
processors: [{
type: "Lambda",
parameters: [{
parameterName: "LambdaArn",
parameterValue: `${aws_lambda_function.lambda_processor.arn}:$LATEST`,
}],
}],
},
},
});
resources:
testCluster:
type: aws:opensearch:Domain
testStream:
type: aws:kinesis:FirehoseDeliveryStream
properties:
destination: opensearch
s3Configuration:
roleArn: ${aws_iam_role.firehose_role.arn}
bucketArn: ${aws_s3_bucket.bucket.arn}
bufferSize: 10
bufferInterval: 400
compressionFormat: GZIP
opensearchConfiguration:
domainArn: ${testCluster.arn}
roleArn: ${aws_iam_role.firehose_role.arn}
indexName: test
processingConfiguration:
enabled: true
processors:
- type: Lambda
parameters:
- parameterName: LambdaArn
parameterValue: ${aws_lambda_function.lambda_processor.arn}:$LATEST
Opensearch Destination With VPC
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aws = Pulumi.Aws;
return await Deployment.RunAsync(() =>
{
var testCluster = new Aws.OpenSearch.Domain("testCluster", new()
{
ClusterConfig = new Aws.OpenSearch.Inputs.DomainClusterConfigArgs
{
InstanceCount = 2,
ZoneAwarenessEnabled = true,
InstanceType = "m4.large.search",
},
EbsOptions = new Aws.OpenSearch.Inputs.DomainEbsOptionsArgs
{
EbsEnabled = true,
VolumeSize = 10,
},
VpcOptions = new Aws.OpenSearch.Inputs.DomainVpcOptionsArgs
{
SecurityGroupIds = new[]
{
aws_security_group.First.Id,
},
SubnetIds = new[]
{
aws_subnet.First.Id,
aws_subnet.Second.Id,
},
},
});
var firehose_opensearch = new Aws.Iam.RolePolicy("firehose-opensearch", new()
{
Role = aws_iam_role.Firehose.Id,
Policy = Output.Tuple(testCluster.Arn, testCluster.Arn).Apply(values =>
{
var testClusterArn = values.Item1;
var testClusterArn1 = values.Item2;
return @$"{{
""Version"": ""2012-10-17"",
""Statement"": [
{{
""Effect"": ""Allow"",
""Action"": [
""es:*""
],
""Resource"": [
""{testClusterArn}"",
""{testClusterArn1}/*""
]
}},
{{
""Effect"": ""Allow"",
""Action"": [
""ec2:DescribeVpcs"",
""ec2:DescribeVpcAttribute"",
""ec2:DescribeSubnets"",
""ec2:DescribeSecurityGroups"",
""ec2:DescribeNetworkInterfaces"",
""ec2:CreateNetworkInterface"",
""ec2:CreateNetworkInterfacePermission"",
""ec2:DeleteNetworkInterface""
],
""Resource"": [
""*""
]
}}
]
}}
";
}),
});
var test = new Aws.Kinesis.FirehoseDeliveryStream("test", new()
{
Destination = "opensearch",
S3Configuration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamS3ConfigurationArgs
{
RoleArn = aws_iam_role.Firehose.Arn,
BucketArn = aws_s3_bucket.Bucket.Arn,
},
OpensearchConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamOpensearchConfigurationArgs
{
DomainArn = testCluster.Arn,
RoleArn = aws_iam_role.Firehose.Arn,
IndexName = "test",
VpcConfig = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamOpensearchConfigurationVpcConfigArgs
{
SubnetIds = new[]
{
aws_subnet.First.Id,
aws_subnet.Second.Id,
},
SecurityGroupIds = new[]
{
aws_security_group.First.Id,
},
RoleArn = aws_iam_role.Firehose.Arn,
},
},
}, new CustomResourceOptions
{
DependsOn = new[]
{
firehose_opensearch,
},
});
});
package main
import (
"fmt"
"github.com/pulumi/pulumi-aws/sdk/v5/go/aws/iam"
"github.com/pulumi/pulumi-aws/sdk/v5/go/aws/kinesis"
"github.com/pulumi/pulumi-aws/sdk/v5/go/aws/opensearch"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
testCluster, err := opensearch.NewDomain(ctx, "testCluster", &opensearch.DomainArgs{
ClusterConfig: &opensearch.DomainClusterConfigArgs{
InstanceCount: pulumi.Int(2),
ZoneAwarenessEnabled: pulumi.Bool(true),
InstanceType: pulumi.String("m4.large.search"),
},
EbsOptions: &opensearch.DomainEbsOptionsArgs{
EbsEnabled: pulumi.Bool(true),
VolumeSize: pulumi.Int(10),
},
VpcOptions: &opensearch.DomainVpcOptionsArgs{
SecurityGroupIds: pulumi.StringArray{
aws_security_group.First.Id,
},
SubnetIds: pulumi.StringArray{
aws_subnet.First.Id,
aws_subnet.Second.Id,
},
},
})
if err != nil {
return err
}
_, err = iam.NewRolePolicy(ctx, "firehose-opensearch", &iam.RolePolicyArgs{
Role: pulumi.Any(aws_iam_role.Firehose.Id),
Policy: pulumi.All(testCluster.Arn, testCluster.Arn).ApplyT(func(_args []interface{}) (string, error) {
testClusterArn := _args[0].(string)
testClusterArn1 := _args[1].(string)
return fmt.Sprintf(`{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"es:*"
],
"Resource": [
"%v",
"%v/*"
]
},
{
"Effect": "Allow",
"Action": [
"ec2:DescribeVpcs",
"ec2:DescribeVpcAttribute",
"ec2:DescribeSubnets",
"ec2:DescribeSecurityGroups",
"ec2:DescribeNetworkInterfaces",
"ec2:CreateNetworkInterface",
"ec2:CreateNetworkInterfacePermission",
"ec2:DeleteNetworkInterface"
],
"Resource": [
"*"
]
}
]
}
`, testClusterArn, testClusterArn1), nil
}).(pulumi.StringOutput),
})
if err != nil {
return err
}
_, err = kinesis.NewFirehoseDeliveryStream(ctx, "test", &kinesis.FirehoseDeliveryStreamArgs{
Destination: pulumi.String("opensearch"),
S3Configuration: &kinesis.FirehoseDeliveryStreamS3ConfigurationArgs{
RoleArn: pulumi.Any(aws_iam_role.Firehose.Arn),
BucketArn: pulumi.Any(aws_s3_bucket.Bucket.Arn),
},
OpensearchConfiguration: &kinesis.FirehoseDeliveryStreamOpensearchConfigurationArgs{
DomainArn: testCluster.Arn,
RoleArn: pulumi.Any(aws_iam_role.Firehose.Arn),
IndexName: pulumi.String("test"),
VpcConfig: &kinesis.FirehoseDeliveryStreamOpensearchConfigurationVpcConfigArgs{
SubnetIds: pulumi.StringArray{
aws_subnet.First.Id,
aws_subnet.Second.Id,
},
SecurityGroupIds: pulumi.StringArray{
aws_security_group.First.Id,
},
RoleArn: pulumi.Any(aws_iam_role.Firehose.Arn),
},
},
}, pulumi.DependsOn([]pulumi.Resource{
firehose_opensearch,
}))
if err != nil {
return err
}
return nil
})
}
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.opensearch.Domain;
import com.pulumi.aws.opensearch.DomainArgs;
import com.pulumi.aws.opensearch.inputs.DomainClusterConfigArgs;
import com.pulumi.aws.opensearch.inputs.DomainEbsOptionsArgs;
import com.pulumi.aws.opensearch.inputs.DomainVpcOptionsArgs;
import com.pulumi.aws.iam.RolePolicy;
import com.pulumi.aws.iam.RolePolicyArgs;
import com.pulumi.aws.kinesis.FirehoseDeliveryStream;
import com.pulumi.aws.kinesis.FirehoseDeliveryStreamArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamS3ConfigurationArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamOpensearchConfigurationArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamOpensearchConfigurationVpcConfigArgs;
import com.pulumi.resources.CustomResourceOptions;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var testCluster = new Domain("testCluster", DomainArgs.builder()
.clusterConfig(DomainClusterConfigArgs.builder()
.instanceCount(2)
.zoneAwarenessEnabled(true)
.instanceType("m4.large.search")
.build())
.ebsOptions(DomainEbsOptionsArgs.builder()
.ebsEnabled(true)
.volumeSize(10)
.build())
.vpcOptions(DomainVpcOptionsArgs.builder()
.securityGroupIds(aws_security_group.first().id())
.subnetIds(
aws_subnet.first().id(),
aws_subnet.second().id())
.build())
.build());
var firehose_opensearch = new RolePolicy("firehose-opensearch", RolePolicyArgs.builder()
.role(aws_iam_role.firehose().id())
.policy(Output.tuple(testCluster.arn(), testCluster.arn()).applyValue(values -> {
var testClusterArn = values.t1;
var testClusterArn1 = values.t2;
return """
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"es:*"
],
"Resource": [
"%s",
"%s/*"
]
},
{
"Effect": "Allow",
"Action": [
"ec2:DescribeVpcs",
"ec2:DescribeVpcAttribute",
"ec2:DescribeSubnets",
"ec2:DescribeSecurityGroups",
"ec2:DescribeNetworkInterfaces",
"ec2:CreateNetworkInterface",
"ec2:CreateNetworkInterfacePermission",
"ec2:DeleteNetworkInterface"
],
"Resource": [
"*"
]
}
]
}
", testClusterArn,testClusterArn1);
}))
.build());
var test = new FirehoseDeliveryStream("test", FirehoseDeliveryStreamArgs.builder()
.destination("opensearch")
.s3Configuration(FirehoseDeliveryStreamS3ConfigurationArgs.builder()
.roleArn(aws_iam_role.firehose().arn())
.bucketArn(aws_s3_bucket.bucket().arn())
.build())
.opensearchConfiguration(FirehoseDeliveryStreamOpensearchConfigurationArgs.builder()
.domainArn(testCluster.arn())
.roleArn(aws_iam_role.firehose().arn())
.indexName("test")
.vpcConfig(FirehoseDeliveryStreamOpensearchConfigurationVpcConfigArgs.builder()
.subnetIds(
aws_subnet.first().id(),
aws_subnet.second().id())
.securityGroupIds(aws_security_group.first().id())
.roleArn(aws_iam_role.firehose().arn())
.build())
.build())
.build(), CustomResourceOptions.builder()
.dependsOn(firehose_opensearch)
.build());
}
}
import pulumi
import pulumi_aws as aws
test_cluster = aws.opensearch.Domain("testCluster",
cluster_config=aws.opensearch.DomainClusterConfigArgs(
instance_count=2,
zone_awareness_enabled=True,
instance_type="m4.large.search",
),
ebs_options=aws.opensearch.DomainEbsOptionsArgs(
ebs_enabled=True,
volume_size=10,
),
vpc_options=aws.opensearch.DomainVpcOptionsArgs(
security_group_ids=[aws_security_group["first"]["id"]],
subnet_ids=[
aws_subnet["first"]["id"],
aws_subnet["second"]["id"],
],
))
firehose_opensearch = aws.iam.RolePolicy("firehose-opensearch",
role=aws_iam_role["firehose"]["id"],
policy=pulumi.Output.all(test_cluster.arn, test_cluster.arn).apply(lambda arns: f"""{{
"Version": "2012-10-17",
"Statement": [
{{
"Effect": "Allow",
"Action": [
"es:*"
],
"Resource": [
"{test_cluster_arn}",
"{test_cluster_arn1}/*"
]
}},
{{
"Effect": "Allow",
"Action": [
"ec2:DescribeVpcs",
"ec2:DescribeVpcAttribute",
"ec2:DescribeSubnets",
"ec2:DescribeSecurityGroups",
"ec2:DescribeNetworkInterfaces",
"ec2:CreateNetworkInterface",
"ec2:CreateNetworkInterfacePermission",
"ec2:DeleteNetworkInterface"
],
"Resource": [
"*"
]
}}
]
}}
"""))
test = aws.kinesis.FirehoseDeliveryStream("test",
destination="opensearch",
s3_configuration=aws.kinesis.FirehoseDeliveryStreamS3ConfigurationArgs(
role_arn=aws_iam_role["firehose"]["arn"],
bucket_arn=aws_s3_bucket["bucket"]["arn"],
),
opensearch_configuration=aws.kinesis.FirehoseDeliveryStreamOpensearchConfigurationArgs(
domain_arn=test_cluster.arn,
role_arn=aws_iam_role["firehose"]["arn"],
index_name="test",
vpc_config=aws.kinesis.FirehoseDeliveryStreamOpensearchConfigurationVpcConfigArgs(
subnet_ids=[
aws_subnet["first"]["id"],
aws_subnet["second"]["id"],
],
security_group_ids=[aws_security_group["first"]["id"]],
role_arn=aws_iam_role["firehose"]["arn"],
),
),
opts=pulumi.ResourceOptions(depends_on=[firehose_opensearch]))
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
const testCluster = new aws.opensearch.Domain("testCluster", {
clusterConfig: {
instanceCount: 2,
zoneAwarenessEnabled: true,
instanceType: "m4.large.search",
},
ebsOptions: {
ebsEnabled: true,
volumeSize: 10,
},
vpcOptions: {
securityGroupIds: [aws_security_group.first.id],
subnetIds: [
aws_subnet.first.id,
aws_subnet.second.id,
],
},
});
const firehose_opensearch = new aws.iam.RolePolicy("firehose-opensearch", {
role: aws_iam_role.firehose.id,
policy: pulumi.interpolate`{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"es:*"
],
"Resource": [
"${testCluster.arn}",
"${testCluster.arn}/*"
]
},
{
"Effect": "Allow",
"Action": [
"ec2:DescribeVpcs",
"ec2:DescribeVpcAttribute",
"ec2:DescribeSubnets",
"ec2:DescribeSecurityGroups",
"ec2:DescribeNetworkInterfaces",
"ec2:CreateNetworkInterface",
"ec2:CreateNetworkInterfacePermission",
"ec2:DeleteNetworkInterface"
],
"Resource": [
"*"
]
}
]
}
`,
});
const test = new aws.kinesis.FirehoseDeliveryStream("test", {
destination: "opensearch",
s3Configuration: {
roleArn: aws_iam_role.firehose.arn,
bucketArn: aws_s3_bucket.bucket.arn,
},
opensearchConfiguration: {
domainArn: testCluster.arn,
roleArn: aws_iam_role.firehose.arn,
indexName: "test",
vpcConfig: {
subnetIds: [
aws_subnet.first.id,
aws_subnet.second.id,
],
securityGroupIds: [aws_security_group.first.id],
roleArn: aws_iam_role.firehose.arn,
},
},
}, {
dependsOn: [firehose_opensearch],
});
resources:
testCluster:
type: aws:opensearch:Domain
properties:
clusterConfig:
instanceCount: 2
zoneAwarenessEnabled: true
instanceType: m4.large.search
ebsOptions:
ebsEnabled: true
volumeSize: 10
vpcOptions:
securityGroupIds:
- ${aws_security_group.first.id}
subnetIds:
- ${aws_subnet.first.id}
- ${aws_subnet.second.id}
firehose-opensearch:
type: aws:iam:RolePolicy
properties:
role: ${aws_iam_role.firehose.id}
policy: |
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"es:*"
],
"Resource": [
"${testCluster.arn}",
"${testCluster.arn}/*"
]
},
{
"Effect": "Allow",
"Action": [
"ec2:DescribeVpcs",
"ec2:DescribeVpcAttribute",
"ec2:DescribeSubnets",
"ec2:DescribeSecurityGroups",
"ec2:DescribeNetworkInterfaces",
"ec2:CreateNetworkInterface",
"ec2:CreateNetworkInterfacePermission",
"ec2:DeleteNetworkInterface"
],
"Resource": [
"*"
]
}
]
}
test:
type: aws:kinesis:FirehoseDeliveryStream
properties:
destination: opensearch
s3Configuration:
roleArn: ${aws_iam_role.firehose.arn}
bucketArn: ${aws_s3_bucket.bucket.arn}
opensearchConfiguration:
domainArn: ${testCluster.arn}
roleArn: ${aws_iam_role.firehose.arn}
indexName: test
vpcConfig:
subnetIds:
- ${aws_subnet.first.id}
- ${aws_subnet.second.id}
securityGroupIds:
- ${aws_security_group.first.id}
roleArn: ${aws_iam_role.firehose.arn}
options:
dependson:
- ${["firehose-opensearch"]}
Splunk Destination
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aws = Pulumi.Aws;
return await Deployment.RunAsync(() =>
{
var testStream = new Aws.Kinesis.FirehoseDeliveryStream("testStream", new()
{
Destination = "splunk",
S3Configuration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamS3ConfigurationArgs
{
RoleArn = aws_iam_role.Firehose.Arn,
BucketArn = aws_s3_bucket.Bucket.Arn,
BufferSize = 10,
BufferInterval = 400,
CompressionFormat = "GZIP",
},
SplunkConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamSplunkConfigurationArgs
{
HecEndpoint = "https://http-inputs-mydomain.splunkcloud.com:443",
HecToken = "51D4DA16-C61B-4F5F-8EC7-ED4301342A4A",
HecAcknowledgmentTimeout = 600,
HecEndpointType = "Event",
S3BackupMode = "FailedEventsOnly",
},
});
});
package main
import (
"github.com/pulumi/pulumi-aws/sdk/v5/go/aws/kinesis"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := kinesis.NewFirehoseDeliveryStream(ctx, "testStream", &kinesis.FirehoseDeliveryStreamArgs{
Destination: pulumi.String("splunk"),
S3Configuration: &kinesis.FirehoseDeliveryStreamS3ConfigurationArgs{
RoleArn: pulumi.Any(aws_iam_role.Firehose.Arn),
BucketArn: pulumi.Any(aws_s3_bucket.Bucket.Arn),
BufferSize: pulumi.Int(10),
BufferInterval: pulumi.Int(400),
CompressionFormat: pulumi.String("GZIP"),
},
SplunkConfiguration: &kinesis.FirehoseDeliveryStreamSplunkConfigurationArgs{
HecEndpoint: pulumi.String("https://http-inputs-mydomain.splunkcloud.com:443"),
HecToken: pulumi.String("51D4DA16-C61B-4F5F-8EC7-ED4301342A4A"),
HecAcknowledgmentTimeout: pulumi.Int(600),
HecEndpointType: pulumi.String("Event"),
S3BackupMode: pulumi.String("FailedEventsOnly"),
},
})
if err != nil {
return err
}
return nil
})
}
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.kinesis.FirehoseDeliveryStream;
import com.pulumi.aws.kinesis.FirehoseDeliveryStreamArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamS3ConfigurationArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamSplunkConfigurationArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var testStream = new FirehoseDeliveryStream("testStream", FirehoseDeliveryStreamArgs.builder()
.destination("splunk")
.s3Configuration(FirehoseDeliveryStreamS3ConfigurationArgs.builder()
.roleArn(aws_iam_role.firehose().arn())
.bucketArn(aws_s3_bucket.bucket().arn())
.bufferSize(10)
.bufferInterval(400)
.compressionFormat("GZIP")
.build())
.splunkConfiguration(FirehoseDeliveryStreamSplunkConfigurationArgs.builder()
.hecEndpoint("https://http-inputs-mydomain.splunkcloud.com:443")
.hecToken("51D4DA16-C61B-4F5F-8EC7-ED4301342A4A")
.hecAcknowledgmentTimeout(600)
.hecEndpointType("Event")
.s3BackupMode("FailedEventsOnly")
.build())
.build());
}
}
import pulumi
import pulumi_aws as aws
test_stream = aws.kinesis.FirehoseDeliveryStream("testStream",
destination="splunk",
s3_configuration=aws.kinesis.FirehoseDeliveryStreamS3ConfigurationArgs(
role_arn=aws_iam_role["firehose"]["arn"],
bucket_arn=aws_s3_bucket["bucket"]["arn"],
buffer_size=10,
buffer_interval=400,
compression_format="GZIP",
),
splunk_configuration=aws.kinesis.FirehoseDeliveryStreamSplunkConfigurationArgs(
hec_endpoint="https://http-inputs-mydomain.splunkcloud.com:443",
hec_token="51D4DA16-C61B-4F5F-8EC7-ED4301342A4A",
hec_acknowledgment_timeout=600,
hec_endpoint_type="Event",
s3_backup_mode="FailedEventsOnly",
))
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
const testStream = new aws.kinesis.FirehoseDeliveryStream("testStream", {
destination: "splunk",
s3Configuration: {
roleArn: aws_iam_role.firehose.arn,
bucketArn: aws_s3_bucket.bucket.arn,
bufferSize: 10,
bufferInterval: 400,
compressionFormat: "GZIP",
},
splunkConfiguration: {
hecEndpoint: "https://http-inputs-mydomain.splunkcloud.com:443",
hecToken: "51D4DA16-C61B-4F5F-8EC7-ED4301342A4A",
hecAcknowledgmentTimeout: 600,
hecEndpointType: "Event",
s3BackupMode: "FailedEventsOnly",
},
});
resources:
testStream:
type: aws:kinesis:FirehoseDeliveryStream
properties:
destination: splunk
s3Configuration:
roleArn: ${aws_iam_role.firehose.arn}
bucketArn: ${aws_s3_bucket.bucket.arn}
bufferSize: 10
bufferInterval: 400
compressionFormat: GZIP
splunkConfiguration:
hecEndpoint: https://http-inputs-mydomain.splunkcloud.com:443
hecToken: 51D4DA16-C61B-4F5F-8EC7-ED4301342A4A
hecAcknowledgmentTimeout: 600
hecEndpointType: Event
s3BackupMode: FailedEventsOnly
HTTP Endpoint (e.g., New Relic) Destination
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aws = Pulumi.Aws;
return await Deployment.RunAsync(() =>
{
var testStream = new Aws.Kinesis.FirehoseDeliveryStream("testStream", new()
{
Destination = "http_endpoint",
S3Configuration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamS3ConfigurationArgs
{
RoleArn = aws_iam_role.Firehose.Arn,
BucketArn = aws_s3_bucket.Bucket.Arn,
BufferSize = 10,
BufferInterval = 400,
CompressionFormat = "GZIP",
},
HttpEndpointConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamHttpEndpointConfigurationArgs
{
Url = "https://aws-api.newrelic.com/firehose/v1",
Name = "New Relic",
AccessKey = "my-key",
BufferingSize = 15,
BufferingInterval = 600,
RoleArn = aws_iam_role.Firehose.Arn,
S3BackupMode = "FailedDataOnly",
RequestConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamHttpEndpointConfigurationRequestConfigurationArgs
{
ContentEncoding = "GZIP",
CommonAttributes = new[]
{
new Aws.Kinesis.Inputs.FirehoseDeliveryStreamHttpEndpointConfigurationRequestConfigurationCommonAttributeArgs
{
Name = "testname",
Value = "testvalue",
},
new Aws.Kinesis.Inputs.FirehoseDeliveryStreamHttpEndpointConfigurationRequestConfigurationCommonAttributeArgs
{
Name = "testname2",
Value = "testvalue2",
},
},
},
},
});
});
package main
import (
"github.com/pulumi/pulumi-aws/sdk/v5/go/aws/kinesis"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := kinesis.NewFirehoseDeliveryStream(ctx, "testStream", &kinesis.FirehoseDeliveryStreamArgs{
Destination: pulumi.String("http_endpoint"),
S3Configuration: &kinesis.FirehoseDeliveryStreamS3ConfigurationArgs{
RoleArn: pulumi.Any(aws_iam_role.Firehose.Arn),
BucketArn: pulumi.Any(aws_s3_bucket.Bucket.Arn),
BufferSize: pulumi.Int(10),
BufferInterval: pulumi.Int(400),
CompressionFormat: pulumi.String("GZIP"),
},
HttpEndpointConfiguration: &kinesis.FirehoseDeliveryStreamHttpEndpointConfigurationArgs{
Url: pulumi.String("https://aws-api.newrelic.com/firehose/v1"),
Name: pulumi.String("New Relic"),
AccessKey: pulumi.String("my-key"),
BufferingSize: pulumi.Int(15),
BufferingInterval: pulumi.Int(600),
RoleArn: pulumi.Any(aws_iam_role.Firehose.Arn),
S3BackupMode: pulumi.String("FailedDataOnly"),
RequestConfiguration: &kinesis.FirehoseDeliveryStreamHttpEndpointConfigurationRequestConfigurationArgs{
ContentEncoding: pulumi.String("GZIP"),
CommonAttributes: kinesis.FirehoseDeliveryStreamHttpEndpointConfigurationRequestConfigurationCommonAttributeArray{
&kinesis.FirehoseDeliveryStreamHttpEndpointConfigurationRequestConfigurationCommonAttributeArgs{
Name: pulumi.String("testname"),
Value: pulumi.String("testvalue"),
},
&kinesis.FirehoseDeliveryStreamHttpEndpointConfigurationRequestConfigurationCommonAttributeArgs{
Name: pulumi.String("testname2"),
Value: pulumi.String("testvalue2"),
},
},
},
},
})
if err != nil {
return err
}
return nil
})
}
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.kinesis.FirehoseDeliveryStream;
import com.pulumi.aws.kinesis.FirehoseDeliveryStreamArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamS3ConfigurationArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamHttpEndpointConfigurationArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamHttpEndpointConfigurationRequestConfigurationArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamHttpEndpointConfigurationRequestConfigurationCommonAttributeArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var testStream = new FirehoseDeliveryStream("testStream", FirehoseDeliveryStreamArgs.builder()
.destination("http_endpoint")
.s3Configuration(FirehoseDeliveryStreamS3ConfigurationArgs.builder()
.roleArn(aws_iam_role.firehose().arn())
.bucketArn(aws_s3_bucket.bucket().arn())
.bufferSize(10)
.bufferInterval(400)
.compressionFormat("GZIP")
.build())
.httpEndpointConfiguration(FirehoseDeliveryStreamHttpEndpointConfigurationArgs.builder()
.url("https://aws-api.newrelic.com/firehose/v1")
.name("New Relic")
.accessKey("my-key")
.bufferingSize(15)
.bufferingInterval(600)
.roleArn(aws_iam_role.firehose().arn())
.s3BackupMode("FailedDataOnly")
.requestConfiguration(FirehoseDeliveryStreamHttpEndpointConfigurationRequestConfigurationArgs.builder()
.contentEncoding("GZIP")
.commonAttributes(
FirehoseDeliveryStreamHttpEndpointConfigurationRequestConfigurationCommonAttributeArgs.builder()
.name("testname")
.value("testvalue")
.build(),
FirehoseDeliveryStreamHttpEndpointConfigurationRequestConfigurationCommonAttributeArgs.builder()
.name("testname2")
.value("testvalue2")
.build())
.build())
.build())
.build());
}
}
import pulumi
import pulumi_aws as aws
test_stream = aws.kinesis.FirehoseDeliveryStream("testStream",
destination="http_endpoint",
s3_configuration=aws.kinesis.FirehoseDeliveryStreamS3ConfigurationArgs(
role_arn=aws_iam_role["firehose"]["arn"],
bucket_arn=aws_s3_bucket["bucket"]["arn"],
buffer_size=10,
buffer_interval=400,
compression_format="GZIP",
),
http_endpoint_configuration=aws.kinesis.FirehoseDeliveryStreamHttpEndpointConfigurationArgs(
url="https://aws-api.newrelic.com/firehose/v1",
name="New Relic",
access_key="my-key",
buffering_size=15,
buffering_interval=600,
role_arn=aws_iam_role["firehose"]["arn"],
s3_backup_mode="FailedDataOnly",
request_configuration=aws.kinesis.FirehoseDeliveryStreamHttpEndpointConfigurationRequestConfigurationArgs(
content_encoding="GZIP",
common_attributes=[
aws.kinesis.FirehoseDeliveryStreamHttpEndpointConfigurationRequestConfigurationCommonAttributeArgs(
name="testname",
value="testvalue",
),
aws.kinesis.FirehoseDeliveryStreamHttpEndpointConfigurationRequestConfigurationCommonAttributeArgs(
name="testname2",
value="testvalue2",
),
],
),
))
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
const testStream = new aws.kinesis.FirehoseDeliveryStream("testStream", {
destination: "http_endpoint",
s3Configuration: {
roleArn: aws_iam_role.firehose.arn,
bucketArn: aws_s3_bucket.bucket.arn,
bufferSize: 10,
bufferInterval: 400,
compressionFormat: "GZIP",
},
httpEndpointConfiguration: {
url: "https://aws-api.newrelic.com/firehose/v1",
name: "New Relic",
accessKey: "my-key",
bufferingSize: 15,
bufferingInterval: 600,
roleArn: aws_iam_role.firehose.arn,
s3BackupMode: "FailedDataOnly",
requestConfiguration: {
contentEncoding: "GZIP",
commonAttributes: [
{
name: "testname",
value: "testvalue",
},
{
name: "testname2",
value: "testvalue2",
},
],
},
},
});
resources:
testStream:
type: aws:kinesis:FirehoseDeliveryStream
properties:
destination: http_endpoint
s3Configuration:
roleArn: ${aws_iam_role.firehose.arn}
bucketArn: ${aws_s3_bucket.bucket.arn}
bufferSize: 10
bufferInterval: 400
compressionFormat: GZIP
httpEndpointConfiguration:
url: https://aws-api.newrelic.com/firehose/v1
name: New Relic
accessKey: my-key
bufferingSize: 15
bufferingInterval: 600
roleArn: ${aws_iam_role.firehose.arn}
s3BackupMode: FailedDataOnly
requestConfiguration:
contentEncoding: GZIP
commonAttributes:
- name: testname
value: testvalue
- name: testname2
value: testvalue2
Create FirehoseDeliveryStream Resource
new FirehoseDeliveryStream(name: string, args: FirehoseDeliveryStreamArgs, opts?: CustomResourceOptions);
@overload
def FirehoseDeliveryStream(resource_name: str,
opts: Optional[ResourceOptions] = None,
arn: Optional[str] = None,
destination: Optional[str] = None,
destination_id: Optional[str] = None,
elasticsearch_configuration: Optional[FirehoseDeliveryStreamElasticsearchConfigurationArgs] = None,
extended_s3_configuration: Optional[FirehoseDeliveryStreamExtendedS3ConfigurationArgs] = None,
http_endpoint_configuration: Optional[FirehoseDeliveryStreamHttpEndpointConfigurationArgs] = None,
kinesis_source_configuration: Optional[FirehoseDeliveryStreamKinesisSourceConfigurationArgs] = None,
name: Optional[str] = None,
opensearch_configuration: Optional[FirehoseDeliveryStreamOpensearchConfigurationArgs] = None,
redshift_configuration: Optional[FirehoseDeliveryStreamRedshiftConfigurationArgs] = None,
s3_configuration: Optional[FirehoseDeliveryStreamS3ConfigurationArgs] = None,
server_side_encryption: Optional[FirehoseDeliveryStreamServerSideEncryptionArgs] = None,
splunk_configuration: Optional[FirehoseDeliveryStreamSplunkConfigurationArgs] = None,
tags: Optional[Mapping[str, str]] = None,
version_id: Optional[str] = None)
@overload
def FirehoseDeliveryStream(resource_name: str,
args: FirehoseDeliveryStreamArgs,
opts: Optional[ResourceOptions] = None)
func NewFirehoseDeliveryStream(ctx *Context, name string, args FirehoseDeliveryStreamArgs, opts ...ResourceOption) (*FirehoseDeliveryStream, error)
public FirehoseDeliveryStream(string name, FirehoseDeliveryStreamArgs args, CustomResourceOptions? opts = null)
public FirehoseDeliveryStream(String name, FirehoseDeliveryStreamArgs args)
public FirehoseDeliveryStream(String name, FirehoseDeliveryStreamArgs args, CustomResourceOptions options)
type: aws:kinesis:FirehoseDeliveryStream
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args FirehoseDeliveryStreamArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args FirehoseDeliveryStreamArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args FirehoseDeliveryStreamArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args FirehoseDeliveryStreamArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args FirehoseDeliveryStreamArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
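To tie the constructor signatures above together, the following TypeScript sketch shows the three pieces in practice: the logical name, the args bag, and the options bag. It is only an illustration; the inline bucket and role are hypothetical stand-ins so the snippet is self-contained, and any existing equivalents would work just as well.
import * as aws from "@pulumi/aws";

// Hypothetical bucket and role, created inline only so the sketch is self-contained.
const bucket = new aws.s3.BucketV2("sketch-bucket");
const firehoseRole = new aws.iam.Role("sketch-firehose-role", {
    assumeRolePolicy: JSON.stringify({
        Version: "2012-10-17",
        Statement: [{
            Effect: "Allow",
            Principal: { Service: "firehose.amazonaws.com" },
            Action: "sts:AssumeRole",
        }],
    }),
});

const stream = new aws.kinesis.FirehoseDeliveryStream(
    "sketch-stream",                     // name: the unique name of the resource
    {
        destination: "extended_s3",      // args: FirehoseDeliveryStreamArgs
        extendedS3Configuration: {
            roleArn: firehoseRole.arn,
            bucketArn: bucket.arn,
        },
    },
    { dependsOn: [firehoseRole] },       // opts: CustomResourceOptions
);
The same shape applies in the other SDKs, with the args and options types named as shown in the signatures above.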
FirehoseDeliveryStream Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
The FirehoseDeliveryStream resource accepts the following input properties:
- Destination string
The destination where the data is delivered. The only options are s3 (Deprecated, use extended_s3 instead), extended_s3, redshift, elasticsearch, splunk, http_endpoint and opensearch.
- Arn string
The Amazon Resource Name (ARN) specifying the Stream
- DestinationId string
- ElasticsearchConfiguration FirehoseDeliveryStreamElasticsearchConfigurationArgs
Configuration options if elasticsearch is the destination. More details are given below.
- ExtendedS3Configuration FirehoseDeliveryStreamExtendedS3ConfigurationArgs
Enhanced configuration options for the s3 destination. More details are given below.
- HttpEndpointConfiguration FirehoseDeliveryStreamHttpEndpointConfigurationArgs
Configuration options if http_endpoint is the destination. Requires the user to also specify an s3_configuration block. More details are given below.
- KinesisSourceConfiguration FirehoseDeliveryStreamKinesisSourceConfigurationArgs
Specifies the Kinesis stream to use as the source of the Firehose delivery stream.
- Name string
A name to identify the stream. This is unique to the AWS account and region the Stream is created in. When using the stream for WAF logging, the name must be prefixed with aws-waf-logs-. See AWS Documentation for more details.
- OpensearchConfiguration FirehoseDeliveryStreamOpensearchConfigurationArgs
Configuration options if opensearch is the destination. More details are given below.
- RedshiftConfiguration FirehoseDeliveryStreamRedshiftConfigurationArgs
Configuration options if redshift is the destination. Using redshift_configuration requires the user to also specify an s3_configuration block. More details are given below.
- S3Configuration FirehoseDeliveryStreamS3ConfigurationArgs
Required for non-S3 destinations. For an S3 destination, use extended_s3_configuration instead. Configuration options for the s3 destination (or the intermediate bucket if the destination is redshift). More details are given below.
- ServerSideEncryption FirehoseDeliveryStreamServerSideEncryptionArgs
Encrypt-at-rest options. Server-side encryption should not be enabled when a Kinesis stream is configured as the source of the Firehose delivery stream.
- SplunkConfiguration FirehoseDeliveryStreamSplunkConfigurationArgs
Configuration options if splunk is the destination. More details are given below.
- Tags Dictionary<string, string>
A map of tags to assign to the resource. If the provider is configured with a default_tags configuration block, tags with matching keys will overwrite those defined at the provider level.
- VersionId string
Specifies the table version for the output data schema. Defaults to LATEST.
- Destination string
The destination where the data is delivered. The only options are s3 (Deprecated, use extended_s3 instead), extended_s3, redshift, elasticsearch, splunk, http_endpoint and opensearch.
- Arn string
The Amazon Resource Name (ARN) specifying the Stream
- DestinationId string
- ElasticsearchConfiguration FirehoseDeliveryStreamElasticsearchConfigurationArgs
Configuration options if elasticsearch is the destination. More details are given below.
- ExtendedS3Configuration FirehoseDeliveryStreamExtendedS3ConfigurationArgs
Enhanced configuration options for the s3 destination. More details are given below.
- HttpEndpointConfiguration FirehoseDeliveryStreamHttpEndpointConfigurationArgs
Configuration options if http_endpoint is the destination. Requires the user to also specify an s3_configuration block. More details are given below.
- KinesisSourceConfiguration FirehoseDeliveryStreamKinesisSourceConfigurationArgs
Specifies the Kinesis stream to use as the source of the Firehose delivery stream.
- Name string
A name to identify the stream. This is unique to the AWS account and region the Stream is created in. When using the stream for WAF logging, the name must be prefixed with aws-waf-logs-. See AWS Documentation for more details.
- OpensearchConfiguration FirehoseDeliveryStreamOpensearchConfigurationArgs
Configuration options if opensearch is the destination. More details are given below.
- RedshiftConfiguration FirehoseDeliveryStreamRedshiftConfigurationArgs
Configuration options if redshift is the destination. Using redshift_configuration requires the user to also specify an s3_configuration block. More details are given below.
- S3Configuration FirehoseDeliveryStreamS3ConfigurationArgs
Required for non-S3 destinations. For an S3 destination, use extended_s3_configuration instead. Configuration options for the s3 destination (or the intermediate bucket if the destination is redshift). More details are given below.
- ServerSideEncryption FirehoseDeliveryStreamServerSideEncryptionArgs
Encrypt-at-rest options. Server-side encryption should not be enabled when a Kinesis stream is configured as the source of the Firehose delivery stream.
- SplunkConfiguration FirehoseDeliveryStreamSplunkConfigurationArgs
Configuration options if splunk is the destination. More details are given below.
- Tags map[string]string
A map of tags to assign to the resource. If the provider is configured with a default_tags configuration block, tags with matching keys will overwrite those defined at the provider level.
- VersionId string
Specifies the table version for the output data schema. Defaults to LATEST.
- destination String
The destination where the data is delivered. The only options are s3 (Deprecated, use extended_s3 instead), extended_s3, redshift, elasticsearch, splunk, http_endpoint and opensearch.
- arn String
The Amazon Resource Name (ARN) specifying the Stream
- destinationId String
- elasticsearchConfiguration FirehoseDeliveryStreamElasticsearchConfigurationArgs
Configuration options if elasticsearch is the destination. More details are given below.
- extendedS3Configuration FirehoseDeliveryStreamExtendedS3ConfigurationArgs
Enhanced configuration options for the s3 destination. More details are given below.
- httpEndpointConfiguration FirehoseDeliveryStreamHttpEndpointConfigurationArgs
Configuration options if http_endpoint is the destination. Requires the user to also specify an s3_configuration block. More details are given below.
- kinesisSourceConfiguration FirehoseDeliveryStreamKinesisSourceConfigurationArgs
Specifies the Kinesis stream to use as the source of the Firehose delivery stream.
- name String
A name to identify the stream. This is unique to the AWS account and region the Stream is created in. When using the stream for WAF logging, the name must be prefixed with aws-waf-logs-. See AWS Documentation for more details.
- opensearchConfiguration FirehoseDeliveryStreamOpensearchConfigurationArgs
Configuration options if opensearch is the destination. More details are given below.
- redshiftConfiguration FirehoseDeliveryStreamRedshiftConfigurationArgs
Configuration options if redshift is the destination. Using redshift_configuration requires the user to also specify an s3_configuration block. More details are given below.
- s3Configuration FirehoseDeliveryStreamS3ConfigurationArgs
Required for non-S3 destinations. For an S3 destination, use extended_s3_configuration instead. Configuration options for the s3 destination (or the intermediate bucket if the destination is redshift). More details are given below.
- serverSideEncryption FirehoseDeliveryStreamServerSideEncryptionArgs
Encrypt-at-rest options. Server-side encryption should not be enabled when a Kinesis stream is configured as the source of the Firehose delivery stream.
- splunkConfiguration FirehoseDeliveryStreamSplunkConfigurationArgs
Configuration options if splunk is the destination. More details are given below.
- tags Map<String,String>
A map of tags to assign to the resource. If the provider is configured with a default_tags configuration block, tags with matching keys will overwrite those defined at the provider level.
- versionId String
Specifies the table version for the output data schema. Defaults to LATEST.
- destination string
The destination where the data is delivered. The only options are s3 (Deprecated, use extended_s3 instead), extended_s3, redshift, elasticsearch, splunk, http_endpoint and opensearch.
- arn string
The Amazon Resource Name (ARN) specifying the Stream
- destinationId string
- elasticsearchConfiguration FirehoseDeliveryStreamElasticsearchConfigurationArgs
Configuration options if elasticsearch is the destination. More details are given below.
- extendedS3Configuration FirehoseDeliveryStreamExtendedS3ConfigurationArgs
Enhanced configuration options for the s3 destination. More details are given below.
- httpEndpointConfiguration FirehoseDeliveryStreamHttpEndpointConfigurationArgs
Configuration options if http_endpoint is the destination. Requires the user to also specify an s3_configuration block. More details are given below.
- kinesisSourceConfiguration FirehoseDeliveryStreamKinesisSourceConfigurationArgs
Specifies the Kinesis stream to use as the source of the Firehose delivery stream.
- name string
A name to identify the stream. This is unique to the AWS account and region the Stream is created in. When using the stream for WAF logging, the name must be prefixed with aws-waf-logs-. See AWS Documentation for more details.
- opensearchConfiguration FirehoseDeliveryStreamOpensearchConfigurationArgs
Configuration options if opensearch is the destination. More details are given below.
- redshiftConfiguration FirehoseDeliveryStreamRedshiftConfigurationArgs
Configuration options if redshift is the destination. Using redshift_configuration requires the user to also specify an s3_configuration block. More details are given below.
- s3Configuration FirehoseDeliveryStreamS3ConfigurationArgs
Required for non-S3 destinations. For an S3 destination, use extended_s3_configuration instead. Configuration options for the s3 destination (or the intermediate bucket if the destination is redshift). More details are given below.
- serverSideEncryption FirehoseDeliveryStreamServerSideEncryptionArgs
Encrypt-at-rest options. Server-side encryption should not be enabled when a Kinesis stream is configured as the source of the Firehose delivery stream.
- splunkConfiguration FirehoseDeliveryStreamSplunkConfigurationArgs
Configuration options if splunk is the destination. More details are given below.
- tags {[key: string]: string}
A map of tags to assign to the resource. If the provider is configured with a default_tags configuration block, tags with matching keys will overwrite those defined at the provider level.
- versionId string
Specifies the table version for the output data schema. Defaults to LATEST.
- destination str
The destination where the data is delivered. The only options are s3 (Deprecated, use extended_s3 instead), extended_s3, redshift, elasticsearch, splunk, http_endpoint and opensearch.
- arn str
The Amazon Resource Name (ARN) specifying the Stream
- destination_id str
- elasticsearch_configuration FirehoseDeliveryStreamElasticsearchConfigurationArgs
Configuration options if elasticsearch is the destination. More details are given below.
- extended_s3_configuration FirehoseDeliveryStreamExtendedS3ConfigurationArgs
Enhanced configuration options for the s3 destination. More details are given below.
- http_endpoint_configuration FirehoseDeliveryStreamHttpEndpointConfigurationArgs
Configuration options if http_endpoint is the destination. Requires the user to also specify an s3_configuration block. More details are given below.
- kinesis_source_configuration FirehoseDeliveryStreamKinesisSourceConfigurationArgs
Specifies the Kinesis stream to use as the source of the Firehose delivery stream.
- name str
A name to identify the stream. This is unique to the AWS account and region the Stream is created in. When using the stream for WAF logging, the name must be prefixed with aws-waf-logs-. See AWS Documentation for more details.
- opensearch_configuration FirehoseDeliveryStreamOpensearchConfigurationArgs
Configuration options if opensearch is the destination. More details are given below.
- redshift_configuration FirehoseDeliveryStreamRedshiftConfigurationArgs
Configuration options if redshift is the destination. Using redshift_configuration requires the user to also specify an s3_configuration block. More details are given below.
- s3_configuration FirehoseDeliveryStreamS3ConfigurationArgs
Required for non-S3 destinations. For an S3 destination, use extended_s3_configuration instead. Configuration options for the s3 destination (or the intermediate bucket if the destination is redshift). More details are given below.
- server_side_encryption FirehoseDeliveryStreamServerSideEncryptionArgs
Encrypt-at-rest options. Server-side encryption should not be enabled when a Kinesis stream is configured as the source of the Firehose delivery stream.
- splunk_configuration FirehoseDeliveryStreamSplunkConfigurationArgs
Configuration options if splunk is the destination. More details are given below.
- tags Mapping[str, str]
A map of tags to assign to the resource. If the provider is configured with a default_tags configuration block, tags with matching keys will overwrite those defined at the provider level.
- version_id str
Specifies the table version for the output data schema. Defaults to LATEST.
- destination String
The destination where the data is delivered. The only options are s3 (Deprecated, use extended_s3 instead), extended_s3, redshift, elasticsearch, splunk, http_endpoint and opensearch.
- arn String
The Amazon Resource Name (ARN) specifying the Stream
- destinationId String
- elasticsearchConfiguration Property Map
Configuration options if elasticsearch is the destination. More details are given below.
- extendedS3Configuration Property Map
Enhanced configuration options for the s3 destination. More details are given below.
- httpEndpointConfiguration Property Map
Configuration options if http_endpoint is the destination. Requires the user to also specify an s3_configuration block. More details are given below.
- kinesisSourceConfiguration Property Map
Specifies the Kinesis stream to use as the source of the Firehose delivery stream.
- name String
A name to identify the stream. This is unique to the AWS account and region the Stream is created in. When using the stream for WAF logging, the name must be prefixed with aws-waf-logs-. See AWS Documentation for more details.
- opensearchConfiguration Property Map
Configuration options if opensearch is the destination. More details are given below.
- redshiftConfiguration Property Map
Configuration options if redshift is the destination. Using redshift_configuration requires the user to also specify an s3_configuration block. More details are given below.
- s3Configuration Property Map
Required for non-S3 destinations. For an S3 destination, use extended_s3_configuration instead. Configuration options for the s3 destination (or the intermediate bucket if the destination is redshift). More details are given below.
- serverSideEncryption Property Map
Encrypt-at-rest options. Server-side encryption should not be enabled when a Kinesis stream is configured as the source of the Firehose delivery stream.
- splunkConfiguration Property Map
Configuration options if splunk is the destination. More details are given below.
- tags Map<String>
A map of tags to assign to the resource. If the provider is configured with a default_tags configuration block, tags with matching keys will overwrite those defined at the provider level.
- versionId String
Specifies the table version for the output data schema. Defaults to LATEST.
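None of the examples above use kinesis_source_configuration, so here is a hedged TypeScript sketch of a delivery stream that reads records from an existing Kinesis stream instead of accepting direct PUTs. The resource names are placeholders, the IAM policies that would grant the role read access to the source stream and write access to the bucket are omitted for brevity, and, per the note on server_side_encryption above, no encryption block is set when a Kinesis source is configured.
import * as aws from "@pulumi/aws";

// Placeholder resources so the sketch is self-contained; the IAM permissions
// the role needs to read the stream and write to the bucket are not shown.
const sourceStream = new aws.kinesis.Stream("source", { shardCount: 1 });
const bucket = new aws.s3.BucketV2("backup");
const firehoseRole = new aws.iam.Role("firehoseRole", {
    assumeRolePolicy: JSON.stringify({
        Version: "2012-10-17",
        Statement: [{
            Effect: "Allow",
            Principal: { Service: "firehose.amazonaws.com" },
            Action: "sts:AssumeRole",
        }],
    }),
});

// kinesisSourceConfiguration switches the stream from direct PUT to reading
// from the Kinesis stream; serverSideEncryption is intentionally left unset.
const fromKinesis = new aws.kinesis.FirehoseDeliveryStream("fromKinesis", {
    destination: "extended_s3",
    kinesisSourceConfiguration: {
        kinesisStreamArn: sourceStream.arn,
        roleArn: firehoseRole.arn,
    },
    extendedS3Configuration: {
        roleArn: firehoseRole.arn,
        bucketArn: bucket.arn,
    },
    tags: { Environment: "dev" },
});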
Outputs
All input properties are implicitly available as output properties. Additionally, the FirehoseDeliveryStream resource produces the following output properties:
- id (string): The provider-assigned unique ID for this managed resource.
- tags_all (map of string): A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block (see the example below).
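The interaction between tags and tags_all can be seen in the following TypeScript sketch; the provider alias, tag keys, and role ARN are illustrative assumptions.
import * as aws from "@pulumi/aws";

// A provider with default_tags; every resource created through it inherits Team.
const tagged = new aws.Provider("tagged", {
    defaultTags: { tags: { Team: "data-platform" } },
});

const bucket = new aws.s3.BucketV2("audit-bucket", {}, { provider: tagged });

const audit = new aws.kinesis.FirehoseDeliveryStream("audit", {
    destination: "extended_s3",
    extendedS3Configuration: {
        roleArn: "arn:aws:iam::123456789012:role/firehose-role", // hypothetical
        bucketArn: bucket.arn,
    },
    tags: { Team: "audit" }, // a matching key overrides the provider-level value
}, { provider: tagged });

// tagsAll merges the resource tags with the inherited default_tags.
export const effectiveTags = audit.tagsAll;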
Look up Existing FirehoseDeliveryStream Resource
Get an existing FirehoseDeliveryStream resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: FirehoseDeliveryStreamState, opts?: CustomResourceOptions): FirehoseDeliveryStream
@staticmethod
def get(resource_name: str,
id: str,
opts: Optional[ResourceOptions] = None,
arn: Optional[str] = None,
destination: Optional[str] = None,
destination_id: Optional[str] = None,
elasticsearch_configuration: Optional[FirehoseDeliveryStreamElasticsearchConfigurationArgs] = None,
extended_s3_configuration: Optional[FirehoseDeliveryStreamExtendedS3ConfigurationArgs] = None,
http_endpoint_configuration: Optional[FirehoseDeliveryStreamHttpEndpointConfigurationArgs] = None,
kinesis_source_configuration: Optional[FirehoseDeliveryStreamKinesisSourceConfigurationArgs] = None,
name: Optional[str] = None,
opensearch_configuration: Optional[FirehoseDeliveryStreamOpensearchConfigurationArgs] = None,
redshift_configuration: Optional[FirehoseDeliveryStreamRedshiftConfigurationArgs] = None,
s3_configuration: Optional[FirehoseDeliveryStreamS3ConfigurationArgs] = None,
server_side_encryption: Optional[FirehoseDeliveryStreamServerSideEncryptionArgs] = None,
splunk_configuration: Optional[FirehoseDeliveryStreamSplunkConfigurationArgs] = None,
tags: Optional[Mapping[str, str]] = None,
tags_all: Optional[Mapping[str, str]] = None,
version_id: Optional[str] = None) -> FirehoseDeliveryStream
func GetFirehoseDeliveryStream(ctx *Context, name string, id IDInput, state *FirehoseDeliveryStreamState, opts ...ResourceOption) (*FirehoseDeliveryStream, error)
public static FirehoseDeliveryStream Get(string name, Input<string> id, FirehoseDeliveryStreamState? state, CustomResourceOptions? opts = null)
public static FirehoseDeliveryStream get(String name, Output<String> id, FirehoseDeliveryStreamState state, CustomResourceOptions options)
Resource lookup is not supported in YAML
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
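A minimal lookup in TypeScript is sketched below. As with pulumi import, the delivery stream's ARN is assumed to serve as its provider ID; the ARN shown is a placeholder. The state properties that can accompany such a lookup are listed after the example.
import * as aws from "@pulumi/aws";

// Adopt a reference to a delivery stream that already exists outside this program.
const existing = aws.kinesis.FirehoseDeliveryStream.get(
    "existing-stream",
    "arn:aws:firehose:us-east-1:123456789012:deliverystream/example-stream",
);

// Output properties of the looked-up resource are available as usual.
export const existingDestination = existing.destination;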
- arn (string): The Amazon Resource Name (ARN) specifying the Stream.
- destination (string): This is the destination to where the data is delivered. The only options are s3 (deprecated, use extended_s3 instead), extended_s3, redshift, elasticsearch, splunk, http_endpoint and opensearch.
- destination_id (string)
- elasticsearch_configuration (FirehoseDeliveryStreamElasticsearchConfigurationArgs): Configuration options if Elasticsearch is the destination. More details are given below.
- extended_s3_configuration (FirehoseDeliveryStreamExtendedS3ConfigurationArgs): Enhanced configuration options for the S3 destination. More details are given below.
- http_endpoint_configuration (FirehoseDeliveryStreamHttpEndpointConfigurationArgs): Configuration options if http_endpoint is the destination. Requires the user to also specify an s3_configuration block. More details are given below.
- kinesis_source_configuration (FirehoseDeliveryStreamKinesisSourceConfigurationArgs): Specifies the Kinesis stream that is used as the source of the Firehose delivery stream.
- name (string): A name to identify the stream. This is unique to the AWS account and region the Stream is created in. When used for WAF logging, the name must be prefixed with aws-waf-logs-. See the AWS documentation for more details.
- opensearch_configuration (FirehoseDeliveryStreamOpensearchConfigurationArgs): Configuration options if OpenSearch is the destination. More details are given below.
- redshift_configuration (FirehoseDeliveryStreamRedshiftConfigurationArgs): Configuration options if Redshift is the destination. Using redshift_configuration requires the user to also specify an s3_configuration block. More details are given below.
- s3_configuration (FirehoseDeliveryStreamS3ConfigurationArgs): Required for non-S3 destinations. For an S3 destination, use extended_s3_configuration instead. Configuration options for the S3 destination (or the intermediate bucket if the destination is Redshift). More details are given below.
- server_side_encryption (FirehoseDeliveryStreamServerSideEncryptionArgs): Encrypt at rest options. Server-side encryption should not be enabled when a Kinesis stream is configured as the source of the Firehose delivery stream.
- splunk_configuration (FirehoseDeliveryStreamSplunkConfigurationArgs): Configuration options if Splunk is the destination. More details are given below.
- tags (map of string): A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider level.
- tags_all (map of string): A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.
- version_id (string): Specifies the table version for the output data schema. Defaults to LATEST.
Supporting Types
FirehoseDeliveryStreamElasticsearchConfiguration
- index_name (string): The Elasticsearch index name.
- role_arn (string): The ARN of the IAM role to be assumed by Firehose for calling the Amazon ES Configuration API and for indexing documents. The IAM role must have permission for DescribeElasticsearchDomain, DescribeElasticsearchDomains, and DescribeElasticsearchDomainConfig. The pattern needs to be arn:.*.
- buffering_interval (int): Buffer incoming data for the specified period of time, in seconds between 60 and 900, before delivering it to the destination. The default value is 300s.
- buffering_size (int): Buffer incoming data to the specified size, in MBs between 1 and 100, before delivering it to the destination. The default value is 5MB.
- cloudwatch_logging_options (FirehoseDeliveryStreamElasticsearchConfigurationCloudwatchLoggingOptions): The CloudWatch Logging Options for the delivery stream. More details are given below.
- cluster_endpoint (string): The endpoint to use when communicating with the cluster. Conflicts with domain_arn.
- domain_arn (string): The ARN of the Amazon ES domain. The pattern needs to be arn:.*. Conflicts with cluster_endpoint.
- index_rotation_period (string): The Elasticsearch index rotation period. Index rotation appends a timestamp to the IndexName to facilitate the expiration of old data. Valid values are NoRotation, OneHour, OneDay, OneWeek, and OneMonth. The default value is OneDay.
- processing_configuration (FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfiguration): The data processing configuration. More details are given below.
- retry_duration (int): After an initial failure to deliver to Amazon Elasticsearch, the total amount of time, in seconds between 0 and 7200, during which Firehose re-attempts delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. The default value is 300s. There will be no retry if the value is 0.
- s3_backup_mode (string): Defines how documents should be delivered to Amazon S3. Valid values are FailedDocumentsOnly and AllDocuments. Default value is FailedDocumentsOnly.
- type_name (string): The Elasticsearch type name, with a maximum length of 100 characters.
- vpc_config (FirehoseDeliveryStreamElasticsearchConfigurationVpcConfig): The VPC configuration for the delivery stream to connect to Elasticsearch associated with the VPC. More details are given below.
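The sketch below (TypeScript) shows how an elasticsearch_configuration block is attached to a delivery stream with this provider version, where a top-level s3_configuration block is still required for non-S3 destinations. The domain and role ARNs are placeholders.
import * as aws from "@pulumi/aws";

const backupBucket = new aws.s3.BucketV2("es-backup");

const toElasticsearch = new aws.kinesis.FirehoseDeliveryStream("to-elasticsearch", {
    destination: "elasticsearch",
    elasticsearchConfiguration: {
        domainArn: "arn:aws:es:us-east-1:123456789012:domain/example", // hypothetical
        roleArn: "arn:aws:iam::123456789012:role/firehose-role",       // hypothetical
        indexName: "app-logs",
        indexRotationPeriod: "OneDay",
        s3BackupMode: "FailedDocumentsOnly",
    },
    s3Configuration: {
        roleArn: "arn:aws:iam::123456789012:role/firehose-role",       // hypothetical
        bucketArn: backupBucket.arn,
    },
});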
FirehoseDeliveryStreamElasticsearchConfigurationCloudwatchLoggingOptions
- enabled (bool): Enables or disables the logging. Defaults to false.
- log_group_name (string): The CloudWatch group name for logging. This value is required if enabled is true.
- log_stream_name (string): The CloudWatch log stream name for logging. This value is required if enabled is true.
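A minimal sketch of this block in TypeScript, creating the log group and log stream next to the options object that references them (the resource names are illustrative):
import * as aws from "@pulumi/aws";

const logGroup = new aws.cloudwatch.LogGroup("firehose-logs");
const logStream = new aws.cloudwatch.LogStream("firehose-delivery", {
    logGroupName: logGroup.name,
});

// This object can be assigned to the cloudwatch_logging_options field of any
// destination configuration (elasticsearch, extended_s3, splunk, and so on).
const cloudwatchLoggingOptions = {
    enabled: true,
    logGroupName: logGroup.name,
    logStreamName: logStream.name,
};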
FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfiguration
- enabled (bool): Enables or disables data processing.
- processors (list of FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessor): Array of data processors. More details are given below.
FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessor
- type (string): The type of processor. Valid values: RecordDeAggregation, Lambda, MetadataExtraction, AppendDelimiterToRecord. Validation is done against AWS SDK constants, so values not explicitly listed may also work.
- parameters (list of FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorParameter): Array of processor parameters. More details are given below.
FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorParameter
- parameter_name (string): Parameter name. Valid values: LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter. Validation is done against AWS SDK constants, so values not explicitly listed may also work.
- parameter_value (string): Parameter value. Must be between 1 and 512 characters long (inclusive). When providing a Lambda ARN, you should specify the resource version as well.
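Besides the Lambda processor shown in the example at the top of this page, processors are commonly used for metadata extraction. The sketch below (TypeScript) builds a processing_configuration value using the MetadataExtraction processor; the JQ query and field name are assumptions and must match your record format.
// A processing_configuration value that can be assigned inside a destination
// configuration such as extended_s3_configuration.
const processingConfiguration = {
    enabled: true,
    processors: [{
        type: "MetadataExtraction",
        parameters: [
            // Extract customer_id from each JSON record (hypothetical field name).
            { parameterName: "MetadataExtractionQuery", parameterValue: "{customer_id: .customer_id}" },
            { parameterName: "JsonParsingEngine", parameterValue: "JQ-1.6" },
        ],
    }],
};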
FirehoseDeliveryStreamElasticsearchConfigurationVpcConfig
- role_arn (string): The ARN of the IAM role to be assumed by Firehose for calling the Amazon EC2 configuration API and for creating network interfaces. Make sure the role has the necessary IAM permissions.
- security_group_ids (list of string): A list of security group IDs to associate with Kinesis Firehose.
- subnet_ids (list of string): A list of subnet IDs to associate with Kinesis Firehose.
- vpc_id (string)
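A vpc_config block might look like the following TypeScript sketch; the subnet and security group IDs are placeholders, and the role must carry the EC2 network-interface permissions described above.
// Assign this object to elasticsearch_configuration.vpc_config.
const vpcConfig = {
    roleArn: "arn:aws:iam::123456789012:role/firehose-role", // hypothetical
    subnetIds: ["subnet-0123456789abcdef0"],                 // placeholder IDs
    securityGroupIds: ["sg-0123456789abcdef0"],
};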
FirehoseDeliveryStreamExtendedS3Configuration
- bucket_arn (string): The ARN of the S3 bucket.
- role_arn (string): The ARN of the role that provides access to the source Kinesis stream.
- buffer_interval (int): Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300.
- buffer_size (int): Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5. We recommend setting SizeInMBs to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, set SizeInMBs to be 10 MB or higher.
- cloudwatch_logging_options (FirehoseDeliveryStreamExtendedS3ConfigurationCloudwatchLoggingOptions): The CloudWatch Logging Options for the delivery stream. More details are given below.
- compression_format (string): The compression format. If no value is specified, the default is UNCOMPRESSED. Other supported values are GZIP, ZIP, Snappy, and HADOOP_SNAPPY.
- data_format_conversion_configuration (FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfiguration): Nested argument for the serializer, deserializer, and schema for converting data from the JSON format to the Parquet or ORC format before writing it to Amazon S3. More details given below.
- dynamic_partitioning_configuration (FirehoseDeliveryStreamExtendedS3ConfigurationDynamicPartitioningConfiguration): The configuration for dynamic partitioning. See Dynamic Partitioning Configuration below for more details. Required when using dynamic partitioning.
- error_output_prefix (string): Prefix added to failed records before writing them to S3. Not currently supported for the redshift destination. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.
- kms_key_arn (string): Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will be used.
- prefix (string): The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket.
- processing_configuration (FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfiguration): The data processing configuration. More details are given below.
- s3_backup_configuration (FirehoseDeliveryStreamExtendedS3ConfigurationS3BackupConfiguration): The configuration for backup in Amazon S3. Required if s3_backup_mode is Enabled. Supports the same fields as the s3_configuration object.
- s3_backup_mode (string): The Amazon S3 backup mode. Valid values are Disabled and Enabled. Default value is Disabled.
- bucket
Arn String The ARN of the S3 bucket
- role
Arn String The ARN of the role that provides access to the source Kinesis stream.
- buffer
Interval Number Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300.
- buffer
Size Number Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5. We recommend setting SizeInMBs to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec set SizeInMBs to be 10 MB or higher.
- cloudwatch
Logging Property MapOptions The CloudWatch Logging Options for the delivery stream. More details are given below
- compression
Format String The compression format. If no value is specified, the default is
UNCOMPRESSED
. Other supported values areGZIP
,ZIP
,Snappy
, &HADOOP_SNAPPY
.- data
Format Property MapConversion Configuration Nested argument for the serializer, deserializer, and schema for converting data from the JSON format to the Parquet or ORC format before writing it to Amazon S3. More details given below.
- dynamic
Partitioning Property MapConfiguration The configuration for dynamic partitioning. See Dynamic Partitioning Configuration below for more details. Required when using dynamic partitioning.
- error
Output StringPrefix Prefix added to failed records before writing them to S3. Not currently supported for
redshift
destination. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.- kms
Key StringArn Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will be used.
- prefix String
The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket
- processing
Configuration Property Map The data processing configuration. More details are given below.
- s3Backup
Configuration Property Map The configuration for backup in Amazon S3. Required if
s3_backup_mode
isEnabled
. Supports the same fields ass3_configuration
object.- s3Backup
Mode String The Amazon S3 backup mode. Valid values are
Disabled
andEnabled
. Default value isDisabled
.
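As an illustration of the backup arguments above, a minimal TypeScript sketch of an extended S3 destination that keeps a copy of the raw source records in a second bucket (the ARNs are placeholders standing in for real iam.Role and s3.BucketV2 resources):
import * as aws from "@pulumi/aws";

// Placeholder ARNs; in a real program these come from aws.iam.Role and aws.s3.BucketV2 resources.
const roleArn = "arn:aws:iam::123456789012:role/firehose-delivery-role";
const bucketArn = "arn:aws:s3:::example-delivery-bucket";
const backupBucketArn = "arn:aws:s3:::example-backup-bucket";

const backupStream = new aws.kinesis.FirehoseDeliveryStream("backupStream", {
    destination: "extended_s3",
    extendedS3Configuration: {
        roleArn,
        bucketArn,
        prefix: "data/",
        errorOutputPrefix: "errors/",
        compressionFormat: "GZIP",
        // Keep a copy of the raw source records in a second bucket.
        s3BackupMode: "Enabled",
        s3BackupConfiguration: {
            roleArn,
            bucketArn: backupBucketArn,
            prefix: "backup/",
        },
    },
});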
FirehoseDeliveryStreamExtendedS3ConfigurationCloudwatchLoggingOptions
- Enabled bool
Enables or disables the logging. Defaults to false.
- LogGroupName string
The CloudWatch group name for logging. This value is required if enabled is true.
- LogStreamName string
The CloudWatch log stream name for logging. This value is required if enabled is true.
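For example, wiring these options into an extended S3 destination might look like the following TypeScript sketch (the ARNs, log group name, and log stream name are placeholders; the log group and stream are typically created separately, for instance as aws.cloudwatch.LogGroup and aws.cloudwatch.LogStream resources):
import * as aws from "@pulumi/aws";

// Placeholder ARNs standing in for real IAM role and S3 bucket resources.
const roleArn = "arn:aws:iam::123456789012:role/firehose-delivery-role";
const bucketArn = "arn:aws:s3:::example-delivery-bucket";

const loggedStream = new aws.kinesis.FirehoseDeliveryStream("loggedStream", {
    destination: "extended_s3",
    extendedS3Configuration: {
        roleArn,
        bucketArn,
        cloudwatchLoggingOptions: {
            enabled: true,
            logGroupName: "/aws/kinesisfirehose/logged-stream",  // placeholder group name
            logStreamName: "S3Delivery",                          // placeholder stream name
        },
    },
});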
FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfiguration
- InputFormatConfiguration FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfiguration
Nested argument that specifies the deserializer that you want Kinesis Data Firehose to use to convert the format of your data from JSON. More details below.
- OutputFormatConfiguration FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfiguration
Nested argument that specifies the serializer that you want Kinesis Data Firehose to use to convert the format of your data to the Parquet or ORC format. More details below.
- SchemaConfiguration FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationSchemaConfiguration
Nested argument that specifies the AWS Glue Data Catalog table that contains the column information. More details below.
- Enabled bool
Defaults to true. Set it to false if you want to disable format conversion while preserving the configuration details.
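Putting the nested blocks together, a TypeScript sketch that converts incoming JSON to Parquet using an AWS Glue table for the schema (the ARNs, database name, and table name are placeholders):
import * as aws from "@pulumi/aws";

// Placeholders; the Glue database/table and the IAM role are assumed to exist already.
const roleArn = "arn:aws:iam::123456789012:role/firehose-delivery-role";
const bucketArn = "arn:aws:s3:::example-delivery-bucket";

const parquetStream = new aws.kinesis.FirehoseDeliveryStream("parquetStream", {
    destination: "extended_s3",
    extendedS3Configuration: {
        roleArn,
        bucketArn,
        bufferSize: 128,  // buffer size in MB; AWS requires at least 64 MB when format conversion is enabled
        dataFormatConversionConfiguration: {
            inputFormatConfiguration: {
                deserializer: {
                    hiveJsonSerDe: {
                        // Illustrative Joda-style timestamp pattern for parsing input timestamps.
                        timestampFormats: ["yyyy-MM-dd'T'HH:mm:ss"],
                    },
                },
            },
            outputFormatConfiguration: {
                serializer: {
                    parquetSerDe: {},  // default Parquet settings
                },
            },
            schemaConfiguration: {
                databaseName: "example_glue_db",    // placeholder Glue database
                tableName: "example_glue_table",    // placeholder Glue table
                roleArn,
            },
        },
    },
});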
FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfiguration
- Deserializer FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializer
Nested argument that specifies which deserializer to use. You can choose either the Apache Hive JSON SerDe or the OpenX JSON SerDe. More details below.
FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializer
- HiveJsonSerDe FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializerHiveJsonSerDe
Nested argument that specifies the native Hive / HCatalog JsonSerDe. More details below.
- OpenXJsonSerDe FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializerOpenXJsonSerDe
Nested argument that specifies the OpenX SerDe. More details below.
FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializerHiveJsonSerDe
- TimestampFormats List<string>
A list of how you want Kinesis Data Firehose to parse the date and time stamps that may be present in your input data JSON. To specify these format strings, follow the pattern syntax of JodaTime's DateTimeFormat format strings. For more information, see Class DateTimeFormat. You can also use the special value millis to parse time stamps in epoch milliseconds. If you don't specify a format, Kinesis Data Firehose uses java.sql.Timestamp::valueOf by default.
FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializerOpenXJsonSerDe
- CaseInsensitive bool
When set to true, which is the default, Kinesis Data Firehose converts JSON keys to lowercase before deserializing them.
- ColumnToJsonKeyMappings Dictionary<string, string>
A map of column names to JSON keys that aren't identical to the column names. This is useful when the JSON contains keys that are Hive keywords. For example, timestamp is a Hive keyword. If you have a JSON key named timestamp, set this parameter to { ts = "timestamp" } to map this key to a column named ts.
- ConvertDotsInJsonKeysToUnderscores bool
When set to true, specifies that the names of the keys include dots and that you want Kinesis Data Firehose to replace them with underscores. This is useful because Apache Hive does not allow dots in column names. For example, if the JSON contains a key whose name is "a.b", you can define the column name to be "a_b" when using this option. Defaults to false.
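As a short illustration, the OpenX options map onto a TypeScript fragment like the following (the key mapping is illustrative):
// Sketch: an inputFormatConfiguration that uses the OpenX JSON SerDe instead of the Hive JSON SerDe.
// This object would be assigned to
// extendedS3Configuration.dataFormatConversionConfiguration.inputFormatConfiguration.
const openXInputFormat = {
    deserializer: {
        openXJsonSerDe: {
            caseInsensitive: true,
            // Map the Hive keyword "timestamp" to a column named "ts" (illustrative mapping).
            columnToJsonKeyMappings: { ts: "timestamp" },
            // Turn a JSON key such as "a.b" into a column named "a_b".
            convertDotsInJsonKeysToUnderscores: true,
        },
    },
};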
FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfiguration
- Serializer FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializer
Nested argument that specifies which serializer to use. You can choose either the ORC SerDe or the Parquet SerDe. More details below.
FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializer
- OrcSerDe FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializerOrcSerDe
Nested argument that specifies converting data to the ORC format before storing it in Amazon S3. For more information, see Apache ORC. More details below.
- ParquetSerDe FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializerParquetSerDe
Nested argument that specifies converting data to the Parquet format before storing it in Amazon S3. For more information, see Apache Parquet. More details below.
FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializerOrcSerDe
- BlockSizeBytes int
The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.
- BloomFilterColumns List<string>
A list of column names for which you want Kinesis Data Firehose to create bloom filters.
- BloomFilterFalsePositiveProbability double
The Bloom filter false positive probability (FPP). The lower the FPP, the bigger the Bloom filter. The default value is 0.05, the minimum is 0, and the maximum is 1.
- Compression string
The compression code to use over data blocks. The default is SNAPPY.
- DictionaryKeyThreshold double
A float that represents the fraction of the total number of non-null rows. To turn off dictionary encoding, set this fraction to a number that is less than the number of distinct keys in a dictionary. To always use dictionary encoding, set this threshold to 1.
- EnablePadding bool
Set this to true to indicate that you want stripes to be padded to the HDFS block boundaries. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is false.
- FormatVersion string
The version of the file to write. The possible values are V0_11 and V0_12. The default is V0_12.
- PaddingTolerance double
A float between 0 and 1 that defines the tolerance for block padding as a decimal fraction of stripe size. The default value is 0.05, which means 5 percent of stripe size. For the default values of 64 MiB ORC stripes and 256 MiB HDFS blocks, the default block padding tolerance of 5 percent reserves a maximum of 3.2 MiB for padding within the 256 MiB block. In such a case, if the available size within the block is more than 3.2 MiB, a new, smaller stripe is inserted to fit within that space. This ensures that no stripe crosses block boundaries and causes remote reads within a node-local task. Kinesis Data Firehose ignores this parameter when enable_padding is false.
- RowIndexStride int
The number of rows between index entries. The default is 10000 and the minimum is 1000.
- StripeSizeBytes int
The number of bytes in each stripe. The default is 64 MiB and the minimum is 8 MiB.
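As an illustration, a TypeScript fragment for an ORC serializer that overrides several of these defaults (the bloom filter column name is a placeholder):
// Sketch: an ORC serializer with explicit tuning. This object would be assigned to
// extendedS3Configuration.dataFormatConversionConfiguration.outputFormatConfiguration.serializer.
const orcSerializer = {
    orcSerDe: {
        compression: "ZLIB",               // instead of the default SNAPPY
        blockSizeBytes: 268435456,         // 256 MiB, the default HDFS block size
        stripeSizeBytes: 67108864,         // 64 MiB stripes
        enablePadding: true,
        paddingTolerance: 0.05,
        bloomFilterColumns: ["customer_id"],  // placeholder column name
    },
};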
FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializerParquetSerDe
- BlockSizeBytes int
The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.
- Compression string
The compression code to use over data blocks. The possible values are UNCOMPRESSED, SNAPPY, and GZIP, with the default being SNAPPY. Use SNAPPY for higher decompression speed. Use GZIP if the compression ratio is more important than speed.
- EnableDictionaryCompression bool
Indicates whether to enable dictionary compression.
- MaxPaddingBytes int
The maximum amount of padding to apply. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 0.
- PageSizeBytes int
The Parquet page size. Column chunks are divided into pages. A page is conceptually an indivisible unit (in terms of compression and encoding). The minimum value is 64 KiB and the default is 1 MiB.
- WriterVersion string
Indicates the version of row format to output. The possible values are V1 and V2. The default is V1.
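Similarly, a TypeScript fragment for a Parquet serializer that favors compression ratio over speed:
// Sketch: a Parquet serializer. This object would be assigned to
// extendedS3Configuration.dataFormatConversionConfiguration.outputFormatConfiguration.serializer.
const parquetSerializer = {
    parquetSerDe: {
        compression: "GZIP",             // default is SNAPPY; GZIP trades speed for a better ratio
        enableDictionaryCompression: true,
        pageSizeBytes: 1048576,          // 1 MiB, the default page size
        writerVersion: "V2",
    },
};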
FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationSchemaConfiguration
- DatabaseName string
Specifies the name of the AWS Glue database that contains the schema for the output data.
- RoleArn string
The role that Kinesis Data Firehose can use to access AWS Glue. This role must be in the same account you use for Kinesis Data Firehose. Cross-account roles aren't allowed.
- TableName string
Specifies the AWS Glue table that contains the column information that constitutes your data schema.
- CatalogId string
The ID of the AWS Glue Data Catalog. If you don't supply this, the AWS account ID is used by default.
- Region string
If you don't specify an AWS Region, the default is the current region.
- VersionId string
Specifies the table version for the output data schema. Defaults to LATEST.
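A TypeScript sketch of the Glue side of this configuration; the table's columns and storage descriptor are omitted for brevity, and the role ARN is a placeholder:
import * as aws from "@pulumi/aws";

// Sketch: a Glue database and table supplying the output schema. In practice the table's
// storage descriptor and columns must match the shape of your data.
const glueDb = new aws.glue.CatalogDatabase("glueDb", {
    name: "firehose_schema_db",
});

const glueTable = new aws.glue.CatalogTable("glueTable", {
    databaseName: glueDb.name,
    name: "firehose_schema_table",
});

// Placeholder role ARN; the role must allow Kinesis Data Firehose to read the Glue table.
const glueRoleArn = "arn:aws:iam::123456789012:role/firehose-glue-role";

// This object would be assigned to
// extendedS3Configuration.dataFormatConversionConfiguration.schemaConfiguration.
const schemaConfiguration = {
    databaseName: glueDb.name,
    tableName: glueTable.name,
    roleArn: glueRoleArn,
    versionId: "LATEST",
};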
FirehoseDeliveryStreamExtendedS3ConfigurationDynamicPartitioningConfiguration
- Enabled bool
Enables or disables dynamic partitioning. Defaults to false.
- RetryDuration int
Total amount of seconds Firehose spends on retries. Valid values between 0 and 7200. Default is 300.
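A TypeScript sketch of dynamic partitioning on a JSON field; it relies on the MetadataExtraction processor described in the processing configuration below, and the parameter names and !{...} prefix namespaces follow the AWS dynamic partitioning documentation (the ARNs are placeholders):
import * as aws from "@pulumi/aws";

// Placeholder ARNs standing in for real IAM role and S3 bucket resources.
const roleArn = "arn:aws:iam::123456789012:role/firehose-delivery-role";
const bucketArn = "arn:aws:s3:::example-delivery-bucket";

const partitionedStream = new aws.kinesis.FirehoseDeliveryStream("partitionedStream", {
    destination: "extended_s3",
    extendedS3Configuration: {
        roleArn,
        bucketArn,
        bufferSize: 64,  // dynamic partitioning requires a buffer size of at least 64 MB
        dynamicPartitioningConfiguration: {
            enabled: true,
            retryDuration: 300,
        },
        // The !{partitionKeyFromQuery:...} namespace refers to keys produced by the
        // MetadataExtraction processor below.
        prefix: "data/customer_id=!{partitionKeyFromQuery:customer_id}/",
        errorOutputPrefix: "errors/!{firehose:error-output-type}/",
        processingConfiguration: {
            enabled: true,
            processors: [{
                type: "MetadataExtraction",
                parameters: [
                    { parameterName: "JsonParsingEngine", parameterValue: "JQ-1.6" },
                    { parameterName: "MetadataExtractionQuery", parameterValue: "{customer_id:.customer_id}" },
                ],
            }],
        },
    },
});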
FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfiguration
- Enabled bool
Enables or disables data processing.
- Processors List<FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessor>
Array of data processors. More details are given below.
FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessor
- Type string
The type of processor. Valid values: RecordDeAggregation, Lambda, MetadataExtraction, AppendDelimiterToRecord. Validation is done against AWS SDK constants, so values not explicitly listed may also work.
- Parameters List<FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameter>
Array of processor parameters. More details are given below.
- Type string
The type of processor. Valid values: RecordDeAggregation, Lambda, MetadataExtraction, AppendDelimiterToRecord