1. Packages
  2. AWS Classic
  3. API Docs
  4. lambda
  5. EventSourceMapping

Try the AWS Native preview for resources not available in the classic version.

AWS Classic v6.40.0 published on Wednesday, Jun 12, 2024 by Pulumi

aws.lambda.EventSourceMapping

Explore with Pulumi AI

aws logo

Try the AWS Native preview for resources not available in the classic version.

AWS Classic v6.40.0 published on Wednesday, Jun 12, 2024 by Pulumi

    Provides a Lambda event source mapping. This allows Lambda functions to get events from Kinesis, DynamoDB, SQS, Amazon MQ, and Managed Streaming for Apache Kafka (MSK).

    For information about Lambda and how to use it, see What is AWS Lambda? For information about event source mappings, see CreateEventSourceMapping in the Lambda API documentation.

    Example Usage

    DynamoDB

    import * as pulumi from "@pulumi/pulumi";
    import * as aws from "@pulumi/aws";
    
    const example = new aws.lambda.EventSourceMapping("example", {
        eventSourceArn: exampleAwsDynamodbTable.streamArn,
        functionName: exampleAwsLambdaFunction.arn,
        startingPosition: "LATEST",
    });
    
    import pulumi
    import pulumi_aws as aws
    
    example = aws.lambda_.EventSourceMapping("example",
        event_source_arn=example_aws_dynamodb_table["streamArn"],
        function_name=example_aws_lambda_function["arn"],
        starting_position="LATEST")
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/lambda"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := lambda.NewEventSourceMapping(ctx, "example", &lambda.EventSourceMappingArgs{
    			EventSourceArn:   pulumi.Any(exampleAwsDynamodbTable.StreamArn),
    			FunctionName:     pulumi.Any(exampleAwsLambdaFunction.Arn),
    			StartingPosition: pulumi.String("LATEST"),
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Aws = Pulumi.Aws;
    
    return await Deployment.RunAsync(() => 
    {
        var example = new Aws.Lambda.EventSourceMapping("example", new()
        {
            EventSourceArn = exampleAwsDynamodbTable.StreamArn,
            FunctionName = exampleAwsLambdaFunction.Arn,
            StartingPosition = "LATEST",
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.aws.lambda.EventSourceMapping;
    import com.pulumi.aws.lambda.EventSourceMappingArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var example = new EventSourceMapping("example", EventSourceMappingArgs.builder()
                .eventSourceArn(exampleAwsDynamodbTable.streamArn())
                .functionName(exampleAwsLambdaFunction.arn())
                .startingPosition("LATEST")
                .build());
    
        }
    }
    
    resources:
      example:
        type: aws:lambda:EventSourceMapping
        properties:
          eventSourceArn: ${exampleAwsDynamodbTable.streamArn}
          functionName: ${exampleAwsLambdaFunction.arn}
          startingPosition: LATEST
    

    Kinesis

    import * as pulumi from "@pulumi/pulumi";
    import * as aws from "@pulumi/aws";
    
    const example = new aws.lambda.EventSourceMapping("example", {
        eventSourceArn: exampleAwsKinesisStream.arn,
        functionName: exampleAwsLambdaFunction.arn,
        startingPosition: "LATEST",
    });
    
    import pulumi
    import pulumi_aws as aws
    
    example = aws.lambda_.EventSourceMapping("example",
        event_source_arn=example_aws_kinesis_stream["arn"],
        function_name=example_aws_lambda_function["arn"],
        starting_position="LATEST")
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/lambda"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := lambda.NewEventSourceMapping(ctx, "example", &lambda.EventSourceMappingArgs{
    			EventSourceArn:   pulumi.Any(exampleAwsKinesisStream.Arn),
    			FunctionName:     pulumi.Any(exampleAwsLambdaFunction.Arn),
    			StartingPosition: pulumi.String("LATEST"),
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Aws = Pulumi.Aws;
    
    return await Deployment.RunAsync(() => 
    {
        var example = new Aws.Lambda.EventSourceMapping("example", new()
        {
            EventSourceArn = exampleAwsKinesisStream.Arn,
            FunctionName = exampleAwsLambdaFunction.Arn,
            StartingPosition = "LATEST",
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.aws.lambda.EventSourceMapping;
    import com.pulumi.aws.lambda.EventSourceMappingArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var example = new EventSourceMapping("example", EventSourceMappingArgs.builder()
                .eventSourceArn(exampleAwsKinesisStream.arn())
                .functionName(exampleAwsLambdaFunction.arn())
                .startingPosition("LATEST")
                .build());
    
        }
    }
    
    resources:
      example:
        type: aws:lambda:EventSourceMapping
        properties:
          eventSourceArn: ${exampleAwsKinesisStream.arn}
          functionName: ${exampleAwsLambdaFunction.arn}
          startingPosition: LATEST
    

    Managed Streaming for Apache Kafka (MSK)

    import * as pulumi from "@pulumi/pulumi";
    import * as aws from "@pulumi/aws";
    
    const example = new aws.lambda.EventSourceMapping("example", {
        eventSourceArn: exampleAwsMskCluster.arn,
        functionName: exampleAwsLambdaFunction.arn,
        topics: ["Example"],
        startingPosition: "TRIM_HORIZON",
    });
    
    import pulumi
    import pulumi_aws as aws
    
    example = aws.lambda_.EventSourceMapping("example",
        event_source_arn=example_aws_msk_cluster["arn"],
        function_name=example_aws_lambda_function["arn"],
        topics=["Example"],
        starting_position="TRIM_HORIZON")
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/lambda"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := lambda.NewEventSourceMapping(ctx, "example", &lambda.EventSourceMappingArgs{
    			EventSourceArn: pulumi.Any(exampleAwsMskCluster.Arn),
    			FunctionName:   pulumi.Any(exampleAwsLambdaFunction.Arn),
    			Topics: pulumi.StringArray{
    				pulumi.String("Example"),
    			},
    			StartingPosition: pulumi.String("TRIM_HORIZON"),
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Aws = Pulumi.Aws;
    
    return await Deployment.RunAsync(() => 
    {
        var example = new Aws.Lambda.EventSourceMapping("example", new()
        {
            EventSourceArn = exampleAwsMskCluster.Arn,
            FunctionName = exampleAwsLambdaFunction.Arn,
            Topics = new[]
            {
                "Example",
            },
            StartingPosition = "TRIM_HORIZON",
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.aws.lambda.EventSourceMapping;
    import com.pulumi.aws.lambda.EventSourceMappingArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var example = new EventSourceMapping("example", EventSourceMappingArgs.builder()
                .eventSourceArn(exampleAwsMskCluster.arn())
                .functionName(exampleAwsLambdaFunction.arn())
                .topics("Example")
                .startingPosition("TRIM_HORIZON")
                .build());
    
        }
    }
    
    resources:
      example:
        type: aws:lambda:EventSourceMapping
        properties:
          eventSourceArn: ${exampleAwsMskCluster.arn}
          functionName: ${exampleAwsLambdaFunction.arn}
          topics:
            - Example
          startingPosition: TRIM_HORIZON
    

    Self Managed Apache Kafka

    import * as pulumi from "@pulumi/pulumi";
    import * as aws from "@pulumi/aws";
    
    const example = new aws.lambda.EventSourceMapping("example", {
        functionName: exampleAwsLambdaFunction.arn,
        topics: ["Example"],
        startingPosition: "TRIM_HORIZON",
        selfManagedEventSource: {
            endpoints: {
                KAFKA_BOOTSTRAP_SERVERS: "kafka1.example.com:9092,kafka2.example.com:9092",
            },
        },
        sourceAccessConfigurations: [
            {
                type: "VPC_SUBNET",
                uri: "subnet:subnet-example1",
            },
            {
                type: "VPC_SUBNET",
                uri: "subnet:subnet-example2",
            },
            {
                type: "VPC_SECURITY_GROUP",
                uri: "security_group:sg-example",
            },
        ],
    });
    
    import pulumi
    import pulumi_aws as aws
    
    example = aws.lambda_.EventSourceMapping("example",
        function_name=example_aws_lambda_function["arn"],
        topics=["Example"],
        starting_position="TRIM_HORIZON",
        self_managed_event_source=aws.lambda_.EventSourceMappingSelfManagedEventSourceArgs(
            endpoints={
                "KAFKA_BOOTSTRAP_SERVERS": "kafka1.example.com:9092,kafka2.example.com:9092",
            },
        ),
        source_access_configurations=[
            aws.lambda_.EventSourceMappingSourceAccessConfigurationArgs(
                type="VPC_SUBNET",
                uri="subnet:subnet-example1",
            ),
            aws.lambda_.EventSourceMappingSourceAccessConfigurationArgs(
                type="VPC_SUBNET",
                uri="subnet:subnet-example2",
            ),
            aws.lambda_.EventSourceMappingSourceAccessConfigurationArgs(
                type="VPC_SECURITY_GROUP",
                uri="security_group:sg-example",
            ),
        ])
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/lambda"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := lambda.NewEventSourceMapping(ctx, "example", &lambda.EventSourceMappingArgs{
    			FunctionName: pulumi.Any(exampleAwsLambdaFunction.Arn),
    			Topics: pulumi.StringArray{
    				pulumi.String("Example"),
    			},
    			StartingPosition: pulumi.String("TRIM_HORIZON"),
    			SelfManagedEventSource: &lambda.EventSourceMappingSelfManagedEventSourceArgs{
    				Endpoints: pulumi.StringMap{
    					"KAFKA_BOOTSTRAP_SERVERS": pulumi.String("kafka1.example.com:9092,kafka2.example.com:9092"),
    				},
    			},
    			SourceAccessConfigurations: lambda.EventSourceMappingSourceAccessConfigurationArray{
    				&lambda.EventSourceMappingSourceAccessConfigurationArgs{
    					Type: pulumi.String("VPC_SUBNET"),
    					Uri:  pulumi.String("subnet:subnet-example1"),
    				},
    				&lambda.EventSourceMappingSourceAccessConfigurationArgs{
    					Type: pulumi.String("VPC_SUBNET"),
    					Uri:  pulumi.String("subnet:subnet-example2"),
    				},
    				&lambda.EventSourceMappingSourceAccessConfigurationArgs{
    					Type: pulumi.String("VPC_SECURITY_GROUP"),
    					Uri:  pulumi.String("security_group:sg-example"),
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Aws = Pulumi.Aws;
    
    return await Deployment.RunAsync(() => 
    {
        var example = new Aws.Lambda.EventSourceMapping("example", new()
        {
            FunctionName = exampleAwsLambdaFunction.Arn,
            Topics = new[]
            {
                "Example",
            },
            StartingPosition = "TRIM_HORIZON",
            SelfManagedEventSource = new Aws.Lambda.Inputs.EventSourceMappingSelfManagedEventSourceArgs
            {
                Endpoints = 
                {
                    { "KAFKA_BOOTSTRAP_SERVERS", "kafka1.example.com:9092,kafka2.example.com:9092" },
                },
            },
            SourceAccessConfigurations = new[]
            {
                new Aws.Lambda.Inputs.EventSourceMappingSourceAccessConfigurationArgs
                {
                    Type = "VPC_SUBNET",
                    Uri = "subnet:subnet-example1",
                },
                new Aws.Lambda.Inputs.EventSourceMappingSourceAccessConfigurationArgs
                {
                    Type = "VPC_SUBNET",
                    Uri = "subnet:subnet-example2",
                },
                new Aws.Lambda.Inputs.EventSourceMappingSourceAccessConfigurationArgs
                {
                    Type = "VPC_SECURITY_GROUP",
                    Uri = "security_group:sg-example",
                },
            },
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.aws.lambda.EventSourceMapping;
    import com.pulumi.aws.lambda.EventSourceMappingArgs;
    import com.pulumi.aws.lambda.inputs.EventSourceMappingSelfManagedEventSourceArgs;
    import com.pulumi.aws.lambda.inputs.EventSourceMappingSourceAccessConfigurationArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var example = new EventSourceMapping("example", EventSourceMappingArgs.builder()
                .functionName(exampleAwsLambdaFunction.arn())
                .topics("Example")
                .startingPosition("TRIM_HORIZON")
                .selfManagedEventSource(EventSourceMappingSelfManagedEventSourceArgs.builder()
                    .endpoints(Map.of("KAFKA_BOOTSTRAP_SERVERS", "kafka1.example.com:9092,kafka2.example.com:9092"))
                    .build())
                .sourceAccessConfigurations(            
                    EventSourceMappingSourceAccessConfigurationArgs.builder()
                        .type("VPC_SUBNET")
                        .uri("subnet:subnet-example1")
                        .build(),
                    EventSourceMappingSourceAccessConfigurationArgs.builder()
                        .type("VPC_SUBNET")
                        .uri("subnet:subnet-example2")
                        .build(),
                    EventSourceMappingSourceAccessConfigurationArgs.builder()
                        .type("VPC_SECURITY_GROUP")
                        .uri("security_group:sg-example")
                        .build())
                .build());
    
        }
    }
    
    resources:
      example:
        type: aws:lambda:EventSourceMapping
        properties:
          functionName: ${exampleAwsLambdaFunction.arn}
          topics:
            - Example
          startingPosition: TRIM_HORIZON
          selfManagedEventSource:
            endpoints:
              KAFKA_BOOTSTRAP_SERVERS: kafka1.example.com:9092,kafka2.example.com:9092
          sourceAccessConfigurations:
            - type: VPC_SUBNET
              uri: subnet:subnet-example1
            - type: VPC_SUBNET
              uri: subnet:subnet-example2
            - type: VPC_SECURITY_GROUP
              uri: security_group:sg-example
    

    SQS

    import * as pulumi from "@pulumi/pulumi";
    import * as aws from "@pulumi/aws";
    
    const example = new aws.lambda.EventSourceMapping("example", {
        eventSourceArn: sqsQueueTest.arn,
        functionName: exampleAwsLambdaFunction.arn,
    });
    
    import pulumi
    import pulumi_aws as aws
    
    example = aws.lambda_.EventSourceMapping("example",
        event_source_arn=sqs_queue_test["arn"],
        function_name=example_aws_lambda_function["arn"])
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/lambda"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := lambda.NewEventSourceMapping(ctx, "example", &lambda.EventSourceMappingArgs{
    			EventSourceArn: pulumi.Any(sqsQueueTest.Arn),
    			FunctionName:   pulumi.Any(exampleAwsLambdaFunction.Arn),
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Aws = Pulumi.Aws;
    
    return await Deployment.RunAsync(() => 
    {
        var example = new Aws.Lambda.EventSourceMapping("example", new()
        {
            EventSourceArn = sqsQueueTest.Arn,
            FunctionName = exampleAwsLambdaFunction.Arn,
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.aws.lambda.EventSourceMapping;
    import com.pulumi.aws.lambda.EventSourceMappingArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var example = new EventSourceMapping("example", EventSourceMappingArgs.builder()
                .eventSourceArn(sqsQueueTest.arn())
                .functionName(exampleAwsLambdaFunction.arn())
                .build());
    
        }
    }
    
    resources:
      example:
        type: aws:lambda:EventSourceMapping
        properties:
          eventSourceArn: ${sqsQueueTest.arn}
          functionName: ${exampleAwsLambdaFunction.arn}
    

    SQS with event filter

    import * as pulumi from "@pulumi/pulumi";
    import * as aws from "@pulumi/aws";
    
    const example = new aws.lambda.EventSourceMapping("example", {
        eventSourceArn: sqsQueueTest.arn,
        functionName: exampleAwsLambdaFunction.arn,
        filterCriteria: {
            filters: [{
                pattern: JSON.stringify({
                    body: {
                        Temperature: [{
                            numeric: [
                                ">",
                                0,
                                "<=",
                                100,
                            ],
                        }],
                        Location: ["New York"],
                    },
                }),
            }],
        },
    });
    
    import pulumi
    import json
    import pulumi_aws as aws
    
    example = aws.lambda_.EventSourceMapping("example",
        event_source_arn=sqs_queue_test["arn"],
        function_name=example_aws_lambda_function["arn"],
        filter_criteria=aws.lambda_.EventSourceMappingFilterCriteriaArgs(
            filters=[aws.lambda_.EventSourceMappingFilterCriteriaFilterArgs(
                pattern=json.dumps({
                    "body": {
                        "Temperature": [{
                            "numeric": [
                                ">",
                                0,
                                "<=",
                                100,
                            ],
                        }],
                        "Location": ["New York"],
                    },
                }),
            )],
        ))
    
    package main
    
    import (
    	"encoding/json"
    
    	"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/lambda"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		tmpJSON0, err := json.Marshal(map[string]interface{}{
    			"body": map[string]interface{}{
    				"Temperature": []map[string]interface{}{
    					map[string]interface{}{
    						"numeric": []interface{}{
    							">",
    							0,
    							"<=",
    							100,
    						},
    					},
    				},
    				"Location": []string{
    					"New York",
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		json0 := string(tmpJSON0)
    		_, err = lambda.NewEventSourceMapping(ctx, "example", &lambda.EventSourceMappingArgs{
    			EventSourceArn: pulumi.Any(sqsQueueTest.Arn),
    			FunctionName:   pulumi.Any(exampleAwsLambdaFunction.Arn),
    			FilterCriteria: &lambda.EventSourceMappingFilterCriteriaArgs{
    				Filters: lambda.EventSourceMappingFilterCriteriaFilterArray{
    					&lambda.EventSourceMappingFilterCriteriaFilterArgs{
    						Pattern: pulumi.String(json0),
    					},
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using System.Text.Json;
    using Pulumi;
    using Aws = Pulumi.Aws;
    
    return await Deployment.RunAsync(() => 
    {
        var example = new Aws.Lambda.EventSourceMapping("example", new()
        {
            EventSourceArn = sqsQueueTest.Arn,
            FunctionName = exampleAwsLambdaFunction.Arn,
            FilterCriteria = new Aws.Lambda.Inputs.EventSourceMappingFilterCriteriaArgs
            {
                Filters = new[]
                {
                    new Aws.Lambda.Inputs.EventSourceMappingFilterCriteriaFilterArgs
                    {
                        Pattern = JsonSerializer.Serialize(new Dictionary<string, object?>
                        {
                            ["body"] = new Dictionary<string, object?>
                            {
                                ["Temperature"] = new[]
                                {
                                    new Dictionary<string, object?>
                                    {
                                        ["numeric"] = new object?[]
                                        {
                                            ">",
                                            0,
                                            "<=",
                                            100,
                                        },
                                    },
                                },
                                ["Location"] = new[]
                                {
                                    "New York",
                                },
                            },
                        }),
                    },
                },
            },
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.aws.lambda.EventSourceMapping;
    import com.pulumi.aws.lambda.EventSourceMappingArgs;
    import com.pulumi.aws.lambda.inputs.EventSourceMappingFilterCriteriaArgs;
    import static com.pulumi.codegen.internal.Serialization.*;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var example = new EventSourceMapping("example", EventSourceMappingArgs.builder()
                .eventSourceArn(sqsQueueTest.arn())
                .functionName(exampleAwsLambdaFunction.arn())
                .filterCriteria(EventSourceMappingFilterCriteriaArgs.builder()
                    .filters(EventSourceMappingFilterCriteriaFilterArgs.builder()
                        .pattern(serializeJson(
                            jsonObject(
                                jsonProperty("body", jsonObject(
                                    jsonProperty("Temperature", jsonArray(jsonObject(
                                        jsonProperty("numeric", jsonArray(
                                            ">", 
                                            0, 
                                            "<=", 
                                            100
                                        ))
                                    ))),
                                    jsonProperty("Location", jsonArray("New York"))
                                ))
                            )))
                        .build())
                    .build())
                .build());
    
        }
    }
    
    resources:
      example:
        type: aws:lambda:EventSourceMapping
        properties:
          eventSourceArn: ${sqsQueueTest.arn}
          functionName: ${exampleAwsLambdaFunction.arn}
          filterCriteria:
            filters:
              - pattern:
                  fn::toJSON:
                    body:
                      Temperature:
                        - numeric:
                            - '>'
                            - 0
                            - <=
                            - 100
                      Location:
                        - New York
    

    Amazon MQ (ActiveMQ)

    import * as pulumi from "@pulumi/pulumi";
    import * as aws from "@pulumi/aws";
    
    const example = new aws.lambda.EventSourceMapping("example", {
        batchSize: 10,
        eventSourceArn: exampleAwsMqBroker.arn,
        enabled: true,
        functionName: exampleAwsLambdaFunction.arn,
        queues: "example",
        sourceAccessConfigurations: [{
            type: "BASIC_AUTH",
            uri: exampleAwsSecretsmanagerSecretVersion.arn,
        }],
    });
    
    import pulumi
    import pulumi_aws as aws
    
    example = aws.lambda_.EventSourceMapping("example",
        batch_size=10,
        event_source_arn=example_aws_mq_broker["arn"],
        enabled=True,
        function_name=example_aws_lambda_function["arn"],
        queues="example",
        source_access_configurations=[aws.lambda_.EventSourceMappingSourceAccessConfigurationArgs(
            type="BASIC_AUTH",
            uri=example_aws_secretsmanager_secret_version["arn"],
        )])
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/lambda"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := lambda.NewEventSourceMapping(ctx, "example", &lambda.EventSourceMappingArgs{
    			BatchSize:      pulumi.Int(10),
    			EventSourceArn: pulumi.Any(exampleAwsMqBroker.Arn),
    			Enabled:        pulumi.Bool(true),
    			FunctionName:   pulumi.Any(exampleAwsLambdaFunction.Arn),
    			Queues:         pulumi.String("example"),
    			SourceAccessConfigurations: lambda.EventSourceMappingSourceAccessConfigurationArray{
    				&lambda.EventSourceMappingSourceAccessConfigurationArgs{
    					Type: pulumi.String("BASIC_AUTH"),
    					Uri:  pulumi.Any(exampleAwsSecretsmanagerSecretVersion.Arn),
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Aws = Pulumi.Aws;
    
    return await Deployment.RunAsync(() => 
    {
        var example = new Aws.Lambda.EventSourceMapping("example", new()
        {
            BatchSize = 10,
            EventSourceArn = exampleAwsMqBroker.Arn,
            Enabled = true,
            FunctionName = exampleAwsLambdaFunction.Arn,
            Queues = "example",
            SourceAccessConfigurations = new[]
            {
                new Aws.Lambda.Inputs.EventSourceMappingSourceAccessConfigurationArgs
                {
                    Type = "BASIC_AUTH",
                    Uri = exampleAwsSecretsmanagerSecretVersion.Arn,
                },
            },
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.aws.lambda.EventSourceMapping;
    import com.pulumi.aws.lambda.EventSourceMappingArgs;
    import com.pulumi.aws.lambda.inputs.EventSourceMappingSourceAccessConfigurationArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var example = new EventSourceMapping("example", EventSourceMappingArgs.builder()
                .batchSize(10)
                .eventSourceArn(exampleAwsMqBroker.arn())
                .enabled(true)
                .functionName(exampleAwsLambdaFunction.arn())
                .queues("example")
                .sourceAccessConfigurations(EventSourceMappingSourceAccessConfigurationArgs.builder()
                    .type("BASIC_AUTH")
                    .uri(exampleAwsSecretsmanagerSecretVersion.arn())
                    .build())
                .build());
    
        }
    }
    
    resources:
      example:
        type: aws:lambda:EventSourceMapping
        properties:
          batchSize: 10
          eventSourceArn: ${exampleAwsMqBroker.arn}
          enabled: true
          functionName: ${exampleAwsLambdaFunction.arn}
          queues: example
          sourceAccessConfigurations:
            - type: BASIC_AUTH
              uri: ${exampleAwsSecretsmanagerSecretVersion.arn}
    

    Amazon MQ (RabbitMQ)

    import * as pulumi from "@pulumi/pulumi";
    import * as aws from "@pulumi/aws";
    
    const example = new aws.lambda.EventSourceMapping("example", {
        batchSize: 1,
        eventSourceArn: exampleAwsMqBroker.arn,
        enabled: true,
        functionName: exampleAwsLambdaFunction.arn,
        queues: "example",
        sourceAccessConfigurations: [
            {
                type: "VIRTUAL_HOST",
                uri: "/example",
            },
            {
                type: "BASIC_AUTH",
                uri: exampleAwsSecretsmanagerSecretVersion.arn,
            },
        ],
    });
    
    import pulumi
    import pulumi_aws as aws
    
    example = aws.lambda_.EventSourceMapping("example",
        batch_size=1,
        event_source_arn=example_aws_mq_broker["arn"],
        enabled=True,
        function_name=example_aws_lambda_function["arn"],
        queues="example",
        source_access_configurations=[
            aws.lambda_.EventSourceMappingSourceAccessConfigurationArgs(
                type="VIRTUAL_HOST",
                uri="/example",
            ),
            aws.lambda_.EventSourceMappingSourceAccessConfigurationArgs(
                type="BASIC_AUTH",
                uri=example_aws_secretsmanager_secret_version["arn"],
            ),
        ])
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/lambda"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := lambda.NewEventSourceMapping(ctx, "example", &lambda.EventSourceMappingArgs{
    			BatchSize:      pulumi.Int(1),
    			EventSourceArn: pulumi.Any(exampleAwsMqBroker.Arn),
    			Enabled:        pulumi.Bool(true),
    			FunctionName:   pulumi.Any(exampleAwsLambdaFunction.Arn),
    			Queues:         pulumi.String("example"),
    			SourceAccessConfigurations: lambda.EventSourceMappingSourceAccessConfigurationArray{
    				&lambda.EventSourceMappingSourceAccessConfigurationArgs{
    					Type: pulumi.String("VIRTUAL_HOST"),
    					Uri:  pulumi.String("/example"),
    				},
    				&lambda.EventSourceMappingSourceAccessConfigurationArgs{
    					Type: pulumi.String("BASIC_AUTH"),
    					Uri:  pulumi.Any(exampleAwsSecretsmanagerSecretVersion.Arn),
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Aws = Pulumi.Aws;
    
    return await Deployment.RunAsync(() => 
    {
        var example = new Aws.Lambda.EventSourceMapping("example", new()
        {
            BatchSize = 1,
            EventSourceArn = exampleAwsMqBroker.Arn,
            Enabled = true,
            FunctionName = exampleAwsLambdaFunction.Arn,
            Queues = "example",
            SourceAccessConfigurations = new[]
            {
                new Aws.Lambda.Inputs.EventSourceMappingSourceAccessConfigurationArgs
                {
                    Type = "VIRTUAL_HOST",
                    Uri = "/example",
                },
                new Aws.Lambda.Inputs.EventSourceMappingSourceAccessConfigurationArgs
                {
                    Type = "BASIC_AUTH",
                    Uri = exampleAwsSecretsmanagerSecretVersion.Arn,
                },
            },
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.aws.lambda.EventSourceMapping;
    import com.pulumi.aws.lambda.EventSourceMappingArgs;
    import com.pulumi.aws.lambda.inputs.EventSourceMappingSourceAccessConfigurationArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var example = new EventSourceMapping("example", EventSourceMappingArgs.builder()
                .batchSize(1)
                .eventSourceArn(exampleAwsMqBroker.arn())
                .enabled(true)
                .functionName(exampleAwsLambdaFunction.arn())
                .queues("example")
                .sourceAccessConfigurations(            
                    EventSourceMappingSourceAccessConfigurationArgs.builder()
                        .type("VIRTUAL_HOST")
                        .uri("/example")
                        .build(),
                    EventSourceMappingSourceAccessConfigurationArgs.builder()
                        .type("BASIC_AUTH")
                        .uri(exampleAwsSecretsmanagerSecretVersion.arn())
                        .build())
                .build());
    
        }
    }
    
    resources:
      example:
        type: aws:lambda:EventSourceMapping
        properties:
          batchSize: 1
          eventSourceArn: ${exampleAwsMqBroker.arn}
          enabled: true
          functionName: ${exampleAwsLambdaFunction.arn}
          queues: example
          sourceAccessConfigurations:
            - type: VIRTUAL_HOST
              uri: /example
            - type: BASIC_AUTH
              uri: ${exampleAwsSecretsmanagerSecretVersion.arn}
    

    Create EventSourceMapping Resource

    Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.

    Constructor syntax

    new EventSourceMapping(name: string, args: EventSourceMappingArgs, opts?: CustomResourceOptions);
    @overload
    def EventSourceMapping(resource_name: str,
                           args: EventSourceMappingArgs,
                           opts: Optional[ResourceOptions] = None)
    
    @overload
    def EventSourceMapping(resource_name: str,
                           opts: Optional[ResourceOptions] = None,
                           function_name: Optional[str] = None,
                           maximum_batching_window_in_seconds: Optional[int] = None,
                           destination_config: Optional[_lambda_.EventSourceMappingDestinationConfigArgs] = None,
                           maximum_retry_attempts: Optional[int] = None,
                           document_db_event_source_config: Optional[_lambda_.EventSourceMappingDocumentDbEventSourceConfigArgs] = None,
                           enabled: Optional[bool] = None,
                           event_source_arn: Optional[str] = None,
                           filter_criteria: Optional[_lambda_.EventSourceMappingFilterCriteriaArgs] = None,
                           batch_size: Optional[int] = None,
                           parallelization_factor: Optional[int] = None,
                           amazon_managed_kafka_event_source_config: Optional[_lambda_.EventSourceMappingAmazonManagedKafkaEventSourceConfigArgs] = None,
                           tumbling_window_in_seconds: Optional[int] = None,
                           bisect_batch_on_function_error: Optional[bool] = None,
                           function_response_types: Optional[Sequence[str]] = None,
                           queues: Optional[str] = None,
                           scaling_config: Optional[_lambda_.EventSourceMappingScalingConfigArgs] = None,
                           self_managed_event_source: Optional[_lambda_.EventSourceMappingSelfManagedEventSourceArgs] = None,
                           self_managed_kafka_event_source_config: Optional[_lambda_.EventSourceMappingSelfManagedKafkaEventSourceConfigArgs] = None,
                           source_access_configurations: Optional[Sequence[_lambda_.EventSourceMappingSourceAccessConfigurationArgs]] = None,
                           starting_position: Optional[str] = None,
                           starting_position_timestamp: Optional[str] = None,
                           topics: Optional[Sequence[str]] = None,
                           maximum_record_age_in_seconds: Optional[int] = None)
    func NewEventSourceMapping(ctx *Context, name string, args EventSourceMappingArgs, opts ...ResourceOption) (*EventSourceMapping, error)
    public EventSourceMapping(string name, EventSourceMappingArgs args, CustomResourceOptions? opts = null)
    public EventSourceMapping(String name, EventSourceMappingArgs args)
    public EventSourceMapping(String name, EventSourceMappingArgs args, CustomResourceOptions options)
    
    type: aws:lambda:EventSourceMapping
    properties: # The arguments to resource properties.
    options: # Bag of options to control resource's behavior.
    
    

    Parameters

    name string
    The unique name of the resource.
    args EventSourceMappingArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    resource_name str
    The unique name of the resource.
    args EventSourceMappingArgs
    The arguments to resource properties.
    opts ResourceOptions
    Bag of options to control resource's behavior.
    ctx Context
    Context object for the current deployment.
    name string
    The unique name of the resource.
    args EventSourceMappingArgs
    The arguments to resource properties.
    opts ResourceOption
    Bag of options to control resource's behavior.
    name string
    The unique name of the resource.
    args EventSourceMappingArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    name String
    The unique name of the resource.
    args EventSourceMappingArgs
    The arguments to resource properties.
    options CustomResourceOptions
    Bag of options to control resource's behavior.

    Example

    The following reference example uses placeholder values for all input properties.

    var eventSourceMappingResource = new Aws.Lambda.EventSourceMapping("eventSourceMappingResource", new()
    {
        FunctionName = "string",
        MaximumBatchingWindowInSeconds = 0,
        DestinationConfig = new Aws.Lambda.Inputs.EventSourceMappingDestinationConfigArgs
        {
            OnFailure = new Aws.Lambda.Inputs.EventSourceMappingDestinationConfigOnFailureArgs
            {
                DestinationArn = "string",
            },
        },
        MaximumRetryAttempts = 0,
        DocumentDbEventSourceConfig = new Aws.Lambda.Inputs.EventSourceMappingDocumentDbEventSourceConfigArgs
        {
            DatabaseName = "string",
            CollectionName = "string",
            FullDocument = "string",
        },
        Enabled = false,
        EventSourceArn = "string",
        FilterCriteria = new Aws.Lambda.Inputs.EventSourceMappingFilterCriteriaArgs
        {
            Filters = new[]
            {
                new Aws.Lambda.Inputs.EventSourceMappingFilterCriteriaFilterArgs
                {
                    Pattern = "string",
                },
            },
        },
        BatchSize = 0,
        ParallelizationFactor = 0,
        AmazonManagedKafkaEventSourceConfig = new Aws.Lambda.Inputs.EventSourceMappingAmazonManagedKafkaEventSourceConfigArgs
        {
            ConsumerGroupId = "string",
        },
        TumblingWindowInSeconds = 0,
        BisectBatchOnFunctionError = false,
        FunctionResponseTypes = new[]
        {
            "string",
        },
        Queues = "string",
        ScalingConfig = new Aws.Lambda.Inputs.EventSourceMappingScalingConfigArgs
        {
            MaximumConcurrency = 0,
        },
        SelfManagedEventSource = new Aws.Lambda.Inputs.EventSourceMappingSelfManagedEventSourceArgs
        {
            Endpoints = 
            {
                { "string", "string" },
            },
        },
        SelfManagedKafkaEventSourceConfig = new Aws.Lambda.Inputs.EventSourceMappingSelfManagedKafkaEventSourceConfigArgs
        {
            ConsumerGroupId = "string",
        },
        SourceAccessConfigurations = new[]
        {
            new Aws.Lambda.Inputs.EventSourceMappingSourceAccessConfigurationArgs
            {
                Type = "string",
                Uri = "string",
            },
        },
        StartingPosition = "string",
        StartingPositionTimestamp = "string",
        Topics = new[]
        {
            "string",
        },
        MaximumRecordAgeInSeconds = 0,
    });
    
    example, err := lambda.NewEventSourceMapping(ctx, "eventSourceMappingResource", &lambda.EventSourceMappingArgs{
    	FunctionName:                   pulumi.String("string"),
    	MaximumBatchingWindowInSeconds: pulumi.Int(0),
    	DestinationConfig: &lambda.EventSourceMappingDestinationConfigArgs{
    		OnFailure: &lambda.EventSourceMappingDestinationConfigOnFailureArgs{
    			DestinationArn: pulumi.String("string"),
    		},
    	},
    	MaximumRetryAttempts: pulumi.Int(0),
    	DocumentDbEventSourceConfig: &lambda.EventSourceMappingDocumentDbEventSourceConfigArgs{
    		DatabaseName:   pulumi.String("string"),
    		CollectionName: pulumi.String("string"),
    		FullDocument:   pulumi.String("string"),
    	},
    	Enabled:        pulumi.Bool(false),
    	EventSourceArn: pulumi.String("string"),
    	FilterCriteria: &lambda.EventSourceMappingFilterCriteriaArgs{
    		Filters: lambda.EventSourceMappingFilterCriteriaFilterArray{
    			&lambda.EventSourceMappingFilterCriteriaFilterArgs{
    				Pattern: pulumi.String("string"),
    			},
    		},
    	},
    	BatchSize:             pulumi.Int(0),
    	ParallelizationFactor: pulumi.Int(0),
    	AmazonManagedKafkaEventSourceConfig: &lambda.EventSourceMappingAmazonManagedKafkaEventSourceConfigArgs{
    		ConsumerGroupId: pulumi.String("string"),
    	},
    	TumblingWindowInSeconds:    pulumi.Int(0),
    	BisectBatchOnFunctionError: pulumi.Bool(false),
    	FunctionResponseTypes: pulumi.StringArray{
    		pulumi.String("string"),
    	},
    	Queues: pulumi.String("string"),
    	ScalingConfig: &lambda.EventSourceMappingScalingConfigArgs{
    		MaximumConcurrency: pulumi.Int(0),
    	},
    	SelfManagedEventSource: &lambda.EventSourceMappingSelfManagedEventSourceArgs{
    		Endpoints: pulumi.StringMap{
    			"string": pulumi.String("string"),
    		},
    	},
    	SelfManagedKafkaEventSourceConfig: &lambda.EventSourceMappingSelfManagedKafkaEventSourceConfigArgs{
    		ConsumerGroupId: pulumi.String("string"),
    	},
    	SourceAccessConfigurations: lambda.EventSourceMappingSourceAccessConfigurationArray{
    		&lambda.EventSourceMappingSourceAccessConfigurationArgs{
    			Type: pulumi.String("string"),
    			Uri:  pulumi.String("string"),
    		},
    	},
    	StartingPosition:          pulumi.String("string"),
    	StartingPositionTimestamp: pulumi.String("string"),
    	Topics: pulumi.StringArray{
    		pulumi.String("string"),
    	},
    	MaximumRecordAgeInSeconds: pulumi.Int(0),
    })
    
    var eventSourceMappingResource = new EventSourceMapping("eventSourceMappingResource", EventSourceMappingArgs.builder()
        .functionName("string")
        .maximumBatchingWindowInSeconds(0)
        .destinationConfig(EventSourceMappingDestinationConfigArgs.builder()
            .onFailure(EventSourceMappingDestinationConfigOnFailureArgs.builder()
                .destinationArn("string")
                .build())
            .build())
        .maximumRetryAttempts(0)
        .documentDbEventSourceConfig(EventSourceMappingDocumentDbEventSourceConfigArgs.builder()
            .databaseName("string")
            .collectionName("string")
            .fullDocument("string")
            .build())
        .enabled(false)
        .eventSourceArn("string")
        .filterCriteria(EventSourceMappingFilterCriteriaArgs.builder()
            .filters(EventSourceMappingFilterCriteriaFilterArgs.builder()
                .pattern("string")
                .build())
            .build())
        .batchSize(0)
        .parallelizationFactor(0)
        .amazonManagedKafkaEventSourceConfig(EventSourceMappingAmazonManagedKafkaEventSourceConfigArgs.builder()
            .consumerGroupId("string")
            .build())
        .tumblingWindowInSeconds(0)
        .bisectBatchOnFunctionError(false)
        .functionResponseTypes("string")
        .queues("string")
        .scalingConfig(EventSourceMappingScalingConfigArgs.builder()
            .maximumConcurrency(0)
            .build())
        .selfManagedEventSource(EventSourceMappingSelfManagedEventSourceArgs.builder()
            .endpoints(Map.of("string", "string"))
            .build())
        .selfManagedKafkaEventSourceConfig(EventSourceMappingSelfManagedKafkaEventSourceConfigArgs.builder()
            .consumerGroupId("string")
            .build())
        .sourceAccessConfigurations(EventSourceMappingSourceAccessConfigurationArgs.builder()
            .type("string")
            .uri("string")
            .build())
        .startingPosition("string")
        .startingPositionTimestamp("string")
        .topics("string")
        .maximumRecordAgeInSeconds(0)
        .build());
    
    event_source_mapping_resource = aws.lambda_.EventSourceMapping("eventSourceMappingResource",
        function_name="string",
        maximum_batching_window_in_seconds=0,
        destination_config=aws.lambda_.EventSourceMappingDestinationConfigArgs(
            on_failure=aws.lambda_.EventSourceMappingDestinationConfigOnFailureArgs(
                destination_arn="string",
            ),
        ),
        maximum_retry_attempts=0,
        document_db_event_source_config=aws.lambda_.EventSourceMappingDocumentDbEventSourceConfigArgs(
            database_name="string",
            collection_name="string",
            full_document="string",
        ),
        enabled=False,
        event_source_arn="string",
        filter_criteria=aws.lambda_.EventSourceMappingFilterCriteriaArgs(
            filters=[aws.lambda_.EventSourceMappingFilterCriteriaFilterArgs(
                pattern="string",
            )],
        ),
        batch_size=0,
        parallelization_factor=0,
        amazon_managed_kafka_event_source_config=aws.lambda_.EventSourceMappingAmazonManagedKafkaEventSourceConfigArgs(
            consumer_group_id="string",
        ),
        tumbling_window_in_seconds=0,
        bisect_batch_on_function_error=False,
        function_response_types=["string"],
        queues="string",
        scaling_config=aws.lambda_.EventSourceMappingScalingConfigArgs(
            maximum_concurrency=0,
        ),
        self_managed_event_source=aws.lambda_.EventSourceMappingSelfManagedEventSourceArgs(
            endpoints={
                "string": "string",
            },
        ),
        self_managed_kafka_event_source_config=aws.lambda_.EventSourceMappingSelfManagedKafkaEventSourceConfigArgs(
            consumer_group_id="string",
        ),
        source_access_configurations=[aws.lambda_.EventSourceMappingSourceAccessConfigurationArgs(
            type="string",
            uri="string",
        )],
        starting_position="string",
        starting_position_timestamp="string",
        topics=["string"],
        maximum_record_age_in_seconds=0)
    
    const eventSourceMappingResource = new aws.lambda.EventSourceMapping("eventSourceMappingResource", {
        functionName: "string",
        maximumBatchingWindowInSeconds: 0,
        destinationConfig: {
            onFailure: {
                destinationArn: "string",
            },
        },
        maximumRetryAttempts: 0,
        documentDbEventSourceConfig: {
            databaseName: "string",
            collectionName: "string",
            fullDocument: "string",
        },
        enabled: false,
        eventSourceArn: "string",
        filterCriteria: {
            filters: [{
                pattern: "string",
            }],
        },
        batchSize: 0,
        parallelizationFactor: 0,
        amazonManagedKafkaEventSourceConfig: {
            consumerGroupId: "string",
        },
        tumblingWindowInSeconds: 0,
        bisectBatchOnFunctionError: false,
        functionResponseTypes: ["string"],
        queues: "string",
        scalingConfig: {
            maximumConcurrency: 0,
        },
        selfManagedEventSource: {
            endpoints: {
                string: "string",
            },
        },
        selfManagedKafkaEventSourceConfig: {
            consumerGroupId: "string",
        },
        sourceAccessConfigurations: [{
            type: "string",
            uri: "string",
        }],
        startingPosition: "string",
        startingPositionTimestamp: "string",
        topics: ["string"],
        maximumRecordAgeInSeconds: 0,
    });
    
    type: aws:lambda:EventSourceMapping
    properties:
        amazonManagedKafkaEventSourceConfig:
            consumerGroupId: string
        batchSize: 0
        bisectBatchOnFunctionError: false
        destinationConfig:
            onFailure:
                destinationArn: string
        documentDbEventSourceConfig:
            collectionName: string
            databaseName: string
            fullDocument: string
        enabled: false
        eventSourceArn: string
        filterCriteria:
            filters:
                - pattern: string
        functionName: string
        functionResponseTypes:
            - string
        maximumBatchingWindowInSeconds: 0
        maximumRecordAgeInSeconds: 0
        maximumRetryAttempts: 0
        parallelizationFactor: 0
        queues: string
        scalingConfig:
            maximumConcurrency: 0
        selfManagedEventSource:
            endpoints:
                string: string
        selfManagedKafkaEventSourceConfig:
            consumerGroupId: string
        sourceAccessConfigurations:
            - type: string
              uri: string
        startingPosition: string
        startingPositionTimestamp: string
        topics:
            - string
        tumblingWindowInSeconds: 0
    

    EventSourceMapping Resource Properties

    To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

    Inputs

    The EventSourceMapping resource accepts the following input properties:

    FunctionName string
    The name or the ARN of the Lambda function that will be subscribing to events.
    AmazonManagedKafkaEventSourceConfig EventSourceMappingAmazonManagedKafkaEventSourceConfig
    Additional configuration block for Amazon Managed Kafka sources. Incompatible with "self_managed_event_source" and "self_managed_kafka_event_source_config". Detailed below.
    BatchSize int
    The largest number of records that Lambda will retrieve from your event source at the time of invocation. Defaults to 100 for DynamoDB, Kinesis, MQ and MSK, 10 for SQS.
    BisectBatchOnFunctionError bool
    • (Optional) If the function returns an error, split the batch in two and retry. Only available for stream sources (DynamoDB and Kinesis). Defaults to false.
    DestinationConfig EventSourceMappingDestinationConfig
    • (Optional) An Amazon SQS queue, Amazon SNS topic or Amazon S3 bucket (only available for Kafka sources) destination for failed records. Only available for stream sources (DynamoDB and Kinesis) and Kafka sources (Amazon MSK and Self-managed Apache Kafka). Detailed below.
    DocumentDbEventSourceConfig EventSourceMappingDocumentDbEventSourceConfig
    • (Optional) Configuration settings for a DocumentDB event source. Detailed below.
    Enabled bool
    Determines if the mapping will be enabled on creation. Defaults to true.
    EventSourceArn string
    The event source ARN - this is required for Kinesis stream, DynamoDB stream, SQS queue, MQ broker, MSK cluster or DocumentDB change stream. It is incompatible with a Self Managed Kafka source.
    FilterCriteria EventSourceMappingFilterCriteria
    The criteria to use for event filtering of Kinesis stream, DynamoDB stream, and SQS queue event sources. Detailed below.
    FunctionResponseTypes List<string>
    A list of current response type enums applied to the event source mapping for AWS Lambda checkpointing. Only available for SQS and stream sources (DynamoDB and Kinesis). Valid values: ReportBatchItemFailures.
    MaximumBatchingWindowInSeconds int
    The maximum amount of time to gather records before invoking the function, in seconds (between 0 and 300). Records will continue to buffer (or accumulate in the case of an SQS queue event source) until either maximum_batching_window_in_seconds expires or batch_size has been met. For streaming event sources, defaults to as soon as records are available in the stream. If the batch it reads from the stream/queue only has one record in it, Lambda only sends one record to the function. Only available for stream sources (DynamoDB and Kinesis) and SQS standard queues.
    MaximumRecordAgeInSeconds int
    • (Optional) The maximum age of a record that Lambda sends to a function for processing. Only available for stream sources (DynamoDB and Kinesis). Must be either -1 (forever, and the default value) or between 60 and 604800 (inclusive).
    MaximumRetryAttempts int
    • (Optional) The maximum number of times to retry when the function returns an error. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of -1 (forever), maximum of 10000.
    ParallelizationFactor int
    • (Optional) The number of batches to process from each shard concurrently. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of 1, maximum of 10.
    Queues string
    The name of the Amazon MQ broker destination queue to consume. Only available for MQ sources. Exactly one queue name must be specified.
    ScalingConfig EventSourceMappingScalingConfig
    Scaling configuration of the event source. Only available for SQS queues. Detailed below.
    SelfManagedEventSource EventSourceMappingSelfManagedEventSource
    • (Optional) For Self Managed Kafka sources, the location of the self managed cluster. If set, configuration must also include source_access_configuration. Detailed below.
    SelfManagedKafkaEventSourceConfig EventSourceMappingSelfManagedKafkaEventSourceConfig
    Additional configuration block for Self Managed Kafka sources. Incompatible with "event_source_arn" and "amazon_managed_kafka_event_source_config". Detailed below.
    SourceAccessConfigurations List<EventSourceMappingSourceAccessConfiguration>
    For Self Managed Kafka sources, the access configuration for the source. If set, configuration must also include self_managed_event_source. Detailed below.
    StartingPosition string
    The position in the stream where AWS Lambda should start reading. Must be one of AT_TIMESTAMP (Kinesis only), LATEST or TRIM_HORIZON if getting events from Kinesis, DynamoDB, MSK or Self Managed Apache Kafka. Must not be provided if getting events from SQS. More information about these positions can be found in the AWS DynamoDB Streams API Reference and AWS Kinesis API Reference.
    StartingPositionTimestamp string
    A timestamp in RFC3339 format of the data record from which to start reading when using starting_position set to AT_TIMESTAMP. If a record with this exact timestamp does not exist, the next later record is chosen. If the timestamp is older than the current trim horizon, the oldest available record is chosen.
    Topics List<string>
    The name of the Kafka topics. Only available for MSK sources. A single topic name must be specified.
    TumblingWindowInSeconds int
    The duration in seconds of a processing window for AWS Lambda streaming analytics. The range is between 1 second up to 900 seconds. Only available for stream sources (DynamoDB and Kinesis).
    FunctionName string
    The name or the ARN of the Lambda function that will be subscribing to events.
    AmazonManagedKafkaEventSourceConfig EventSourceMappingAmazonManagedKafkaEventSourceConfigArgs
    Additional configuration block for Amazon Managed Kafka sources. Incompatible with "self_managed_event_source" and "self_managed_kafka_event_source_config". Detailed below.
    BatchSize int
    The largest number of records that Lambda will retrieve from your event source at the time of invocation. Defaults to 100 for DynamoDB, Kinesis, MQ and MSK, 10 for SQS.
    BisectBatchOnFunctionError bool
    • (Optional) If the function returns an error, split the batch in two and retry. Only available for stream sources (DynamoDB and Kinesis). Defaults to false.
    DestinationConfig EventSourceMappingDestinationConfigArgs
    • (Optional) An Amazon SQS queue, Amazon SNS topic or Amazon S3 bucket (only available for Kafka sources) destination for failed records. Only available for stream sources (DynamoDB and Kinesis) and Kafka sources (Amazon MSK and Self-managed Apache Kafka). Detailed below.
    DocumentDbEventSourceConfig EventSourceMappingDocumentDbEventSourceConfigArgs
    • (Optional) Configuration settings for a DocumentDB event source. Detailed below.
    Enabled bool
    Determines if the mapping will be enabled on creation. Defaults to true.
    EventSourceArn string
    The event source ARN - this is required for Kinesis stream, DynamoDB stream, SQS queue, MQ broker, MSK cluster or DocumentDB change stream. It is incompatible with a Self Managed Kafka source.
    FilterCriteria EventSourceMappingFilterCriteriaArgs
    The criteria to use for event filtering of Kinesis stream, DynamoDB stream, and SQS queue event sources. Detailed below.
    FunctionResponseTypes []string
    A list of current response type enums applied to the event source mapping for AWS Lambda checkpointing. Only available for SQS and stream sources (DynamoDB and Kinesis). Valid values: ReportBatchItemFailures.
    MaximumBatchingWindowInSeconds int
    The maximum amount of time to gather records before invoking the function, in seconds (between 0 and 300). Records will continue to buffer (or accumulate in the case of an SQS queue event source) until either maximum_batching_window_in_seconds expires or batch_size has been met. For streaming event sources, defaults to as soon as records are available in the stream. If the batch it reads from the stream/queue only has one record in it, Lambda only sends one record to the function. Only available for stream sources (DynamoDB and Kinesis) and SQS standard queues.
    MaximumRecordAgeInSeconds int
    • (Optional) The maximum age of a record that Lambda sends to a function for processing. Only available for stream sources (DynamoDB and Kinesis). Must be either -1 (forever, and the default value) or between 60 and 604800 (inclusive).
    MaximumRetryAttempts int
    • (Optional) The maximum number of times to retry when the function returns an error. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of -1 (forever), maximum of 10000.
    ParallelizationFactor int
    • (Optional) The number of batches to process from each shard concurrently. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of 1, maximum of 10.
    Queues string
    The name of the Amazon MQ broker destination queue to consume. Only available for MQ sources. Exactly one queue name must be specified.
    ScalingConfig EventSourceMappingScalingConfigArgs
    Scaling configuration of the event source. Only available for SQS queues. Detailed below.
    SelfManagedEventSource EventSourceMappingSelfManagedEventSourceArgs
    • (Optional) For Self Managed Kafka sources, the location of the self managed cluster. If set, configuration must also include source_access_configuration. Detailed below.
    SelfManagedKafkaEventSourceConfig EventSourceMappingSelfManagedKafkaEventSourceConfigArgs
    Additional configuration block for Self Managed Kafka sources. Incompatible with "event_source_arn" and "amazon_managed_kafka_event_source_config". Detailed below.
    SourceAccessConfigurations []EventSourceMappingSourceAccessConfigurationArgs
    For Self Managed Kafka sources, the access configuration for the source. If set, configuration must also include self_managed_event_source. Detailed below.
    StartingPosition string
    The position in the stream where AWS Lambda should start reading. Must be one of AT_TIMESTAMP (Kinesis only), LATEST or TRIM_HORIZON if getting events from Kinesis, DynamoDB, MSK or Self Managed Apache Kafka. Must not be provided if getting events from SQS. More information about these positions can be found in the AWS DynamoDB Streams API Reference and AWS Kinesis API Reference.
    StartingPositionTimestamp string
    A timestamp in RFC3339 format of the data record from which to start reading when using starting_position set to AT_TIMESTAMP. If a record with this exact timestamp does not exist, the next later record is chosen. If the timestamp is older than the current trim horizon, the oldest available record is chosen.
    Topics []string
    The names of the Kafka topics. Only available for MSK sources. A single topic name must be specified.
    TumblingWindowInSeconds int
    The duration in seconds of a processing window for AWS Lambda streaming analytics. The range is between 1 second up to 900 seconds. Only available for stream sources (DynamoDB and Kinesis).
    functionName String
    The name or the ARN of the Lambda function that will be subscribing to events.
    amazonManagedKafkaEventSourceConfig EventSourceMappingAmazonManagedKafkaEventSourceConfig
    Additional configuration block for Amazon Managed Kafka sources. Incompatible with "self_managed_event_source" and "self_managed_kafka_event_source_config". Detailed below.
    batchSize Integer
    The largest number of records that Lambda will retrieve from your event source at the time of invocation. Defaults to 100 for DynamoDB, Kinesis, MQ and MSK, 10 for SQS.
    bisectBatchOnFunctionError Boolean
    • (Optional) If the function returns an error, split the batch in two and retry. Only available for stream sources (DynamoDB and Kinesis). Defaults to false.
    destinationConfig EventSourceMappingDestinationConfig
    • (Optional) An Amazon SQS queue, Amazon SNS topic or Amazon S3 bucket (only available for Kafka sources) destination for failed records. Only available for stream sources (DynamoDB and Kinesis) and Kafka sources (Amazon MSK and Self-managed Apache Kafka). Detailed below.
    documentDbEventSourceConfig EventSourceMappingDocumentDbEventSourceConfig
    • (Optional) Configuration settings for a DocumentDB event source. Detailed below.
    enabled Boolean
    Determines if the mapping will be enabled on creation. Defaults to true.
    eventSourceArn String
    The event source ARN - this is required for Kinesis stream, DynamoDB stream, SQS queue, MQ broker, MSK cluster or DocumentDB change stream. It is incompatible with a Self Managed Kafka source.
    filterCriteria EventSourceMappingFilterCriteria
    The criteria to use for event filtering of Kinesis stream, DynamoDB stream, and SQS queue event sources. Detailed below.
    functionResponseTypes List<String>
    A list of current response type enums applied to the event source mapping for AWS Lambda checkpointing. Only available for SQS and stream sources (DynamoDB and Kinesis). Valid values: ReportBatchItemFailures.
    maximumBatchingWindowInSeconds Integer
    The maximum amount of time to gather records before invoking the function, in seconds (between 0 and 300). Records will continue to buffer (or accumulate in the case of an SQS queue event source) until either maximum_batching_window_in_seconds expires or batch_size has been met. For streaming event sources, defaults to as soon as records are available in the stream. If the batch it reads from the stream/queue only has one record in it, Lambda only sends one record to the function. Only available for stream sources (DynamoDB and Kinesis) and SQS standard queues.
    maximumRecordAgeInSeconds Integer
    • (Optional) The maximum age of a record that Lambda sends to a function for processing. Only available for stream sources (DynamoDB and Kinesis). Must be either -1 (forever, and the default value) or between 60 and 604800 (inclusive).
    maximumRetryAttempts Integer
    • (Optional) The maximum number of times to retry when the function returns an error. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of -1 (forever), maximum of 10000.
    parallelizationFactor Integer
    • (Optional) The number of batches to process from each shard concurrently. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of 1, maximum of 10.
    queues String
    The name of the Amazon MQ broker destination queue to consume. Only available for MQ sources. The list must contain exactly one queue name.
    scalingConfig EventSourceMappingScalingConfig
    Scaling configuration of the event source. Only available for SQS queues. Detailed below.
    selfManagedEventSource EventSourceMappingSelfManagedEventSource
    • (Optional) For Self Managed Kafka sources, the location of the self managed cluster. If set, configuration must also include source_access_configuration. Detailed below.
    selfManagedKafkaEventSourceConfig EventSourceMappingSelfManagedKafkaEventSourceConfig
    Additional configuration block for Self Managed Kafka sources. Incompatible with "event_source_arn" and "amazon_managed_kafka_event_source_config". Detailed below.
    sourceAccessConfigurations List<EventSourceMappingSourceAccessConfiguration>
    For Self Managed Kafka sources, the access configuration for the source. If set, configuration must also include self_managed_event_source. Detailed below.
    startingPosition String
    The position in the stream where AWS Lambda should start reading. Must be one of AT_TIMESTAMP (Kinesis only), LATEST or TRIM_HORIZON if getting events from Kinesis, DynamoDB, MSK or Self Managed Apache Kafka. Must not be provided if getting events from SQS. More information about these positions can be found in the AWS DynamoDB Streams API Reference and AWS Kinesis API Reference.
    startingPositionTimestamp String
    A timestamp in RFC3339 format of the data record from which to start reading when using starting_position set to AT_TIMESTAMP. If a record with this exact timestamp does not exist, the next later record is chosen. If the timestamp is older than the current trim horizon, the oldest available record is chosen.
    topics List<String>
    The names of the Kafka topics. Only available for MSK sources. A single topic name must be specified.
    tumblingWindowInSeconds Integer
    The duration in seconds of a processing window for AWS Lambda streaming analytics. The range is between 1 second up to 900 seconds. Only available for stream sources (DynamoDB and Kinesis).
    functionName string
    The name or the ARN of the Lambda function that will be subscribing to events.
    amazonManagedKafkaEventSourceConfig EventSourceMappingAmazonManagedKafkaEventSourceConfig
    Additional configuration block for Amazon Managed Kafka sources. Incompatible with "self_managed_event_source" and "self_managed_kafka_event_source_config". Detailed below.
    batchSize number
    The largest number of records that Lambda will retrieve from your event source at the time of invocation. Defaults to 100 for DynamoDB, Kinesis, MQ and MSK, 10 for SQS.
    bisectBatchOnFunctionError boolean
    • (Optional) If the function returns an error, split the batch in two and retry. Only available for stream sources (DynamoDB and Kinesis). Defaults to false.
    destinationConfig EventSourceMappingDestinationConfig
    • (Optional) An Amazon SQS queue, Amazon SNS topic or Amazon S3 bucket (only available for Kafka sources) destination for failed records. Only available for stream sources (DynamoDB and Kinesis) and Kafka sources (Amazon MSK and Self-managed Apache Kafka). Detailed below.
    documentDbEventSourceConfig EventSourceMappingDocumentDbEventSourceConfig
    • (Optional) Configuration settings for a DocumentDB event source. Detailed below.
    enabled boolean
    Determines if the mapping will be enabled on creation. Defaults to true.
    eventSourceArn string
    The event source ARN - this is required for Kinesis stream, DynamoDB stream, SQS queue, MQ broker, MSK cluster or DocumentDB change stream. It is incompatible with a Self Managed Kafka source.
    filterCriteria EventSourceMappingFilterCriteria
    The criteria to use for event filtering of Kinesis stream, DynamoDB stream, and SQS queue event sources. Detailed below.
    functionResponseTypes string[]
    A list of current response type enums applied to the event source mapping for AWS Lambda checkpointing. Only available for SQS and stream sources (DynamoDB and Kinesis). Valid values: ReportBatchItemFailures.
    maximumBatchingWindowInSeconds number
    The maximum amount of time to gather records before invoking the function, in seconds (between 0 and 300). Records will continue to buffer (or accumulate in the case of an SQS queue event source) until either maximum_batching_window_in_seconds expires or batch_size has been met. For streaming event sources, defaults to as soon as records are available in the stream. If the batch it reads from the stream/queue only has one record in it, Lambda only sends one record to the function. Only available for stream sources (DynamoDB and Kinesis) and SQS standard queues.
    maximumRecordAgeInSeconds number
    • (Optional) The maximum age of a record that Lambda sends to a function for processing. Only available for stream sources (DynamoDB and Kinesis). Must be either -1 (forever, and the default value) or between 60 and 604800 (inclusive).
    maximumRetryAttempts number
    • (Optional) The maximum number of times to retry when the function returns an error. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of -1 (forever), maximum of 10000.
    parallelizationFactor number
    • (Optional) The number of batches to process from each shard concurrently. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of 1, maximum of 10.
    queues string
    The name of the Amazon MQ broker destination queue to consume. Only available for MQ sources. The list must contain exactly one queue name.
    scalingConfig EventSourceMappingScalingConfig
    Scaling configuration of the event source. Only available for SQS queues. Detailed below.
    selfManagedEventSource EventSourceMappingSelfManagedEventSource
    • (Optional) For Self Managed Kafka sources, the location of the self managed cluster. If set, configuration must also include source_access_configuration. Detailed below.
    selfManagedKafkaEventSourceConfig EventSourceMappingSelfManagedKafkaEventSourceConfig
    Additional configuration block for Self Managed Kafka sources. Incompatible with "event_source_arn" and "amazon_managed_kafka_event_source_config". Detailed below.
    sourceAccessConfigurations EventSourceMappingSourceAccessConfiguration[]
    For Self Managed Kafka sources, the access configuration for the source. If set, configuration must also include self_managed_event_source. Detailed below.
    startingPosition string
    The position in the stream where AWS Lambda should start reading. Must be one of AT_TIMESTAMP (Kinesis only), LATEST or TRIM_HORIZON if getting events from Kinesis, DynamoDB, MSK or Self Managed Apache Kafka. Must not be provided if getting events from SQS. More information about these positions can be found in the AWS DynamoDB Streams API Reference and AWS Kinesis API Reference.
    startingPositionTimestamp string
    A timestamp in RFC3339 format of the data record from which to start reading when using starting_position set to AT_TIMESTAMP. If a record with this exact timestamp does not exist, the next later record is chosen. If the timestamp is older than the current trim horizon, the oldest available record is chosen.
    topics string[]
    The names of the Kafka topics. Only available for MSK sources. A single topic name must be specified.
    tumblingWindowInSeconds number
    The duration in seconds of a processing window for AWS Lambda streaming analytics. The range is between 1 second up to 900 seconds. Only available for stream sources (DynamoDB and Kinesis).
    function_name str
    The name or the ARN of the Lambda function that will be subscribing to events.
    amazon_managed_kafka_event_source_config lambda_.EventSourceMappingAmazonManagedKafkaEventSourceConfigArgs
    Additional configuration block for Amazon Managed Kafka sources. Incompatible with "self_managed_event_source" and "self_managed_kafka_event_source_config". Detailed below.
    batch_size int
    The largest number of records that Lambda will retrieve from your event source at the time of invocation. Defaults to 100 for DynamoDB, Kinesis, MQ and MSK, 10 for SQS.
    bisect_batch_on_function_error bool
    • (Optional) If the function returns an error, split the batch in two and retry. Only available for stream sources (DynamoDB and Kinesis). Defaults to false.
    destination_config lambda_.EventSourceMappingDestinationConfigArgs
    • (Optional) An Amazon SQS queue, Amazon SNS topic or Amazon S3 bucket (only available for Kafka sources) destination for failed records. Only available for stream sources (DynamoDB and Kinesis) and Kafka sources (Amazon MSK and Self-managed Apache Kafka). Detailed below.
    document_db_event_source_config lambda_.EventSourceMappingDocumentDbEventSourceConfigArgs
    • (Optional) Configuration settings for a DocumentDB event source. Detailed below.
    enabled bool
    Determines if the mapping will be enabled on creation. Defaults to true.
    event_source_arn str
    The event source ARN - this is required for Kinesis stream, DynamoDB stream, SQS queue, MQ broker, MSK cluster or DocumentDB change stream. It is incompatible with a Self Managed Kafka source.
    filter_criteria lambda_.EventSourceMappingFilterCriteriaArgs
    The criteria to use for event filtering of Kinesis stream, DynamoDB stream, and SQS queue event sources. Detailed below.
    function_response_types Sequence[str]
    A list of current response type enums applied to the event source mapping for AWS Lambda checkpointing. Only available for SQS and stream sources (DynamoDB and Kinesis). Valid values: ReportBatchItemFailures.
    maximum_batching_window_in_seconds int
    The maximum amount of time to gather records before invoking the function, in seconds (between 0 and 300). Records will continue to buffer (or accumulate in the case of an SQS queue event source) until either maximum_batching_window_in_seconds expires or batch_size has been met. For streaming event sources, defaults to as soon as records are available in the stream. If the batch it reads from the stream/queue only has one record in it, Lambda only sends one record to the function. Only available for stream sources (DynamoDB and Kinesis) and SQS standard queues.
    maximum_record_age_in_seconds int
    • (Optional) The maximum age of a record that Lambda sends to a function for processing. Only available for stream sources (DynamoDB and Kinesis). Must be either -1 (forever, and the default value) or between 60 and 604800 (inclusive).
    maximum_retry_attempts int
    • (Optional) The maximum number of times to retry when the function returns an error. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of -1 (forever), maximum of 10000.
    parallelization_factor int
    • (Optional) The number of batches to process from each shard concurrently. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of 1, maximum of 10.
    queues str
    The name of the Amazon MQ broker destination queue to consume. Only available for MQ sources. The list must contain exactly one queue name.
    scaling_config lambda_.EventSourceMappingScalingConfigArgs
    Scaling configuration of the event source. Only available for SQS queues. Detailed below.
    self_managed_event_source lambda_.EventSourceMappingSelfManagedEventSourceArgs
    • (Optional) For Self Managed Kafka sources, the location of the self managed cluster. If set, configuration must also include source_access_configuration. Detailed below.
    self_managed_kafka_event_source_config lambda_.EventSourceMappingSelfManagedKafkaEventSourceConfigArgs
    Additional configuration block for Self Managed Kafka sources. Incompatible with "event_source_arn" and "amazon_managed_kafka_event_source_config". Detailed below.
    source_access_configurations Sequence[lambda_.EventSourceMappingSourceAccessConfigurationArgs]
    For Self Managed Kafka sources, the access configuration for the source. If set, configuration must also include self_managed_event_source. Detailed below.
    starting_position str
    The position in the stream where AWS Lambda should start reading. Must be one of AT_TIMESTAMP (Kinesis only), LATEST or TRIM_HORIZON if getting events from Kinesis, DynamoDB, MSK or Self Managed Apache Kafka. Must not be provided if getting events from SQS. More information about these positions can be found in the AWS DynamoDB Streams API Reference and AWS Kinesis API Reference.
    starting_position_timestamp str
    A timestamp in RFC3339 format of the data record from which to start reading when using starting_position set to AT_TIMESTAMP. If a record with this exact timestamp does not exist, the next later record is chosen. If the timestamp is older than the current trim horizon, the oldest available record is chosen.
    topics Sequence[str]
    The names of the Kafka topics. Only available for MSK sources. A single topic name must be specified.
    tumbling_window_in_seconds int
    The duration in seconds of a processing window for AWS Lambda streaming analytics. The range is between 1 second up to 900 seconds. Only available for stream sources (DynamoDB and Kinesis).
    functionName String
    The name or the ARN of the Lambda function that will be subscribing to events.
    amazonManagedKafkaEventSourceConfig Property Map
    Additional configuration block for Amazon Managed Kafka sources. Incompatible with "self_managed_event_source" and "self_managed_kafka_event_source_config". Detailed below.
    batchSize Number
    The largest number of records that Lambda will retrieve from your event source at the time of invocation. Defaults to 100 for DynamoDB, Kinesis, MQ and MSK, 10 for SQS.
    bisectBatchOnFunctionError Boolean
    • (Optional) If the function returns an error, split the batch in two and retry. Only available for stream sources (DynamoDB and Kinesis). Defaults to false.
    destinationConfig Property Map
    • (Optional) An Amazon SQS queue, Amazon SNS topic or Amazon S3 bucket (only available for Kafka sources) destination for failed records. Only available for stream sources (DynamoDB and Kinesis) and Kafka sources (Amazon MSK and Self-managed Apache Kafka). Detailed below.
    documentDbEventSourceConfig Property Map
    • (Optional) Configuration settings for a DocumentDB event source. Detailed below.
    enabled Boolean
    Determines if the mapping will be enabled on creation. Defaults to true.
    eventSourceArn String
    The event source ARN - this is required for Kinesis stream, DynamoDB stream, SQS queue, MQ broker, MSK cluster or DocumentDB change stream. It is incompatible with a Self Managed Kafka source.
    filterCriteria Property Map
    The criteria to use for event filtering of Kinesis stream, DynamoDB stream, and SQS queue event sources. Detailed below.
    functionResponseTypes List<String>
    A list of current response type enums applied to the event source mapping for AWS Lambda checkpointing. Only available for SQS and stream sources (DynamoDB and Kinesis). Valid values: ReportBatchItemFailures.
    maximumBatchingWindowInSeconds Number
    The maximum amount of time to gather records before invoking the function, in seconds (between 0 and 300). Records will continue to buffer (or accumulate in the case of an SQS queue event source) until either maximum_batching_window_in_seconds expires or batch_size has been met. For streaming event sources, defaults to as soon as records are available in the stream. If the batch it reads from the stream/queue only has one record in it, Lambda only sends one record to the function. Only available for stream sources (DynamoDB and Kinesis) and SQS standard queues.
    maximumRecordAgeInSeconds Number
    • (Optional) The maximum age of a record that Lambda sends to a function for processing. Only available for stream sources (DynamoDB and Kinesis). Must be either -1 (forever, and the default value) or between 60 and 604800 (inclusive).
    maximumRetryAttempts Number
    • (Optional) The maximum number of times to retry when the function returns an error. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of -1 (forever), maximum of 10000.
    parallelizationFactor Number
    • (Optional) The number of batches to process from each shard concurrently. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of 1, maximum of 10.
    queues String
    The name of the Amazon MQ broker destination queue to consume. Only available for MQ sources. The list must contain exactly one queue name.
    scalingConfig Property Map
    Scaling configuration of the event source. Only available for SQS queues. Detailed below.
    selfManagedEventSource Property Map
    • (Optional) For Self Managed Kafka sources, the location of the self managed cluster. If set, configuration must also include source_access_configuration. Detailed below.
    selfManagedKafkaEventSourceConfig Property Map
    Additional configuration block for Self Managed Kafka sources. Incompatible with "event_source_arn" and "amazon_managed_kafka_event_source_config". Detailed below.
    sourceAccessConfigurations List<Property Map>
    For Self Managed Kafka sources, the access configuration for the source. If set, configuration must also include self_managed_event_source. Detailed below.
    startingPosition String
    The position in the stream where AWS Lambda should start reading. Must be one of AT_TIMESTAMP (Kinesis only), LATEST or TRIM_HORIZON if getting events from Kinesis, DynamoDB, MSK or Self Managed Apache Kafka. Must not be provided if getting events from SQS. More information about these positions can be found in the AWS DynamoDB Streams API Reference and AWS Kinesis API Reference.
    startingPositionTimestamp String
    A timestamp in RFC3339 format of the data record from which to start reading when using starting_position set to AT_TIMESTAMP. If a record with this exact timestamp does not exist, the next later record is chosen. If the timestamp is older than the current trim horizon, the oldest available record is chosen.
    topics List<String>
    The names of the Kafka topics. Only available for MSK sources. A single topic name must be specified.
    tumblingWindowInSeconds Number
    The duration in seconds of a processing window for AWS Lambda streaming analytics. The range is between 1 second up to 900 seconds. Only available for stream sources (DynamoDB and Kinesis).

    Outputs

    All input properties are implicitly available as output properties. Additionally, the EventSourceMapping resource produces the following output properties:

    FunctionArn string
    The ARN of the Lambda function the event source mapping is sending events to. (Note: this is a computed value that differs from function_name above.)
    Id string
    The provider-assigned unique ID for this managed resource.
    LastModified string
    The date this resource was last modified.
    LastProcessingResult string
    The result of the last AWS Lambda invocation of your Lambda function.
    State string
    The state of the event source mapping.
    StateTransitionReason string
    The reason the event source mapping is in its current state.
    Uuid string
    The UUID of the created event source mapping.
    FunctionArn string
    The ARN of the Lambda function the event source mapping is sending events to. (Note: this is a computed value that differs from function_name above.)
    Id string
    The provider-assigned unique ID for this managed resource.
    LastModified string
    The date this resource was last modified.
    LastProcessingResult string
    The result of the last AWS Lambda invocation of your Lambda function.
    State string
    The state of the event source mapping.
    StateTransitionReason string
    The reason the event source mapping is in its current state.
    Uuid string
    The UUID of the created event source mapping.
    functionArn String
    The ARN of the Lambda function the event source mapping is sending events to. (Note: this is a computed value that differs from function_name above.)
    id String
    The provider-assigned unique ID for this managed resource.
    lastModified String
    The date this resource was last modified.
    lastProcessingResult String
    The result of the last AWS Lambda invocation of your Lambda function.
    state String
    The state of the event source mapping.
    stateTransitionReason String
    The reason the event source mapping is in its current state.
    uuid String
    The UUID of the created event source mapping.
    functionArn string
    The ARN of the Lambda function the event source mapping is sending events to. (Note: this is a computed value that differs from function_name above.)
    id string
    The provider-assigned unique ID for this managed resource.
    lastModified string
    The date this resource was last modified.
    lastProcessingResult string
    The result of the last AWS Lambda invocation of your Lambda function.
    state string
    The state of the event source mapping.
    stateTransitionReason string
    The reason the event source mapping is in its current state.
    uuid string
    The UUID of the created event source mapping.
    function_arn str
    The ARN of the Lambda function the event source mapping is sending events to. (Note: this is a computed value that differs from function_name above.)
    id str
    The provider-assigned unique ID for this managed resource.
    last_modified str
    The date this resource was last modified.
    last_processing_result str
    The result of the last AWS Lambda invocation of your Lambda function.
    state str
    The state of the event source mapping.
    state_transition_reason str
    The reason the event source mapping is in its current state.
    uuid str
    The UUID of the created event source mapping.
    functionArn String
    The ARN of the Lambda function the event source mapping is sending events to. (Note: this is a computed value that differs from function_name above.)
    id String
    The provider-assigned unique ID for this managed resource.
    lastModified String
    The date this resource was last modified.
    lastProcessingResult String
    The result of the last AWS Lambda invocation of your Lambda function.
    state String
    The state of the event source mapping.
    stateTransitionReason String
    The reason the event source mapping is in its current state.
    uuid String
    The UUID of the created event source mapping.

    Look up Existing EventSourceMapping Resource

    Get an existing EventSourceMapping resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

    public static get(name: string, id: Input<ID>, state?: EventSourceMappingState, opts?: CustomResourceOptions): EventSourceMapping
    @staticmethod
    def get(resource_name: str,
            id: str,
            opts: Optional[ResourceOptions] = None,
            amazon_managed_kafka_event_source_config: Optional[_lambda_.EventSourceMappingAmazonManagedKafkaEventSourceConfigArgs] = None,
            batch_size: Optional[int] = None,
            bisect_batch_on_function_error: Optional[bool] = None,
            destination_config: Optional[_lambda_.EventSourceMappingDestinationConfigArgs] = None,
            document_db_event_source_config: Optional[_lambda_.EventSourceMappingDocumentDbEventSourceConfigArgs] = None,
            enabled: Optional[bool] = None,
            event_source_arn: Optional[str] = None,
            filter_criteria: Optional[_lambda_.EventSourceMappingFilterCriteriaArgs] = None,
            function_arn: Optional[str] = None,
            function_name: Optional[str] = None,
            function_response_types: Optional[Sequence[str]] = None,
            last_modified: Optional[str] = None,
            last_processing_result: Optional[str] = None,
            maximum_batching_window_in_seconds: Optional[int] = None,
            maximum_record_age_in_seconds: Optional[int] = None,
            maximum_retry_attempts: Optional[int] = None,
            parallelization_factor: Optional[int] = None,
            queues: Optional[str] = None,
            scaling_config: Optional[_lambda_.EventSourceMappingScalingConfigArgs] = None,
            self_managed_event_source: Optional[_lambda_.EventSourceMappingSelfManagedEventSourceArgs] = None,
            self_managed_kafka_event_source_config: Optional[_lambda_.EventSourceMappingSelfManagedKafkaEventSourceConfigArgs] = None,
            source_access_configurations: Optional[Sequence[_lambda_.EventSourceMappingSourceAccessConfigurationArgs]] = None,
            starting_position: Optional[str] = None,
            starting_position_timestamp: Optional[str] = None,
            state: Optional[str] = None,
            state_transition_reason: Optional[str] = None,
            topics: Optional[Sequence[str]] = None,
            tumbling_window_in_seconds: Optional[int] = None,
            uuid: Optional[str] = None) -> EventSourceMapping
    func GetEventSourceMapping(ctx *Context, name string, id IDInput, state *EventSourceMappingState, opts ...ResourceOption) (*EventSourceMapping, error)
    public static EventSourceMapping Get(string name, Input<string> id, EventSourceMappingState? state, CustomResourceOptions? opts = null)
    public static EventSourceMapping get(String name, Output<String> id, EventSourceMappingState state, CustomResourceOptions options)
    Resource lookup is not supported in YAML
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    resource_name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    The following state arguments are supported:
    AmazonManagedKafkaEventSourceConfig EventSourceMappingAmazonManagedKafkaEventSourceConfig
    Additional configuration block for Amazon Managed Kafka sources. Incompatible with "self_managed_event_source" and "self_managed_kafka_event_source_config". Detailed below.
    BatchSize int
    The largest number of records that Lambda will retrieve from your event source at the time of invocation. Defaults to 100 for DynamoDB, Kinesis, MQ and MSK, 10 for SQS.
    BisectBatchOnFunctionError bool
    • (Optional) If the function returns an error, split the batch in two and retry. Only available for stream sources (DynamoDB and Kinesis). Defaults to false.
    DestinationConfig EventSourceMappingDestinationConfig
    • (Optional) An Amazon SQS queue, Amazon SNS topic or Amazon S3 bucket (only available for Kafka sources) destination for failed records. Only available for stream sources (DynamoDB and Kinesis) and Kafka sources (Amazon MSK and Self-managed Apache Kafka). Detailed below.
    DocumentDbEventSourceConfig EventSourceMappingDocumentDbEventSourceConfig
    • (Optional) Configuration settings for a DocumentDB event source. Detailed below.
    Enabled bool
    Determines if the mapping will be enabled on creation. Defaults to true.
    EventSourceArn string
    The event source ARN - this is required for Kinesis stream, DynamoDB stream, SQS queue, MQ broker, MSK cluster or DocumentDB change stream. It is incompatible with a Self Managed Kafka source.
    FilterCriteria EventSourceMappingFilterCriteria
    The criteria to use for event filtering of Kinesis stream, DynamoDB stream, and SQS queue event sources. Detailed below.
    FunctionArn string
    The ARN of the Lambda function the event source mapping is sending events to. (Note: this is a computed value that differs from function_name above.)
    FunctionName string
    The name or the ARN of the Lambda function that will be subscribing to events.
    FunctionResponseTypes List<string>
    A list of current response type enums applied to the event source mapping for AWS Lambda checkpointing. Only available for SQS and stream sources (DynamoDB and Kinesis). Valid values: ReportBatchItemFailures.
    LastModified string
    The date this resource was last modified.
    LastProcessingResult string
    The result of the last AWS Lambda invocation of your Lambda function.
    MaximumBatchingWindowInSeconds int
    The maximum amount of time to gather records before invoking the function, in seconds (between 0 and 300). Records will continue to buffer (or accumulate in the case of an SQS queue event source) until either maximum_batching_window_in_seconds expires or batch_size has been met. For streaming event sources, defaults to as soon as records are available in the stream. If the batch it reads from the stream/queue only has one record in it, Lambda only sends one record to the function. Only available for stream sources (DynamoDB and Kinesis) and SQS standard queues.
    MaximumRecordAgeInSeconds int
    • (Optional) The maximum age of a record that Lambda sends to a function for processing. Only available for stream sources (DynamoDB and Kinesis). Must be either -1 (forever, and the default value) or between 60 and 604800 (inclusive).
    MaximumRetryAttempts int
    • (Optional) The maximum number of times to retry when the function returns an error. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of -1 (forever), maximum of 10000.
    ParallelizationFactor int
    • (Optional) The number of batches to process from each shard concurrently. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of 1, maximum of 10.
    Queues string
    The name of the Amazon MQ broker destination queue to consume. Only available for MQ sources. The list must contain exactly one queue name.
    ScalingConfig EventSourceMappingScalingConfig
    Scaling configuration of the event source. Only available for SQS queues. Detailed below.
    SelfManagedEventSource EventSourceMappingSelfManagedEventSource
    • (Optional) For Self Managed Kafka sources, the location of the self managed cluster. If set, configuration must also include source_access_configuration. Detailed below.
    SelfManagedKafkaEventSourceConfig EventSourceMappingSelfManagedKafkaEventSourceConfig
    Additional configuration block for Self Managed Kafka sources. Incompatible with "event_source_arn" and "amazon_managed_kafka_event_source_config". Detailed below.
    SourceAccessConfigurations List<EventSourceMappingSourceAccessConfiguration>
    For Self Managed Kafka sources, the access configuration for the source. If set, configuration must also include self_managed_event_source. Detailed below.
    StartingPosition string
    The position in the stream where AWS Lambda should start reading. Must be one of AT_TIMESTAMP (Kinesis only), LATEST or TRIM_HORIZON if getting events from Kinesis, DynamoDB, MSK or Self Managed Apache Kafka. Must not be provided if getting events from SQS. More information about these positions can be found in the AWS DynamoDB Streams API Reference and AWS Kinesis API Reference.
    StartingPositionTimestamp string
    A timestamp in RFC3339 format of the data record at which to start reading when using starting_position set to AT_TIMESTAMP. If a record with this exact timestamp does not exist, the next later record is chosen. If the timestamp is older than the current trim horizon, the oldest available record is chosen.
    State string
    The state of the event source mapping.
    StateTransitionReason string
    The reason the event source mapping is in its current state.
    Topics List<string>
    The name of the Kafka topics. Only available for MSK sources. A single topic name must be specified.
    TumblingWindowInSeconds int
    The duration in seconds of a processing window for AWS Lambda streaming analytics. The range is between 1 second up to 900 seconds. Only available for stream sources (DynamoDB and Kinesis).
    Uuid string
    The UUID of the created event source mapping.
    AmazonManagedKafkaEventSourceConfig EventSourceMappingAmazonManagedKafkaEventSourceConfigArgs
    Additional configuration block for Amazon Managed Kafka sources. Incompatible with "self_managed_event_source" and "self_managed_kafka_event_source_config". Detailed below.
    BatchSize int
    The largest number of records that Lambda will retrieve from your event source at the time of invocation. Defaults to 100 for DynamoDB, Kinesis, MQ and MSK, 10 for SQS.
    BisectBatchOnFunctionError bool
    • (Optional) If the function returns an error, split the batch in two and retry. Only available for stream sources (DynamoDB and Kinesis). Defaults to false.
    DestinationConfig EventSourceMappingDestinationConfigArgs
    • (Optional) An Amazon SQS queue, Amazon SNS topic or Amazon S3 bucket (only available for Kafka sources) destination for failed records. Only available for stream sources (DynamoDB and Kinesis) and Kafka sources (Amazon MSK and Self-managed Apache Kafka). Detailed below.
    DocumentDbEventSourceConfig EventSourceMappingDocumentDbEventSourceConfigArgs
    • (Optional) Configuration settings for a DocumentDB event source. Detailed below.
    Enabled bool
    Determines if the mapping will be enabled on creation. Defaults to true.
    EventSourceArn string
    The event source ARN - this is required for Kinesis stream, DynamoDB stream, SQS queue, MQ broker, MSK cluster or DocumentDB change stream. It is incompatible with a Self Managed Kafka source.
    FilterCriteria EventSourceMappingFilterCriteriaArgs
    The criteria to use for event filtering of Kinesis stream, DynamoDB stream, and SQS queue event sources. Detailed below.
    FunctionArn string
    The ARN of the Lambda function the event source mapping is sending events to. (Note: this is a computed value that differs from function_name above.)
    FunctionName string
    The name or the ARN of the Lambda function that will be subscribing to events.
    FunctionResponseTypes []string
    A list of current response type enums applied to the event source mapping for AWS Lambda checkpointing. Only available for SQS and stream sources (DynamoDB and Kinesis). Valid values: ReportBatchItemFailures.
    LastModified string
    The date this resource was last modified.
    LastProcessingResult string
    The result of the last AWS Lambda invocation of your Lambda function.
    MaximumBatchingWindowInSeconds int
    The maximum amount of time to gather records before invoking the function, in seconds (between 0 and 300). Records will continue to buffer (or accumulate in the case of an SQS queue event source) until either maximum_batching_window_in_seconds expires or batch_size has been met. For streaming event sources, defaults to as soon as records are available in the stream. If the batch it reads from the stream/queue only has one record in it, Lambda only sends one record to the function. Only available for stream sources (DynamoDB and Kinesis) and SQS standard queues.
    MaximumRecordAgeInSeconds int
    • (Optional) The maximum age of a record that Lambda sends to a function for processing. Only available for stream sources (DynamoDB and Kinesis). Must be either -1 (forever, and the default value) or between 60 and 604800 (inclusive).
    MaximumRetryAttempts int
    • (Optional) The maximum number of times to retry when the function returns an error. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of -1 (forever), maximum of 10000.
    ParallelizationFactor int
    • (Optional) The number of batches to process from each shard concurrently. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of 1, maximum of 10.
    Queues string
    The name of the Amazon MQ broker destination queue to consume. Only available for MQ sources. The list must contain exactly one queue name.
    ScalingConfig EventSourceMappingScalingConfigArgs
    Scaling configuration of the event source. Only available for SQS queues. Detailed below.
    SelfManagedEventSource EventSourceMappingSelfManagedEventSourceArgs
    • (Optional) For Self Managed Kafka sources, the location of the self managed cluster. If set, configuration must also include source_access_configuration. Detailed below.
    SelfManagedKafkaEventSourceConfig EventSourceMappingSelfManagedKafkaEventSourceConfigArgs
    Additional configuration block for Self Managed Kafka sources. Incompatible with "event_source_arn" and "amazon_managed_kafka_event_source_config". Detailed below.
    SourceAccessConfigurations []EventSourceMappingSourceAccessConfigurationArgs
    For Self Managed Kafka sources, the access configuration for the source. If set, configuration must also include self_managed_event_source. Detailed below.
    StartingPosition string
    The position in the stream where AWS Lambda should start reading. Must be one of AT_TIMESTAMP (Kinesis only), LATEST or TRIM_HORIZON if getting events from Kinesis, DynamoDB, MSK or Self Managed Apache Kafka. Must not be provided if getting events from SQS. More information about these positions can be found in the AWS DynamoDB Streams API Reference and AWS Kinesis API Reference.
    StartingPositionTimestamp string
    A timestamp in RFC3339 format of the data record at which to start reading when using starting_position set to AT_TIMESTAMP. If a record with this exact timestamp does not exist, the next later record is chosen. If the timestamp is older than the current trim horizon, the oldest available record is chosen.
    State string
    The state of the event source mapping.
    StateTransitionReason string
    The reason the event source mapping is in its current state.
    Topics []string
    The name of the Kafka topics. Only available for MSK sources. A single topic name must be specified.
    TumblingWindowInSeconds int
    The duration in seconds of a processing window for AWS Lambda streaming analytics. The range is between 1 second up to 900 seconds. Only available for stream sources (DynamoDB and Kinesis).
    Uuid string
    The UUID of the created event source mapping.
    amazonManagedKafkaEventSourceConfig EventSourceMappingAmazonManagedKafkaEventSourceConfig
    Additional configuration block for Amazon Managed Kafka sources. Incompatible with "self_managed_event_source" and "self_managed_kafka_event_source_config". Detailed below.
    batchSize Integer
    The largest number of records that Lambda will retrieve from your event source at the time of invocation. Defaults to 100 for DynamoDB, Kinesis, MQ and MSK, 10 for SQS.
    bisectBatchOnFunctionError Boolean
    • (Optional) If the function returns an error, split the batch in two and retry. Only available for stream sources (DynamoDB and Kinesis). Defaults to false.
    destinationConfig EventSourceMappingDestinationConfig
    • (Optional) An Amazon SQS queue, Amazon SNS topic or Amazon S3 bucket (only available for Kafka sources) destination for failed records. Only available for stream sources (DynamoDB and Kinesis) and Kafka sources (Amazon MSK and Self-managed Apache Kafka). Detailed below.
    documentDbEventSourceConfig EventSourceMappingDocumentDbEventSourceConfig
    • (Optional) Configuration settings for a DocumentDB event source. Detailed below.
    enabled Boolean
    Determines if the mapping will be enabled on creation. Defaults to true.
    eventSourceArn String
    The event source ARN - this is required for Kinesis stream, DynamoDB stream, SQS queue, MQ broker, MSK cluster or DocumentDB change stream. It is incompatible with a Self Managed Kafka source.
    filterCriteria EventSourceMappingFilterCriteria
    The criteria to use for event filtering of Kinesis stream, DynamoDB stream, and SQS queue event sources. Detailed below.
    functionArn String
    The ARN of the Lambda function the event source mapping is sending events to. (Note: this is a computed value that differs from function_name above.)
    functionName String
    The name or the ARN of the Lambda function that will be subscribing to events.
    functionResponseTypes List<String>
    A list of current response type enums applied to the event source mapping for AWS Lambda checkpointing. Only available for SQS and stream sources (DynamoDB and Kinesis). Valid values: ReportBatchItemFailures.
    lastModified String
    The date this resource was last modified.
    lastProcessingResult String
    The result of the last AWS Lambda invocation of your Lambda function.
    maximumBatchingWindowInSeconds Integer
    The maximum amount of time to gather records before invoking the function, in seconds (between 0 and 300). Records will continue to buffer (or accumulate in the case of an SQS queue event source) until either maximum_batching_window_in_seconds expires or batch_size has been met. For streaming event sources, defaults to as soon as records are available in the stream. If the batch it reads from the stream/queue only has one record in it, Lambda only sends one record to the function. Only available for stream sources (DynamoDB and Kinesis) and SQS standard queues.
    maximumRecordAgeInSeconds Integer
    • (Optional) The maximum age of a record that Lambda sends to a function for processing. Only available for stream sources (DynamoDB and Kinesis). Must be either -1 (forever, and the default value) or between 60 and 604800 (inclusive).
    maximumRetryAttempts Integer
    • (Optional) The maximum number of times to retry when the function returns an error. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of -1 (forever), maximum of 10000.
    parallelizationFactor Integer
    • (Optional) The number of batches to process from each shard concurrently. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of 1, maximum of 10.
    queues String
    The name of the Amazon MQ broker destination queue to consume. Only available for MQ sources. The list must contain exactly one queue name.
    scalingConfig EventSourceMappingScalingConfig
    Scaling configuration of the event source. Only available for SQS queues. Detailed below.
    selfManagedEventSource EventSourceMappingSelfManagedEventSource
    • (Optional) For Self Managed Kafka sources, the location of the self managed cluster. If set, configuration must also include source_access_configuration. Detailed below.
    selfManagedKafkaEventSourceConfig EventSourceMappingSelfManagedKafkaEventSourceConfig
    Additional configuration block for Self Managed Kafka sources. Incompatible with "event_source_arn" and "amazon_managed_kafka_event_source_config". Detailed below.
    sourceAccessConfigurations List<EventSourceMappingSourceAccessConfiguration>
    For Self Managed Kafka sources, the access configuration for the source. If set, configuration must also include self_managed_event_source. Detailed below.
    startingPosition String
    The position in the stream where AWS Lambda should start reading. Must be one of AT_TIMESTAMP (Kinesis only), LATEST or TRIM_HORIZON if getting events from Kinesis, DynamoDB, MSK or Self Managed Apache Kafka. Must not be provided if getting events from SQS. More information about these positions can be found in the AWS DynamoDB Streams API Reference and AWS Kinesis API Reference.
    startingPositionTimestamp String
    A timestamp in RFC3339 format of the data record at which to start reading when using starting_position set to AT_TIMESTAMP. If a record with this exact timestamp does not exist, the next later record is chosen. If the timestamp is older than the current trim horizon, the oldest available record is chosen.
    state String
    The state of the event source mapping.
    stateTransitionReason String
    The reason the event source mapping is in its current state.
    topics List<String>
    The name of the Kafka topics. Only available for MSK sources. A single topic name must be specified.
    tumblingWindowInSeconds Integer
    The duration in seconds of a processing window for AWS Lambda streaming analytics. The range is between 1 second up to 900 seconds. Only available for stream sources (DynamoDB and Kinesis).
    uuid String
    The UUID of the created event source mapping.
    amazonManagedKafkaEventSourceConfig EventSourceMappingAmazonManagedKafkaEventSourceConfig
    Additional configuration block for Amazon Managed Kafka sources. Incompatible with "self_managed_event_source" and "self_managed_kafka_event_source_config". Detailed below.
    batchSize number
    The largest number of records that Lambda will retrieve from your event source at the time of invocation. Defaults to 100 for DynamoDB, Kinesis, MQ and MSK, 10 for SQS.
    bisectBatchOnFunctionError boolean
    • (Optional) If the function returns an error, split the batch in two and retry. Only available for stream sources (DynamoDB and Kinesis). Defaults to false.
    destinationConfig EventSourceMappingDestinationConfig
    • (Optional) An Amazon SQS queue, Amazon SNS topic or Amazon S3 bucket (only available for Kafka sources) destination for failed records. Only available for stream sources (DynamoDB and Kinesis) and Kafka sources (Amazon MSK and Self-managed Apache Kafka). Detailed below.
    documentDbEventSourceConfig EventSourceMappingDocumentDbEventSourceConfig
    • (Optional) Configuration settings for a DocumentDB event source. Detailed below.
    enabled boolean
    Determines if the mapping will be enabled on creation. Defaults to true.
    eventSourceArn string
    The event source ARN - this is required for Kinesis stream, DynamoDB stream, SQS queue, MQ broker, MSK cluster or DocumentDB change stream. It is incompatible with a Self Managed Kafka source.
    filterCriteria EventSourceMappingFilterCriteria
    The criteria to use for event filtering of Kinesis stream, DynamoDB stream, and SQS queue event sources. Detailed below.
    functionArn string
    The ARN of the Lambda function the event source mapping is sending events to. (Note: this is a computed value that differs from function_name above.)
    functionName string
    The name or the ARN of the Lambda function that will be subscribing to events.
    functionResponseTypes string[]
    A list of current response type enums applied to the event source mapping for AWS Lambda checkpointing. Only available for SQS and stream sources (DynamoDB and Kinesis). Valid values: ReportBatchItemFailures.
    lastModified string
    The date this resource was last modified.
    lastProcessingResult string
    The result of the last AWS Lambda invocation of your Lambda function.
    maximumBatchingWindowInSeconds number
    The maximum amount of time to gather records before invoking the function, in seconds (between 0 and 300). Records will continue to buffer (or accumulate in the case of an SQS queue event source) until either maximum_batching_window_in_seconds expires or batch_size has been met. For streaming event sources, defaults to as soon as records are available in the stream. If the batch it reads from the stream/queue only has one record in it, Lambda only sends one record to the function. Only available for stream sources (DynamoDB and Kinesis) and SQS standard queues.
    maximumRecordAgeInSeconds number
    • (Optional) The maximum age of a record that Lambda sends to a function for processing. Only available for stream sources (DynamoDB and Kinesis). Must be either -1 (forever, and the default value) or between 60 and 604800 (inclusive).
    maximumRetryAttempts number
    • (Optional) The maximum number of times to retry when the function returns an error. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of -1 (forever), maximum of 10000.
    parallelizationFactor number
    • (Optional) The number of batches to process from each shard concurrently. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of 1, maximum of 10.
    queues string
    The name of the Amazon MQ broker destination queue to consume. Only available for MQ sources. The list must contain exactly one queue name.
    scalingConfig EventSourceMappingScalingConfig
    Scaling configuration of the event source. Only available for SQS queues. Detailed below.
    selfManagedEventSource EventSourceMappingSelfManagedEventSource
    • (Optional) For Self Managed Kafka sources, the location of the self managed cluster. If set, configuration must also include source_access_configuration. Detailed below.
    selfManagedKafkaEventSourceConfig EventSourceMappingSelfManagedKafkaEventSourceConfig
    Additional configuration block for Self Managed Kafka sources. Incompatible with "event_source_arn" and "amazon_managed_kafka_event_source_config". Detailed below.
    sourceAccessConfigurations EventSourceMappingSourceAccessConfiguration[]
    For Self Managed Kafka sources, the access configuration for the source. If set, configuration must also include self_managed_event_source. Detailed below.
    startingPosition string
    The position in the stream where AWS Lambda should start reading. Must be one of AT_TIMESTAMP (Kinesis only), LATEST or TRIM_HORIZON if getting events from Kinesis, DynamoDB, MSK or Self Managed Apache Kafka. Must not be provided if getting events from SQS. More information about these positions can be found in the AWS DynamoDB Streams API Reference and AWS Kinesis API Reference.
    startingPositionTimestamp string
    A timestamp in RFC3339 format of the data record at which to start reading when using starting_position set to AT_TIMESTAMP. If a record with this exact timestamp does not exist, the next later record is chosen. If the timestamp is older than the current trim horizon, the oldest available record is chosen.
    state string
    The state of the event source mapping.
    stateTransitionReason string
    The reason the event source mapping is in its current state.
    topics string[]
    The name of the Kafka topics. Only available for MSK sources. A single topic name must be specified.
    tumblingWindowInSeconds number
    The duration in seconds of a processing window for AWS Lambda streaming analytics. The range is between 1 second up to 900 seconds. Only available for stream sources (DynamoDB and Kinesis).
    uuid string
    The UUID of the created event source mapping.
    amazon_managed_kafka_event_source_config lambda_.EventSourceMappingAmazonManagedKafkaEventSourceConfigArgs
    Additional configuration block for Amazon Managed Kafka sources. Incompatible with "self_managed_event_source" and "self_managed_kafka_event_source_config". Detailed below.
    batch_size int
    The largest number of records that Lambda will retrieve from your event source at the time of invocation. Defaults to 100 for DynamoDB, Kinesis, MQ and MSK, 10 for SQS.
    bisect_batch_on_function_error bool
    • (Optional) If the function returns an error, split the batch in two and retry. Only available for stream sources (DynamoDB and Kinesis). Defaults to false.
    destination_config lambda_.EventSourceMappingDestinationConfigArgs
    • (Optional) An Amazon SQS queue, Amazon SNS topic or Amazon S3 bucket (only available for Kafka sources) destination for failed records. Only available for stream sources (DynamoDB and Kinesis) and Kafka sources (Amazon MSK and Self-managed Apache Kafka). Detailed below.
    document_db_event_source_config lambda_.EventSourceMappingDocumentDbEventSourceConfigArgs
    • (Optional) Configuration settings for a DocumentDB event source. Detailed below.
    enabled bool
    Determines if the mapping will be enabled on creation. Defaults to true.
    event_source_arn str
    The event source ARN - this is required for Kinesis stream, DynamoDB stream, SQS queue, MQ broker, MSK cluster or DocumentDB change stream. It is incompatible with a Self Managed Kafka source.
    filter_criteria lambda_.EventSourceMappingFilterCriteriaArgs
    The criteria to use for event filtering of Kinesis stream, DynamoDB stream, and SQS queue event sources. Detailed below.
    function_arn str
    The ARN of the Lambda function the event source mapping is sending events to. (Note: this is a computed value that differs from function_name above.)
    function_name str
    The name or the ARN of the Lambda function that will be subscribing to events.
    function_response_types Sequence[str]
    A list of current response type enums applied to the event source mapping for AWS Lambda checkpointing. Only available for SQS and stream sources (DynamoDB and Kinesis). Valid values: ReportBatchItemFailures.
    last_modified str
    The date this resource was last modified.
    last_processing_result str
    The result of the last AWS Lambda invocation of your Lambda function.
    maximum_batching_window_in_seconds int
    The maximum amount of time to gather records before invoking the function, in seconds (between 0 and 300). Records will continue to buffer (or accumulate in the case of an SQS queue event source) until either maximum_batching_window_in_seconds expires or batch_size has been met. For streaming event sources, defaults to as soon as records are available in the stream. If the batch it reads from the stream/queue only has one record in it, Lambda only sends one record to the function. Only available for stream sources (DynamoDB and Kinesis) and SQS standard queues.
    maximum_record_age_in_seconds int
    • (Optional) The maximum age of a record that Lambda sends to a function for processing. Only available for stream sources (DynamoDB and Kinesis). Must be either -1 (forever, and the default value) or between 60 and 604800 (inclusive).
    maximum_retry_attempts int
    • (Optional) The maximum number of times to retry when the function returns an error. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of -1 (forever), maximum of 10000.
    parallelization_factor int
    • (Optional) The number of batches to process from each shard concurrently. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of 1, maximum of 10.
    queues str
    The name of the Amazon MQ broker destination queue to consume. Only available for MQ sources. The list must contain exactly one queue name.
    scaling_config lambda_.EventSourceMappingScalingConfigArgs
    Scaling configuration of the event source. Only available for SQS queues. Detailed below.
    self_managed_event_source lambda_.EventSourceMappingSelfManagedEventSourceArgs
    • (Optional) For Self Managed Kafka sources, the location of the self managed cluster. If set, configuration must also include source_access_configuration. Detailed below.
    self_managed_kafka_event_source_config lambda_.EventSourceMappingSelfManagedKafkaEventSourceConfigArgs
    Additional configuration block for Self Managed Kafka sources. Incompatible with "event_source_arn" and "amazon_managed_kafka_event_source_config". Detailed below.
    source_access_configurations Sequence[lambda_.EventSourceMappingSourceAccessConfigurationArgs]
    For Self Managed Kafka sources, the access configuration for the source. If set, configuration must also include self_managed_event_source. Detailed below.
    starting_position str
    The position in the stream where AWS Lambda should start reading. Must be one of AT_TIMESTAMP (Kinesis only), LATEST or TRIM_HORIZON if getting events from Kinesis, DynamoDB, MSK or Self Managed Apache Kafka. Must not be provided if getting events from SQS. More information about these positions can be found in the AWS DynamoDB Streams API Reference and AWS Kinesis API Reference.
    starting_position_timestamp str
    A timestamp in RFC3339 format of the data record at which to start reading when using starting_position set to AT_TIMESTAMP. If a record with this exact timestamp does not exist, the next later record is chosen. If the timestamp is older than the current trim horizon, the oldest available record is chosen.
    state str
    The state of the event source mapping.
    state_transition_reason str
    The reason the event source mapping is in its current state.
    topics Sequence[str]
    The name of the Kafka topics. Only available for MSK sources. A single topic name must be specified.
    tumbling_window_in_seconds int
    The duration in seconds of a processing window for AWS Lambda streaming analytics. The range is between 1 second up to 900 seconds. Only available for stream sources (DynamoDB and Kinesis).
    uuid str
    The UUID of the created event source mapping.
    amazonManagedKafkaEventSourceConfig Property Map
    Additional configuration block for Amazon Managed Kafka sources. Incompatible with "self_managed_event_source" and "self_managed_kafka_event_source_config". Detailed below.
    batchSize Number
    The largest number of records that Lambda will retrieve from your event source at the time of invocation. Defaults to 100 for DynamoDB, Kinesis, MQ and MSK, 10 for SQS.
    bisectBatchOnFunctionError Boolean
    • (Optional) If the function returns an error, split the batch in two and retry. Only available for stream sources (DynamoDB and Kinesis). Defaults to false.
    destinationConfig Property Map
    • (Optional) An Amazon SQS queue, Amazon SNS topic or Amazon S3 bucket (only available for Kafka sources) destination for failed records. Only available for stream sources (DynamoDB and Kinesis) and Kafka sources (Amazon MSK and Self-managed Apache Kafka). Detailed below.
    documentDbEventSourceConfig Property Map
    • (Optional) Configuration settings for a DocumentDB event source. Detailed below.
    enabled Boolean
    Determines if the mapping will be enabled on creation. Defaults to true.
    eventSourceArn String
    The event source ARN - this is required for Kinesis stream, DynamoDB stream, SQS queue, MQ broker, MSK cluster or DocumentDB change stream. It is incompatible with a Self Managed Kafka source.
    filterCriteria Property Map
    The criteria to use for event filtering of Kinesis stream, DynamoDB stream, and SQS queue event sources. Detailed below.
    functionArn String
    The ARN of the Lambda function the event source mapping is sending events to. (Note: this is a computed value that differs from function_name above.)
    functionName String
    The name or the ARN of the Lambda function that will be subscribing to events.
    functionResponseTypes List<String>
    A list of current response type enums applied to the event source mapping for AWS Lambda checkpointing. Only available for SQS and stream sources (DynamoDB and Kinesis). Valid values: ReportBatchItemFailures.
    lastModified String
    The date this resource was last modified.
    lastProcessingResult String
    The result of the last AWS Lambda invocation of your Lambda function.
    maximumBatchingWindowInSeconds Number
    The maximum amount of time to gather records before invoking the function, in seconds (between 0 and 300). Records will continue to buffer (or accumulate in the case of an SQS queue event source) until either maximum_batching_window_in_seconds expires or batch_size has been met. For streaming event sources, defaults to as soon as records are available in the stream. If the batch it reads from the stream/queue only has one record in it, Lambda only sends one record to the function. Only available for stream sources (DynamoDB and Kinesis) and SQS standard queues.
    maximumRecordAgeInSeconds Number
    • (Optional) The maximum age of a record that Lambda sends to a function for processing. Only available for stream sources (DynamoDB and Kinesis). Must be either -1 (forever, and the default value) or between 60 and 604800 (inclusive).
    maximumRetryAttempts Number
    • (Optional) The maximum number of times to retry when the function returns an error. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of -1 (forever), maximum of 10000.
    parallelizationFactor Number
    • (Optional) The number of batches to process from each shard concurrently. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of 1, maximum of 10.
    queues String
    The name of the Amazon MQ broker destination queue to consume. Only available for MQ sources. The list must contain exactly one queue name.
    scalingConfig Property Map
    Scaling configuration of the event source. Only available for SQS queues. Detailed below.
    selfManagedEventSource Property Map
    • (Optional) For Self Managed Kafka sources, the location of the self managed cluster. If set, configuration must also include source_access_configuration. Detailed below.
    selfManagedKafkaEventSourceConfig Property Map
    Additional configuration block for Self Managed Kafka sources. Incompatible with "event_source_arn" and "amazon_managed_kafka_event_source_config". Detailed below.
    sourceAccessConfigurations List<Property Map>
    For Self Managed Kafka sources, the access configuration for the source. If set, configuration must also include self_managed_event_source. Detailed below.
    startingPosition String
    The position in the stream where AWS Lambda should start reading. Must be one of AT_TIMESTAMP (Kinesis only), LATEST or TRIM_HORIZON if getting events from Kinesis, DynamoDB, MSK or Self Managed Apache Kafka. Must not be provided if getting events from SQS. More information about these positions can be found in the AWS DynamoDB Streams API Reference and AWS Kinesis API Reference.
    startingPositionTimestamp String
    A timestamp in RFC3339 format of the data record from which to start reading when starting_position is set to AT_TIMESTAMP. If a record with this exact timestamp does not exist, the next later record is chosen. If the timestamp is older than the current trim horizon, the oldest available record is chosen.
    state String
    The state of the event source mapping.
    stateTransitionReason String
    The reason the event source mapping is in its current state.
    topics List<String>
    The names of the Kafka topics. Only available for MSK sources. A single topic name must be specified.
    tumblingWindowInSeconds Number
    The duration in seconds of a processing window for AWS Lambda streaming analytics. The range is between 1 second up to 900 seconds. Only available for stream sources (DynamoDB and Kinesis).
    uuid String
    The UUID of the created event source mapping.

    Supporting Types

    EventSourceMappingAmazonManagedKafkaEventSourceConfig, EventSourceMappingAmazonManagedKafkaEventSourceConfigArgs

    ConsumerGroupId string
    A Kafka consumer group ID between 1 and 200 characters for use when creating this event source mapping. If one is not specified, this value will be automatically generated. See AmazonManagedKafkaEventSourceConfig Syntax.
    ConsumerGroupId string
    A Kafka consumer group ID between 1 and 200 characters for use when creating this event source mapping. If one is not specified, this value will be automatically generated. See AmazonManagedKafkaEventSourceConfig Syntax.
    consumerGroupId String
    A Kafka consumer group ID between 1 and 200 characters for use when creating this event source mapping. If one is not specified, this value will be automatically generated. See AmazonManagedKafkaEventSourceConfig Syntax.
    consumerGroupId string
    A Kafka consumer group ID between 1 and 200 characters for use when creating this event source mapping. If one is not specified, this value will be automatically generated. See AmazonManagedKafkaEventSourceConfig Syntax.
    consumer_group_id str
    A Kafka consumer group ID between 1 and 200 characters for use when creating this event source mapping. If one is not specified, this value will be automatically generated. See AmazonManagedKafkaEventSourceConfig Syntax.
    consumerGroupId String
    A Kafka consumer group ID between 1 and 200 characters for use when creating this event source mapping. If one is not specified, this value will be automatically generated. See AmazonManagedKafkaEventSourceConfig Syntax.

    EventSourceMappingDestinationConfig, EventSourceMappingDestinationConfigArgs

    OnFailure EventSourceMappingDestinationConfigOnFailure
    The destination configuration for failed invocations. Detailed below.
    OnFailure EventSourceMappingDestinationConfigOnFailure
    The destination configuration for failed invocations. Detailed below.
    onFailure EventSourceMappingDestinationConfigOnFailure
    The destination configuration for failed invocations. Detailed below.
    onFailure EventSourceMappingDestinationConfigOnFailure
    The destination configuration for failed invocations. Detailed below.
    on_failure lambda_.EventSourceMappingDestinationConfigOnFailure
    The destination configuration for failed invocations. Detailed below.
    onFailure Property Map
    The destination configuration for failed invocations. Detailed below.

    EventSourceMappingDestinationConfigOnFailure, EventSourceMappingDestinationConfigOnFailureArgs

    EventSourceMappingDocumentDbEventSourceConfig, EventSourceMappingDocumentDbEventSourceConfigArgs

    DatabaseName string
    The name of the database to consume within the DocumentDB cluster.
    CollectionName string
    The name of the collection to consume within the database. If you do not specify a collection, Lambda consumes all collections.
    FullDocument string
    Determines what DocumentDB sends to your event stream during document update operations. If set to UpdateLookup, DocumentDB sends a delta describing the changes, along with a copy of the entire document. Otherwise, DocumentDB sends only a partial document that contains the changes. Valid values: UpdateLookup, Default.
    DatabaseName string
    The name of the database to consume within the DocumentDB cluster.
    CollectionName string
    The name of the collection to consume within the database. If you do not specify a collection, Lambda consumes all collections.
    FullDocument string
    Determines what DocumentDB sends to your event stream during document update operations. If set to UpdateLookup, DocumentDB sends a delta describing the changes, along with a copy of the entire document. Otherwise, DocumentDB sends only a partial document that contains the changes. Valid values: UpdateLookup, Default.
    databaseName String
    The name of the database to consume within the DocumentDB cluster.
    collectionName String
    The name of the collection to consume within the database. If you do not specify a collection, Lambda consumes all collections.
    fullDocument String
    Determines what DocumentDB sends to your event stream during document update operations. If set to UpdateLookup, DocumentDB sends a delta describing the changes, along with a copy of the entire document. Otherwise, DocumentDB sends only a partial document that contains the changes. Valid values: UpdateLookup, Default.
    databaseName string
    The name of the database to consume within the DocumentDB cluster.
    collectionName string
    The name of the collection to consume within the database. If you do not specify a collection, Lambda consumes all collections.
    fullDocument string
    Determines what DocumentDB sends to your event stream during document update operations. If set to UpdateLookup, DocumentDB sends a delta describing the changes, along with a copy of the entire document. Otherwise, DocumentDB sends only a partial document that contains the changes. Valid values: UpdateLookup, Default.
    database_name str
    The name of the database to consume within the DocumentDB cluster.
    collection_name str
    The name of the collection to consume within the database. If you do not specify a collection, Lambda consumes all collections.
    full_document str
    Determines what DocumentDB sends to your event stream during document update operations. If set to UpdateLookup, DocumentDB sends a delta describing the changes, along with a copy of the entire document. Otherwise, DocumentDB sends only a partial document that contains the changes. Valid values: UpdateLookup, Default.
    databaseName String
    The name of the database to consume within the DocumentDB cluster.
    collectionName String
    The name of the collection to consume within the database. If you do not specify a collection, Lambda consumes all collections.
    fullDocument String
    Determines what DocumentDB sends to your event stream during document update operations. If set to UpdateLookup, DocumentDB sends a delta describing the changes, along with a copy of the entire document. Otherwise, DocumentDB sends only a partial document that contains the changes. Valid values: UpdateLookup, Default.

    EventSourceMappingFilterCriteria, EventSourceMappingFilterCriteriaArgs

    Filters List<EventSourceMappingFilterCriteriaFilter>
    A set of up to 5 filters. If an event satisfies at least one, Lambda sends the event to the function or adds it to the next batch. Detailed below.
    Filters []EventSourceMappingFilterCriteriaFilter
    A set of up to 5 filters. If an event satisfies at least one, Lambda sends the event to the function or adds it to the next batch. Detailed below.
    filters List<EventSourceMappingFilterCriteriaFilter>
    A set of up to 5 filters. If an event satisfies at least one, Lambda sends the event to the function or adds it to the next batch. Detailed below.
    filters EventSourceMappingFilterCriteriaFilter[]
    A set of up to 5 filters. If an event satisfies at least one, Lambda sends the event to the function or adds it to the next batch. Detailed below.
    filters Sequence[lambda_.EventSourceMappingFilterCriteriaFilter]
    A set of up to 5 filters. If an event satisfies at least one, Lambda sends the event to the function or adds it to the next batch. Detailed below.
    filters List<Property Map>
    A set of up to 5 filters. If an event satisfies at least one, Lambda sends the event to the function or adds it to the next batch. Detailed below.

    EventSourceMappingFilterCriteriaFilter, EventSourceMappingFilterCriteriaFilterArgs

    Pattern string
    Pattern string
    pattern String
    pattern string
    pattern String

    EventSourceMappingScalingConfig, EventSourceMappingScalingConfigArgs

    MaximumConcurrency int
    Limits the number of concurrent instances that the Amazon SQS event source can invoke. Must be between 2 and 1000. See Configuring maximum concurrency for Amazon SQS event sources.
    MaximumConcurrency int
    Limits the number of concurrent instances that the Amazon SQS event source can invoke. Must be between 2 and 1000. See Configuring maximum concurrency for Amazon SQS event sources.
    maximumConcurrency Integer
    Limits the number of concurrent instances that the Amazon SQS event source can invoke. Must be between 2 and 1000. See Configuring maximum concurrency for Amazon SQS event sources.
    maximumConcurrency number
    Limits the number of concurrent instances that the Amazon SQS event source can invoke. Must be between 2 and 1000. See Configuring maximum concurrency for Amazon SQS event sources.
    maximum_concurrency int
    Limits the number of concurrent instances that the Amazon SQS event source can invoke. Must be between 2 and 1000. See Configuring maximum concurrency for Amazon SQS event sources.
    maximumConcurrency Number
    Limits the number of concurrent instances that the Amazon SQS event source can invoke. Must be between 2 and 1000. See Configuring maximum concurrency for Amazon SQS event sources.

    EventSourceMappingSelfManagedEventSource, EventSourceMappingSelfManagedEventSourceArgs

    Endpoints Dictionary<string, string>
    A map of endpoints for the self managed source. For Kafka self-managed sources, the key should be KAFKA_BOOTSTRAP_SERVERS and the value should be a string with a comma separated list of broker endpoints.
    Endpoints map[string]string
    A map of endpoints for the self managed source. For Kafka self-managed sources, the key should be KAFKA_BOOTSTRAP_SERVERS and the value should be a string with a comma separated list of broker endpoints.
    endpoints Map<String,String>
    A map of endpoints for the self managed source. For Kafka self-managed sources, the key should be KAFKA_BOOTSTRAP_SERVERS and the value should be a string with a comma separated list of broker endpoints.
    endpoints {[key: string]: string}
    A map of endpoints for the self managed source. For Kafka self-managed sources, the key should be KAFKA_BOOTSTRAP_SERVERS and the value should be a string with a comma separated list of broker endpoints.
    endpoints Mapping[str, str]
    A map of endpoints for the self managed source. For Kafka self-managed sources, the key should be KAFKA_BOOTSTRAP_SERVERS and the value should be a string with a comma separated list of broker endpoints.
    endpoints Map<String>
    A map of endpoints for the self managed source. For Kafka self-managed sources, the key should be KAFKA_BOOTSTRAP_SERVERS and the value should be a string with a comma separated list of broker endpoints.

    EventSourceMappingSelfManagedKafkaEventSourceConfig, EventSourceMappingSelfManagedKafkaEventSourceConfigArgs

    ConsumerGroupId string
    A Kafka consumer group ID between 1 and 200 characters for use when creating this event source mapping. If one is not specified, this value will be automatically generated. See SelfManagedKafkaEventSourceConfig Syntax.
    ConsumerGroupId string
    A Kafka consumer group ID between 1 and 200 characters for use when creating this event source mapping. If one is not specified, this value will be automatically generated. See SelfManagedKafkaEventSourceConfig Syntax.
    consumerGroupId String
    A Kafka consumer group ID between 1 and 200 characters for use when creating this event source mapping. If one is not specified, this value will be automatically generated. See SelfManagedKafkaEventSourceConfig Syntax.
    consumerGroupId string
    A Kafka consumer group ID between 1 and 200 characters for use when creating this event source mapping. If one is not specified, this value will be automatically generated. See SelfManagedKafkaEventSourceConfig Syntax.
    consumer_group_id str
    A Kafka consumer group ID between 1 and 200 characters for use when creating this event source mapping. If one is not specified, this value will be automatically generated. See SelfManagedKafkaEventSourceConfig Syntax.
    consumerGroupId String
    A Kafka consumer group ID between 1 and 200 characters for use when creating this event source mapping. If one is not specified, this value will be automatically generated. See SelfManagedKafkaEventSourceConfig Syntax.

    EventSourceMappingSourceAccessConfiguration, EventSourceMappingSourceAccessConfigurationArgs

    Type string
    The type of authentication protocol, VPC components, or virtual host for your event source. For valid values, refer to the AWS documentation.
    Uri string
    The URI for this configuration. For type VPC_SUBNET the value should be subnet:subnet_id where subnet_id is the value you would find in an aws.ec2.Subnet resource's id attribute. For type VPC_SECURITY_GROUP the value should be security_group:security_group_id where security_group_id is the value you would find in an aws.ec2.SecurityGroup resource's id attribute.
    Type string
    The type of authentication protocol, VPC components, or virtual host for your event source. For valid values, refer to the AWS documentation.
    Uri string
    The URI for this configuration. For type VPC_SUBNET the value should be subnet:subnet_id where subnet_id is the value you would find in an aws.ec2.Subnet resource's id attribute. For type VPC_SECURITY_GROUP the value should be security_group:security_group_id where security_group_id is the value you would find in an aws.ec2.SecurityGroup resource's id attribute.
    type String
    The type of authentication protocol, VPC components, or virtual host for your event source. For valid values, refer to the AWS documentation.
    uri String
    The URI for this configuration. For type VPC_SUBNET the value should be subnet:subnet_id where subnet_id is the value you would find in an aws.ec2.Subnet resource's id attribute. For type VPC_SECURITY_GROUP the value should be security_group:security_group_id where security_group_id is the value you would find in an aws.ec2.SecurityGroup resource's id attribute.
    type string
    The type of authentication protocol, VPC components, or virtual host for your event source. For valid values, refer to the AWS documentation.
    uri string
    The URI for this configuration. For type VPC_SUBNET the value should be subnet:subnet_id where subnet_id is the value you would find in an aws.ec2.Subnet resource's id attribute. For type VPC_SECURITY_GROUP the value should be security_group:security_group_id where security_group_id is the value you would find in an aws.ec2.SecurityGroup resource's id attribute.
    type str
    The type of authentication protocol, VPC components, or virtual host for your event source. For valid values, refer to the AWS documentation.
    uri str
    The URI for this configuration. For type VPC_SUBNET the value should be subnet:subnet_id where subnet_id is the value you would find in an aws.ec2.Subnet resource's id attribute. For type VPC_SECURITY_GROUP the value should be security_group:security_group_id where security_group_id is the value you would find in an aws.ec2.SecurityGroup resource's id attribute.
    type String
    The type of authentication protocol, VPC components, or virtual host for your event source. For valid values, refer to the AWS documentation.
    uri String
    The URI for this configuration. For type VPC_SUBNET the value should be subnet:subnet_id where subnet_id is the value you would find in an aws.ec2.Subnet resource's id attribute. For type VPC_SECURITY_GROUP the value should be security_group:security_group_id where security_group_id is the value you would find in an aws.ec2.SecurityGroup resource's id attribute.

    Import

    Using pulumi import, import Lambda event source mappings using the UUID (event source mapping identifier). For example:

    $ pulumi import aws:lambda/eventSourceMapping:EventSourceMapping event_source_mapping 12345kxodurf3443
    

    To learn more about importing existing cloud resources, see Importing resources.

    Package Details

    Repository
    AWS Classic pulumi/pulumi-aws
    License
    Apache-2.0
    Notes
    This Pulumi package is based on the aws Terraform Provider.
    aws logo

    Try AWS Native preview for resources not in the classic version.

    AWS Classic v6.40.0 published on Wednesday, Jun 12, 2024 by Pulumi