AWS Classic

Pulumi Official
Package maintained by Pulumi
v5.10.0 published on Monday, Jul 11, 2022 by Pulumi

EventSourceMapping

Provides a Lambda event source mapping. This allows Lambda functions to get events from Kinesis, DynamoDB, SQS, Amazon MQ and Managed Streaming for Apache Kafka (MSK).

For information about Lambda and how to use it, see What is AWS Lambda?. For information about event source mappings, see CreateEventSourceMapping in the API docs.

Example Usage

DynamoDB

using Pulumi;
using Aws = Pulumi.Aws;

class MyStack : Stack
{
    public MyStack()
    {
        var example = new Aws.Lambda.EventSourceMapping("example", new Aws.Lambda.EventSourceMappingArgs
        {
            EventSourceArn = aws_dynamodb_table.Example.Stream_arn,
            FunctionName = aws_lambda_function.Example.Arn,
            StartingPosition = "LATEST",
        });
    }

}
package main

import (
	"github.com/pulumi/pulumi-aws/sdk/v5/go/aws/lambda"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := lambda.NewEventSourceMapping(ctx, "example", &lambda.EventSourceMappingArgs{
			EventSourceArn:   pulumi.Any(aws_dynamodb_table.Example.Stream_arn),
			FunctionName:     pulumi.Any(aws_lambda_function.Example.Arn),
			StartingPosition: pulumi.String("LATEST"),
		})
		if err != nil {
			return err
		}
		return nil
	})
}
package generated_program;

import java.util.*;
import java.io.*;
import java.nio.*;
import com.pulumi.*;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var example = new EventSourceMapping("example", EventSourceMappingArgs.builder()        
            .eventSourceArn(aws_dynamodb_table.example().stream_arn())
            .functionName(aws_lambda_function.example().arn())
            .startingPosition("LATEST")
            .build());

    }
}
import pulumi
import pulumi_aws as aws

example = aws.lambda_.EventSourceMapping("example",
    event_source_arn=aws_dynamodb_table["example"]["stream_arn"],
    function_name=aws_lambda_function["example"]["arn"],
    starting_position="LATEST")
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";

const example = new aws.lambda.EventSourceMapping("example", {
    eventSourceArn: aws_dynamodb_table.example.stream_arn,
    functionName: aws_lambda_function.example.arn,
    startingPosition: "LATEST",
});
resources:
  example:
    type: aws:lambda:EventSourceMapping
    properties:
      eventSourceArn: ${aws_dynamodb_table.example.stream_arn}
      functionName: ${aws_lambda_function.example.arn}
      startingPosition: LATEST

Kinesis

using Pulumi;
using Aws = Pulumi.Aws;

class MyStack : Stack
{
    public MyStack()
    {
        var example = new Aws.Lambda.EventSourceMapping("example", new Aws.Lambda.EventSourceMappingArgs
        {
            EventSourceArn = aws_kinesis_stream.Example.Arn,
            FunctionName = aws_lambda_function.Example.Arn,
            StartingPosition = "LATEST",
        });
    }

}
package main

import (
	"github.com/pulumi/pulumi-aws/sdk/v5/go/aws/lambda"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := lambda.NewEventSourceMapping(ctx, "example", &lambda.EventSourceMappingArgs{
			EventSourceArn:   pulumi.Any(aws_kinesis_stream.Example.Arn),
			FunctionName:     pulumi.Any(aws_lambda_function.Example.Arn),
			StartingPosition: pulumi.String("LATEST"),
		})
		if err != nil {
			return err
		}
		return nil
	})
}
package generated_program;

import java.util.*;
import java.io.*;
import java.nio.*;
import com.pulumi.*;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var example = new EventSourceMapping("example", EventSourceMappingArgs.builder()        
            .eventSourceArn(aws_kinesis_stream.example().arn())
            .functionName(aws_lambda_function.example().arn())
            .startingPosition("LATEST")
            .build());

    }
}
import pulumi
import pulumi_aws as aws

example = aws.lambda_.EventSourceMapping("example",
    event_source_arn=aws_kinesis_stream["example"]["arn"],
    function_name=aws_lambda_function["example"]["arn"],
    starting_position="LATEST")
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";

const example = new aws.lambda.EventSourceMapping("example", {
    eventSourceArn: aws_kinesis_stream.example.arn,
    functionName: aws_lambda_function.example.arn,
    startingPosition: "LATEST",
});
resources:
  example:
    type: aws:lambda:EventSourceMapping
    properties:
      eventSourceArn: ${aws_kinesis_stream.example.arn}
      functionName: ${aws_lambda_function.example.arn}
      startingPosition: LATEST

Managed Streaming for Apache Kafka (MSK)

using Pulumi;
using Aws = Pulumi.Aws;

class MyStack : Stack
{
    public MyStack()
    {
        var example = new Aws.Lambda.EventSourceMapping("example", new Aws.Lambda.EventSourceMappingArgs
        {
            EventSourceArn = aws_msk_cluster.Example.Arn,
            FunctionName = aws_lambda_function.Example.Arn,
            Topics = 
            {
                "Example",
            },
            StartingPosition = "TRIM_HORIZON",
        });
    }

}
package main

import (
	"github.com/pulumi/pulumi-aws/sdk/v5/go/aws/lambda"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := lambda.NewEventSourceMapping(ctx, "example", &lambda.EventSourceMappingArgs{
			EventSourceArn: pulumi.Any(aws_msk_cluster.Example.Arn),
			FunctionName:   pulumi.Any(aws_lambda_function.Example.Arn),
			Topics: pulumi.StringArray{
				pulumi.String("Example"),
			},
			StartingPosition: pulumi.String("TRIM_HORIZON"),
		})
		if err != nil {
			return err
		}
		return nil
	})
}
package generated_program;

import java.util.*;
import java.io.*;
import java.nio.*;
import com.pulumi.*;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var example = new EventSourceMapping("example", EventSourceMappingArgs.builder()        
            .eventSourceArn(aws_msk_cluster.example().arn())
            .functionName(aws_lambda_function.example().arn())
            .topics("Example")
            .startingPosition("TRIM_HORIZON")
            .build());

    }
}
import pulumi
import pulumi_aws as aws

example = aws.lambda_.EventSourceMapping("example",
    event_source_arn=aws_msk_cluster["example"]["arn"],
    function_name=aws_lambda_function["example"]["arn"],
    topics=["Example"],
    starting_position="TRIM_HORIZON")
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";

const example = new aws.lambda.EventSourceMapping("example", {
    eventSourceArn: aws_msk_cluster.example.arn,
    functionName: aws_lambda_function.example.arn,
    topics: ["Example"],
    startingPosition: "TRIM_HORIZON",
});
resources:
  example:
    type: aws:lambda:EventSourceMapping
    properties:
      eventSourceArn: ${aws_msk_cluster.example.arn}
      functionName: ${aws_lambda_function.example.arn}
      topics:
        - Example
      startingPosition: TRIM_HORIZON

Self Managed Apache Kafka

using Pulumi;
using Aws = Pulumi.Aws;

class MyStack : Stack
{
    public MyStack()
    {
        var example = new Aws.Lambda.EventSourceMapping("example", new Aws.Lambda.EventSourceMappingArgs
        {
            FunctionName = aws_lambda_function.Example.Arn,
            Topics = 
            {
                "Example",
            },
            StartingPosition = "TRIM_HORIZON",
            SelfManagedEventSource = new Aws.Lambda.Inputs.EventSourceMappingSelfManagedEventSourceArgs
            {
                Endpoints = 
                {
                    { "KAFKA_BOOTSTRAP_SERVERS", "kafka1.example.com:9092,kafka2.example.com:9092" },
                },
            },
            SourceAccessConfigurations = 
            {
                new Aws.Lambda.Inputs.EventSourceMappingSourceAccessConfigurationArgs
                {
                    Type = "VPC_SUBNET",
                    Uri = "subnet:subnet-example1",
                },
                new Aws.Lambda.Inputs.EventSourceMappingSourceAccessConfigurationArgs
                {
                    Type = "VPC_SUBNET",
                    Uri = "subnet:subnet-example2",
                },
                new Aws.Lambda.Inputs.EventSourceMappingSourceAccessConfigurationArgs
                {
                    Type = "VPC_SECURITY_GROUP",
                    Uri = "security_group:sg-example",
                },
            },
        });
    }

}
package main

import (
	"github.com/pulumi/pulumi-aws/sdk/v5/go/aws/lambda"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := lambda.NewEventSourceMapping(ctx, "example", &lambda.EventSourceMappingArgs{
			FunctionName: pulumi.Any(aws_lambda_function.Example.Arn),
			Topics: pulumi.StringArray{
				pulumi.String("Example"),
			},
			StartingPosition: pulumi.String("TRIM_HORIZON"),
			SelfManagedEventSource: &lambda.EventSourceMappingSelfManagedEventSourceArgs{
				Endpoints: pulumi.StringMap{
					"KAFKA_BOOTSTRAP_SERVERS": pulumi.String("kafka1.example.com:9092,kafka2.example.com:9092"),
				},
			},
			SourceAccessConfigurations: lambda.EventSourceMappingSourceAccessConfigurationArray{
				&lambda.EventSourceMappingSourceAccessConfigurationArgs{
					Type: pulumi.String("VPC_SUBNET"),
					Uri:  pulumi.String("subnet:subnet-example1"),
				},
				&lambda.EventSourceMappingSourceAccessConfigurationArgs{
					Type: pulumi.String("VPC_SUBNET"),
					Uri:  pulumi.String("subnet:subnet-example2"),
				},
				&lambda.EventSourceMappingSourceAccessConfigurationArgs{
					Type: pulumi.String("VPC_SECURITY_GROUP"),
					Uri:  pulumi.String("security_group:sg-example"),
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
package generated_program;

import java.util.*;
import java.io.*;
import java.nio.*;
import com.pulumi.*;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var example = new EventSourceMapping("example", EventSourceMappingArgs.builder()        
            .functionName(aws_lambda_function.example().arn())
            .topics("Example")
            .startingPosition("TRIM_HORIZON")
            .selfManagedEventSource(EventSourceMappingSelfManagedEventSourceArgs.builder()
                .endpoints(Map.of("KAFKA_BOOTSTRAP_SERVERS", "kafka1.example.com:9092,kafka2.example.com:9092"))
                .build())
            .sourceAccessConfigurations(            
                EventSourceMappingSourceAccessConfigurationArgs.builder()
                    .type("VPC_SUBNET")
                    .uri("subnet:subnet-example1")
                    .build(),
                EventSourceMappingSourceAccessConfigurationArgs.builder()
                    .type("VPC_SUBNET")
                    .uri("subnet:subnet-example2")
                    .build(),
                EventSourceMappingSourceAccessConfigurationArgs.builder()
                    .type("VPC_SECURITY_GROUP")
                    .uri("security_group:sg-example")
                    .build())
            .build());

    }
}
import pulumi
import pulumi_aws as aws

# Lambda event source mapping for a self-managed Apache Kafka cluster.
# Note: `lambda` is a Python keyword, so the Pulumi module is `aws.lambda_`
# (the generated double-dot form `aws.lambda..` is a syntax error).
example = aws.lambda_.EventSourceMapping("example",
    function_name=aws_lambda_function["example"]["arn"],
    topics=["Example"],
    starting_position="TRIM_HORIZON",
    self_managed_event_source=aws.lambda_.EventSourceMappingSelfManagedEventSourceArgs(
        endpoints={
            "KAFKA_BOOTSTRAP_SERVERS": "kafka1.example.com:9092,kafka2.example.com:9092",
        },
    ),
    source_access_configurations=[
        aws.lambda_.EventSourceMappingSourceAccessConfigurationArgs(
            type="VPC_SUBNET",
            uri="subnet:subnet-example1",
        ),
        aws.lambda_.EventSourceMappingSourceAccessConfigurationArgs(
            type="VPC_SUBNET",
            uri="subnet:subnet-example2",
        ),
        aws.lambda_.EventSourceMappingSourceAccessConfigurationArgs(
            type="VPC_SECURITY_GROUP",
            uri="security_group:sg-example",
        ),
    ])
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";

const example = new aws.lambda.EventSourceMapping("example", {
    functionName: aws_lambda_function.example.arn,
    topics: ["Example"],
    startingPosition: "TRIM_HORIZON",
    selfManagedEventSource: {
        endpoints: {
            KAFKA_BOOTSTRAP_SERVERS: "kafka1.example.com:9092,kafka2.example.com:9092",
        },
    },
    sourceAccessConfigurations: [
        {
            type: "VPC_SUBNET",
            uri: "subnet:subnet-example1",
        },
        {
            type: "VPC_SUBNET",
            uri: "subnet:subnet-example2",
        },
        {
            type: "VPC_SECURITY_GROUP",
            uri: "security_group:sg-example",
        },
    ],
});
resources:
  example:
    type: aws:lambda:EventSourceMapping
    properties:
      functionName: ${aws_lambda_function.example.arn}
      topics:
        - Example
      startingPosition: TRIM_HORIZON
      selfManagedEventSource:
        endpoints:
          KAFKA_BOOTSTRAP_SERVERS: kafka1.example.com:9092,kafka2.example.com:9092
      sourceAccessConfigurations:
        - type: VPC_SUBNET
          uri: subnet:subnet-example1
        - type: VPC_SUBNET
          uri: subnet:subnet-example2
        - type: VPC_SECURITY_GROUP
          uri: security_group:sg-example

SQS

using Pulumi;
using Aws = Pulumi.Aws;

class MyStack : Stack
{
    public MyStack()
    {
        var example = new Aws.Lambda.EventSourceMapping("example", new Aws.Lambda.EventSourceMappingArgs
        {
            EventSourceArn = aws_sqs_queue.Sqs_queue_test.Arn,
            FunctionName = aws_lambda_function.Example.Arn,
        });
    }

}
package main

import (
	"github.com/pulumi/pulumi-aws/sdk/v5/go/aws/lambda"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := lambda.NewEventSourceMapping(ctx, "example", &lambda.EventSourceMappingArgs{
			EventSourceArn: pulumi.Any(aws_sqs_queue.Sqs_queue_test.Arn),
			FunctionName:   pulumi.Any(aws_lambda_function.Example.Arn),
		})
		if err != nil {
			return err
		}
		return nil
	})
}
package generated_program;

import java.util.*;
import java.io.*;
import java.nio.*;
import com.pulumi.*;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var example = new EventSourceMapping("example", EventSourceMappingArgs.builder()        
            .eventSourceArn(aws_sqs_queue.sqs_queue_test().arn())
            .functionName(aws_lambda_function.example().arn())
            .build());

    }
}
import pulumi
import pulumi_aws as aws

example = aws.lambda_.EventSourceMapping("example",
    event_source_arn=aws_sqs_queue["sqs_queue_test"]["arn"],
    function_name=aws_lambda_function["example"]["arn"])
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";

const example = new aws.lambda.EventSourceMapping("example", {
    eventSourceArn: aws_sqs_queue.sqs_queue_test.arn,
    functionName: aws_lambda_function.example.arn,
});
resources:
  example:
    type: aws:lambda:EventSourceMapping
    properties:
      eventSourceArn: ${aws_sqs_queue.sqs_queue_test.arn}
      functionName: ${aws_lambda_function.example.arn}

SQS with event filter

using System.Collections.Generic;
using System.Text.Json;
using Pulumi;
using Aws = Pulumi.Aws;

class MyStack : Stack
{
    public MyStack()
    {
        var example = new Aws.Lambda.EventSourceMapping("example", new Aws.Lambda.EventSourceMappingArgs
        {
            EventSourceArn = aws_sqs_queue.Sqs_queue_test.Arn,
            FunctionName = aws_lambda_function.Example.Arn,
            FilterCriteria = new Aws.Lambda.Inputs.EventSourceMappingFilterCriteriaArgs
            {
                Filters = 
                {
                    new Aws.Lambda.Inputs.EventSourceMappingFilterCriteriaFilterArgs
                    {
                        Pattern = JsonSerializer.Serialize(new Dictionary<string, object?>
                        {
                            { "body", new Dictionary<string, object?>
                            {
                                { "Temperature", new[]
                                    {
                                        new Dictionary<string, object?>
                                        {
                                            { "numeric", new[]
                                                {
                                                    ">",
                                                    0,
                                                    "<=",
                                                    100,
                                                }
                                             },
                                        },
                                    }
                                 },
                                { "Location", new[]
                                    {
                                        "New York",
                                    }
                                 },
                            } },
                        }),
                    },
                },
            },
        });
    }

}
package main

import (
	"encoding/json"

	"github.com/pulumi/pulumi-aws/sdk/v5/go/aws/lambda"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		tmpJSON0, err := json.Marshal(map[string]interface{}{
			"body": map[string]interface{}{
				"Temperature": []map[string]interface{}{
					map[string]interface{}{
						"numeric": []interface{}{
							">",
							0,
							"<=",
							100,
						},
					},
				},
				"Location": []string{
					"New York",
				},
			},
		})
		if err != nil {
			return err
		}
		json0 := string(tmpJSON0)
		_, err := lambda.NewEventSourceMapping(ctx, "example", &lambda.EventSourceMappingArgs{
			EventSourceArn: pulumi.Any(aws_sqs_queue.Sqs_queue_test.Arn),
			FunctionName:   pulumi.Any(aws_lambda_function.Example.Arn),
			FilterCriteria: &lambda.EventSourceMappingFilterCriteriaArgs{
				Filters: lambda.EventSourceMappingFilterCriteriaFilterArray{
					&lambda.EventSourceMappingFilterCriteriaFilterArgs{
						Pattern: pulumi.String(json0),
					},
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
package generated_program;

import java.util.*;
import java.io.*;
import java.nio.*;
import com.pulumi.*;
import static com.pulumi.codegen.internal.Serialization.*;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var example = new EventSourceMapping("example", EventSourceMappingArgs.builder()        
            .eventSourceArn(aws_sqs_queue.sqs_queue_test().arn())
            .functionName(aws_lambda_function.example().arn())
            .filterCriteria(EventSourceMappingFilterCriteriaArgs.builder()
                .filters(EventSourceMappingFilterCriteriaFilterArgs.builder()
                    .pattern(serializeJson(
                        jsonObject(
                            jsonProperty("body", jsonObject(
                                jsonProperty("Temperature", jsonArray(jsonObject(
                                    jsonProperty("numeric", jsonArray(
                                        ">", 
                                        0, 
                                        "<=", 
                                        100
                                    ))
                                ))),
                                jsonProperty("Location", jsonArray("New York"))
                            ))
                        )))
                    .build())
                .build())
            .build());

    }
}
import pulumi
import json
import pulumi_aws as aws

# Lambda event source mapping for SQS with an event filter pattern.
# Note: `lambda` is a Python keyword, so the Pulumi module is `aws.lambda_`
# (the generated double-dot form `aws.lambda..` is a syntax error).
example = aws.lambda_.EventSourceMapping("example",
    event_source_arn=aws_sqs_queue["sqs_queue_test"]["arn"],
    function_name=aws_lambda_function["example"]["arn"],
    filter_criteria=aws.lambda_.EventSourceMappingFilterCriteriaArgs(
        filters=[aws.lambda_.EventSourceMappingFilterCriteriaFilterArgs(
            pattern=json.dumps({
                "body": {
                    "Temperature": [{
                        "numeric": [
                            ">",
                            0,
                            "<=",
                            100,
                        ],
                    }],
                    "Location": ["New York"],
                },
            }),
        )],
    ))
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";

const example = new aws.lambda.EventSourceMapping("example", {
    eventSourceArn: aws_sqs_queue.sqs_queue_test.arn,
    functionName: aws_lambda_function.example.arn,
    filterCriteria: {
        filters: [{
            pattern: JSON.stringify({
                body: {
                    Temperature: [{
                        numeric: [
                            ">",
                            0,
                            "<=",
                            100,
                        ],
                    }],
                    Location: ["New York"],
                },
            }),
        }],
    },
});
resources:
  example:
    type: aws:lambda:EventSourceMapping
    properties:
      eventSourceArn: ${aws_sqs_queue.sqs_queue_test.arn}
      functionName: ${aws_lambda_function.example.arn}
      filterCriteria:
        filters:
          - pattern:
              Fn::ToJSON:
                body:
                  Temperature:
                    - numeric:
                        - '>'
                        - 0
                        - <=
                        - 100
                  Location:
                    - New York

Amazon MQ (ActiveMQ)

using Pulumi;
using Aws = Pulumi.Aws;

class MyStack : Stack
{
    public MyStack()
    {
        var example = new Aws.Lambda.EventSourceMapping("example", new Aws.Lambda.EventSourceMappingArgs
        {
            BatchSize = 10,
            EventSourceArn = aws_mq_broker.Example.Arn,
            Enabled = true,
            FunctionName = aws_lambda_function.Example.Arn,
            Queues = 
            {
                "example",
            },
            SourceAccessConfigurations = 
            {
                new Aws.Lambda.Inputs.EventSourceMappingSourceAccessConfigurationArgs
                {
                    Type = "BASIC_AUTH",
                    Uri = aws_secretsmanager_secret_version.Example.Arn,
                },
            },
        });
    }

}
package main

import (
	"github.com/pulumi/pulumi-aws/sdk/v5/go/aws/lambda"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := lambda.NewEventSourceMapping(ctx, "example", &lambda.EventSourceMappingArgs{
			BatchSize:      pulumi.Int(10),
			EventSourceArn: pulumi.Any(aws_mq_broker.Example.Arn),
			Enabled:        pulumi.Bool(true),
			FunctionName:   pulumi.Any(aws_lambda_function.Example.Arn),
			Queues: pulumi.StringArray{
				pulumi.String("example"),
			},
			SourceAccessConfigurations: lambda.EventSourceMappingSourceAccessConfigurationArray{
				&lambda.EventSourceMappingSourceAccessConfigurationArgs{
					Type: pulumi.String("BASIC_AUTH"),
					Uri:  pulumi.Any(aws_secretsmanager_secret_version.Example.Arn),
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
package generated_program;

import java.util.*;
import java.io.*;
import java.nio.*;
import com.pulumi.*;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var example = new EventSourceMapping("example", EventSourceMappingArgs.builder()        
            .batchSize(10)
            .eventSourceArn(aws_mq_broker.example().arn())
            .enabled(true)
            .functionName(aws_lambda_function.example().arn())
            .queues("example")
            .sourceAccessConfigurations(EventSourceMappingSourceAccessConfigurationArgs.builder()
                .type("BASIC_AUTH")
                .uri(aws_secretsmanager_secret_version.example().arn())
                .build())
            .build());

    }
}
import pulumi
import pulumi_aws as aws

# Lambda event source mapping for an Amazon MQ (ActiveMQ) broker.
# Note: `lambda` is a Python keyword, so the Pulumi module is `aws.lambda_`
# (the generated double-dot form `aws.lambda..` is a syntax error).
example = aws.lambda_.EventSourceMapping("example",
    batch_size=10,
    event_source_arn=aws_mq_broker["example"]["arn"],
    enabled=True,
    function_name=aws_lambda_function["example"]["arn"],
    queues=["example"],
    source_access_configurations=[aws.lambda_.EventSourceMappingSourceAccessConfigurationArgs(
        type="BASIC_AUTH",
        uri=aws_secretsmanager_secret_version["example"]["arn"],
    )])
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";

const example = new aws.lambda.EventSourceMapping("example", {
    batchSize: 10,
    eventSourceArn: aws_mq_broker.example.arn,
    enabled: true,
    functionName: aws_lambda_function.example.arn,
    queues: ["example"],
    sourceAccessConfigurations: [{
        type: "BASIC_AUTH",
        uri: aws_secretsmanager_secret_version.example.arn,
    }],
});
resources:
  example:
    type: aws:lambda:EventSourceMapping
    properties:
      batchSize: 10
      eventSourceArn: ${aws_mq_broker.example.arn}
      enabled: true
      functionName: ${aws_lambda_function.example.arn}
      queues:
        - example
      sourceAccessConfigurations:
        - type: BASIC_AUTH
          uri: ${aws_secretsmanager_secret_version.example.arn}

Amazon MQ (RabbitMQ)

using Pulumi;
using Aws = Pulumi.Aws;

class MyStack : Stack
{
    public MyStack()
    {
        var example = new Aws.Lambda.EventSourceMapping("example", new Aws.Lambda.EventSourceMappingArgs
        {
            BatchSize = 1,
            EventSourceArn = aws_mq_broker.Example.Arn,
            Enabled = true,
            FunctionName = aws_lambda_function.Example.Arn,
            Queues = 
            {
                "example",
            },
            SourceAccessConfigurations = 
            {
                new Aws.Lambda.Inputs.EventSourceMappingSourceAccessConfigurationArgs
                {
                    Type = "VIRTUAL_HOST",
                    Uri = "/example",
                },
                new Aws.Lambda.Inputs.EventSourceMappingSourceAccessConfigurationArgs
                {
                    Type = "BASIC_AUTH",
                    Uri = aws_secretsmanager_secret_version.Example.Arn,
                },
            },
        });
    }

}
package main

import (
	"github.com/pulumi/pulumi-aws/sdk/v5/go/aws/lambda"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := lambda.NewEventSourceMapping(ctx, "example", &lambda.EventSourceMappingArgs{
			BatchSize:      pulumi.Int(1),
			EventSourceArn: pulumi.Any(aws_mq_broker.Example.Arn),
			Enabled:        pulumi.Bool(true),
			FunctionName:   pulumi.Any(aws_lambda_function.Example.Arn),
			Queues: pulumi.StringArray{
				pulumi.String("example"),
			},
			SourceAccessConfigurations: lambda.EventSourceMappingSourceAccessConfigurationArray{
				&lambda.EventSourceMappingSourceAccessConfigurationArgs{
					Type: pulumi.String("VIRTUAL_HOST"),
					Uri:  pulumi.String("/example"),
				},
				&lambda.EventSourceMappingSourceAccessConfigurationArgs{
					Type: pulumi.String("BASIC_AUTH"),
					Uri:  pulumi.Any(aws_secretsmanager_secret_version.Example.Arn),
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
package generated_program;

import java.util.*;
import java.io.*;
import java.nio.*;
import com.pulumi.*;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var example = new EventSourceMapping("example", EventSourceMappingArgs.builder()        
            .batchSize(1)
            .eventSourceArn(aws_mq_broker.example().arn())
            .enabled(true)
            .functionName(aws_lambda_function.example().arn())
            .queues("example")
            .sourceAccessConfigurations(            
                EventSourceMappingSourceAccessConfigurationArgs.builder()
                    .type("VIRTUAL_HOST")
                    .uri("/example")
                    .build(),
                EventSourceMappingSourceAccessConfigurationArgs.builder()
                    .type("BASIC_AUTH")
                    .uri(aws_secretsmanager_secret_version.example().arn())
                    .build())
            .build());

    }
}
import pulumi
import pulumi_aws as aws

# Lambda event source mapping for an Amazon MQ (RabbitMQ) broker.
# Note: `lambda` is a Python keyword, so the Pulumi module is `aws.lambda_`
# (the generated double-dot form `aws.lambda..` is a syntax error).
example = aws.lambda_.EventSourceMapping("example",
    batch_size=1,
    event_source_arn=aws_mq_broker["example"]["arn"],
    enabled=True,
    function_name=aws_lambda_function["example"]["arn"],
    queues=["example"],
    source_access_configurations=[
        aws.lambda_.EventSourceMappingSourceAccessConfigurationArgs(
            type="VIRTUAL_HOST",
            uri="/example",
        ),
        aws.lambda_.EventSourceMappingSourceAccessConfigurationArgs(
            type="BASIC_AUTH",
            uri=aws_secretsmanager_secret_version["example"]["arn"],
        ),
    ])
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";

const example = new aws.lambda.EventSourceMapping("example", {
    batchSize: 1,
    eventSourceArn: aws_mq_broker.example.arn,
    enabled: true,
    functionName: aws_lambda_function.example.arn,
    queues: ["example"],
    sourceAccessConfigurations: [
        {
            type: "VIRTUAL_HOST",
            uri: "/example",
        },
        {
            type: "BASIC_AUTH",
            uri: aws_secretsmanager_secret_version.example.arn,
        },
    ],
});
resources:
  example:
    type: aws:lambda:EventSourceMapping
    properties:
      batchSize: 1
      eventSourceArn: ${aws_mq_broker.example.arn}
      enabled: true
      functionName: ${aws_lambda_function.example.arn}
      queues:
        - example
      sourceAccessConfigurations:
        - type: VIRTUAL_HOST
          uri: /example
        - type: BASIC_AUTH
          uri: ${aws_secretsmanager_secret_version.example.arn}

Managed Streaming for Kafka (MSK)

using Pulumi;
using Aws = Pulumi.Aws;

class MyStack : Stack
{
    public MyStack()
    {
        var example = new Aws.Lambda.EventSourceMapping("example", new Aws.Lambda.EventSourceMappingArgs
        {
            EventSourceArn = aws_msk_cluster.Example.Arn,
            FunctionName = aws_lambda_function.Example.Arn,
            Topics = 
            {
                "Example",
            },
            StartingPosition = "TRIM_HORIZON",
        });
    }

}
package main

import (
	"github.com/pulumi/pulumi-aws/sdk/v5/go/aws/lambda"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := lambda.NewEventSourceMapping(ctx, "example", &lambda.EventSourceMappingArgs{
			EventSourceArn: pulumi.Any(aws_msk_cluster.Example.Arn),
			FunctionName:   pulumi.Any(aws_lambda_function.Example.Arn),
			Topics: pulumi.StringArray{
				pulumi.String("Example"),
			},
			StartingPosition: pulumi.String("TRIM_HORIZON"),
		})
		if err != nil {
			return err
		}
		return nil
	})
}
package generated_program;

import java.util.*;
import java.io.*;
import java.nio.*;
import com.pulumi.*;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        // Subscribe a Lambda function to an Amazon MSK cluster topic,
        // starting from the oldest available record (TRIM_HORIZON).
        var example = new EventSourceMapping("example", EventSourceMappingArgs.builder()        
            .eventSourceArn(aws_msk_cluster.example().arn())
            .functionName(aws_lambda_function.example().arn())
            .topics("Example")
            .startingPosition("TRIM_HORIZON")
            .build());

    }
}
import pulumi
import pulumi_aws as aws

# Subscribe a Lambda function to an Amazon MSK cluster topic,
# starting from the oldest available record (TRIM_HORIZON).
example = aws.lambda_.EventSourceMapping("example",
    event_source_arn=aws_msk_cluster["example"]["arn"],
    function_name=aws_lambda_function["example"]["arn"],
    topics=["Example"],
    starting_position="TRIM_HORIZON")
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";

// Subscribe a Lambda function to an Amazon MSK cluster topic,
// starting from the oldest available record (TRIM_HORIZON).
const example = new aws.lambda.EventSourceMapping("example", {
    eventSourceArn: aws_msk_cluster.example.arn,
    functionName: aws_lambda_function.example.arn,
    topics: ["Example"],
    startingPosition: "TRIM_HORIZON",
});
resources:
  example:
    type: aws:lambda:EventSourceMapping
    properties:
      eventSourceArn: ${aws_msk_cluster.example.arn}
      functionName: ${aws_lambda_function.example.arn}
      topics:
        - Example
      startingPosition: TRIM_HORIZON

Create an EventSourceMapping Resource

new EventSourceMapping(name: string, args: EventSourceMappingArgs, opts?: CustomResourceOptions);
@overload
def EventSourceMapping(resource_name: str,
                       opts: Optional[ResourceOptions] = None,
                       batch_size: Optional[int] = None,
                       bisect_batch_on_function_error: Optional[bool] = None,
                       destination_config: Optional[_lambda_.EventSourceMappingDestinationConfigArgs] = None,
                       enabled: Optional[bool] = None,
                       event_source_arn: Optional[str] = None,
                       filter_criteria: Optional[_lambda_.EventSourceMappingFilterCriteriaArgs] = None,
                       function_name: Optional[str] = None,
                       function_response_types: Optional[Sequence[str]] = None,
                       maximum_batching_window_in_seconds: Optional[int] = None,
                       maximum_record_age_in_seconds: Optional[int] = None,
                       maximum_retry_attempts: Optional[int] = None,
                       parallelization_factor: Optional[int] = None,
                       queues: Optional[Sequence[str]] = None,
                       self_managed_event_source: Optional[_lambda_.EventSourceMappingSelfManagedEventSourceArgs] = None,
                       source_access_configurations: Optional[Sequence[_lambda_.EventSourceMappingSourceAccessConfigurationArgs]] = None,
                       starting_position: Optional[str] = None,
                       starting_position_timestamp: Optional[str] = None,
                       topics: Optional[Sequence[str]] = None,
                       tumbling_window_in_seconds: Optional[int] = None)
@overload
def EventSourceMapping(resource_name: str,
                       args: EventSourceMappingArgs,
                       opts: Optional[ResourceOptions] = None)
func NewEventSourceMapping(ctx *Context, name string, args EventSourceMappingArgs, opts ...ResourceOption) (*EventSourceMapping, error)
public EventSourceMapping(string name, EventSourceMappingArgs args, CustomResourceOptions? opts = null)
public EventSourceMapping(String name, EventSourceMappingArgs args)
public EventSourceMapping(String name, EventSourceMappingArgs args, CustomResourceOptions options)
type: aws:lambda:EventSourceMapping
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.

name string
The unique name of the resource.
args EventSourceMappingArgs
The arguments to resource properties.
opts CustomResourceOptions
Bag of options to control resource's behavior.
resource_name str
The unique name of the resource.
args EventSourceMappingArgs
The arguments to resource properties.
opts ResourceOptions
Bag of options to control resource's behavior.
ctx Context
Context object for the current deployment.
name string
The unique name of the resource.
args EventSourceMappingArgs
The arguments to resource properties.
opts ResourceOption
Bag of options to control resource's behavior.
name string
The unique name of the resource.
args EventSourceMappingArgs
The arguments to resource properties.
opts CustomResourceOptions
Bag of options to control resource's behavior.
name String
The unique name of the resource.
args EventSourceMappingArgs
The arguments to resource properties.
options CustomResourceOptions
Bag of options to control resource's behavior.

EventSourceMapping Resource Properties

To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

Inputs

The EventSourceMapping resource accepts the following input properties:

FunctionName string

The name or the ARN of the Lambda function that will be subscribing to events.

BatchSize int

The largest number of records that Lambda will retrieve from your event source at the time of invocation. Defaults to 100 for DynamoDB, Kinesis, MQ and MSK, 10 for SQS.

  • bisect_batch_on_function_error: - (Optional) If the function returns an error, split the batch in two and retry. Only available for stream sources (DynamoDB and Kinesis). Defaults to false.
  • destination_config: - (Optional) An Amazon SQS queue or Amazon SNS topic destination for failed records. Only available for stream sources (DynamoDB and Kinesis). Detailed below.
BisectBatchOnFunctionError bool
DestinationConfig EventSourceMappingDestinationConfigArgs
Enabled bool

Determines if the mapping will be enabled on creation. Defaults to true.

EventSourceArn string

The event source ARN - this is required for Kinesis stream, DynamoDB stream, SQS queue, MQ broker or MSK cluster. It is incompatible with a Self Managed Kafka source.

FilterCriteria EventSourceMappingFilterCriteriaArgs

The criteria to use for event filtering Kinesis stream, DynamoDB stream, SQS queue event sources. Detailed below.

FunctionResponseTypes List<string>

A list of current response type enums applied to the event source mapping for AWS Lambda checkpointing. Only available for SQS and stream sources (DynamoDB and Kinesis). Valid values: ReportBatchItemFailures.

MaximumBatchingWindowInSeconds int

The maximum amount of time to gather records before invoking the function, in seconds (between 0 and 300). Records will continue to buffer (or accumulate in the case of an SQS queue event source) until either maximum_batching_window_in_seconds expires or batch_size has been met. For streaming event sources, defaults to as soon as records are available in the stream. If the batch it reads from the stream/queue only has one record in it, Lambda only sends one record to the function. Only available for stream sources (DynamoDB and Kinesis) and SQS standard queues.

  • maximum_record_age_in_seconds: - (Optional) The maximum age of a record that Lambda sends to a function for processing. Only available for stream sources (DynamoDB and Kinesis). Must be either -1 (forever, and the default value) or between 60 and 604800 (inclusive).
  • maximum_retry_attempts: - (Optional) The maximum number of times to retry when the function returns an error. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of -1 (forever), maximum of 10000.
  • parallelization_factor: - (Optional) The number of batches to process from each shard concurrently. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of 1, maximum of 10.
MaximumRecordAgeInSeconds int
MaximumRetryAttempts int
ParallelizationFactor int
Queues List<string>

The name of the Amazon MQ broker destination queue to consume. Only available for MQ sources. A single queue name must be specified.

  • self_managed_event_source: - (Optional) For Self Managed Kafka sources, the location of the self managed cluster. If set, configuration must also include source_access_configuration. Detailed below.
  • source_access_configuration: (Optional) For Self Managed Kafka sources, the access configuration for the source. If set, configuration must also include self_managed_event_source. Detailed below.
SelfManagedEventSource EventSourceMappingSelfManagedEventSourceArgs
SourceAccessConfigurations List<EventSourceMappingSourceAccessConfigurationArgs>
StartingPosition string

The position in the stream where AWS Lambda should start reading. Must be one of AT_TIMESTAMP (Kinesis only), LATEST or TRIM_HORIZON if getting events from Kinesis, DynamoDB or MSK. Must not be provided if getting events from SQS. More information about these positions can be found in the AWS DynamoDB Streams API Reference and AWS Kinesis API Reference.

StartingPositionTimestamp string

A timestamp in RFC3339 format of the data record which to start reading when using starting_position set to AT_TIMESTAMP. If a record with this exact timestamp does not exist, the next later record is chosen. If the timestamp is older than the current trim horizon, the oldest available record is chosen.

Topics List<string>

The name of the Kafka topic. Only available for MSK sources. A single topic name must be specified.

TumblingWindowInSeconds int

The duration in seconds of a processing window for AWS Lambda streaming analytics. The range is between 1 second up to 900 seconds. Only available for stream sources (DynamoDB and Kinesis).

FunctionName string

The name or the ARN of the Lambda function that will be subscribing to events.

BatchSize int

The largest number of records that Lambda will retrieve from your event source at the time of invocation. Defaults to 100 for DynamoDB, Kinesis, MQ and MSK, 10 for SQS.

  • bisect_batch_on_function_error: - (Optional) If the function returns an error, split the batch in two and retry. Only available for stream sources (DynamoDB and Kinesis). Defaults to false.
  • destination_config: - (Optional) An Amazon SQS queue or Amazon SNS topic destination for failed records. Only available for stream sources (DynamoDB and Kinesis). Detailed below.
BisectBatchOnFunctionError bool
DestinationConfig EventSourceMappingDestinationConfigArgs
Enabled bool

Determines if the mapping will be enabled on creation. Defaults to true.

EventSourceArn string

The event source ARN - this is required for Kinesis stream, DynamoDB stream, SQS queue, MQ broker or MSK cluster. It is incompatible with a Self Managed Kafka source.

FilterCriteria EventSourceMappingFilterCriteriaArgs

The criteria to use for event filtering Kinesis stream, DynamoDB stream, SQS queue event sources. Detailed below.

FunctionResponseTypes []string

A list of current response type enums applied to the event source mapping for AWS Lambda checkpointing. Only available for SQS and stream sources (DynamoDB and Kinesis). Valid values: ReportBatchItemFailures.

MaximumBatchingWindowInSeconds int

The maximum amount of time to gather records before invoking the function, in seconds (between 0 and 300). Records will continue to buffer (or accumulate in the case of an SQS queue event source) until either maximum_batching_window_in_seconds expires or batch_size has been met. For streaming event sources, defaults to as soon as records are available in the stream. If the batch it reads from the stream/queue only has one record in it, Lambda only sends one record to the function. Only available for stream sources (DynamoDB and Kinesis) and SQS standard queues.

  • maximum_record_age_in_seconds: - (Optional) The maximum age of a record that Lambda sends to a function for processing. Only available for stream sources (DynamoDB and Kinesis). Must be either -1 (forever, and the default value) or between 60 and 604800 (inclusive).
  • maximum_retry_attempts: - (Optional) The maximum number of times to retry when the function returns an error. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of -1 (forever), maximum of 10000.
  • parallelization_factor: - (Optional) The number of batches to process from each shard concurrently. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of 1, maximum of 10.
MaximumRecordAgeInSeconds int
MaximumRetryAttempts int
ParallelizationFactor int
Queues []string

The name of the Amazon MQ broker destination queue to consume. Only available for MQ sources. A single queue name must be specified.

  • self_managed_event_source: - (Optional) For Self Managed Kafka sources, the location of the self managed cluster. If set, configuration must also include source_access_configuration. Detailed below.
  • source_access_configuration: (Optional) For Self Managed Kafka sources, the access configuration for the source. If set, configuration must also include self_managed_event_source. Detailed below.
SelfManagedEventSource EventSourceMappingSelfManagedEventSourceArgs
SourceAccessConfigurations []EventSourceMappingSourceAccessConfigurationArgs
StartingPosition string

The position in the stream where AWS Lambda should start reading. Must be one of AT_TIMESTAMP (Kinesis only), LATEST or TRIM_HORIZON if getting events from Kinesis, DynamoDB or MSK. Must not be provided if getting events from SQS. More information about these positions can be found in the AWS DynamoDB Streams API Reference and AWS Kinesis API Reference.

StartingPositionTimestamp string

A timestamp in RFC3339 format of the data record which to start reading when using starting_position set to AT_TIMESTAMP. If a record with this exact timestamp does not exist, the next later record is chosen. If the timestamp is older than the current trim horizon, the oldest available record is chosen.

Topics []string

The name of the Kafka topic. Only available for MSK sources. A single topic name must be specified.

TumblingWindowInSeconds int

The duration in seconds of a processing window for AWS Lambda streaming analytics. The range is between 1 second up to 900 seconds. Only available for stream sources (DynamoDB and Kinesis).

functionName String

The name or the ARN of the Lambda function that will be subscribing to events.

batchSize Integer

The largest number of records that Lambda will retrieve from your event source at the time of invocation. Defaults to 100 for DynamoDB, Kinesis, MQ and MSK, 10 for SQS.

  • bisect_batch_on_function_error: - (Optional) If the function returns an error, split the batch in two and retry. Only available for stream sources (DynamoDB and Kinesis). Defaults to false.
  • destination_config: - (Optional) An Amazon SQS queue or Amazon SNS topic destination for failed records. Only available for stream sources (DynamoDB and Kinesis). Detailed below.
bisectBatchOnFunctionError Boolean
destinationConfig EventSourceMappingDestinationConfigArgs
enabled Boolean

Determines if the mapping will be enabled on creation. Defaults to true.

eventSourceArn String

The event source ARN - this is required for Kinesis stream, DynamoDB stream, SQS queue, MQ broker or MSK cluster. It is incompatible with a Self Managed Kafka source.

filterCriteria EventSourceMappingFilterCriteriaArgs

The criteria to use for event filtering Kinesis stream, DynamoDB stream, SQS queue event sources. Detailed below.

functionResponseTypes List<String>

A list of current response type enums applied to the event source mapping for AWS Lambda checkpointing. Only available for SQS and stream sources (DynamoDB and Kinesis). Valid values: ReportBatchItemFailures.

maximumBatchingWindowInSeconds Integer

The maximum amount of time to gather records before invoking the function, in seconds (between 0 and 300). Records will continue to buffer (or accumulate in the case of an SQS queue event source) until either maximum_batching_window_in_seconds expires or batch_size has been met. For streaming event sources, defaults to as soon as records are available in the stream. If the batch it reads from the stream/queue only has one record in it, Lambda only sends one record to the function. Only available for stream sources (DynamoDB and Kinesis) and SQS standard queues.

  • maximum_record_age_in_seconds: - (Optional) The maximum age of a record that Lambda sends to a function for processing. Only available for stream sources (DynamoDB and Kinesis). Must be either -1 (forever, and the default value) or between 60 and 604800 (inclusive).
  • maximum_retry_attempts: - (Optional) The maximum number of times to retry when the function returns an error. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of -1 (forever), maximum of 10000.
  • parallelization_factor: - (Optional) The number of batches to process from each shard concurrently. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of 1, maximum of 10.
maximumRecordAgeInSeconds Integer
maximumRetryAttempts Integer
parallelizationFactor Integer
queues List<String>

The name of the Amazon MQ broker destination queue to consume. Only available for MQ sources. A single queue name must be specified.

  • self_managed_event_source: - (Optional) For Self Managed Kafka sources, the location of the self managed cluster. If set, configuration must also include source_access_configuration. Detailed below.
  • source_access_configuration: (Optional) For Self Managed Kafka sources, the access configuration for the source. If set, configuration must also include self_managed_event_source. Detailed below.
selfManagedEventSource EventSourceMappingSelfManagedEventSourceArgs
sourceAccessConfigurations List<EventSourceMappingSourceAccessConfigurationArgs>
startingPosition String

The position in the stream where AWS Lambda should start reading. Must be one of AT_TIMESTAMP (Kinesis only), LATEST or TRIM_HORIZON if getting events from Kinesis, DynamoDB or MSK. Must not be provided if getting events from SQS. More information about these positions can be found in the AWS DynamoDB Streams API Reference and AWS Kinesis API Reference.

startingPositionTimestamp String

A timestamp in RFC3339 format of the data record which to start reading when using starting_position set to AT_TIMESTAMP. If a record with this exact timestamp does not exist, the next later record is chosen. If the timestamp is older than the current trim horizon, the oldest available record is chosen.

topics List<String>

The name of the Kafka topic. Only available for MSK sources. A single topic name must be specified.

tumblingWindowInSeconds Integer

The duration in seconds of a processing window for AWS Lambda streaming analytics. The range is between 1 second up to 900 seconds. Only available for stream sources (DynamoDB and Kinesis).

functionName string

The name or the ARN of the Lambda function that will be subscribing to events.

batchSize number

The largest number of records that Lambda will retrieve from your event source at the time of invocation. Defaults to 100 for DynamoDB, Kinesis, MQ and MSK, 10 for SQS.

  • bisect_batch_on_function_error: - (Optional) If the function returns an error, split the batch in two and retry. Only available for stream sources (DynamoDB and Kinesis). Defaults to false.
  • destination_config: - (Optional) An Amazon SQS queue or Amazon SNS topic destination for failed records. Only available for stream sources (DynamoDB and Kinesis). Detailed below.
bisectBatchOnFunctionError boolean
destinationConfig EventSourceMappingDestinationConfigArgs
enabled boolean

Determines if the mapping will be enabled on creation. Defaults to true.

eventSourceArn string

The event source ARN - this is required for Kinesis stream, DynamoDB stream, SQS queue, MQ broker or MSK cluster. It is incompatible with a Self Managed Kafka source.

filterCriteria EventSourceMappingFilterCriteriaArgs

The criteria to use for event filtering Kinesis stream, DynamoDB stream, SQS queue event sources. Detailed below.

functionResponseTypes string[]

A list of current response type enums applied to the event source mapping for AWS Lambda checkpointing. Only available for SQS and stream sources (DynamoDB and Kinesis). Valid values: ReportBatchItemFailures.

maximumBatchingWindowInSeconds number

The maximum amount of time to gather records before invoking the function, in seconds (between 0 and 300). Records will continue to buffer (or accumulate in the case of an SQS queue event source) until either maximum_batching_window_in_seconds expires or batch_size has been met. For streaming event sources, defaults to as soon as records are available in the stream. If the batch it reads from the stream/queue only has one record in it, Lambda only sends one record to the function. Only available for stream sources (DynamoDB and Kinesis) and SQS standard queues.

  • maximum_record_age_in_seconds: - (Optional) The maximum age of a record that Lambda sends to a function for processing. Only available for stream sources (DynamoDB and Kinesis). Must be either -1 (forever, and the default value) or between 60 and 604800 (inclusive).
  • maximum_retry_attempts: - (Optional) The maximum number of times to retry when the function returns an error. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of -1 (forever), maximum of 10000.
  • parallelization_factor: - (Optional) The number of batches to process from each shard concurrently. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of 1, maximum of 10.
maximumRecordAgeInSeconds number
maximumRetryAttempts number
parallelizationFactor number
queues string[]

The name of the Amazon MQ broker destination queue to consume. Only available for MQ sources. A single queue name must be specified.

  • self_managed_event_source: - (Optional) For Self Managed Kafka sources, the location of the self managed cluster. If set, configuration must also include source_access_configuration. Detailed below.
  • source_access_configuration: (Optional) For Self Managed Kafka sources, the access configuration for the source. If set, configuration must also include self_managed_event_source. Detailed below.
selfManagedEventSource EventSourceMappingSelfManagedEventSourceArgs
sourceAccessConfigurations EventSourceMappingSourceAccessConfigurationArgs[]
startingPosition string

The position in the stream where AWS Lambda should start reading. Must be one of AT_TIMESTAMP (Kinesis only), LATEST or TRIM_HORIZON if getting events from Kinesis, DynamoDB or MSK. Must not be provided if getting events from SQS. More information about these positions can be found in the AWS DynamoDB Streams API Reference and AWS Kinesis API Reference.

startingPositionTimestamp string

A timestamp in RFC3339 format of the data record which to start reading when using starting_position set to AT_TIMESTAMP. If a record with this exact timestamp does not exist, the next later record is chosen. If the timestamp is older than the current trim horizon, the oldest available record is chosen.

topics string[]

The name of the Kafka topic. Only available for MSK sources. A single topic name must be specified.

tumblingWindowInSeconds number

The duration in seconds of a processing window for AWS Lambda streaming analytics. The range is between 1 second up to 900 seconds. Only available for stream sources (DynamoDB and Kinesis).

function_name str

The name or the ARN of the Lambda function that will be subscribing to events.

batch_size int

The largest number of records that Lambda will retrieve from your event source at the time of invocation. Defaults to 100 for DynamoDB, Kinesis, MQ and MSK, 10 for SQS.

  • bisect_batch_on_function_error: - (Optional) If the function returns an error, split the batch in two and retry. Only available for stream sources (DynamoDB and Kinesis). Defaults to false.
  • destination_config: - (Optional) An Amazon SQS queue or Amazon SNS topic destination for failed records. Only available for stream sources (DynamoDB and Kinesis). Detailed below.
bisect_batch_on_function_error bool
destination_config EventSourceMappingDestinationConfigArgs
enabled bool

Determines if the mapping will be enabled on creation. Defaults to true.

event_source_arn str

The event source ARN - this is required for Kinesis stream, DynamoDB stream, SQS queue, MQ broker or MSK cluster. It is incompatible with a Self Managed Kafka source.

filter_criteria EventSourceMappingFilterCriteriaArgs

The criteria to use for event filtering Kinesis stream, DynamoDB stream, SQS queue event sources. Detailed below.

function_response_types Sequence[str]

A list of current response type enums applied to the event source mapping for AWS Lambda checkpointing. Only available for SQS and stream sources (DynamoDB and Kinesis). Valid values: ReportBatchItemFailures.

maximum_batching_window_in_seconds int

The maximum amount of time to gather records before invoking the function, in seconds (between 0 and 300). Records will continue to buffer (or accumulate in the case of an SQS queue event source) until either maximum_batching_window_in_seconds expires or batch_size has been met. For streaming event sources, defaults to as soon as records are available in the stream. If the batch it reads from the stream/queue only has one record in it, Lambda only sends one record to the function. Only available for stream sources (DynamoDB and Kinesis) and SQS standard queues.

  • maximum_record_age_in_seconds: - (Optional) The maximum age of a record that Lambda sends to a function for processing. Only available for stream sources (DynamoDB and Kinesis). Must be either -1 (forever, and the default value) or between 60 and 604800 (inclusive).
  • maximum_retry_attempts: - (Optional) The maximum number of times to retry when the function returns an error. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of -1 (forever), maximum of 10000.
  • parallelization_factor: - (Optional) The number of batches to process from each shard concurrently. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of 1, maximum of 10.
maximum_record_age_in_seconds int
maximum_retry_attempts int
parallelization_factor int
queues Sequence[str]

The name of the Amazon MQ broker destination queue to consume. Only available for MQ sources. A single queue name must be specified.

  • self_managed_event_source: - (Optional) For Self Managed Kafka sources, the location of the self managed cluster. If set, configuration must also include source_access_configuration. Detailed below.
  • source_access_configuration: (Optional) For Self Managed Kafka sources, the access configuration for the source. If set, configuration must also include self_managed_event_source. Detailed below.
self_managed_event_source EventSourceMappingSelfManagedEventSourceArgs
source_access_configurations Sequence[EventSourceMappingSourceAccessConfigurationArgs]
starting_position str

The position in the stream where AWS Lambda should start reading. Must be one of AT_TIMESTAMP (Kinesis only), LATEST or TRIM_HORIZON if getting events from Kinesis, DynamoDB or MSK. Must not be provided if getting events from SQS. More information about these positions can be found in the AWS DynamoDB Streams API Reference and AWS Kinesis API Reference.

starting_position_timestamp str

A timestamp in RFC3339 format of the data record which to start reading when using starting_position set to AT_TIMESTAMP. If a record with this exact timestamp does not exist, the next later record is chosen. If the timestamp is older than the current trim horizon, the oldest available record is chosen.

topics Sequence[str]

The name of the Kafka topic. Only available for MSK sources. A single topic name must be specified.

tumbling_window_in_seconds int

The duration in seconds of a processing window for AWS Lambda streaming analytics. The range is between 1 second up to 900 seconds. Only available for stream sources (DynamoDB and Kinesis).

functionName String

The name or the ARN of the Lambda function that will be subscribing to events.

batchSize Number

The largest number of records that Lambda will retrieve from your event source at the time of invocation. Defaults to 100 for DynamoDB, Kinesis, MQ and MSK, 10 for SQS.

  • bisect_batch_on_function_error: - (Optional) If the function returns an error, split the batch in two and retry. Only available for stream sources (DynamoDB and Kinesis). Defaults to false.
  • destination_config: - (Optional) An Amazon SQS queue or Amazon SNS topic destination for failed records. Only available for stream sources (DynamoDB and Kinesis). Detailed below.
bisectBatchOnFunctionError Boolean
destinationConfig Property Map
enabled Boolean

Determines if the mapping will be enabled on creation. Defaults to true.

eventSourceArn String

The event source ARN - this is required for Kinesis stream, DynamoDB stream, SQS queue, MQ broker or MSK cluster. It is incompatible with a Self Managed Kafka source.

filterCriteria Property Map

The criteria to use for event filtering Kinesis stream, DynamoDB stream, SQS queue event sources. Detailed below.

functionResponseTypes List<String>

A list of current response type enums applied to the event source mapping for AWS Lambda checkpointing. Only available for SQS and stream sources (DynamoDB and Kinesis). Valid values: ReportBatchItemFailures.

maximumBatchingWindowInSeconds Number

The maximum amount of time to gather records before invoking the function, in seconds (between 0 and 300). Records will continue to buffer (or accumulate in the case of an SQS queue event source) until either maximum_batching_window_in_seconds expires or batch_size has been met. For streaming event sources, defaults to as soon as records are available in the stream. If the batch it reads from the stream/queue only has one record in it, Lambda only sends one record to the function. Only available for stream sources (DynamoDB and Kinesis) and SQS standard queues.

  • maximum_record_age_in_seconds: - (Optional) The maximum age of a record that Lambda sends to a function for processing. Only available for stream sources (DynamoDB and Kinesis). Must be either -1 (forever, and the default value) or between 60 and 604800 (inclusive).
  • maximum_retry_attempts: - (Optional) The maximum number of times to retry when the function returns an error. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of -1 (forever), maximum of 10000.
  • parallelization_factor: - (Optional) The number of batches to process from each shard concurrently. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of 1, maximum of 10.
maximumRecordAgeInSeconds Number
maximumRetryAttempts Number
parallelizationFactor Number
queues List<String>

The name of the Amazon MQ broker destination queue to consume. Only available for MQ sources. A single queue name must be specified.

  • self_managed_event_source: - (Optional) For Self Managed Kafka sources, the location of the self managed cluster. If set, configuration must also include source_access_configuration. Detailed below.
  • source_access_configuration: (Optional) For Self Managed Kafka sources, the access configuration for the source. If set, configuration must also include self_managed_event_source. Detailed below.
selfManagedEventSource Property Map
sourceAccessConfigurations List<Property Map>
startingPosition String

The position in the stream where AWS Lambda should start reading. Must be one of AT_TIMESTAMP (Kinesis only), LATEST or TRIM_HORIZON if getting events from Kinesis, DynamoDB or MSK. Must not be provided if getting events from SQS. More information about these positions can be found in the AWS DynamoDB Streams API Reference and AWS Kinesis API Reference.

startingPositionTimestamp String

A timestamp in RFC3339 format of the data record from which to start reading when using starting_position set to AT_TIMESTAMP. If a record with this exact timestamp does not exist, the next later record is chosen. If the timestamp is older than the current trim horizon, the oldest available record is chosen.

topics List<String>

The name of the Kafka topics. Only available for MSK sources. A single topic name must be specified.

tumblingWindowInSeconds Number

The duration in seconds of a processing window for AWS Lambda streaming analytics. The range is between 1 second up to 900 seconds. Only available for stream sources (DynamoDB and Kinesis).

Outputs

All input properties are implicitly available as output properties. Additionally, the EventSourceMapping resource produces the following output properties:

FunctionArn string

The ARN of the Lambda function the event source mapping is sending events to. (Note: this is a computed value that differs from function_name above.)

Id string

The provider-assigned unique ID for this managed resource.

LastModified string

The date this resource was last modified.

LastProcessingResult string

The result of the last AWS Lambda invocation of your Lambda function.

State string

The state of the event source mapping.

StateTransitionReason string

The reason the event source mapping is in its current state.

Uuid string

The UUID of the created event source mapping.

FunctionArn string

The ARN of the Lambda function the event source mapping is sending events to. (Note: this is a computed value that differs from function_name above.)

Id string

The provider-assigned unique ID for this managed resource.

LastModified string

The date this resource was last modified.

LastProcessingResult string

The result of the last AWS Lambda invocation of your Lambda function.

State string

The state of the event source mapping.

StateTransitionReason string

The reason the event source mapping is in its current state.

Uuid string

The UUID of the created event source mapping.

functionArn String

The ARN of the Lambda function the event source mapping is sending events to. (Note: this is a computed value that differs from function_name above.)

id String

The provider-assigned unique ID for this managed resource.

lastModified String

The date this resource was last modified.

lastProcessingResult String

The result of the last AWS Lambda invocation of your Lambda function.

state String

The state of the event source mapping.

stateTransitionReason String

The reason the event source mapping is in its current state.

uuid String

The UUID of the created event source mapping.

functionArn string

The ARN of the Lambda function the event source mapping is sending events to. (Note: this is a computed value that differs from function_name above.)

id string

The provider-assigned unique ID for this managed resource.

lastModified string

The date this resource was last modified.

lastProcessingResult string

The result of the last AWS Lambda invocation of your Lambda function.

state string

The state of the event source mapping.

stateTransitionReason string

The reason the event source mapping is in its current state.

uuid string

The UUID of the created event source mapping.

function_arn str

The ARN of the Lambda function the event source mapping is sending events to. (Note: this is a computed value that differs from function_name above.)

id str

The provider-assigned unique ID for this managed resource.

last_modified str

The date this resource was last modified.

last_processing_result str

The result of the last AWS Lambda invocation of your Lambda function.

state str

The state of the event source mapping.

state_transition_reason str

The reason the event source mapping is in its current state.

uuid str

The UUID of the created event source mapping.

functionArn String

The ARN of the Lambda function the event source mapping is sending events to. (Note: this is a computed value that differs from function_name above.)

id String

The provider-assigned unique ID for this managed resource.

lastModified String

The date this resource was last modified.

lastProcessingResult String

The result of the last AWS Lambda invocation of your Lambda function.

state String

The state of the event source mapping.

stateTransitionReason String

The reason the event source mapping is in its current state.

uuid String

The UUID of the created event source mapping.

Look up an Existing EventSourceMapping Resource

Get an existing EventSourceMapping resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

public static get(name: string, id: Input<ID>, state?: EventSourceMappingState, opts?: CustomResourceOptions): EventSourceMapping
@staticmethod
def get(resource_name: str,
        id: str,
        opts: Optional[ResourceOptions] = None,
        batch_size: Optional[int] = None,
        bisect_batch_on_function_error: Optional[bool] = None,
        destination_config: Optional[_lambda_.EventSourceMappingDestinationConfigArgs] = None,
        enabled: Optional[bool] = None,
        event_source_arn: Optional[str] = None,
        filter_criteria: Optional[_lambda_.EventSourceMappingFilterCriteriaArgs] = None,
        function_arn: Optional[str] = None,
        function_name: Optional[str] = None,
        function_response_types: Optional[Sequence[str]] = None,
        last_modified: Optional[str] = None,
        last_processing_result: Optional[str] = None,
        maximum_batching_window_in_seconds: Optional[int] = None,
        maximum_record_age_in_seconds: Optional[int] = None,
        maximum_retry_attempts: Optional[int] = None,
        parallelization_factor: Optional[int] = None,
        queues: Optional[Sequence[str]] = None,
        self_managed_event_source: Optional[_lambda_.EventSourceMappingSelfManagedEventSourceArgs] = None,
        source_access_configurations: Optional[Sequence[_lambda_.EventSourceMappingSourceAccessConfigurationArgs]] = None,
        starting_position: Optional[str] = None,
        starting_position_timestamp: Optional[str] = None,
        state: Optional[str] = None,
        state_transition_reason: Optional[str] = None,
        topics: Optional[Sequence[str]] = None,
        tumbling_window_in_seconds: Optional[int] = None,
        uuid: Optional[str] = None) -> EventSourceMapping
func GetEventSourceMapping(ctx *Context, name string, id IDInput, state *EventSourceMappingState, opts ...ResourceOption) (*EventSourceMapping, error)
public static EventSourceMapping Get(string name, Input<string> id, EventSourceMappingState? state, CustomResourceOptions? opts = null)
public static EventSourceMapping get(String name, Output<String> id, EventSourceMappingState state, CustomResourceOptions options)
Resource lookup is not supported in YAML
name
The unique name of the resulting resource.
id
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
resource_name
The unique name of the resulting resource.
id
The unique provider ID of the resource to lookup.
name
The unique name of the resulting resource.
id
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
name
The unique name of the resulting resource.
id
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
name
The unique name of the resulting resource.
id
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
The following state arguments are supported:
BatchSize int

The largest number of records that Lambda will retrieve from your event source at the time of invocation. Defaults to 100 for DynamoDB, Kinesis, MQ and MSK, 10 for SQS.

  • bisect_batch_on_function_error: - (Optional) If the function returns an error, split the batch in two and retry. Only available for stream sources (DynamoDB and Kinesis). Defaults to false.
  • destination_config: - (Optional) An Amazon SQS queue or Amazon SNS topic destination for failed records. Only available for stream sources (DynamoDB and Kinesis). Detailed below.
BisectBatchOnFunctionError bool
DestinationConfig EventSourceMappingDestinationConfigArgs
Enabled bool

Determines if the mapping will be enabled on creation. Defaults to true.

EventSourceArn string

The event source ARN - this is required for Kinesis stream, DynamoDB stream, SQS queue, MQ broker or MSK cluster. It is incompatible with a Self Managed Kafka source.

FilterCriteria EventSourceMappingFilterCriteriaArgs

The criteria to use for event filtering Kinesis stream, DynamoDB stream, SQS queue event sources. Detailed below.

FunctionArn string

The ARN of the Lambda function the event source mapping is sending events to. (Note: this is a computed value that differs from function_name above.)

FunctionName string

The name or the ARN of the Lambda function that will be subscribing to events.

FunctionResponseTypes List<string>

A list of current response type enums applied to the event source mapping for AWS Lambda checkpointing. Only available for SQS and stream sources (DynamoDB and Kinesis). Valid values: ReportBatchItemFailures.

LastModified string

The date this resource was last modified.

LastProcessingResult string

The result of the last AWS Lambda invocation of your Lambda function.

MaximumBatchingWindowInSeconds int

The maximum amount of time to gather records before invoking the function, in seconds (between 0 and 300). Records will continue to buffer (or accumulate in the case of an SQS queue event source) until either maximum_batching_window_in_seconds expires or batch_size has been met. For streaming event sources, defaults to as soon as records are available in the stream. If the batch it reads from the stream/queue only has one record in it, Lambda only sends one record to the function. Only available for stream sources (DynamoDB and Kinesis) and SQS standard queues.

  • maximum_record_age_in_seconds: - (Optional) The maximum age of a record that Lambda sends to a function for processing. Only available for stream sources (DynamoDB and Kinesis). Must be either -1 (forever, and the default value) or between 60 and 604800 (inclusive).
  • maximum_retry_attempts: - (Optional) The maximum number of times to retry when the function returns an error. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of -1 (forever), maximum of 10000.
  • parallelization_factor: - (Optional) The number of batches to process from each shard concurrently. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of 1, maximum of 10.
MaximumRecordAgeInSeconds int
MaximumRetryAttempts int
ParallelizationFactor int
Queues List<string>

The name of the Amazon MQ broker destination queue to consume. Only available for MQ sources. A single queue name must be specified.

  • self_managed_event_source: - (Optional) For Self Managed Kafka sources, the location of the self managed cluster. If set, configuration must also include source_access_configuration. Detailed below.
  • source_access_configuration: (Optional) For Self Managed Kafka sources, the access configuration for the source. If set, configuration must also include self_managed_event_source. Detailed below.
SelfManagedEventSource EventSourceMappingSelfManagedEventSourceArgs
SourceAccessConfigurations List<EventSourceMappingSourceAccessConfigurationArgs>
StartingPosition string

The position in the stream where AWS Lambda should start reading. Must be one of AT_TIMESTAMP (Kinesis only), LATEST or TRIM_HORIZON if getting events from Kinesis, DynamoDB or MSK. Must not be provided if getting events from SQS. More information about these positions can be found in the AWS DynamoDB Streams API Reference and AWS Kinesis API Reference.

StartingPositionTimestamp string

A timestamp in RFC3339 format of the data record from which to start reading when using starting_position set to AT_TIMESTAMP. If a record with this exact timestamp does not exist, the next later record is chosen. If the timestamp is older than the current trim horizon, the oldest available record is chosen.

State string

The state of the event source mapping.

StateTransitionReason string

The reason the event source mapping is in its current state.

Topics List<string>

The name of the Kafka topics. Only available for MSK sources. A single topic name must be specified.

TumblingWindowInSeconds int

The duration in seconds of a processing window for AWS Lambda streaming analytics. The range is between 1 second up to 900 seconds. Only available for stream sources (DynamoDB and Kinesis).

Uuid string

The UUID of the created event source mapping.

BatchSize int

The largest number of records that Lambda will retrieve from your event source at the time of invocation. Defaults to 100 for DynamoDB, Kinesis, MQ and MSK, 10 for SQS.

  • bisect_batch_on_function_error: - (Optional) If the function returns an error, split the batch in two and retry. Only available for stream sources (DynamoDB and Kinesis). Defaults to false.
  • destination_config: - (Optional) An Amazon SQS queue or Amazon SNS topic destination for failed records. Only available for stream sources (DynamoDB and Kinesis). Detailed below.
BisectBatchOnFunctionError bool
DestinationConfig EventSourceMappingDestinationConfigArgs
Enabled bool

Determines if the mapping will be enabled on creation. Defaults to true.

EventSourceArn string

The event source ARN - this is required for Kinesis stream, DynamoDB stream, SQS queue, MQ broker or MSK cluster. It is incompatible with a Self Managed Kafka source.

FilterCriteria EventSourceMappingFilterCriteriaArgs

The criteria to use for event filtering Kinesis stream, DynamoDB stream, SQS queue event sources. Detailed below.

FunctionArn string

The ARN of the Lambda function the event source mapping is sending events to. (Note: this is a computed value that differs from function_name above.)

FunctionName string

The name or the ARN of the Lambda function that will be subscribing to events.

FunctionResponseTypes []string

A list of current response type enums applied to the event source mapping for AWS Lambda checkpointing. Only available for SQS and stream sources (DynamoDB and Kinesis). Valid values: ReportBatchItemFailures.

LastModified string

The date this resource was last modified.

LastProcessingResult string

The result of the last AWS Lambda invocation of your Lambda function.

MaximumBatchingWindowInSeconds int

The maximum amount of time to gather records before invoking the function, in seconds (between 0 and 300). Records will continue to buffer (or accumulate in the case of an SQS queue event source) until either maximum_batching_window_in_seconds expires or batch_size has been met. For streaming event sources, defaults to as soon as records are available in the stream. If the batch it reads from the stream/queue only has one record in it, Lambda only sends one record to the function. Only available for stream sources (DynamoDB and Kinesis) and SQS standard queues.

  • maximum_record_age_in_seconds: - (Optional) The maximum age of a record that Lambda sends to a function for processing. Only available for stream sources (DynamoDB and Kinesis). Must be either -1 (forever, and the default value) or between 60 and 604800 (inclusive).
  • maximum_retry_attempts: - (Optional) The maximum number of times to retry when the function returns an error. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of -1 (forever), maximum of 10000.
  • parallelization_factor: - (Optional) The number of batches to process from each shard concurrently. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of 1, maximum of 10.
MaximumRecordAgeInSeconds int
MaximumRetryAttempts int
ParallelizationFactor int
Queues []string

The name of the Amazon MQ broker destination queue to consume. Only available for MQ sources. A single queue name must be specified.

  • self_managed_event_source: - (Optional) For Self Managed Kafka sources, the location of the self managed cluster. If set, configuration must also include source_access_configuration. Detailed below.
  • source_access_configuration: (Optional) For Self Managed Kafka sources, the access configuration for the source. If set, configuration must also include self_managed_event_source. Detailed below.
SelfManagedEventSource EventSourceMappingSelfManagedEventSourceArgs
SourceAccessConfigurations []EventSourceMappingSourceAccessConfigurationArgs
StartingPosition string

The position in the stream where AWS Lambda should start reading. Must be one of AT_TIMESTAMP (Kinesis only), LATEST or TRIM_HORIZON if getting events from Kinesis, DynamoDB or MSK. Must not be provided if getting events from SQS. More information about these positions can be found in the AWS DynamoDB Streams API Reference and AWS Kinesis API Reference.

StartingPositionTimestamp string

A timestamp in RFC3339 format of the data record from which to start reading when using starting_position set to AT_TIMESTAMP. If a record with this exact timestamp does not exist, the next later record is chosen. If the timestamp is older than the current trim horizon, the oldest available record is chosen.

State string

The state of the event source mapping.

StateTransitionReason string

The reason the event source mapping is in its current state.

Topics []string

The name of the Kafka topics. Only available for MSK sources. A single topic name must be specified.

TumblingWindowInSeconds int

The duration in seconds of a processing window for AWS Lambda streaming analytics. The range is between 1 second up to 900 seconds. Only available for stream sources (DynamoDB and Kinesis).

Uuid string

The UUID of the created event source mapping.

batchSize Integer

The largest number of records that Lambda will retrieve from your event source at the time of invocation. Defaults to 100 for DynamoDB, Kinesis, MQ and MSK, 10 for SQS.

  • bisect_batch_on_function_error: - (Optional) If the function returns an error, split the batch in two and retry. Only available for stream sources (DynamoDB and Kinesis). Defaults to false.
  • destination_config: - (Optional) An Amazon SQS queue or Amazon SNS topic destination for failed records. Only available for stream sources (DynamoDB and Kinesis). Detailed below.
bisectBatchOnFunctionError Boolean
destinationConfig EventSourceMappingDestinationConfigArgs
enabled Boolean

Determines if the mapping will be enabled on creation. Defaults to true.

eventSourceArn String

The event source ARN - this is required for Kinesis stream, DynamoDB stream, SQS queue, MQ broker or MSK cluster. It is incompatible with a Self Managed Kafka source.

filterCriteria EventSourceMappingFilterCriteriaArgs

The criteria to use for event filtering Kinesis stream, DynamoDB stream, SQS queue event sources. Detailed below.

functionArn String

The ARN of the Lambda function the event source mapping is sending events to. (Note: this is a computed value that differs from function_name above.)

functionName String

The name or the ARN of the Lambda function that will be subscribing to events.

functionResponseTypes List<String>

A list of current response type enums applied to the event source mapping for AWS Lambda checkpointing. Only available for SQS and stream sources (DynamoDB and Kinesis). Valid values: ReportBatchItemFailures.

lastModified String

The date this resource was last modified.

lastProcessingResult String

The result of the last AWS Lambda invocation of your Lambda function.

maximumBatchingWindowInSeconds Integer

The maximum amount of time to gather records before invoking the function, in seconds (between 0 and 300). Records will continue to buffer (or accumulate in the case of an SQS queue event source) until either maximum_batching_window_in_seconds expires or batch_size has been met. For streaming event sources, defaults to as soon as records are available in the stream. If the batch it reads from the stream/queue only has one record in it, Lambda only sends one record to the function. Only available for stream sources (DynamoDB and Kinesis) and SQS standard queues.

  • maximum_record_age_in_seconds: - (Optional) The maximum age of a record that Lambda sends to a function for processing. Only available for stream sources (DynamoDB and Kinesis). Must be either -1 (forever, and the default value) or between 60 and 604800 (inclusive).
  • maximum_retry_attempts: - (Optional) The maximum number of times to retry when the function returns an error. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of -1 (forever), maximum of 10000.
  • parallelization_factor: - (Optional) The number of batches to process from each shard concurrently. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of 1, maximum of 10.
maximumRecordAgeInSeconds Integer
maximumRetryAttempts Integer
parallelizationFactor Integer
queues List<String>

The name of the Amazon MQ broker destination queue to consume. Only available for MQ sources. A single queue name must be specified.

  • self_managed_event_source: - (Optional) For Self Managed Kafka sources, the location of the self managed cluster. If set, configuration must also include source_access_configuration. Detailed below.
  • source_access_configuration: (Optional) For Self Managed Kafka sources, the access configuration for the source. If set, configuration must also include self_managed_event_source. Detailed below.
selfManagedEventSource EventSourceMappingSelfManagedEventSourceArgs
sourceAccessConfigurations List<EventSourceMappingSourceAccessConfigurationArgs>
startingPosition String

The position in the stream where AWS Lambda should start reading. Must be one of AT_TIMESTAMP (Kinesis only), LATEST or TRIM_HORIZON if getting events from Kinesis, DynamoDB or MSK. Must not be provided if getting events from SQS. More information about these positions can be found in the AWS DynamoDB Streams API Reference and AWS Kinesis API Reference.

startingPositionTimestamp String

A timestamp in RFC3339 format of the data record from which to start reading when using starting_position set to AT_TIMESTAMP. If a record with this exact timestamp does not exist, the next later record is chosen. If the timestamp is older than the current trim horizon, the oldest available record is chosen.

state String

The state of the event source mapping.

stateTransitionReason String

The reason the event source mapping is in its current state.

topics List<String>

The name of the Kafka topics. Only available for MSK sources. A single topic name must be specified.

tumblingWindowInSeconds Integer

The duration in seconds of a processing window for AWS Lambda streaming analytics. The range is between 1 second up to 900 seconds. Only available for stream sources (DynamoDB and Kinesis).

uuid String

The UUID of the created event source mapping.

batchSize number

The largest number of records that Lambda will retrieve from your event source at the time of invocation. Defaults to 100 for DynamoDB, Kinesis, MQ and MSK, 10 for SQS.

  • bisect_batch_on_function_error: - (Optional) If the function returns an error, split the batch in two and retry. Only available for stream sources (DynamoDB and Kinesis). Defaults to false.
  • destination_config: - (Optional) An Amazon SQS queue or Amazon SNS topic destination for failed records. Only available for stream sources (DynamoDB and Kinesis). Detailed below.
bisectBatchOnFunctionError boolean
destinationConfig EventSourceMappingDestinationConfigArgs
enabled boolean

Determines if the mapping will be enabled on creation. Defaults to true.

eventSourceArn string

The event source ARN - this is required for Kinesis stream, DynamoDB stream, SQS queue, MQ broker or MSK cluster. It is incompatible with a Self Managed Kafka source.

filterCriteria EventSourceMappingFilterCriteriaArgs

The criteria to use for event filtering Kinesis stream, DynamoDB stream, SQS queue event sources. Detailed below.

functionArn string

The ARN of the Lambda function the event source mapping is sending events to. (Note: this is a computed value that differs from function_name above.)

functionName string

The name or the ARN of the Lambda function that will be subscribing to events.

functionResponseTypes string[]

A list of current response type enums applied to the event source mapping for AWS Lambda checkpointing. Only available for SQS and stream sources (DynamoDB and Kinesis). Valid values: ReportBatchItemFailures.

lastModified string

The date this resource was last modified.

lastProcessingResult string

The result of the last AWS Lambda invocation of your Lambda function.

maximumBatchingWindowInSeconds number

The maximum amount of time to gather records before invoking the function, in seconds (between 0 and 300). Records will continue to buffer (or accumulate in the case of an SQS queue event source) until either maximum_batching_window_in_seconds expires or batch_size has been met. For streaming event sources, defaults to as soon as records are available in the stream. If the batch it reads from the stream/queue only has one record in it, Lambda only sends one record to the function. Only available for stream sources (DynamoDB and Kinesis) and SQS standard queues.

  • maximum_record_age_in_seconds: - (Optional) The maximum age of a record that Lambda sends to a function for processing. Only available for stream sources (DynamoDB and Kinesis). Must be either -1 (forever, and the default value) or between 60 and 604800 (inclusive).
  • maximum_retry_attempts: - (Optional) The maximum number of times to retry when the function returns an error. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of -1 (forever), maximum of 10000.
  • parallelization_factor: - (Optional) The number of batches to process from each shard concurrently. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of 1, maximum of 10.
maximumRecordAgeInSeconds number
maximumRetryAttempts number
parallelizationFactor number
queues string[]

The name of the Amazon MQ broker destination queue to consume. Only available for MQ sources. A single queue name must be specified.

  • self_managed_event_source: - (Optional) For Self Managed Kafka sources, the location of the self managed cluster. If set, configuration must also include source_access_configuration. Detailed below.
  • source_access_configuration: (Optional) For Self Managed Kafka sources, the access configuration for the source. If set, configuration must also include self_managed_event_source. Detailed below.
selfManagedEventSource EventSourceMappingSelfManagedEventSourceArgs
sourceAccessConfigurations EventSourceMappingSourceAccessConfigurationArgs[]
startingPosition string

The position in the stream where AWS Lambda should start reading. Must be one of AT_TIMESTAMP (Kinesis only), LATEST or TRIM_HORIZON if getting events from Kinesis, DynamoDB or MSK. Must not be provided if getting events from SQS. More information about these positions can be found in the AWS DynamoDB Streams API Reference and AWS Kinesis API Reference.

startingPositionTimestamp string

A timestamp in RFC3339 format of the data record from which to start reading when using starting_position set to AT_TIMESTAMP. If a record with this exact timestamp does not exist, the next later record is chosen. If the timestamp is older than the current trim horizon, the oldest available record is chosen.

state string

The state of the event source mapping.

stateTransitionReason string

The reason the event source mapping is in its current state.

topics string[]

The name of the Kafka topics. Only available for MSK sources. A single topic name must be specified.

tumblingWindowInSeconds number

The duration in seconds of a processing window for AWS Lambda streaming analytics. The range is between 1 second up to 900 seconds. Only available for stream sources (DynamoDB and Kinesis).

uuid string

The UUID of the created event source mapping.

batch_size int

The largest number of records that Lambda will retrieve from your event source at the time of invocation. Defaults to 100 for DynamoDB, Kinesis, MQ and MSK, 10 for SQS.

  • bisect_batch_on_function_error: - (Optional) If the function returns an error, split the batch in two and retry. Only available for stream sources (DynamoDB and Kinesis). Defaults to false.
  • destination_config: - (Optional) An Amazon SQS queue or Amazon SNS topic destination for failed records. Only available for stream sources (DynamoDB and Kinesis). Detailed below.
bisect_batch_on_function_error bool
destination_config EventSourceMappingDestinationConfigArgs
enabled bool

Determines if the mapping will be enabled on creation. Defaults to true.

event_source_arn str

The event source ARN - this is required for Kinesis stream, DynamoDB stream, SQS queue, MQ broker or MSK cluster. It is incompatible with a Self Managed Kafka source.

filter_criteria EventSourceMappingFilterCriteriaArgs

The criteria to use for event filtering Kinesis stream, DynamoDB stream, SQS queue event sources. Detailed below.

function_arn str

The ARN of the Lambda function the event source mapping is sending events to. (Note: this is a computed value that differs from function_name above.)

function_name str

The name or the ARN of the Lambda function that will be subscribing to events.

function_response_types Sequence[str]

A list of current response type enums applied to the event source mapping for AWS Lambda checkpointing. Only available for SQS and stream sources (DynamoDB and Kinesis). Valid values: ReportBatchItemFailures.

last_modified str

The date this resource was last modified.

last_processing_result str

The result of the last AWS Lambda invocation of your Lambda function.

maximum_batching_window_in_seconds int

The maximum amount of time to gather records before invoking the function, in seconds (between 0 and 300). Records will continue to buffer (or accumulate in the case of an SQS queue event source) until either maximum_batching_window_in_seconds expires or batch_size has been met. For streaming event sources, defaults to as soon as records are available in the stream. If the batch it reads from the stream/queue only has one record in it, Lambda only sends one record to the function. Only available for stream sources (DynamoDB and Kinesis) and SQS standard queues.

  • maximum_record_age_in_seconds: - (Optional) The maximum age of a record that Lambda sends to a function for processing. Only available for stream sources (DynamoDB and Kinesis). Must be either -1 (forever, and the default value) or between 60 and 604800 (inclusive).
  • maximum_retry_attempts: - (Optional) The maximum number of times to retry when the function returns an error. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of -1 (forever), maximum of 10000.
  • parallelization_factor: - (Optional) The number of batches to process from each shard concurrently. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of 1, maximum of 10.
maximum_record_age_in_seconds int
maximum_retry_attempts int
parallelization_factor int
queues Sequence[str]

The name of the Amazon MQ broker destination queue to consume. Only available for MQ sources. A single queue name must be specified.

  • self_managed_event_source: - (Optional) For Self Managed Kafka sources, the location of the self managed cluster. If set, configuration must also include source_access_configuration. Detailed below.
  • source_access_configuration: - (Optional) For Self Managed Kafka sources, the access configuration for the source. If set, configuration must also include self_managed_event_source. Detailed below.
self_managed_event_source EventSourceMappingSelfManagedEventSourceArgs
source_access_configurations Sequence[EventSourceMappingSourceAccessConfigurationArgs]
starting_position str

The position in the stream where AWS Lambda should start reading. Must be one of AT_TIMESTAMP (Kinesis only), LATEST or TRIM_HORIZON if getting events from Kinesis, DynamoDB or MSK. Must not be provided if getting events from SQS. More information about these positions can be found in the AWS DynamoDB Streams API Reference and AWS Kinesis API Reference.

starting_position_timestamp str

A timestamp in RFC3339 format of the data record which to start reading when using starting_position set to AT_TIMESTAMP. If a record with this exact timestamp does not exist, the next later record is chosen. If the timestamp is older than the current trim horizon, the oldest available record is chosen.

state str

The state of the event source mapping.

state_transition_reason str

The reason the event source mapping is in its current state.

topics Sequence[str]

The names of the Kafka topics. Only available for MSK sources. A single topic name must be specified.

tumbling_window_in_seconds int

The duration in seconds of a processing window for AWS Lambda streaming analytics. The range is between 1 second up to 900 seconds. Only available for stream sources (DynamoDB and Kinesis).

uuid str

The UUID of the created event source mapping.

batchSize Number

The largest number of records that Lambda will retrieve from your event source at the time of invocation. Defaults to 100 for DynamoDB, Kinesis, MQ and MSK, 10 for SQS.

  • bisect_batch_on_function_error: - (Optional) If the function returns an error, split the batch in two and retry. Only available for stream sources (DynamoDB and Kinesis). Defaults to false.
  • destination_config: - (Optional) An Amazon SQS queue or Amazon SNS topic destination for failed records. Only available for stream sources (DynamoDB and Kinesis). Detailed below.
bisectBatchOnFunctionError Boolean
destinationConfig Property Map
enabled Boolean

Determines if the mapping will be enabled on creation. Defaults to true.

eventSourceArn String

The event source ARN - this is required for Kinesis stream, DynamoDB stream, SQS queue, MQ broker or MSK cluster. It is incompatible with a Self Managed Kafka source.

filterCriteria Property Map

The criteria to use for event filtering Kinesis stream, DynamoDB stream, SQS queue event sources. Detailed below.

functionArn String

The ARN of the Lambda function the event source mapping is sending events to. (Note: this is a computed value that differs from function_name above.)

functionName String

The name or the ARN of the Lambda function that will be subscribing to events.

functionResponseTypes List<String>

A list of current response type enums applied to the event source mapping for AWS Lambda checkpointing. Only available for SQS and stream sources (DynamoDB and Kinesis). Valid values: ReportBatchItemFailures.

lastModified String

The date this resource was last modified.

lastProcessingResult String

The result of the last AWS Lambda invocation of your Lambda function.

maximumBatchingWindowInSeconds Number

The maximum amount of time to gather records before invoking the function, in seconds (between 0 and 300). Records will continue to buffer (or accumulate in the case of an SQS queue event source) until either maximum_batching_window_in_seconds expires or batch_size has been met. For streaming event sources, defaults to as soon as records are available in the stream. If the batch it reads from the stream/queue only has one record in it, Lambda only sends one record to the function. Only available for stream sources (DynamoDB and Kinesis) and SQS standard queues.

  • maximum_record_age_in_seconds: - (Optional) The maximum age of a record that Lambda sends to a function for processing. Only available for stream sources (DynamoDB and Kinesis). Must be either -1 (forever, and the default value) or between 60 and 604800 (inclusive).
  • maximum_retry_attempts: - (Optional) The maximum number of times to retry when the function returns an error. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of -1 (forever), maximum of 10000.
  • parallelization_factor: - (Optional) The number of batches to process from each shard concurrently. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of 1, maximum of 10.
maximumRecordAgeInSeconds Number
maximumRetryAttempts Number
parallelizationFactor Number
queues List<String>

The name of the Amazon MQ broker destination queue to consume. Only available for MQ sources. A single queue name must be specified.

  • self_managed_event_source: - (Optional) For Self Managed Kafka sources, the location of the self managed cluster. If set, configuration must also include source_access_configuration. Detailed below.
  • source_access_configuration: - (Optional) For Self Managed Kafka sources, the access configuration for the source. If set, configuration must also include self_managed_event_source. Detailed below.
selfManagedEventSource Property Map
sourceAccessConfigurations List<Property Map>
startingPosition String

The position in the stream where AWS Lambda should start reading. Must be one of AT_TIMESTAMP (Kinesis only), LATEST or TRIM_HORIZON if getting events from Kinesis, DynamoDB or MSK. Must not be provided if getting events from SQS. More information about these positions can be found in the AWS DynamoDB Streams API Reference and AWS Kinesis API Reference.

startingPositionTimestamp String

A timestamp in RFC3339 format of the data record which to start reading when using starting_position set to AT_TIMESTAMP. If a record with this exact timestamp does not exist, the next later record is chosen. If the timestamp is older than the current trim horizon, the oldest available record is chosen.

state String

The state of the event source mapping.

stateTransitionReason String

The reason the event source mapping is in its current state.

topics List<String>

The names of the Kafka topics. Only available for MSK sources. A single topic name must be specified.

tumblingWindowInSeconds Number

The duration in seconds of a processing window for AWS Lambda streaming analytics. The range is between 1 second up to 900 seconds. Only available for stream sources (DynamoDB and Kinesis).

uuid String

The UUID of the created event source mapping.

Supporting Types

EventSourceMappingDestinationConfig

OnFailure EventSourceMappingDestinationConfigOnFailure

The destination configuration for failed invocations. Detailed below.

OnFailure EventSourceMappingDestinationConfigOnFailure

The destination configuration for failed invocations. Detailed below.

onFailure EventSourceMappingDestinationConfigOnFailure

The destination configuration for failed invocations. Detailed below.

onFailure EventSourceMappingDestinationConfigOnFailure

The destination configuration for failed invocations. Detailed below.

on_failure EventSourceMappingDestinationConfigOnFailure

The destination configuration for failed invocations. Detailed below.

onFailure Property Map

The destination configuration for failed invocations. Detailed below.

EventSourceMappingDestinationConfigOnFailure

DestinationArn string

The Amazon Resource Name (ARN) of the destination resource.

DestinationArn string

The Amazon Resource Name (ARN) of the destination resource.

destinationArn String

The Amazon Resource Name (ARN) of the destination resource.

destinationArn string

The Amazon Resource Name (ARN) of the destination resource.

destination_arn str

The Amazon Resource Name (ARN) of the destination resource.

destinationArn String

The Amazon Resource Name (ARN) of the destination resource.

EventSourceMappingFilterCriteria

Filters List<EventSourceMappingFilterCriteriaFilter>

A set of up to 5 filter. If an event satisfies at least one, Lambda sends the event to the function or adds it to the next batch. Detailed below.

Filters []EventSourceMappingFilterCriteriaFilter

A set of up to 5 filter. If an event satisfies at least one, Lambda sends the event to the function or adds it to the next batch. Detailed below.

filters List<EventSourceMappingFilterCriteriaFilter>

A set of up to 5 filter. If an event satisfies at least one, Lambda sends the event to the function or adds it to the next batch. Detailed below.

filters EventSourceMappingFilterCriteriaFilter[]

A set of up to 5 filter. If an event satisfies at least one, Lambda sends the event to the function or adds it to the next batch. Detailed below.

filters Sequence[EventSourceMappingFilterCriteriaFilter]

A set of up to 5 filter. If an event satisfies at least one, Lambda sends the event to the function or adds it to the next batch. Detailed below.

filters List<Property Map>

A set of up to 5 filter. If an event satisfies at least one, Lambda sends the event to the function or adds it to the next batch. Detailed below.

EventSourceMappingFilterCriteriaFilter

Pattern string

A filter pattern up to 4096 characters. See Filter Rule Syntax.

Pattern string

A filter pattern up to 4096 characters. See Filter Rule Syntax.

pattern String

A filter pattern up to 4096 characters. See Filter Rule Syntax.

pattern string

A filter pattern up to 4096 characters. See Filter Rule Syntax.

pattern str

A filter pattern up to 4096 characters. See Filter Rule Syntax.

pattern String

A filter pattern up to 4096 characters. See Filter Rule Syntax.

EventSourceMappingSelfManagedEventSource

Endpoints Dictionary<string, string>

A map of endpoints for the self managed source. For Kafka self-managed sources, the key should be KAFKA_BOOTSTRAP_SERVERS and the value should be a string with a comma separated list of broker endpoints.

Endpoints map[string]string

A map of endpoints for the self managed source. For Kafka self-managed sources, the key should be KAFKA_BOOTSTRAP_SERVERS and the value should be a string with a comma separated list of broker endpoints.

endpoints Map<String,String>

A map of endpoints for the self managed source. For Kafka self-managed sources, the key should be KAFKA_BOOTSTRAP_SERVERS and the value should be a string with a comma separated list of broker endpoints.

endpoints {[key: string]: string}

A map of endpoints for the self managed source. For Kafka self-managed sources, the key should be KAFKA_BOOTSTRAP_SERVERS and the value should be a string with a comma separated list of broker endpoints.

endpoints Mapping[str, str]

A map of endpoints for the self managed source. For Kafka self-managed sources, the key should be KAFKA_BOOTSTRAP_SERVERS and the value should be a string with a comma separated list of broker endpoints.

endpoints Map<String>

A map of endpoints for the self managed source. For Kafka self-managed sources, the key should be KAFKA_BOOTSTRAP_SERVERS and the value should be a string with a comma separated list of broker endpoints.

EventSourceMappingSourceAccessConfiguration

Type string

The type of this configuration. For Self Managed Kafka you will need to supply blocks for type VPC_SUBNET and VPC_SECURITY_GROUP.

Uri string

The URI for this configuration. For type VPC_SUBNET the value should be subnet:subnet_id where subnet_id is the value you would find in an aws.ec2.Subnet resource's id attribute. For type VPC_SECURITY_GROUP the value should be security_group:security_group_id where security_group_id is the value you would find in an aws.ec2.SecurityGroup resource's id attribute.

Type string

The type of this configuration. For Self Managed Kafka you will need to supply blocks for type VPC_SUBNET and VPC_SECURITY_GROUP.

Uri string

The URI for this configuration. For type VPC_SUBNET the value should be subnet:subnet_id where subnet_id is the value you would find in an aws.ec2.Subnet resource's id attribute. For type VPC_SECURITY_GROUP the value should be security_group:security_group_id where security_group_id is the value you would find in an aws.ec2.SecurityGroup resource's id attribute.

type String

The type of this configuration. For Self Managed Kafka you will need to supply blocks for type VPC_SUBNET and VPC_SECURITY_GROUP.

uri String

The URI for this configuration. For type VPC_SUBNET the value should be subnet:subnet_id where subnet_id is the value you would find in an aws.ec2.Subnet resource's id attribute. For type VPC_SECURITY_GROUP the value should be security_group:security_group_id where security_group_id is the value you would find in an aws.ec2.SecurityGroup resource's id attribute.

type string

The type of this configuration. For Self Managed Kafka you will need to supply blocks for type VPC_SUBNET and VPC_SECURITY_GROUP.

uri string

The URI for this configuration. For type VPC_SUBNET the value should be subnet:subnet_id where subnet_id is the value you would find in an aws.ec2.Subnet resource's id attribute. For type VPC_SECURITY_GROUP the value should be security_group:security_group_id where security_group_id is the value you would find in an aws.ec2.SecurityGroup resource's id attribute.

type str

The type of this configuration. For Self Managed Kafka you will need to supply blocks for type VPC_SUBNET and VPC_SECURITY_GROUP.

uri str

The URI for this configuration. For type VPC_SUBNET the value should be subnet:subnet_id where subnet_id is the value you would find in an aws.ec2.Subnet resource's id attribute. For type VPC_SECURITY_GROUP the value should be security_group:security_group_id where security_group_id is the value you would find in an aws.ec2.SecurityGroup resource's id attribute.

type String

The type of this configuration. For Self Managed Kafka you will need to supply blocks for type VPC_SUBNET and VPC_SECURITY_GROUP.

uri String

The URI for this configuration. For type VPC_SUBNET the value should be subnet:subnet_id where subnet_id is the value you would find in an aws.ec2.Subnet resource's id attribute. For type VPC_SECURITY_GROUP the value should be security_group:security_group_id where security_group_id is the value you would find in an aws.ec2.SecurityGroup resource's id attribute.

Import

Lambda event source mappings can be imported using the UUID (event source mapping identifier), e.g.,

 $ pulumi import aws:lambda/eventSourceMapping:EventSourceMapping event_source_mapping 12345kxodurf3443

Package Details

Repository
https://github.com/pulumi/pulumi-aws
License
Apache-2.0
Notes

This Pulumi package is based on the aws Terraform Provider.