aws.lambda.EventSourceMapping
Provides a Lambda event source mapping. This allows Lambda functions to get events from Kinesis, DynamoDB, SQS, Amazon MQ and Managed Streaming for Apache Kafka (MSK).
For information about Lambda and how to use it, see What is AWS Lambda?. For information about event source mappings, see CreateEventSourceMapping in the API docs.
Example Usage
DynamoDB
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aws = Pulumi.Aws;
return await Deployment.RunAsync(() =>
{
var example = new Aws.Lambda.EventSourceMapping("example", new()
{
EventSourceArn = aws_dynamodb_table.Example.Stream_arn,
FunctionName = aws_lambda_function.Example.Arn,
StartingPosition = "LATEST",
});
});
package main
import (
"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/lambda"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := lambda.NewEventSourceMapping(ctx, "example", &lambda.EventSourceMappingArgs{
EventSourceArn: pulumi.Any(aws_dynamodb_table.Example.Stream_arn),
FunctionName: pulumi.Any(aws_lambda_function.Example.Arn),
StartingPosition: pulumi.String("LATEST"),
})
if err != nil {
return err
}
return nil
})
}
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.lambda.EventSourceMapping;
import com.pulumi.aws.lambda.EventSourceMappingArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var example = new EventSourceMapping("example", EventSourceMappingArgs.builder()
.eventSourceArn(aws_dynamodb_table.example().stream_arn())
.functionName(aws_lambda_function.example().arn())
.startingPosition("LATEST")
.build());
}
}
import pulumi
import pulumi_aws as aws
example = aws.lambda_.EventSourceMapping("example",
event_source_arn=aws_dynamodb_table["example"]["stream_arn"],
function_name=aws_lambda_function["example"]["arn"],
starting_position="LATEST")
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
const example = new aws.lambda.EventSourceMapping("example", {
eventSourceArn: aws_dynamodb_table.example.stream_arn,
functionName: aws_lambda_function.example.arn,
startingPosition: "LATEST",
});
resources:
example:
type: aws:lambda:EventSourceMapping
properties:
eventSourceArn: ${aws_dynamodb_table.example.stream_arn}
functionName: ${aws_lambda_function.example.arn}
startingPosition: LATEST
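Event filtering also works for DynamoDB streams, not just SQS. The following is a minimal TypeScript sketch, using placeholder ARNs rather than real resources, that forwards only INSERT records to the function; the pattern syntax follows Lambda event filtering.

import * as aws from "@pulumi/aws";

// Placeholder ARNs: substitute the stream ARN of your own table and your function's ARN.
const tableStreamArn = "arn:aws:dynamodb:us-east-1:111122223333:table/example/stream/2024-01-01T00:00:00.000";
const functionArn = "arn:aws:lambda:us-east-1:111122223333:function:example";

// Only INSERT events from the stream are delivered; other record types are filtered out by Lambda.
const insertsOnly = new aws.lambda.EventSourceMapping("inserts-only", {
    eventSourceArn: tableStreamArn,
    functionName: functionArn,
    startingPosition: "LATEST",
    filterCriteria: {
        filters: [{
            pattern: JSON.stringify({ eventName: ["INSERT"] }),
        }],
    },
});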
Kinesis
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aws = Pulumi.Aws;
return await Deployment.RunAsync(() =>
{
var example = new Aws.Lambda.EventSourceMapping("example", new()
{
EventSourceArn = aws_kinesis_stream.Example.Arn,
FunctionName = aws_lambda_function.Example.Arn,
StartingPosition = "LATEST",
});
});
package main
import (
"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/lambda"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := lambda.NewEventSourceMapping(ctx, "example", &lambda.EventSourceMappingArgs{
EventSourceArn: pulumi.Any(aws_kinesis_stream.Example.Arn),
FunctionName: pulumi.Any(aws_lambda_function.Example.Arn),
StartingPosition: pulumi.String("LATEST"),
})
if err != nil {
return err
}
return nil
})
}
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.lambda.EventSourceMapping;
import com.pulumi.aws.lambda.EventSourceMappingArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var example = new EventSourceMapping("example", EventSourceMappingArgs.builder()
.eventSourceArn(aws_kinesis_stream.example().arn())
.functionName(aws_lambda_function.example().arn())
.startingPosition("LATEST")
.build());
}
}
import pulumi
import pulumi_aws as aws
example = aws.lambda_.EventSourceMapping("example",
event_source_arn=aws_kinesis_stream["example"]["arn"],
function_name=aws_lambda_function["example"]["arn"],
starting_position="LATEST")
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
const example = new aws.lambda.EventSourceMapping("example", {
eventSourceArn: aws_kinesis_stream.example.arn,
functionName: aws_lambda_function.example.arn,
startingPosition: "LATEST",
});
resources:
example:
type: aws:lambda:EventSourceMapping
properties:
eventSourceArn: ${aws_kinesis_stream.example.arn}
functionName: ${aws_lambda_function.example.arn}
startingPosition: LATEST
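For stream sources such as Kinesis you can also tune retry behavior and route failed batches to an on-failure destination (see bisect_batch_on_function_error, maximum_retry_attempts, maximum_record_age_in_seconds, parallelization_factor and destination_config below). A minimal TypeScript sketch, using placeholder ARNs for an existing stream and function:

import * as aws from "@pulumi/aws";

// Placeholder ARNs for an existing Kinesis stream and Lambda function.
const streamArn = "arn:aws:kinesis:us-east-1:111122223333:stream/example";
const functionArn = "arn:aws:lambda:us-east-1:111122223333:function:example";

// Queue that receives the records Lambda gives up on.
const deadLetter = new aws.sqs.Queue("esm-dead-letter");

const example = new aws.lambda.EventSourceMapping("example", {
    eventSourceArn: streamArn,
    functionName: functionArn,
    startingPosition: "LATEST",
    bisectBatchOnFunctionError: true, // split a failing batch in two and retry the halves
    maximumRetryAttempts: 5,          // stop retrying a batch after five attempts
    maximumRecordAgeInSeconds: 3600,  // discard records older than one hour
    parallelizationFactor: 2,         // process two batches per shard concurrently
    destinationConfig: {
        onFailure: {
            destinationArn: deadLetter.arn,
        },
    },
});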
Managed Streaming for Apache Kafka (MSK)
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aws = Pulumi.Aws;
return await Deployment.RunAsync(() =>
{
var example = new Aws.Lambda.EventSourceMapping("example", new()
{
EventSourceArn = aws_msk_cluster.Example.Arn,
FunctionName = aws_lambda_function.Example.Arn,
Topics = new[]
{
"Example",
},
StartingPosition = "TRIM_HORIZON",
});
});
package main
import (
"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/lambda"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := lambda.NewEventSourceMapping(ctx, "example", &lambda.EventSourceMappingArgs{
EventSourceArn: pulumi.Any(aws_msk_cluster.Example.Arn),
FunctionName: pulumi.Any(aws_lambda_function.Example.Arn),
Topics: pulumi.StringArray{
pulumi.String("Example"),
},
StartingPosition: pulumi.String("TRIM_HORIZON"),
})
if err != nil {
return err
}
return nil
})
}
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.lambda.EventSourceMapping;
import com.pulumi.aws.lambda.EventSourceMappingArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var example = new EventSourceMapping("example", EventSourceMappingArgs.builder()
.eventSourceArn(aws_msk_cluster.example().arn())
.functionName(aws_lambda_function.example().arn())
.topics("Example")
.startingPosition("TRIM_HORIZON")
.build());
}
}
import pulumi
import pulumi_aws as aws
example = aws.lambda_.EventSourceMapping("example",
event_source_arn=aws_msk_cluster["example"]["arn"],
function_name=aws_lambda_function["example"]["arn"],
topics=["Example"],
starting_position="TRIM_HORIZON")
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
const example = new aws.lambda.EventSourceMapping("example", {
eventSourceArn: aws_msk_cluster.example.arn,
functionName: aws_lambda_function.example.arn,
topics: ["Example"],
startingPosition: "TRIM_HORIZON",
});
resources:
example:
type: aws:lambda:EventSourceMapping
properties:
eventSourceArn: ${aws_msk_cluster.example.arn}
functionName: ${aws_lambda_function.example.arn}
topics:
- Example
startingPosition: TRIM_HORIZON
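If the poller should join a specific Kafka consumer group, the optional amazon_managed_kafka_event_source_config block (documented below) can set it. A minimal TypeScript sketch with placeholder ARNs; the consumer group id is illustrative:

import * as aws from "@pulumi/aws";

// Placeholder ARNs for an existing MSK cluster and Lambda function.
const clusterArn = "arn:aws:kafka:us-east-1:111122223333:cluster/example/11111111-2222-3333-4444-555555555555-1";
const functionArn = "arn:aws:lambda:us-east-1:111122223333:function:example";

const example = new aws.lambda.EventSourceMapping("example", {
    eventSourceArn: clusterArn,
    functionName: functionArn,
    topics: ["Example"],
    startingPosition: "TRIM_HORIZON",
    amazonManagedKafkaEventSourceConfig: {
        consumerGroupId: "example-consumer-group", // reuse or pin a Kafka consumer group id
    },
});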
Self Managed Apache Kafka
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aws = Pulumi.Aws;
return await Deployment.RunAsync(() =>
{
var example = new Aws.Lambda.EventSourceMapping("example", new()
{
FunctionName = aws_lambda_function.Example.Arn,
Topics = new[]
{
"Example",
},
StartingPosition = "TRIM_HORIZON",
SelfManagedEventSource = new Aws.Lambda.Inputs.EventSourceMappingSelfManagedEventSourceArgs
{
Endpoints =
{
{ "KAFKA_BOOTSTRAP_SERVERS", "kafka1.example.com:9092,kafka2.example.com:9092" },
},
},
SourceAccessConfigurations = new[]
{
new Aws.Lambda.Inputs.EventSourceMappingSourceAccessConfigurationArgs
{
Type = "VPC_SUBNET",
Uri = "subnet:subnet-example1",
},
new Aws.Lambda.Inputs.EventSourceMappingSourceAccessConfigurationArgs
{
Type = "VPC_SUBNET",
Uri = "subnet:subnet-example2",
},
new Aws.Lambda.Inputs.EventSourceMappingSourceAccessConfigurationArgs
{
Type = "VPC_SECURITY_GROUP",
Uri = "security_group:sg-example",
},
},
});
});
package main
import (
"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/lambda"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := lambda.NewEventSourceMapping(ctx, "example", &lambda.EventSourceMappingArgs{
FunctionName: pulumi.Any(aws_lambda_function.Example.Arn),
Topics: pulumi.StringArray{
pulumi.String("Example"),
},
StartingPosition: pulumi.String("TRIM_HORIZON"),
SelfManagedEventSource: &lambda.EventSourceMappingSelfManagedEventSourceArgs{
Endpoints: pulumi.StringMap{
"KAFKA_BOOTSTRAP_SERVERS": pulumi.String("kafka1.example.com:9092,kafka2.example.com:9092"),
},
},
SourceAccessConfigurations: lambda.EventSourceMappingSourceAccessConfigurationArray{
&lambda.EventSourceMappingSourceAccessConfigurationArgs{
Type: pulumi.String("VPC_SUBNET"),
Uri: pulumi.String("subnet:subnet-example1"),
},
&lambda.EventSourceMappingSourceAccessConfigurationArgs{
Type: pulumi.String("VPC_SUBNET"),
Uri: pulumi.String("subnet:subnet-example2"),
},
&lambda.EventSourceMappingSourceAccessConfigurationArgs{
Type: pulumi.String("VPC_SECURITY_GROUP"),
Uri: pulumi.String("security_group:sg-example"),
},
},
})
if err != nil {
return err
}
return nil
})
}
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.lambda.EventSourceMapping;
import com.pulumi.aws.lambda.EventSourceMappingArgs;
import com.pulumi.aws.lambda.inputs.EventSourceMappingSelfManagedEventSourceArgs;
import com.pulumi.aws.lambda.inputs.EventSourceMappingSourceAccessConfigurationArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var example = new EventSourceMapping("example", EventSourceMappingArgs.builder()
.functionName(aws_lambda_function.example().arn())
.topics("Example")
.startingPosition("TRIM_HORIZON")
.selfManagedEventSource(EventSourceMappingSelfManagedEventSourceArgs.builder()
.endpoints(Map.of("KAFKA_BOOTSTRAP_SERVERS", "kafka1.example.com:9092,kafka2.example.com:9092"))
.build())
.sourceAccessConfigurations(
EventSourceMappingSourceAccessConfigurationArgs.builder()
.type("VPC_SUBNET")
.uri("subnet:subnet-example1")
.build(),
EventSourceMappingSourceAccessConfigurationArgs.builder()
.type("VPC_SUBNET")
.uri("subnet:subnet-example2")
.build(),
EventSourceMappingSourceAccessConfigurationArgs.builder()
.type("VPC_SECURITY_GROUP")
.uri("security_group:sg-example")
.build())
.build());
}
}
import pulumi
import pulumi_aws as aws
example = aws.lambda_.EventSourceMapping("example",
function_name=aws_lambda_function["example"]["arn"],
topics=["Example"],
starting_position="TRIM_HORIZON",
self_managed_event_source=aws.lambda_.EventSourceMappingSelfManagedEventSourceArgs(
endpoints={
"KAFKA_BOOTSTRAP_SERVERS": "kafka1.example.com:9092,kafka2.example.com:9092",
},
),
source_access_configurations=[
aws.lambda_.EventSourceMappingSourceAccessConfigurationArgs(
type="VPC_SUBNET",
uri="subnet:subnet-example1",
),
aws.lambda_.EventSourceMappingSourceAccessConfigurationArgs(
type="VPC_SUBNET",
uri="subnet:subnet-example2",
),
aws.lambda_.EventSourceMappingSourceAccessConfigurationArgs(
type="VPC_SECURITY_GROUP",
uri="security_group:sg-example",
),
])
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
const example = new aws.lambda.EventSourceMapping("example", {
functionName: aws_lambda_function.example.arn,
topics: ["Example"],
startingPosition: "TRIM_HORIZON",
selfManagedEventSource: {
endpoints: {
KAFKA_BOOTSTRAP_SERVERS: "kafka1.example.com:9092,kafka2.example.com:9092",
},
},
sourceAccessConfigurations: [
{
type: "VPC_SUBNET",
uri: "subnet:subnet-example1",
},
{
type: "VPC_SUBNET",
uri: "subnet:subnet-example2",
},
{
type: "VPC_SECURITY_GROUP",
uri: "security_group:sg-example",
},
],
});
resources:
example:
type: aws:lambda:EventSourceMapping
properties:
functionName: ${aws_lambda_function.example.arn}
topics:
- Example
startingPosition: TRIM_HORIZON
selfManagedEventSource:
endpoints:
KAFKA_BOOTSTRAP_SERVERS: kafka1.example.com:9092,kafka2.example.com:9092
sourceAccessConfigurations:
- type: VPC_SUBNET
uri: subnet:subnet-example1
- type: VPC_SUBNET
uri: subnet:subnet-example2
- type: VPC_SECURITY_GROUP
uri: security_group:sg-example
SQS
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aws = Pulumi.Aws;
return await Deployment.RunAsync(() =>
{
var example = new Aws.Lambda.EventSourceMapping("example", new()
{
EventSourceArn = aws_sqs_queue.Sqs_queue_test.Arn,
FunctionName = aws_lambda_function.Example.Arn,
});
});
package main
import (
"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/lambda"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := lambda.NewEventSourceMapping(ctx, "example", &lambda.EventSourceMappingArgs{
EventSourceArn: pulumi.Any(aws_sqs_queue.Sqs_queue_test.Arn),
FunctionName: pulumi.Any(aws_lambda_function.Example.Arn),
})
if err != nil {
return err
}
return nil
})
}
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.lambda.EventSourceMapping;
import com.pulumi.aws.lambda.EventSourceMappingArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var example = new EventSourceMapping("example", EventSourceMappingArgs.builder()
.eventSourceArn(aws_sqs_queue.sqs_queue_test().arn())
.functionName(aws_lambda_function.example().arn())
.build());
}
}
import pulumi
import pulumi_aws as aws
example = aws.lambda_.EventSourceMapping("example",
event_source_arn=aws_sqs_queue["sqs_queue_test"]["arn"],
function_name=aws_lambda_function["example"]["arn"])
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
const example = new aws.lambda.EventSourceMapping("example", {
eventSourceArn: aws_sqs_queue.sqs_queue_test.arn,
functionName: aws_lambda_function.example.arn,
});
resources:
example:
type: aws:lambda:EventSourceMapping
properties:
eventSourceArn: ${aws_sqs_queue.sqs_queue_test.arn}
functionName: ${aws_lambda_function.example.arn}
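SQS sources also support batching windows, partial batch responses, and a concurrency cap via scaling_config (all documented below). A minimal TypeScript sketch with placeholder ARNs for an existing queue and function:

import * as aws from "@pulumi/aws";

// Placeholder ARNs for an existing SQS queue and Lambda function.
const queueArn = "arn:aws:sqs:us-east-1:111122223333:example-queue";
const functionArn = "arn:aws:lambda:us-east-1:111122223333:function:example";

const example = new aws.lambda.EventSourceMapping("example", {
    eventSourceArn: queueArn,
    functionName: functionArn,
    batchSize: 25,                      // up to 25 messages per invocation
    maximumBatchingWindowInSeconds: 10, // or wait at most 10 seconds to fill a batch
    functionResponseTypes: ["ReportBatchItemFailures"], // let the function report partial failures
    scalingConfig: {
        maximumConcurrency: 20,         // cap concurrent invocations driven by this queue
    },
});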
SQS with event filter
using System.Collections.Generic;
using System.Linq;
using System.Text.Json;
using Pulumi;
using Aws = Pulumi.Aws;
return await Deployment.RunAsync(() =>
{
var example = new Aws.Lambda.EventSourceMapping("example", new()
{
EventSourceArn = aws_sqs_queue.Sqs_queue_test.Arn,
FunctionName = aws_lambda_function.Example.Arn,
FilterCriteria = new Aws.Lambda.Inputs.EventSourceMappingFilterCriteriaArgs
{
Filters = new[]
{
new Aws.Lambda.Inputs.EventSourceMappingFilterCriteriaFilterArgs
{
Pattern = JsonSerializer.Serialize(new Dictionary<string, object?>
{
["body"] = new Dictionary<string, object?>
{
["Temperature"] = new[]
{
new Dictionary<string, object?>
{
["numeric"] = new[]
{
">",
0,
"<=",
100,
},
},
},
["Location"] = new[]
{
"New York",
},
},
}),
},
},
},
});
});
package main
import (
"encoding/json"
"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/lambda"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
tmpJSON0, err := json.Marshal(map[string]interface{}{
"body": map[string]interface{}{
"Temperature": []map[string]interface{}{
map[string]interface{}{
"numeric": []interface{}{
">",
0,
"<=",
100,
},
},
},
"Location": []string{
"New York",
},
},
})
if err != nil {
return err
}
json0 := string(tmpJSON0)
_, err = lambda.NewEventSourceMapping(ctx, "example", &lambda.EventSourceMappingArgs{
EventSourceArn: pulumi.Any(aws_sqs_queue.Sqs_queue_test.Arn),
FunctionName: pulumi.Any(aws_lambda_function.Example.Arn),
FilterCriteria: &lambda.EventSourceMappingFilterCriteriaArgs{
Filters: lambda.EventSourceMappingFilterCriteriaFilterArray{
&lambda.EventSourceMappingFilterCriteriaFilterArgs{
Pattern: pulumi.String(json0),
},
},
},
})
if err != nil {
return err
}
return nil
})
}
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.lambda.EventSourceMapping;
import com.pulumi.aws.lambda.EventSourceMappingArgs;
import com.pulumi.aws.lambda.inputs.EventSourceMappingFilterCriteriaArgs;
import com.pulumi.aws.lambda.inputs.EventSourceMappingFilterCriteriaFilterArgs;
import static com.pulumi.codegen.internal.Serialization.*;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var example = new EventSourceMapping("example", EventSourceMappingArgs.builder()
.eventSourceArn(aws_sqs_queue.sqs_queue_test().arn())
.functionName(aws_lambda_function.example().arn())
.filterCriteria(EventSourceMappingFilterCriteriaArgs.builder()
.filters(EventSourceMappingFilterCriteriaFilterArgs.builder()
.pattern(serializeJson(
jsonObject(
jsonProperty("body", jsonObject(
jsonProperty("Temperature", jsonArray(jsonObject(
jsonProperty("numeric", jsonArray(
">",
0,
"<=",
100
))
))),
jsonProperty("Location", jsonArray("New York"))
))
)))
.build())
.build())
.build());
}
}
import pulumi
import json
import pulumi_aws as aws
example = aws.lambda_.EventSourceMapping("example",
event_source_arn=aws_sqs_queue["sqs_queue_test"]["arn"],
function_name=aws_lambda_function["example"]["arn"],
filter_criteria=aws.lambda_.EventSourceMappingFilterCriteriaArgs(
filters=[aws.lambda_.EventSourceMappingFilterCriteriaFilterArgs(
pattern=json.dumps({
"body": {
"Temperature": [{
"numeric": [
">",
0,
"<=",
100,
],
}],
"Location": ["New York"],
},
}),
)],
))
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
const example = new aws.lambda.EventSourceMapping("example", {
eventSourceArn: aws_sqs_queue.sqs_queue_test.arn,
functionName: aws_lambda_function.example.arn,
filterCriteria: {
filters: [{
pattern: JSON.stringify({
body: {
Temperature: [{
numeric: [
">",
0,
"<=",
100,
],
}],
Location: ["New York"],
},
}),
}],
},
});
resources:
example:
type: aws:lambda:EventSourceMapping
properties:
eventSourceArn: ${aws_sqs_queue.sqs_queue_test.arn}
functionName: ${aws_lambda_function.example.arn}
filterCriteria:
filters:
- pattern:
fn::toJSON:
body:
Temperature:
- numeric:
- '>'
- 0
- <=
- 100
Location:
- New York
Amazon MQ (ActiveMQ)
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.lambda.EventSourceMapping;
import com.pulumi.aws.lambda.EventSourceMappingArgs;
import com.pulumi.aws.lambda.inputs.EventSourceMappingSourceAccessConfigurationArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var example = new EventSourceMapping("example", EventSourceMappingArgs.builder()
.batchSize(10)
.eventSourceArn(aws_mq_broker.example().arn())
.enabled(true)
.functionName(aws_lambda_function.example().arn())
.queues("example")
.sourceAccessConfigurations(EventSourceMappingSourceAccessConfigurationArgs.builder()
.type("BASIC_AUTH")
.uri(aws_secretsmanager_secret_version.example().arn())
.build())
.build());
}
}
resources:
example:
type: aws:lambda:EventSourceMapping
properties:
batchSize: 10
eventSourceArn: ${aws_mq_broker.example.arn}
enabled: true
functionName: ${aws_lambda_function.example.arn}
queues:
- example
sourceAccessConfigurations:
- type: BASIC_AUTH
uri: ${aws_secretsmanager_secret_version.example.arn}
Amazon MQ (RabbitMQ)
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.lambda.EventSourceMapping;
import com.pulumi.aws.lambda.EventSourceMappingArgs;
import com.pulumi.aws.lambda.inputs.EventSourceMappingSourceAccessConfigurationArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var example = new EventSourceMapping("example", EventSourceMappingArgs.builder()
.batchSize(1)
.eventSourceArn(aws_mq_broker.example().arn())
.enabled(true)
.functionName(aws_lambda_function.example().arn())
.queues("example")
.sourceAccessConfigurations(
EventSourceMappingSourceAccessConfigurationArgs.builder()
.type("VIRTUAL_HOST")
.uri("/example")
.build(),
EventSourceMappingSourceAccessConfigurationArgs.builder()
.type("BASIC_AUTH")
.uri(aws_secretsmanager_secret_version.example().arn())
.build())
.build());
}
}
resources:
example:
type: aws:lambda:EventSourceMapping
properties:
batchSize: 1
eventSourceArn: ${aws_mq_broker.example.arn}
enabled: true
functionName: ${aws_lambda_function.example.arn}
queues:
- example
sourceAccessConfigurations:
- type: VIRTUAL_HOST
uri: /example
- type: BASIC_AUTH
uri: ${aws_secretsmanager_secret_version.example.arn}
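For languages without a generated example above, the RabbitMQ configuration can be expressed the same way. A TypeScript sketch with placeholder ARNs (note that this version of the provider models queues as a single queue name):

import * as aws from "@pulumi/aws";

// Placeholder ARNs for an existing MQ broker, Lambda function, and Secrets Manager secret.
const brokerArn = "arn:aws:mq:us-east-1:111122223333:broker:example:b-11111111-2222-3333-4444-555555555555";
const functionArn = "arn:aws:lambda:us-east-1:111122223333:function:example";
const credentialsSecretArn = "arn:aws:secretsmanager:us-east-1:111122223333:secret:example-AbCdEf";

const example = new aws.lambda.EventSourceMapping("example", {
    batchSize: 1,
    enabled: true,
    eventSourceArn: brokerArn,
    functionName: functionArn,
    queues: "example",
    sourceAccessConfigurations: [
        { type: "VIRTUAL_HOST", uri: "/example" },         // RabbitMQ virtual host
        { type: "BASIC_AUTH", uri: credentialsSecretArn }, // secret holding broker credentials
    ],
});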
Create EventSourceMapping Resource
new EventSourceMapping(name: string, args: EventSourceMappingArgs, opts?: CustomResourceOptions);
@overload
def EventSourceMapping(resource_name: str,
opts: Optional[ResourceOptions] = None,
amazon_managed_kafka_event_source_config: Optional[_lambda_.EventSourceMappingAmazonManagedKafkaEventSourceConfigArgs] = None,
batch_size: Optional[int] = None,
bisect_batch_on_function_error: Optional[bool] = None,
destination_config: Optional[_lambda_.EventSourceMappingDestinationConfigArgs] = None,
document_db_event_source_config: Optional[_lambda_.EventSourceMappingDocumentDbEventSourceConfigArgs] = None,
enabled: Optional[bool] = None,
event_source_arn: Optional[str] = None,
filter_criteria: Optional[_lambda_.EventSourceMappingFilterCriteriaArgs] = None,
function_name: Optional[str] = None,
function_response_types: Optional[Sequence[str]] = None,
maximum_batching_window_in_seconds: Optional[int] = None,
maximum_record_age_in_seconds: Optional[int] = None,
maximum_retry_attempts: Optional[int] = None,
parallelization_factor: Optional[int] = None,
queues: Optional[str] = None,
scaling_config: Optional[_lambda_.EventSourceMappingScalingConfigArgs] = None,
self_managed_event_source: Optional[_lambda_.EventSourceMappingSelfManagedEventSourceArgs] = None,
self_managed_kafka_event_source_config: Optional[_lambda_.EventSourceMappingSelfManagedKafkaEventSourceConfigArgs] = None,
source_access_configurations: Optional[Sequence[_lambda_.EventSourceMappingSourceAccessConfigurationArgs]] = None,
starting_position: Optional[str] = None,
starting_position_timestamp: Optional[str] = None,
topics: Optional[Sequence[str]] = None,
tumbling_window_in_seconds: Optional[int] = None)
@overload
def EventSourceMapping(resource_name: str,
args: EventSourceMappingArgs,
opts: Optional[ResourceOptions] = None)
func NewEventSourceMapping(ctx *Context, name string, args EventSourceMappingArgs, opts ...ResourceOption) (*EventSourceMapping, error)
public EventSourceMapping(string name, EventSourceMappingArgs args, CustomResourceOptions? opts = null)
public EventSourceMapping(String name, EventSourceMappingArgs args)
public EventSourceMapping(String name, EventSourceMappingArgs args, CustomResourceOptions options)
type: aws:lambda:EventSourceMapping
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
- name (resource_name in Python): The unique name of the resource.
- args: The arguments to resource properties.
- opts (options in Java): Bag of options to control the resource's behavior.
- ctx (Go only): Context object for the current deployment.
EventSourceMapping Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
The EventSourceMapping resource accepts the following input properties:
(The list below is shown once; property names and types follow each SDK's naming conventions, for example camelCase in TypeScript and snake_case in Python.)

- FunctionName (string): The name or the ARN of the Lambda function that will be subscribing to events.
- AmazonManagedKafkaEventSourceConfig (EventSourceMappingAmazonManagedKafkaEventSourceConfig): Additional configuration block for Amazon Managed Kafka sources. Incompatible with self_managed_event_source and self_managed_kafka_event_source_config. Detailed below.
- BatchSize (int): The largest number of records that Lambda will retrieve from your event source at the time of invocation. Defaults to 100 for DynamoDB, Kinesis, MQ and MSK; 10 for SQS.
- BisectBatchOnFunctionError (bool): (Optional) If the function returns an error, split the batch in two and retry. Only available for stream sources (DynamoDB and Kinesis). Defaults to false.
- DestinationConfig (EventSourceMappingDestinationConfig): (Optional) An Amazon SQS queue or Amazon SNS topic destination for failed records. Only available for stream sources (DynamoDB and Kinesis). Detailed below.
- DocumentDbEventSourceConfig (EventSourceMappingDocumentDbEventSourceConfig): (Optional) Configuration settings for a DocumentDB event source. Detailed below.
- Enabled (bool): Determines if the mapping will be enabled on creation. Defaults to true.
- EventSourceArn (string): The event source ARN. Required for a Kinesis stream, DynamoDB stream, SQS queue, MQ broker, MSK cluster or DocumentDB change stream; incompatible with a Self Managed Kafka source.
- FilterCriteria (EventSourceMappingFilterCriteria): The criteria to use for event filtering of Kinesis stream, DynamoDB stream, and SQS queue event sources. Detailed below.
- FunctionResponseTypes (List<string>): A list of response type enums applied to the event source mapping for AWS Lambda checkpointing. Only available for SQS and stream sources (DynamoDB and Kinesis). Valid values: ReportBatchItemFailures.
- MaximumBatchingWindowInSeconds (int): The maximum amount of time to gather records before invoking the function, in seconds (between 0 and 300). Records continue to buffer (or accumulate, in the case of an SQS queue event source) until either maximum_batching_window_in_seconds expires or batch_size has been met. For streaming event sources the default is to invoke as soon as records are available. If the batch read from the stream or queue contains only one record, Lambda sends only that record to the function. Only available for stream sources (DynamoDB and Kinesis) and SQS standard queues.
- MaximumRecordAgeInSeconds (int): (Optional) The maximum age of a record that Lambda sends to a function for processing. Only available for stream sources (DynamoDB and Kinesis). Must be either -1 (forever, the default) or between 60 and 604800 (inclusive).
- MaximumRetryAttempts (int): (Optional) The maximum number of times to retry when the function returns an error. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of -1 (forever), maximum of 10000.
- ParallelizationFactor (int): (Optional) The number of batches to process from each shard concurrently. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of 1, maximum of 10.
- Queues (string): The name of the Amazon MQ broker destination queue to consume. Only available for MQ sources. Exactly one queue name must be specified.
- ScalingConfig (EventSourceMappingScalingConfig): Scaling configuration of the event source. Only available for SQS queues. Detailed below.
- SelfManagedEventSource (EventSourceMappingSelfManagedEventSource): (Optional) For Self Managed Kafka sources, the location of the self managed cluster. If set, the configuration must also include source_access_configurations. Detailed below.
- SelfManagedKafkaEventSourceConfig (EventSourceMappingSelfManagedKafkaEventSourceConfig): Additional configuration block for Self Managed Kafka sources. Incompatible with event_source_arn and amazon_managed_kafka_event_source_config. Detailed below.
- SourceAccessConfigurations (List<EventSourceMappingSourceAccessConfiguration>): For Self Managed Kafka sources, the access configuration for the source. If set, the configuration must also include self_managed_event_source. Detailed below.
- StartingPosition (string): The position in the stream where AWS Lambda should start reading. Must be one of AT_TIMESTAMP (Kinesis only), LATEST or TRIM_HORIZON if getting events from Kinesis, DynamoDB, MSK or Self Managed Apache Kafka. Must not be provided if getting events from SQS. More information about these positions can be found in the AWS DynamoDB Streams API Reference and AWS Kinesis API Reference.
- StartingPositionTimestamp (string): A timestamp in RFC3339 format of the data record from which to start reading when starting_position is set to AT_TIMESTAMP. If a record with this exact timestamp does not exist, the next later record is chosen. If the timestamp is older than the current trim horizon, the oldest available record is chosen.
- Topics (List<string>): The names of the Kafka topics. Only available for MSK sources. A single topic name must be specified.
- TumblingWindowInSeconds (int): The duration in seconds of a processing window for AWS Lambda streaming analytics. The range is between 1 and 900 seconds. Only available for stream sources (DynamoDB and Kinesis).
- Function
Name string The name or the ARN of the Lambda function that will be subscribing to events.
- Amazon
Managed EventKafka Event Source Config Source Mapping Amazon Managed Kafka Event Source Config Args Additional configuration block for Amazon Managed Kafka sources. Incompatible with "self_managed_event_source" and "self_managed_kafka_event_source_config". Detailed below.
- Batch
Size int The largest number of records that Lambda will retrieve from your event source at the time of invocation. Defaults to
100
for DynamoDB, Kinesis, MQ and MSK,10
for SQS.- Bisect
Batch boolOn Function Error - (Optional) If the function returns an error, split the batch in two and retry. Only available for stream sources (DynamoDB and Kinesis). Defaults to
false
.
- (Optional) If the function returns an error, split the batch in two and retry. Only available for stream sources (DynamoDB and Kinesis). Defaults to
- Destination
Config EventSource Mapping Destination Config Args - (Optional) An Amazon SQS queue or Amazon SNS topic destination for failed records. Only available for stream sources (DynamoDB and Kinesis). Detailed below.
- Document
Db EventEvent Source Config Source Mapping Document Db Event Source Config Args - (Optional) Configuration settings for a DocumentDB event source. Detailed below.
- Enabled bool
Determines if the mapping will be enabled on creation. Defaults to
true
.- Event
Source stringArn The event source ARN - this is required for Kinesis stream, DynamoDB stream, SQS queue, MQ broker, MSK cluster or DocumentDB change stream. It is incompatible with a Self Managed Kafka source.
- Filter
Criteria EventSource Mapping Filter Criteria Args The criteria to use for event filtering Kinesis stream, DynamoDB stream, SQS queue event sources. Detailed below.
- Function
Response []stringTypes A list of current response type enums applied to the event source mapping for AWS Lambda checkpointing. Only available for SQS and stream sources (DynamoDB and Kinesis). Valid values:
ReportBatchItemFailures
.- Maximum
Batching intWindow In Seconds The maximum amount of time to gather records before invoking the function, in seconds (between 0 and 300). Records will continue to buffer (or accumulate in the case of an SQS queue event source) until either
maximum_batching_window_in_seconds
expires orbatch_size
has been met. For streaming event sources, defaults to as soon as records are available in the stream. If the batch it reads from the stream/queue only has one record in it, Lambda only sends one record to the function. Only available for stream sources (DynamoDB and Kinesis) and SQS standard queues.- Maximum
Record intAge In Seconds - (Optional) The maximum age of a record that Lambda sends to a function for processing. Only available for stream sources (DynamoDB and Kinesis). Must be either -1 (forever, and the default value) or between 60 and 604800 (inclusive).
- Maximum
Retry intAttempts - (Optional) The maximum number of times to retry when the function returns an error. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of -1 (forever), maximum of 10000.
- Parallelization
Factor int - (Optional) The number of batches to process from each shard concurrently. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of 1, maximum of 10.
- Queues string
The name of the Amazon MQ broker destination queue to consume. Only available for MQ sources. The list must contain exactly one queue name.
- Scaling
Config EventSource Mapping Scaling Config Args Scaling configuration of the event source. Only available for SQS queues. Detailed below.
- Self
Managed EventEvent Source Source Mapping Self Managed Event Source Args - (Optional) For Self Managed Kafka sources, the location of the self managed cluster. If set, configuration must also include
source_access_configuration
. Detailed below.
- (Optional) For Self Managed Kafka sources, the location of the self managed cluster. If set, configuration must also include
- Self
Managed EventKafka Event Source Config Source Mapping Self Managed Kafka Event Source Config Args Additional configuration block for Self Managed Kafka sources. Incompatible with "event_source_arn" and "amazon_managed_kafka_event_source_config". Detailed below.
- Source
Access []EventConfigurations Source Mapping Source Access Configuration Args For Self Managed Kafka sources, the access configuration for the source. If set, configuration must also include
self_managed_event_source
. Detailed below.- Starting
Position string The position in the stream where AWS Lambda should start reading. Must be one of
AT_TIMESTAMP
(Kinesis only),LATEST
orTRIM_HORIZON
if getting events from Kinesis, DynamoDB, MSK or Self Managed Apache Kafka. Must not be provided if getting events from SQS. More information about these positions can be found in the AWS DynamoDB Streams API Reference and AWS Kinesis API Reference.- Starting
Position stringTimestamp A timestamp in RFC3339 format of the data record which to start reading when using
starting_position
set toAT_TIMESTAMP
. If a record with this exact timestamp does not exist, the next later record is chosen. If the timestamp is older than the current trim horizon, the oldest available record is chosen.- Topics []string
The name of the Kafka topics. Only available for MSK sources. A single topic name must be specified.
- Tumbling
Window intIn Seconds The duration in seconds of a processing window for AWS Lambda streaming analytics. The range is between 1 second up to 900 seconds. Only available for stream sources (DynamoDB and Kinesis).
- function
Name String The name or the ARN of the Lambda function that will be subscribing to events.
- amazon
Managed EventKafka Event Source Config Source Mapping Amazon Managed Kafka Event Source Config Additional configuration block for Amazon Managed Kafka sources. Incompatible with "self_managed_event_source" and "self_managed_kafka_event_source_config". Detailed below.
- batch
Size Integer The largest number of records that Lambda will retrieve from your event source at the time of invocation. Defaults to
100
for DynamoDB, Kinesis, MQ and MSK,10
for SQS.- bisect
Batch BooleanOn Function Error - (Optional) If the function returns an error, split the batch in two and retry. Only available for stream sources (DynamoDB and Kinesis). Defaults to
false
.
- (Optional) If the function returns an error, split the batch in two and retry. Only available for stream sources (DynamoDB and Kinesis). Defaults to
- destination
Config EventSource Mapping Destination Config - (Optional) An Amazon SQS queue or Amazon SNS topic destination for failed records. Only available for stream sources (DynamoDB and Kinesis). Detailed below.
- document
Db EventEvent Source Config Source Mapping Document Db Event Source Config - (Optional) Configuration settings for a DocumentDB event source. Detailed below.
- enabled Boolean
Determines if the mapping will be enabled on creation. Defaults to
true
.- event
Source StringArn The event source ARN - this is required for Kinesis stream, DynamoDB stream, SQS queue, MQ broker, MSK cluster or DocumentDB change stream. It is incompatible with a Self Managed Kafka source.
- filter
Criteria EventSource Mapping Filter Criteria The criteria to use for event filtering Kinesis stream, DynamoDB stream, SQS queue event sources. Detailed below.
- function
Response List<String>Types A list of current response type enums applied to the event source mapping for AWS Lambda checkpointing. Only available for SQS and stream sources (DynamoDB and Kinesis). Valid values:
ReportBatchItemFailures
.- maximum
Batching IntegerWindow In Seconds The maximum amount of time to gather records before invoking the function, in seconds (between 0 and 300). Records will continue to buffer (or accumulate in the case of an SQS queue event source) until either
maximum_batching_window_in_seconds
expires orbatch_size
has been met. For streaming event sources, defaults to as soon as records are available in the stream. If the batch it reads from the stream/queue only has one record in it, Lambda only sends one record to the function. Only available for stream sources (DynamoDB and Kinesis) and SQS standard queues.- maximum
Record IntegerAge In Seconds - (Optional) The maximum age of a record that Lambda sends to a function for processing. Only available for stream sources (DynamoDB and Kinesis). Must be either -1 (forever, and the default value) or between 60 and 604800 (inclusive).
- maximum
Retry IntegerAttempts - (Optional) The maximum number of times to retry when the function returns an error. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of -1 (forever), maximum of 10000.
- parallelization
Factor Integer - (Optional) The number of batches to process from each shard concurrently. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of 1, maximum of 10.
- queues String
The name of the Amazon MQ broker destination queue to consume. Only available for MQ sources. The list must contain exactly one queue name.
- scaling
Config EventSource Mapping Scaling Config Scaling configuration of the event source. Only available for SQS queues. Detailed below.
- self
Managed EventEvent Source Source Mapping Self Managed Event Source - (Optional) For Self Managed Kafka sources, the location of the self managed cluster. If set, configuration must also include
source_access_configuration
. Detailed below.
- (Optional) For Self Managed Kafka sources, the location of the self managed cluster. If set, configuration must also include
- self
Managed EventKafka Event Source Config Source Mapping Self Managed Kafka Event Source Config Additional configuration block for Self Managed Kafka sources. Incompatible with "event_source_arn" and "amazon_managed_kafka_event_source_config". Detailed below.
- source
Access List<EventConfigurations Source Mapping Source Access Configuration> For Self Managed Kafka sources, the access configuration for the source. If set, configuration must also include
self_managed_event_source
. Detailed below.- starting
Position String The position in the stream where AWS Lambda should start reading. Must be one of
AT_TIMESTAMP
(Kinesis only),LATEST
orTRIM_HORIZON
if getting events from Kinesis, DynamoDB, MSK or Self Managed Apache Kafka. Must not be provided if getting events from SQS. More information about these positions can be found in the AWS DynamoDB Streams API Reference and AWS Kinesis API Reference.- starting
Position StringTimestamp A timestamp in RFC3339 format of the data record which to start reading when using
starting_position
set toAT_TIMESTAMP
. If a record with this exact timestamp does not exist, the next later record is chosen. If the timestamp is older than the current trim horizon, the oldest available record is chosen.- topics List<String>
The name of the Kafka topics. Only available for MSK sources. A single topic name must be specified.
- tumbling
Window IntegerIn Seconds The duration in seconds of a processing window for AWS Lambda streaming analytics. The range is between 1 second up to 900 seconds. Only available for stream sources (DynamoDB and Kinesis).
- function
Name string The name or the ARN of the Lambda function that will be subscribing to events.
- amazon
Managed EventKafka Event Source Config Source Mapping Amazon Managed Kafka Event Source Config Additional configuration block for Amazon Managed Kafka sources. Incompatible with "self_managed_event_source" and "self_managed_kafka_event_source_config". Detailed below.
- batch
Size number The largest number of records that Lambda will retrieve from your event source at the time of invocation. Defaults to
100
for DynamoDB, Kinesis, MQ and MSK,10
for SQS.- bisect
Batch booleanOn Function Error - (Optional) If the function returns an error, split the batch in two and retry. Only available for stream sources (DynamoDB and Kinesis). Defaults to
false
.
- (Optional) If the function returns an error, split the batch in two and retry. Only available for stream sources (DynamoDB and Kinesis). Defaults to
- destination
Config EventSource Mapping Destination Config - (Optional) An Amazon SQS queue or Amazon SNS topic destination for failed records. Only available for stream sources (DynamoDB and Kinesis). Detailed below.
- document
Db EventEvent Source Config Source Mapping Document Db Event Source Config - (Optional) Configuration settings for a DocumentDB event source. Detailed below.
- enabled boolean
Determines if the mapping will be enabled on creation. Defaults to
true
.- event
Source stringArn The event source ARN - this is required for Kinesis stream, DynamoDB stream, SQS queue, MQ broker, MSK cluster or DocumentDB change stream. It is incompatible with a Self Managed Kafka source.
- filter
Criteria EventSource Mapping Filter Criteria The criteria to use for event filtering Kinesis stream, DynamoDB stream, SQS queue event sources. Detailed below.
- function
Response string[]Types A list of current response type enums applied to the event source mapping for AWS Lambda checkpointing. Only available for SQS and stream sources (DynamoDB and Kinesis). Valid values:
ReportBatchItemFailures
.- maximum
Batching numberWindow In Seconds The maximum amount of time to gather records before invoking the function, in seconds (between 0 and 300). Records will continue to buffer (or accumulate in the case of an SQS queue event source) until either
maximum_batching_window_in_seconds
expires orbatch_size
has been met. For streaming event sources, defaults to as soon as records are available in the stream. If the batch it reads from the stream/queue only has one record in it, Lambda only sends one record to the function. Only available for stream sources (DynamoDB and Kinesis) and SQS standard queues.- maximum
Record numberAge In Seconds - (Optional) The maximum age of a record that Lambda sends to a function for processing. Only available for stream sources (DynamoDB and Kinesis). Must be either -1 (forever, and the default value) or between 60 and 604800 (inclusive).
- maximum
Retry numberAttempts - (Optional) The maximum number of times to retry when the function returns an error. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of -1 (forever), maximum of 10000.
- parallelization
Factor number - (Optional) The number of batches to process from each shard concurrently. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of 1, maximum of 10.
- queues string
The name of the Amazon MQ broker destination queue to consume. Only available for MQ sources. The list must contain exactly one queue name.
- scaling
Config EventSource Mapping Scaling Config Scaling configuration of the event source. Only available for SQS queues. Detailed below.
- self
Managed EventEvent Source Source Mapping Self Managed Event Source - (Optional) For Self Managed Kafka sources, the location of the self managed cluster. If set, configuration must also include
source_access_configuration
. Detailed below.
- (Optional) For Self Managed Kafka sources, the location of the self managed cluster. If set, configuration must also include
- self
Managed EventKafka Event Source Config Source Mapping Self Managed Kafka Event Source Config Additional configuration block for Self Managed Kafka sources. Incompatible with "event_source_arn" and "amazon_managed_kafka_event_source_config". Detailed below.
- source
Access EventConfigurations Source Mapping Source Access Configuration[] For Self Managed Kafka sources, the access configuration for the source. If set, configuration must also include
self_managed_event_source
. Detailed below.- starting
Position string The position in the stream where AWS Lambda should start reading. Must be one of
AT_TIMESTAMP
(Kinesis only),LATEST
orTRIM_HORIZON
if getting events from Kinesis, DynamoDB, MSK or Self Managed Apache Kafka. Must not be provided if getting events from SQS. More information about these positions can be found in the AWS DynamoDB Streams API Reference and AWS Kinesis API Reference.- starting
Position stringTimestamp A timestamp in RFC3339 format of the data record which to start reading when using
starting_position
set toAT_TIMESTAMP
. If a record with this exact timestamp does not exist, the next later record is chosen. If the timestamp is older than the current trim horizon, the oldest available record is chosen.- topics string[]
The name of the Kafka topics. Only available for MSK sources. A single topic name must be specified.
- tumbling
Window numberIn Seconds The duration in seconds of a processing window for AWS Lambda streaming analytics. The range is between 1 second up to 900 seconds. Only available for stream sources (DynamoDB and Kinesis).
- function_
name str The name or the ARN of the Lambda function that will be subscribing to events.
- amazon_
managed_ Eventkafka_ event_ source_ config Source Mapping Amazon Managed Kafka Event Source Config Args Additional configuration block for Amazon Managed Kafka sources. Incompatible with "self_managed_event_source" and "self_managed_kafka_event_source_config". Detailed below.
- batch_
size int The largest number of records that Lambda will retrieve from your event source at the time of invocation. Defaults to
100
for DynamoDB, Kinesis, MQ and MSK,10
for SQS.- bisect_
batch_ boolon_ function_ error - (Optional) If the function returns an error, split the batch in two and retry. Only available for stream sources (DynamoDB and Kinesis). Defaults to
false
.
- (Optional) If the function returns an error, split the batch in two and retry. Only available for stream sources (DynamoDB and Kinesis). Defaults to
- destination_
config EventSource Mapping Destination Config Args - (Optional) An Amazon SQS queue or Amazon SNS topic destination for failed records. Only available for stream sources (DynamoDB and Kinesis). Detailed below.
- document_
db_ Eventevent_ source_ config Source Mapping Document Db Event Source Config Args - (Optional) Configuration settings for a DocumentDB event source. Detailed below.
- enabled bool
Determines if the mapping will be enabled on creation. Defaults to
true
.- event_
source_ strarn The event source ARN - this is required for Kinesis stream, DynamoDB stream, SQS queue, MQ broker, MSK cluster or DocumentDB change stream. It is incompatible with a Self Managed Kafka source.
- filter_
criteria EventSource Mapping Filter Criteria Args The criteria to use for event filtering Kinesis stream, DynamoDB stream, SQS queue event sources. Detailed below.
- function_
response_ Sequence[str]types A list of current response type enums applied to the event source mapping for AWS Lambda checkpointing. Only available for SQS and stream sources (DynamoDB and Kinesis). Valid values:
ReportBatchItemFailures
.- maximum_
batching_ intwindow_ in_ seconds The maximum amount of time to gather records before invoking the function, in seconds (between 0 and 300). Records will continue to buffer (or accumulate in the case of an SQS queue event source) until either
maximum_batching_window_in_seconds
expires orbatch_size
has been met. For streaming event sources, defaults to as soon as records are available in the stream. If the batch it reads from the stream/queue only has one record in it, Lambda only sends one record to the function. Only available for stream sources (DynamoDB and Kinesis) and SQS standard queues.- maximum_
record_ intage_ in_ seconds - (Optional) The maximum age of a record that Lambda sends to a function for processing. Only available for stream sources (DynamoDB and Kinesis). Must be either -1 (forever, and the default value) or between 60 and 604800 (inclusive).
- maximum_
retry_ intattempts - (Optional) The maximum number of times to retry when the function returns an error. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of -1 (forever), maximum of 10000.
- parallelization_
factor int - (Optional) The number of batches to process from each shard concurrently. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of 1, maximum of 10.
- queues str
The name of the Amazon MQ broker destination queue to consume. Only available for MQ sources. The list must contain exactly one queue name.
- scaling_
config EventSource Mapping Scaling Config Args Scaling configuration of the event source. Only available for SQS queues. Detailed below.
- self_
managed_ Eventevent_ source Source Mapping Self Managed Event Source Args - (Optional) For Self Managed Kafka sources, the location of the self managed cluster. If set, configuration must also include
source_access_configuration
. Detailed below.
- (Optional) For Self Managed Kafka sources, the location of the self managed cluster. If set, configuration must also include
- self_
managed_ Eventkafka_ event_ source_ config Source Mapping Self Managed Kafka Event Source Config Args Additional configuration block for Self Managed Kafka sources. Incompatible with "event_source_arn" and "amazon_managed_kafka_event_source_config". Detailed below.
- source_
access_ Eventconfigurations Source Mapping Source Access Configuration Args] For Self Managed Kafka sources, the access configuration for the source. If set, configuration must also include
self_managed_event_source
. Detailed below.- starting_
position str The position in the stream where AWS Lambda should start reading. Must be one of
AT_TIMESTAMP
(Kinesis only),LATEST
orTRIM_HORIZON
if getting events from Kinesis, DynamoDB, MSK or Self Managed Apache Kafka. Must not be provided if getting events from SQS. More information about these positions can be found in the AWS DynamoDB Streams API Reference and AWS Kinesis API Reference.- starting_
position_ strtimestamp A timestamp in RFC3339 format of the data record which to start reading when using
starting_position
set toAT_TIMESTAMP
. If a record with this exact timestamp does not exist, the next later record is chosen. If the timestamp is older than the current trim horizon, the oldest available record is chosen.
- topics Sequence[str]
The name of the Kafka topics to consume. Only available for MSK sources. A single topic name must be specified.
- tumbling_window_in_seconds int
The duration in seconds of a processing window for AWS Lambda streaming analytics. The range is between 1 second and 900 seconds. Only available for stream sources (DynamoDB and Kinesis).
- functionName String
The name or the ARN of the Lambda function that will be subscribing to events.
- amazonManagedKafkaEventSourceConfig Property Map
Additional configuration block for Amazon Managed Kafka sources. Incompatible with "self_managed_event_source" and "self_managed_kafka_event_source_config". Detailed below.
- batchSize Number
The largest number of records that Lambda will retrieve from your event source at the time of invocation. Defaults to 100 for DynamoDB, Kinesis, MQ and MSK, 10 for SQS.
- bisectBatchOnFunctionError Boolean
(Optional) If the function returns an error, split the batch in two and retry. Only available for stream sources (DynamoDB and Kinesis). Defaults to false.
- destinationConfig Property Map
(Optional) An Amazon SQS queue or Amazon SNS topic destination for failed records. Only available for stream sources (DynamoDB and Kinesis). Detailed below.
- documentDbEventSourceConfig Property Map
(Optional) Configuration settings for a DocumentDB event source. Detailed below.
- enabled Boolean
Determines if the mapping will be enabled on creation. Defaults to true.
- eventSourceArn String
The event source ARN - this is required for Kinesis stream, DynamoDB stream, SQS queue, MQ broker, MSK cluster or DocumentDB change stream. It is incompatible with a Self Managed Kafka source.
- filterCriteria Property Map
The criteria to use for event filtering of Kinesis stream, DynamoDB stream, and SQS queue event sources. Detailed below.
- functionResponseTypes List<String>
A list of current response type enums applied to the event source mapping for AWS Lambda checkpointing. Only available for SQS and stream sources (DynamoDB and Kinesis). Valid values: ReportBatchItemFailures.
- maximumBatchingWindowInSeconds Number
The maximum amount of time to gather records before invoking the function, in seconds (between 0 and 300). Records will continue to buffer (or accumulate in the case of an SQS queue event source) until either maximum_batching_window_in_seconds expires or batch_size has been met. For streaming event sources, defaults to as soon as records are available in the stream. If the batch it reads from the stream/queue only has one record in it, Lambda only sends one record to the function. Only available for stream sources (DynamoDB and Kinesis) and SQS standard queues.
- maximumRecordAgeInSeconds Number
(Optional) The maximum age of a record that Lambda sends to a function for processing. Only available for stream sources (DynamoDB and Kinesis). Must be either -1 (forever, and the default value) or between 60 and 604800 (inclusive).
- maximumRetryAttempts Number
(Optional) The maximum number of times to retry when the function returns an error. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of -1 (forever), maximum of 10000.
- parallelizationFactor Number
(Optional) The number of batches to process from each shard concurrently. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of 1, maximum of 10.
- queues String
The name of the Amazon MQ broker destination queue to consume. Only available for MQ sources. The list must contain exactly one queue name.
- scalingConfig Property Map
Scaling configuration of the event source. Only available for SQS queues. Detailed below.
- selfManagedEventSource Property Map
(Optional) For Self Managed Kafka sources, the location of the self managed cluster. If set, configuration must also include source_access_configuration. Detailed below.
- selfManagedKafkaEventSourceConfig Property Map
Additional configuration block for Self Managed Kafka sources. Incompatible with "event_source_arn" and "amazon_managed_kafka_event_source_config". Detailed below.
- sourceAccessConfigurations List<Property Map>
For Self Managed Kafka sources, the access configuration for the source. If set, configuration must also include self_managed_event_source. Detailed below.
- startingPosition String
The position in the stream where AWS Lambda should start reading. Must be one of AT_TIMESTAMP (Kinesis only), LATEST or TRIM_HORIZON if getting events from Kinesis, DynamoDB, MSK or Self Managed Apache Kafka. Must not be provided if getting events from SQS. More information about these positions can be found in the AWS DynamoDB Streams API Reference and AWS Kinesis API Reference.
- startingPositionTimestamp String
A timestamp in RFC3339 format of the data record from which to start reading when starting_position is set to AT_TIMESTAMP. If a record with this exact timestamp does not exist, the next later record is chosen. If the timestamp is older than the current trim horizon, the oldest available record is chosen.
- topics List<String>
The name of the Kafka topics to consume. Only available for MSK sources. A single topic name must be specified.
- tumblingWindowInSeconds Number
The duration in seconds of a processing window for AWS Lambda streaming analytics. The range is between 1 second and 900 seconds. Only available for stream sources (DynamoDB and Kinesis).
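To see how several of these arguments fit together, here is a minimal TypeScript sketch of an SQS-backed mapping with batching and event filtering; the queue ARN, function name, and filter fields are placeholders, not values from this page:
import * as aws from "@pulumi/aws";

const sqsMapping = new aws.lambda.EventSourceMapping("sqsMapping", {
    // Placeholder values; point these at a real queue and function.
    eventSourceArn: "arn:aws:sqs:us-east-1:111122223333:example-queue",
    functionName: "example-function",
    batchSize: 10,
    // Wait up to 5 seconds to accumulate records before invoking the function.
    maximumBatchingWindowInSeconds: 5,
    // Only forward messages whose JSON body contains "type": "order".
    filterCriteria: {
        filters: [{
            pattern: JSON.stringify({ body: { type: ["order"] } }),
        }],
    },
});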
Outputs
All input properties are implicitly available as output properties. Additionally, the EventSourceMapping resource produces the following output properties:
- FunctionArn string
The ARN of the Lambda function the event source mapping is sending events to. (Note: this is a computed value that differs from function_name above.)
- Id string
The provider-assigned unique ID for this managed resource.
- LastModified string
The date this resource was last modified.
- LastProcessingResult string
The result of the last AWS Lambda invocation of your Lambda function.
- State string
The state of the event source mapping.
- StateTransitionReason string
The reason the event source mapping is in its current state.
- Uuid string
The UUID of the created event source mapping.
- FunctionArn string
The ARN of the Lambda function the event source mapping is sending events to. (Note: this is a computed value that differs from function_name above.)
- Id string
The provider-assigned unique ID for this managed resource.
- LastModified string
The date this resource was last modified.
- LastProcessingResult string
The result of the last AWS Lambda invocation of your Lambda function.
- State string
The state of the event source mapping.
- StateTransitionReason string
The reason the event source mapping is in its current state.
- Uuid string
The UUID of the created event source mapping.
- functionArn String
The ARN of the Lambda function the event source mapping is sending events to. (Note: this is a computed value that differs from function_name above.)
- id String
The provider-assigned unique ID for this managed resource.
- lastModified String
The date this resource was last modified.
- lastProcessingResult String
The result of the last AWS Lambda invocation of your Lambda function.
- state String
The state of the event source mapping.
- stateTransitionReason String
The reason the event source mapping is in its current state.
- uuid String
The UUID of the created event source mapping.
- functionArn string
The ARN of the Lambda function the event source mapping is sending events to. (Note: this is a computed value that differs from function_name above.)
- id string
The provider-assigned unique ID for this managed resource.
- lastModified string
The date this resource was last modified.
- lastProcessingResult string
The result of the last AWS Lambda invocation of your Lambda function.
- state string
The state of the event source mapping.
- stateTransitionReason string
The reason the event source mapping is in its current state.
- uuid string
The UUID of the created event source mapping.
- function_arn str
The ARN of the Lambda function the event source mapping is sending events to. (Note: this is a computed value that differs from function_name above.)
- id str
The provider-assigned unique ID for this managed resource.
- last_modified str
The date this resource was last modified.
- last_processing_result str
The result of the last AWS Lambda invocation of your Lambda function.
- state str
The state of the event source mapping.
- state_transition_reason str
The reason the event source mapping is in its current state.
- uuid str
The UUID of the created event source mapping.
- functionArn String
The ARN of the Lambda function the event source mapping is sending events to. (Note: this is a computed value that differs from function_name above.)
- id String
The provider-assigned unique ID for this managed resource.
- lastModified String
The date this resource was last modified.
- lastProcessingResult String
The result of the last AWS Lambda invocation of your Lambda function.
- state String
The state of the event source mapping.
- stateTransitionReason String
The reason the event source mapping is in its current state.
- uuid String
The UUID of the created event source mapping.
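As a quick TypeScript sketch of consuming these outputs (the stream ARN and function name below are placeholders), the computed properties can be exported from a program once the mapping is created:
import * as aws from "@pulumi/aws";

const example = new aws.lambda.EventSourceMapping("example", {
    eventSourceArn: "arn:aws:kinesis:us-east-1:111122223333:stream/example", // placeholder
    functionName: "example-function",                                        // placeholder
    startingPosition: "LATEST",
});

// Computed output properties produced by the mapping.
export const mappingUuid = example.uuid;
export const mappingState = example.state;
export const mappingFunctionArn = example.functionArn;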
Look up Existing EventSourceMapping Resource
Get an existing EventSourceMapping resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: EventSourceMappingState, opts?: CustomResourceOptions): EventSourceMapping
@staticmethod
def get(resource_name: str,
id: str,
opts: Optional[ResourceOptions] = None,
amazon_managed_kafka_event_source_config: Optional[_lambda_.EventSourceMappingAmazonManagedKafkaEventSourceConfigArgs] = None,
batch_size: Optional[int] = None,
bisect_batch_on_function_error: Optional[bool] = None,
destination_config: Optional[_lambda_.EventSourceMappingDestinationConfigArgs] = None,
document_db_event_source_config: Optional[_lambda_.EventSourceMappingDocumentDbEventSourceConfigArgs] = None,
enabled: Optional[bool] = None,
event_source_arn: Optional[str] = None,
filter_criteria: Optional[_lambda_.EventSourceMappingFilterCriteriaArgs] = None,
function_arn: Optional[str] = None,
function_name: Optional[str] = None,
function_response_types: Optional[Sequence[str]] = None,
last_modified: Optional[str] = None,
last_processing_result: Optional[str] = None,
maximum_batching_window_in_seconds: Optional[int] = None,
maximum_record_age_in_seconds: Optional[int] = None,
maximum_retry_attempts: Optional[int] = None,
parallelization_factor: Optional[int] = None,
queues: Optional[str] = None,
scaling_config: Optional[_lambda_.EventSourceMappingScalingConfigArgs] = None,
self_managed_event_source: Optional[_lambda_.EventSourceMappingSelfManagedEventSourceArgs] = None,
self_managed_kafka_event_source_config: Optional[_lambda_.EventSourceMappingSelfManagedKafkaEventSourceConfigArgs] = None,
source_access_configurations: Optional[Sequence[_lambda_.EventSourceMappingSourceAccessConfigurationArgs]] = None,
starting_position: Optional[str] = None,
starting_position_timestamp: Optional[str] = None,
state: Optional[str] = None,
state_transition_reason: Optional[str] = None,
topics: Optional[Sequence[str]] = None,
tumbling_window_in_seconds: Optional[int] = None,
uuid: Optional[str] = None) -> EventSourceMapping
func GetEventSourceMapping(ctx *Context, name string, id IDInput, state *EventSourceMappingState, opts ...ResourceOption) (*EventSourceMapping, error)
public static EventSourceMapping Get(string name, Input<string> id, EventSourceMappingState? state, CustomResourceOptions? opts = null)
public static EventSourceMapping get(String name, Output<String> id, EventSourceMappingState state, CustomResourceOptions options)
Resource lookup is not supported in YAML
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to look up.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to look up.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to look up.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to look up.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to look up.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
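For example, a minimal TypeScript sketch of looking up an existing mapping; the UUID of the mapping serves as its resource ID, and the value shown here is a placeholder:
import * as aws from "@pulumi/aws";

// Look up an existing event source mapping by its UUID (placeholder value).
const existing = aws.lambda.EventSourceMapping.get(
    "existing-mapping",
    "12345678-90ab-cdef-1234-567890abcdef");

export const existingState = existing.state;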
- AmazonManagedKafkaEventSourceConfig EventSourceMappingAmazonManagedKafkaEventSourceConfig
Additional configuration block for Amazon Managed Kafka sources. Incompatible with "self_managed_event_source" and "self_managed_kafka_event_source_config". Detailed below.
- BatchSize int
The largest number of records that Lambda will retrieve from your event source at the time of invocation. Defaults to 100 for DynamoDB, Kinesis, MQ and MSK, 10 for SQS.
- BisectBatchOnFunctionError bool
(Optional) If the function returns an error, split the batch in two and retry. Only available for stream sources (DynamoDB and Kinesis). Defaults to false.
- DestinationConfig EventSourceMappingDestinationConfig
(Optional) An Amazon SQS queue or Amazon SNS topic destination for failed records. Only available for stream sources (DynamoDB and Kinesis). Detailed below.
- DocumentDbEventSourceConfig EventSourceMappingDocumentDbEventSourceConfig
(Optional) Configuration settings for a DocumentDB event source. Detailed below.
- Enabled bool
Determines if the mapping will be enabled on creation. Defaults to true.
- EventSourceArn string
The event source ARN - this is required for Kinesis stream, DynamoDB stream, SQS queue, MQ broker, MSK cluster or DocumentDB change stream. It is incompatible with a Self Managed Kafka source.
- FilterCriteria EventSourceMappingFilterCriteria
The criteria to use for event filtering of Kinesis stream, DynamoDB stream, and SQS queue event sources. Detailed below.
- FunctionArn string
The ARN of the Lambda function the event source mapping is sending events to. (Note: this is a computed value that differs from function_name above.)
- FunctionName string
The name or the ARN of the Lambda function that will be subscribing to events.
- FunctionResponseTypes List<string>
A list of current response type enums applied to the event source mapping for AWS Lambda checkpointing. Only available for SQS and stream sources (DynamoDB and Kinesis). Valid values: ReportBatchItemFailures.
- LastModified string
The date this resource was last modified.
- LastProcessingResult string
The result of the last AWS Lambda invocation of your Lambda function.
- MaximumBatchingWindowInSeconds int
The maximum amount of time to gather records before invoking the function, in seconds (between 0 and 300). Records will continue to buffer (or accumulate in the case of an SQS queue event source) until either maximum_batching_window_in_seconds expires or batch_size has been met. For streaming event sources, defaults to as soon as records are available in the stream. If the batch it reads from the stream/queue only has one record in it, Lambda only sends one record to the function. Only available for stream sources (DynamoDB and Kinesis) and SQS standard queues.
- MaximumRecordAgeInSeconds int
(Optional) The maximum age of a record that Lambda sends to a function for processing. Only available for stream sources (DynamoDB and Kinesis). Must be either -1 (forever, and the default value) or between 60 and 604800 (inclusive).
- MaximumRetryAttempts int
(Optional) The maximum number of times to retry when the function returns an error. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of -1 (forever), maximum of 10000.
- ParallelizationFactor int
(Optional) The number of batches to process from each shard concurrently. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of 1, maximum of 10.
- Queues string
The name of the Amazon MQ broker destination queue to consume. Only available for MQ sources. The list must contain exactly one queue name.
- ScalingConfig EventSourceMappingScalingConfig
Scaling configuration of the event source. Only available for SQS queues. Detailed below.
- SelfManagedEventSource EventSourceMappingSelfManagedEventSource
(Optional) For Self Managed Kafka sources, the location of the self managed cluster. If set, configuration must also include source_access_configuration. Detailed below.
- SelfManagedKafkaEventSourceConfig EventSourceMappingSelfManagedKafkaEventSourceConfig
Additional configuration block for Self Managed Kafka sources. Incompatible with "event_source_arn" and "amazon_managed_kafka_event_source_config". Detailed below.
- SourceAccessConfigurations List<EventSourceMappingSourceAccessConfiguration>
For Self Managed Kafka sources, the access configuration for the source. If set, configuration must also include self_managed_event_source. Detailed below.
- StartingPosition string
The position in the stream where AWS Lambda should start reading. Must be one of AT_TIMESTAMP (Kinesis only), LATEST or TRIM_HORIZON if getting events from Kinesis, DynamoDB, MSK or Self Managed Apache Kafka. Must not be provided if getting events from SQS. More information about these positions can be found in the AWS DynamoDB Streams API Reference and AWS Kinesis API Reference.
- StartingPositionTimestamp string
A timestamp in RFC3339 format of the data record from which to start reading when starting_position is set to AT_TIMESTAMP. If a record with this exact timestamp does not exist, the next later record is chosen. If the timestamp is older than the current trim horizon, the oldest available record is chosen.
- State string
The state of the event source mapping.
- StateTransitionReason string
The reason the event source mapping is in its current state.
- Topics List<string>
The name of the Kafka topics to consume. Only available for MSK sources. A single topic name must be specified.
- TumblingWindowInSeconds int
The duration in seconds of a processing window for AWS Lambda streaming analytics. The range is between 1 second and 900 seconds. Only available for stream sources (DynamoDB and Kinesis).
- Uuid string
The UUID of the created event source mapping.
- AmazonManagedKafkaEventSourceConfig EventSourceMappingAmazonManagedKafkaEventSourceConfigArgs
Additional configuration block for Amazon Managed Kafka sources. Incompatible with "self_managed_event_source" and "self_managed_kafka_event_source_config". Detailed below.
- BatchSize int
The largest number of records that Lambda will retrieve from your event source at the time of invocation. Defaults to 100 for DynamoDB, Kinesis, MQ and MSK, 10 for SQS.
- BisectBatchOnFunctionError bool
(Optional) If the function returns an error, split the batch in two and retry. Only available for stream sources (DynamoDB and Kinesis). Defaults to false.
- DestinationConfig EventSourceMappingDestinationConfigArgs
(Optional) An Amazon SQS queue or Amazon SNS topic destination for failed records. Only available for stream sources (DynamoDB and Kinesis). Detailed below.
- DocumentDbEventSourceConfig EventSourceMappingDocumentDbEventSourceConfigArgs
(Optional) Configuration settings for a DocumentDB event source. Detailed below.
- Enabled bool
Determines if the mapping will be enabled on creation. Defaults to true.
- EventSourceArn string
The event source ARN - this is required for Kinesis stream, DynamoDB stream, SQS queue, MQ broker, MSK cluster or DocumentDB change stream. It is incompatible with a Self Managed Kafka source.
- FilterCriteria EventSourceMappingFilterCriteriaArgs
The criteria to use for event filtering of Kinesis stream, DynamoDB stream, and SQS queue event sources. Detailed below.
- FunctionArn string
The ARN of the Lambda function the event source mapping is sending events to. (Note: this is a computed value that differs from function_name above.)
- FunctionName string
The name or the ARN of the Lambda function that will be subscribing to events.
- FunctionResponseTypes []string
A list of current response type enums applied to the event source mapping for AWS Lambda checkpointing. Only available for SQS and stream sources (DynamoDB and Kinesis). Valid values: ReportBatchItemFailures.
- LastModified string
The date this resource was last modified.
- LastProcessingResult string
The result of the last AWS Lambda invocation of your Lambda function.
- MaximumBatchingWindowInSeconds int
The maximum amount of time to gather records before invoking the function, in seconds (between 0 and 300). Records will continue to buffer (or accumulate in the case of an SQS queue event source) until either maximum_batching_window_in_seconds expires or batch_size has been met. For streaming event sources, defaults to as soon as records are available in the stream. If the batch it reads from the stream/queue only has one record in it, Lambda only sends one record to the function. Only available for stream sources (DynamoDB and Kinesis) and SQS standard queues.
- MaximumRecordAgeInSeconds int
(Optional) The maximum age of a record that Lambda sends to a function for processing. Only available for stream sources (DynamoDB and Kinesis). Must be either -1 (forever, and the default value) or between 60 and 604800 (inclusive).
- MaximumRetryAttempts int
(Optional) The maximum number of times to retry when the function returns an error. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of -1 (forever), maximum of 10000.
- ParallelizationFactor int
(Optional) The number of batches to process from each shard concurrently. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of 1, maximum of 10.
- Queues string
The name of the Amazon MQ broker destination queue to consume. Only available for MQ sources. The list must contain exactly one queue name.
- ScalingConfig EventSourceMappingScalingConfigArgs
Scaling configuration of the event source. Only available for SQS queues. Detailed below.
- SelfManagedEventSource EventSourceMappingSelfManagedEventSourceArgs
(Optional) For Self Managed Kafka sources, the location of the self managed cluster. If set, configuration must also include source_access_configuration. Detailed below.
- SelfManagedKafkaEventSourceConfig EventSourceMappingSelfManagedKafkaEventSourceConfigArgs
Additional configuration block for Self Managed Kafka sources. Incompatible with "event_source_arn" and "amazon_managed_kafka_event_source_config". Detailed below.
- SourceAccessConfigurations []EventSourceMappingSourceAccessConfigurationArgs
For Self Managed Kafka sources, the access configuration for the source. If set, configuration must also include self_managed_event_source. Detailed below.
- StartingPosition string
The position in the stream where AWS Lambda should start reading. Must be one of AT_TIMESTAMP (Kinesis only), LATEST or TRIM_HORIZON if getting events from Kinesis, DynamoDB, MSK or Self Managed Apache Kafka. Must not be provided if getting events from SQS. More information about these positions can be found in the AWS DynamoDB Streams API Reference and AWS Kinesis API Reference.
- StartingPositionTimestamp string
A timestamp in RFC3339 format of the data record from which to start reading when starting_position is set to AT_TIMESTAMP. If a record with this exact timestamp does not exist, the next later record is chosen. If the timestamp is older than the current trim horizon, the oldest available record is chosen.
- State string
The state of the event source mapping.
- StateTransitionReason string
The reason the event source mapping is in its current state.
- Topics []string
The name of the Kafka topics to consume. Only available for MSK sources. A single topic name must be specified.
- TumblingWindowInSeconds int
The duration in seconds of a processing window for AWS Lambda streaming analytics. The range is between 1 second and 900 seconds. Only available for stream sources (DynamoDB and Kinesis).
- Uuid string
The UUID of the created event source mapping.
- amazonManagedKafkaEventSourceConfig EventSourceMappingAmazonManagedKafkaEventSourceConfig
Additional configuration block for Amazon Managed Kafka sources. Incompatible with "self_managed_event_source" and "self_managed_kafka_event_source_config". Detailed below.
- batchSize Integer
The largest number of records that Lambda will retrieve from your event source at the time of invocation. Defaults to 100 for DynamoDB, Kinesis, MQ and MSK, 10 for SQS.
- bisectBatchOnFunctionError Boolean
(Optional) If the function returns an error, split the batch in two and retry. Only available for stream sources (DynamoDB and Kinesis). Defaults to false.
- destinationConfig EventSourceMappingDestinationConfig
(Optional) An Amazon SQS queue or Amazon SNS topic destination for failed records. Only available for stream sources (DynamoDB and Kinesis). Detailed below.
- documentDbEventSourceConfig EventSourceMappingDocumentDbEventSourceConfig
(Optional) Configuration settings for a DocumentDB event source. Detailed below.
- enabled Boolean
Determines if the mapping will be enabled on creation. Defaults to true.
- eventSourceArn String
The event source ARN - this is required for Kinesis stream, DynamoDB stream, SQS queue, MQ broker, MSK cluster or DocumentDB change stream. It is incompatible with a Self Managed Kafka source.
- filterCriteria EventSourceMappingFilterCriteria
The criteria to use for event filtering of Kinesis stream, DynamoDB stream, and SQS queue event sources. Detailed below.
- functionArn String
The ARN of the Lambda function the event source mapping is sending events to. (Note: this is a computed value that differs from function_name above.)
- functionName String
The name or the ARN of the Lambda function that will be subscribing to events.
- functionResponseTypes List<String>
A list of current response type enums applied to the event source mapping for AWS Lambda checkpointing. Only available for SQS and stream sources (DynamoDB and Kinesis). Valid values: ReportBatchItemFailures.
- lastModified String
The date this resource was last modified.
- lastProcessingResult String
The result of the last AWS Lambda invocation of your Lambda function.
- maximumBatchingWindowInSeconds Integer
The maximum amount of time to gather records before invoking the function, in seconds (between 0 and 300). Records will continue to buffer (or accumulate in the case of an SQS queue event source) until either maximum_batching_window_in_seconds expires or batch_size has been met. For streaming event sources, defaults to as soon as records are available in the stream. If the batch it reads from the stream/queue only has one record in it, Lambda only sends one record to the function. Only available for stream sources (DynamoDB and Kinesis) and SQS standard queues.
- maximumRecordAgeInSeconds Integer
(Optional) The maximum age of a record that Lambda sends to a function for processing. Only available for stream sources (DynamoDB and Kinesis). Must be either -1 (forever, and the default value) or between 60 and 604800 (inclusive).
- maximumRetryAttempts Integer
(Optional) The maximum number of times to retry when the function returns an error. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of -1 (forever), maximum of 10000.
- parallelizationFactor Integer
(Optional) The number of batches to process from each shard concurrently. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of 1, maximum of 10.
- queues String
The name of the Amazon MQ broker destination queue to consume. Only available for MQ sources. The list must contain exactly one queue name.
- scalingConfig EventSourceMappingScalingConfig
Scaling configuration of the event source. Only available for SQS queues. Detailed below.
- selfManagedEventSource EventSourceMappingSelfManagedEventSource
(Optional) For Self Managed Kafka sources, the location of the self managed cluster. If set, configuration must also include source_access_configuration. Detailed below.
- selfManagedKafkaEventSourceConfig EventSourceMappingSelfManagedKafkaEventSourceConfig
Additional configuration block for Self Managed Kafka sources. Incompatible with "event_source_arn" and "amazon_managed_kafka_event_source_config". Detailed below.
- sourceAccessConfigurations List<EventSourceMappingSourceAccessConfiguration>
For Self Managed Kafka sources, the access configuration for the source. If set, configuration must also include self_managed_event_source. Detailed below.
- startingPosition String
The position in the stream where AWS Lambda should start reading. Must be one of AT_TIMESTAMP (Kinesis only), LATEST or TRIM_HORIZON if getting events from Kinesis, DynamoDB, MSK or Self Managed Apache Kafka. Must not be provided if getting events from SQS. More information about these positions can be found in the AWS DynamoDB Streams API Reference and AWS Kinesis API Reference.
- startingPositionTimestamp String
A timestamp in RFC3339 format of the data record from which to start reading when starting_position is set to AT_TIMESTAMP. If a record with this exact timestamp does not exist, the next later record is chosen. If the timestamp is older than the current trim horizon, the oldest available record is chosen.
- state String
The state of the event source mapping.
- stateTransitionReason String
The reason the event source mapping is in its current state.
- topics List<String>
The name of the Kafka topics to consume. Only available for MSK sources. A single topic name must be specified.
- tumblingWindowInSeconds Integer
The duration in seconds of a processing window for AWS Lambda streaming analytics. The range is between 1 second and 900 seconds. Only available for stream sources (DynamoDB and Kinesis).
- uuid String
The UUID of the created event source mapping.
- amazonManagedKafkaEventSourceConfig EventSourceMappingAmazonManagedKafkaEventSourceConfig
Additional configuration block for Amazon Managed Kafka sources. Incompatible with "self_managed_event_source" and "self_managed_kafka_event_source_config". Detailed below.
- batchSize number
The largest number of records that Lambda will retrieve from your event source at the time of invocation. Defaults to 100 for DynamoDB, Kinesis, MQ and MSK, 10 for SQS.
- bisectBatchOnFunctionError boolean
(Optional) If the function returns an error, split the batch in two and retry. Only available for stream sources (DynamoDB and Kinesis). Defaults to false.
- destinationConfig EventSourceMappingDestinationConfig
(Optional) An Amazon SQS queue or Amazon SNS topic destination for failed records. Only available for stream sources (DynamoDB and Kinesis). Detailed below.
- documentDbEventSourceConfig EventSourceMappingDocumentDbEventSourceConfig
(Optional) Configuration settings for a DocumentDB event source. Detailed below.
- enabled boolean
Determines if the mapping will be enabled on creation. Defaults to true.
- eventSourceArn string
The event source ARN - this is required for Kinesis stream, DynamoDB stream, SQS queue, MQ broker, MSK cluster or DocumentDB change stream. It is incompatible with a Self Managed Kafka source.
- filterCriteria EventSourceMappingFilterCriteria
The criteria to use for event filtering of Kinesis stream, DynamoDB stream, and SQS queue event sources. Detailed below.
- functionArn string
The ARN of the Lambda function the event source mapping is sending events to. (Note: this is a computed value that differs from function_name above.)
- functionName string
The name or the ARN of the Lambda function that will be subscribing to events.
- functionResponseTypes string[]
A list of current response type enums applied to the event source mapping for AWS Lambda checkpointing. Only available for SQS and stream sources (DynamoDB and Kinesis). Valid values: ReportBatchItemFailures.
- lastModified string
The date this resource was last modified.
- lastProcessingResult string
The result of the last AWS Lambda invocation of your Lambda function.
- maximumBatchingWindowInSeconds number
The maximum amount of time to gather records before invoking the function, in seconds (between 0 and 300). Records will continue to buffer (or accumulate in the case of an SQS queue event source) until either maximum_batching_window_in_seconds expires or batch_size has been met. For streaming event sources, defaults to as soon as records are available in the stream. If the batch it reads from the stream/queue only has one record in it, Lambda only sends one record to the function. Only available for stream sources (DynamoDB and Kinesis) and SQS standard queues.
- maximumRecordAgeInSeconds number
(Optional) The maximum age of a record that Lambda sends to a function for processing. Only available for stream sources (DynamoDB and Kinesis). Must be either -1 (forever, and the default value) or between 60 and 604800 (inclusive).
- maximumRetryAttempts number
(Optional) The maximum number of times to retry when the function returns an error. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of -1 (forever), maximum of 10000.
- parallelizationFactor number
(Optional) The number of batches to process from each shard concurrently. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of 1, maximum of 10.
- queues string
The name of the Amazon MQ broker destination queue to consume. Only available for MQ sources. The list must contain exactly one queue name.
- scalingConfig EventSourceMappingScalingConfig
Scaling configuration of the event source. Only available for SQS queues. Detailed below.
- selfManagedEventSource EventSourceMappingSelfManagedEventSource
(Optional) For Self Managed Kafka sources, the location of the self managed cluster. If set, configuration must also include source_access_configuration. Detailed below.
- selfManagedKafkaEventSourceConfig EventSourceMappingSelfManagedKafkaEventSourceConfig
Additional configuration block for Self Managed Kafka sources. Incompatible with "event_source_arn" and "amazon_managed_kafka_event_source_config". Detailed below.
- sourceAccessConfigurations EventSourceMappingSourceAccessConfiguration[]
For Self Managed Kafka sources, the access configuration for the source. If set, configuration must also include self_managed_event_source. Detailed below.
- startingPosition string
The position in the stream where AWS Lambda should start reading. Must be one of AT_TIMESTAMP (Kinesis only), LATEST or TRIM_HORIZON if getting events from Kinesis, DynamoDB, MSK or Self Managed Apache Kafka. Must not be provided if getting events from SQS. More information about these positions can be found in the AWS DynamoDB Streams API Reference and AWS Kinesis API Reference.
- startingPositionTimestamp string
A timestamp in RFC3339 format of the data record from which to start reading when starting_position is set to AT_TIMESTAMP. If a record with this exact timestamp does not exist, the next later record is chosen. If the timestamp is older than the current trim horizon, the oldest available record is chosen.
- state string
The state of the event source mapping.
- stateTransitionReason string
The reason the event source mapping is in its current state.
- topics string[]
The name of the Kafka topics to consume. Only available for MSK sources. A single topic name must be specified.
- tumblingWindowInSeconds number
The duration in seconds of a processing window for AWS Lambda streaming analytics. The range is between 1 second and 900 seconds. Only available for stream sources (DynamoDB and Kinesis).
- uuid string
The UUID of the created event source mapping.
- amazon_managed_kafka_event_source_config EventSourceMappingAmazonManagedKafkaEventSourceConfigArgs
Additional configuration block for Amazon Managed Kafka sources. Incompatible with "self_managed_event_source" and "self_managed_kafka_event_source_config". Detailed below.
- batch_size int
The largest number of records that Lambda will retrieve from your event source at the time of invocation. Defaults to 100 for DynamoDB, Kinesis, MQ and MSK, 10 for SQS.
- bisect_batch_on_function_error bool
(Optional) If the function returns an error, split the batch in two and retry. Only available for stream sources (DynamoDB and Kinesis). Defaults to false.
- destination_config EventSourceMappingDestinationConfigArgs
(Optional) An Amazon SQS queue or Amazon SNS topic destination for failed records. Only available for stream sources (DynamoDB and Kinesis). Detailed below.
- document_db_event_source_config EventSourceMappingDocumentDbEventSourceConfigArgs
(Optional) Configuration settings for a DocumentDB event source. Detailed below.
- enabled bool
Determines if the mapping will be enabled on creation. Defaults to true.
- event_source_arn str
The event source ARN - this is required for Kinesis stream, DynamoDB stream, SQS queue, MQ broker, MSK cluster or DocumentDB change stream. It is incompatible with a Self Managed Kafka source.
- filter_criteria EventSourceMappingFilterCriteriaArgs
The criteria to use for event filtering of Kinesis stream, DynamoDB stream, and SQS queue event sources. Detailed below.
- function_arn str
The ARN of the Lambda function the event source mapping is sending events to. (Note: this is a computed value that differs from function_name above.)
- function_name str
The name or the ARN of the Lambda function that will be subscribing to events.
- function_response_types Sequence[str]
A list of current response type enums applied to the event source mapping for AWS Lambda checkpointing. Only available for SQS and stream sources (DynamoDB and Kinesis). Valid values: ReportBatchItemFailures.
- last_modified str
The date this resource was last modified.
- last_processing_result str
The result of the last AWS Lambda invocation of your Lambda function.
- maximum_batching_window_in_seconds int
The maximum amount of time to gather records before invoking the function, in seconds (between 0 and 300). Records will continue to buffer (or accumulate in the case of an SQS queue event source) until either maximum_batching_window_in_seconds expires or batch_size has been met. For streaming event sources, defaults to as soon as records are available in the stream. If the batch it reads from the stream/queue only has one record in it, Lambda only sends one record to the function. Only available for stream sources (DynamoDB and Kinesis) and SQS standard queues.
- maximum_record_age_in_seconds int
(Optional) The maximum age of a record that Lambda sends to a function for processing. Only available for stream sources (DynamoDB and Kinesis). Must be either -1 (forever, and the default value) or between 60 and 604800 (inclusive).
- maximum_retry_attempts int
(Optional) The maximum number of times to retry when the function returns an error. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of -1 (forever), maximum of 10000.
- parallelization_factor int
(Optional) The number of batches to process from each shard concurrently. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of 1, maximum of 10.
- queues str
The name of the Amazon MQ broker destination queue to consume. Only available for MQ sources. The list must contain exactly one queue name.
- scaling_config EventSourceMappingScalingConfigArgs
Scaling configuration of the event source. Only available for SQS queues. Detailed below.
- self_managed_event_source EventSourceMappingSelfManagedEventSourceArgs
(Optional) For Self Managed Kafka sources, the location of the self managed cluster. If set, configuration must also include source_access_configuration. Detailed below.
- self_managed_kafka_event_source_config EventSourceMappingSelfManagedKafkaEventSourceConfigArgs
Additional configuration block for Self Managed Kafka sources. Incompatible with "event_source_arn" and "amazon_managed_kafka_event_source_config". Detailed below.
- source_access_configurations Sequence[EventSourceMappingSourceAccessConfigurationArgs]
For Self Managed Kafka sources, the access configuration for the source. If set, configuration must also include self_managed_event_source. Detailed below.
- starting_position str
The position in the stream where AWS Lambda should start reading. Must be one of AT_TIMESTAMP (Kinesis only), LATEST or TRIM_HORIZON if getting events from Kinesis, DynamoDB, MSK or Self Managed Apache Kafka. Must not be provided if getting events from SQS. More information about these positions can be found in the AWS DynamoDB Streams API Reference and AWS Kinesis API Reference.
- starting_position_timestamp str
A timestamp in RFC3339 format of the data record from which to start reading when starting_position is set to AT_TIMESTAMP. If a record with this exact timestamp does not exist, the next later record is chosen. If the timestamp is older than the current trim horizon, the oldest available record is chosen.
- state str
The state of the event source mapping.
- state_transition_reason str
The reason the event source mapping is in its current state.
- topics Sequence[str]
The name of the Kafka topics to consume. Only available for MSK sources. A single topic name must be specified.
- tumbling_window_in_seconds int
The duration in seconds of a processing window for AWS Lambda streaming analytics. The range is between 1 second and 900 seconds. Only available for stream sources (DynamoDB and Kinesis).
- uuid str
The UUID of the created event source mapping.
- amazonManagedKafkaEventSourceConfig Property Map
Additional configuration block for Amazon Managed Kafka sources. Incompatible with "self_managed_event_source" and "self_managed_kafka_event_source_config". Detailed below.
- batchSize Number
The largest number of records that Lambda will retrieve from your event source at the time of invocation. Defaults to 100 for DynamoDB, Kinesis, MQ and MSK, 10 for SQS.
- bisectBatchOnFunctionError Boolean
(Optional) If the function returns an error, split the batch in two and retry. Only available for stream sources (DynamoDB and Kinesis). Defaults to false.
- destinationConfig Property Map
(Optional) An Amazon SQS queue or Amazon SNS topic destination for failed records. Only available for stream sources (DynamoDB and Kinesis). Detailed below.
- documentDbEventSourceConfig Property Map
(Optional) Configuration settings for a DocumentDB event source. Detailed below.
- enabled Boolean
Determines if the mapping will be enabled on creation. Defaults to true.
- eventSourceArn String
The event source ARN - this is required for Kinesis stream, DynamoDB stream, SQS queue, MQ broker, MSK cluster or DocumentDB change stream. It is incompatible with a Self Managed Kafka source.
- filterCriteria Property Map
The criteria to use for event filtering of Kinesis stream, DynamoDB stream, and SQS queue event sources. Detailed below.
- functionArn String
The ARN of the Lambda function the event source mapping is sending events to. (Note: this is a computed value that differs from function_name above.)
- functionName String
The name or the ARN of the Lambda function that will be subscribing to events.
- functionResponseTypes List<String>
A list of current response type enums applied to the event source mapping for AWS Lambda checkpointing. Only available for SQS and stream sources (DynamoDB and Kinesis). Valid values: ReportBatchItemFailures.
- lastModified String
The date this resource was last modified.
- lastProcessingResult String
The result of the last AWS Lambda invocation of your Lambda function.
- maximumBatchingWindowInSeconds Number
The maximum amount of time to gather records before invoking the function, in seconds (between 0 and 300). Records will continue to buffer (or accumulate in the case of an SQS queue event source) until either maximum_batching_window_in_seconds expires or batch_size has been met. For streaming event sources, defaults to as soon as records are available in the stream. If the batch it reads from the stream/queue only has one record in it, Lambda only sends one record to the function. Only available for stream sources (DynamoDB and Kinesis) and SQS standard queues.
- maximumRecordAgeInSeconds Number
(Optional) The maximum age of a record that Lambda sends to a function for processing. Only available for stream sources (DynamoDB and Kinesis). Must be either -1 (forever, and the default value) or between 60 and 604800 (inclusive).
- maximumRetryAttempts Number
(Optional) The maximum number of times to retry when the function returns an error. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of -1 (forever), maximum of 10000.
- parallelizationFactor Number
(Optional) The number of batches to process from each shard concurrently. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of 1, maximum of 10.
- queues String
The name of the Amazon MQ broker destination queue to consume. Only available for MQ sources. The list must contain exactly one queue name.
- scalingConfig Property Map
Scaling configuration of the event source. Only available for SQS queues. Detailed below.
- selfManagedEventSource Property Map
(Optional) For Self Managed Kafka sources, the location of the self managed cluster. If set, configuration must also include source_access_configuration. Detailed below.
- selfManagedKafkaEventSourceConfig Property Map
Additional configuration block for Self Managed Kafka sources. Incompatible with "event_source_arn" and "amazon_managed_kafka_event_source_config". Detailed below.
- sourceAccessConfigurations List<Property Map>
For Self Managed Kafka sources, the access configuration for the source. If set, configuration must also include self_managed_event_source. Detailed below.
- startingPosition String
The position in the stream where AWS Lambda should start reading. Must be one of AT_TIMESTAMP (Kinesis only), LATEST or TRIM_HORIZON if getting events from Kinesis, DynamoDB, MSK or Self Managed Apache Kafka. Must not be provided if getting events from SQS. More information about these positions can be found in the AWS DynamoDB Streams API Reference and AWS Kinesis API Reference.
- startingPositionTimestamp String
A timestamp in RFC3339 format of the data record from which to start reading when starting_position is set to AT_TIMESTAMP. If a record with this exact timestamp does not exist, the next later record is chosen. If the timestamp is older than the current trim horizon, the oldest available record is chosen.
- state String
The state of the event source mapping.
- stateTransitionReason String
The reason the event source mapping is in its current state.
- topics List<String>
The name of the Kafka topics to consume. Only available for MSK sources. A single topic name must be specified.
- tumblingWindowInSeconds Number
The duration in seconds of a processing window for AWS Lambda streaming analytics. The range is between 1 second and 900 seconds. Only available for stream sources (DynamoDB and Kinesis).
- uuid String
The UUID of the created event source mapping.
Supporting Types
EventSourceMappingAmazonManagedKafkaEventSourceConfig, EventSourceMappingAmazonManagedKafkaEventSourceConfigArgs
- ConsumerGroupId string
A Kafka consumer group ID between 1 and 200 characters for use when creating this event source mapping. If one is not specified, this value will be automatically generated. See AmazonManagedKafkaEventSourceConfig Syntax.
- ConsumerGroupId string
A Kafka consumer group ID between 1 and 200 characters for use when creating this event source mapping. If one is not specified, this value will be automatically generated. See AmazonManagedKafkaEventSourceConfig Syntax.
- consumerGroupId String
A Kafka consumer group ID between 1 and 200 characters for use when creating this event source mapping. If one is not specified, this value will be automatically generated. See AmazonManagedKafkaEventSourceConfig Syntax.
- consumerGroupId string
A Kafka consumer group ID between 1 and 200 characters for use when creating this event source mapping. If one is not specified, this value will be automatically generated. See AmazonManagedKafkaEventSourceConfig Syntax.
- consumer_group_id str
A Kafka consumer group ID between 1 and 200 characters for use when creating this event source mapping. If one is not specified, this value will be automatically generated. See AmazonManagedKafkaEventSourceConfig Syntax.
- consumerGroupId String
A Kafka consumer group ID between 1 and 200 characters for use when creating this event source mapping. If one is not specified, this value will be automatically generated. See AmazonManagedKafkaEventSourceConfig Syntax.
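As a sketch of where this block fits, the consumer group ID is set alongside the MSK cluster ARN and topic in TypeScript; the ARN, topic, and group ID below are placeholders:
import * as aws from "@pulumi/aws";

const mskMapping = new aws.lambda.EventSourceMapping("mskMapping", {
    eventSourceArn: "arn:aws:kafka:us-east-1:111122223333:cluster/example/abcd1234", // placeholder
    functionName: "example-function",                                                // placeholder
    topics: ["example-topic"],
    startingPosition: "TRIM_HORIZON",
    amazonManagedKafkaEventSourceConfig: {
        consumerGroupId: "example-consumer-group",
    },
});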
EventSourceMappingDestinationConfig, EventSourceMappingDestinationConfigArgs
- OnFailure EventSourceMappingDestinationConfigOnFailure
The destination configuration for failed invocations. Detailed below.
- OnFailure EventSourceMappingDestinationConfigOnFailure
The destination configuration for failed invocations. Detailed below.
- onFailure EventSourceMappingDestinationConfigOnFailure
The destination configuration for failed invocations. Detailed below.
- onFailure EventSourceMappingDestinationConfigOnFailure
The destination configuration for failed invocations. Detailed below.
- on_failure EventSourceMappingDestinationConfigOnFailure
The destination configuration for failed invocations. Detailed below.
- onFailure Property Map
The destination configuration for failed invocations. Detailed below.
EventSourceMappingDestinationConfigOnFailure, EventSourceMappingDestinationConfigOnFailureArgs
- DestinationArn string
The Amazon Resource Name (ARN) of the destination resource.
- DestinationArn string
The Amazon Resource Name (ARN) of the destination resource.
- destinationArn String
The Amazon Resource Name (ARN) of the destination resource.
- destinationArn string
The Amazon Resource Name (ARN) of the destination resource.
- destination_arn str
The Amazon Resource Name (ARN) of the destination resource.
- destinationArn String
The Amazon Resource Name (ARN) of the destination resource.
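For instance, records from a stream source that exhaust their retries can be routed to an SQS dead-letter queue; a minimal TypeScript sketch with placeholder ARNs and function name:
import * as aws from "@pulumi/aws";

const streamMapping = new aws.lambda.EventSourceMapping("streamMapping", {
    eventSourceArn: "arn:aws:kinesis:us-east-1:111122223333:stream/example", // placeholder
    functionName: "example-function",                                        // placeholder
    startingPosition: "LATEST",
    maximumRetryAttempts: 3,
    // Send records that could not be processed to a dead-letter queue.
    destinationConfig: {
        onFailure: {
            destinationArn: "arn:aws:sqs:us-east-1:111122223333:example-dlq", // placeholder
        },
    },
});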
EventSourceMappingDocumentDbEventSourceConfig, EventSourceMappingDocumentDbEventSourceConfigArgs
- DatabaseName string
The name of the database to consume within the DocumentDB cluster.
- CollectionName string
The name of the collection to consume within the database. If you do not specify a collection, Lambda consumes all collections.
- FullDocument string
Determines what DocumentDB sends to your event stream during document update operations. If set to UpdateLookup, DocumentDB sends a delta describing the changes, along with a copy of the entire document. Otherwise, DocumentDB sends only a partial document that contains the changes. Valid values: UpdateLookup, Default.
- DatabaseName string
The name of the database to consume within the DocumentDB cluster.
- CollectionName string
The name of the collection to consume within the database. If you do not specify a collection, Lambda consumes all collections.
- FullDocument string
Determines what DocumentDB sends to your event stream during document update operations. If set to UpdateLookup, DocumentDB sends a delta describing the changes, along with a copy of the entire document. Otherwise, DocumentDB sends only a partial document that contains the changes. Valid values: UpdateLookup, Default.
- databaseName String
The name of the database to consume within the DocumentDB cluster.
- collectionName String
The name of the collection to consume within the database. If you do not specify a collection, Lambda consumes all collections.
- fullDocument String
Determines what DocumentDB sends to your event stream during document update operations. If set to UpdateLookup, DocumentDB sends a delta describing the changes, along with a copy of the entire document. Otherwise, DocumentDB sends only a partial document that contains the changes. Valid values: UpdateLookup, Default.
- databaseName string
The name of the database to consume within the DocumentDB cluster.
- collectionName string
The name of the collection to consume within the database. If you do not specify a collection, Lambda consumes all collections.
- fullDocument string
Determines what DocumentDB sends to your event stream during document update operations. If set to UpdateLookup, DocumentDB sends a delta describing the changes, along with a copy of the entire document. Otherwise, DocumentDB sends only a partial document that contains the changes. Valid values: UpdateLookup, Default.
- database_name str
The name of the database to consume within the DocumentDB cluster.
- collection_name str
The name of the collection to consume within the database. If you do not specify a collection, Lambda consumes all collections.
- full_document str
Determines what DocumentDB sends to your event stream during document update operations. If set to UpdateLookup, DocumentDB sends a delta describing the changes, along with a copy of the entire document. Otherwise, DocumentDB sends only a partial document that contains the changes. Valid values: UpdateLookup, Default.
- databaseName String
The name of the database to consume within the DocumentDB cluster.
- collectionName String
The name of the collection to consume within the database. If you do not specify a collection, Lambda consumes all collections.
- fullDocument String
Determines what DocumentDB sends to your event stream during document update operations. If set to UpdateLookup, DocumentDB sends a delta describing the changes, along with a copy of the entire document. Otherwise, DocumentDB sends only a partial document that contains the changes. Valid values: UpdateLookup, Default.
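A minimal TypeScript sketch of these settings follows; the cluster ARN, database, and collection names are placeholders, and other settings a DocumentDB source typically needs (such as authentication via source access configurations) are omitted for brevity:
import * as aws from "@pulumi/aws";

const docdbMapping = new aws.lambda.EventSourceMapping("docdbMapping", {
    eventSourceArn: "arn:aws:rds:us-east-1:111122223333:cluster:example-docdb", // placeholder
    functionName: "example-function",                                           // placeholder
    documentDbEventSourceConfig: {
        databaseName: "exampledb",
        collectionName: "orders",
        // Send the full updated document along with the change delta.
        fullDocument: "UpdateLookup",
    },
});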
EventSourceMappingFilterCriteria, EventSourceMappingFilterCriteriaArgs
- Filters List<EventSourceMappingFilterCriteriaFilter>
A set of up to 5 filters. If an event satisfies at least one, Lambda sends the event to the function or adds it to the next batch. Detailed below.
- Filters []EventSourceMappingFilterCriteriaFilter
A set of up to 5 filters. If an event satisfies at least one, Lambda sends the event to the function or adds it to the next batch. Detailed below.
- filters List<EventSourceMappingFilterCriteriaFilter>
A set of up to 5 filters. If an event satisfies at least one, Lambda sends the event to the function or adds it to the next batch. Detailed below.
- filters EventSourceMappingFilterCriteriaFilter[]
A set of up to 5 filters. If an event satisfies at least one, Lambda sends the event to the function or adds it to the next batch. Detailed below.
- filters Sequence[EventSourceMappingFilterCriteriaFilter]
A set of up to 5 filters. If an event satisfies at least one, Lambda sends the event to the function or adds it to the next batch. Detailed below.
- filters List<Property Map>
A set of up to 5 filters. If an event satisfies at least one, Lambda sends the event to the function or adds it to the next batch. Detailed below.
EventSourceMappingFilterCriteriaFilter, EventSourceMappingFilterCriteriaFilterArgs
- Pattern string
A filter pattern up to 4096 characters. See Filter Rule Syntax.
- Pattern string
A filter pattern up to 4096 characters. See Filter Rule Syntax.
- pattern String
A filter pattern up to 4096 characters. See Filter Rule Syntax.
- pattern string
A filter pattern up to 4096 characters. See Filter Rule Syntax.
- pattern str
A filter pattern up to 4096 characters. See Filter Rule Syntax.
- pattern String
A filter pattern up to 4096 characters. See Filter Rule Syntax.
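For instance, a pattern is a JSON document matched against the incoming event; a small TypeScript sketch that filters SQS messages by fields in their JSON body (the field names, thresholds, queue ARN, and function name are illustrative placeholders):
import * as aws from "@pulumi/aws";

// Match messages whose body has kind == "order" and an amount of at least 100.
const pattern = JSON.stringify({
    body: {
        kind: ["order"],
        amount: [{ numeric: [">=", 100] }],
    },
});

const filteredMapping = new aws.lambda.EventSourceMapping("filteredMapping", {
    eventSourceArn: "arn:aws:sqs:us-east-1:111122223333:example-queue", // placeholder
    functionName: "example-function",                                   // placeholder
    filterCriteria: {
        filters: [{ pattern: pattern }],
    },
});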
EventSourceMappingScalingConfig, EventSourceMappingScalingConfigArgs
- MaximumConcurrency int
Limits the number of concurrent instances that the Amazon SQS event source can invoke. Must be between 2 and 1000. See Configuring maximum concurrency for Amazon SQS event sources.
- MaximumConcurrency int
Limits the number of concurrent instances that the Amazon SQS event source can invoke. Must be between 2 and 1000. See Configuring maximum concurrency for Amazon SQS event sources.
- maximumConcurrency Integer
Limits the number of concurrent instances that the Amazon SQS event source can invoke. Must be between 2 and 1000. See Configuring maximum concurrency for Amazon SQS event sources.
- maximumConcurrency number
Limits the number of concurrent instances that the Amazon SQS event source can invoke. Must be between 2 and 1000. See Configuring maximum concurrency for Amazon SQS event sources.
- maximum_concurrency int
Limits the number of concurrent instances that the Amazon SQS event source can invoke. Must be between 2 and 1000. See Configuring maximum concurrency for Amazon SQS event sources.
- maximumConcurrency Number
Limits the number of concurrent instances that the Amazon SQS event source can invoke. Must be between 2 and 1000. See Configuring maximum concurrency for Amazon SQS event sources.
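For example, capping an SQS-backed mapping at 50 concurrent function instances in TypeScript; the queue ARN and function name are placeholders:
import * as aws from "@pulumi/aws";

const cappedMapping = new aws.lambda.EventSourceMapping("cappedMapping", {
    eventSourceArn: "arn:aws:sqs:us-east-1:111122223333:example-queue", // placeholder
    functionName: "example-function",                                   // placeholder
    scalingConfig: {
        // Must be between 2 and 1000.
        maximumConcurrency: 50,
    },
});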
EventSourceMappingSelfManagedEventSource, EventSourceMappingSelfManagedEventSourceArgs
- Endpoints Dictionary<string, string>
A map of endpoints for the self managed source. For Kafka self-managed sources, the key should be KAFKA_BOOTSTRAP_SERVERS and the value should be a string with a comma separated list of broker endpoints.
- Endpoints map[string]string
A map of endpoints for the self managed source. For Kafka self-managed sources, the key should be KAFKA_BOOTSTRAP_SERVERS and the value should be a string with a comma separated list of broker endpoints.
- endpoints Map<String,String>
A map of endpoints for the self managed source. For Kafka self-managed sources, the key should be KAFKA_BOOTSTRAP_SERVERS and the value should be a string with a comma separated list of broker endpoints.
- endpoints {[key: string]: string}
A map of endpoints for the self managed source. For Kafka self-managed sources, the key should be KAFKA_BOOTSTRAP_SERVERS and the value should be a string with a comma separated list of broker endpoints.
- endpoints Mapping[str, str]
A map of endpoints for the self managed source. For Kafka self-managed sources, the key should be KAFKA_BOOTSTRAP_SERVERS and the value should be a string with a comma separated list of broker endpoints.
- endpoints Map<String>
A map of endpoints for the self managed source. For Kafka self-managed sources, the key should be KAFKA_BOOTSTRAP_SERVERS and the value should be a string with a comma separated list of broker endpoints.
EventSourceMappingSelfManagedKafkaEventSourceConfig, EventSourceMappingSelfManagedKafkaEventSourceConfigArgs
- ConsumerGroupId string
A Kafka consumer group ID between 1 and 200 characters for use when creating this event source mapping. If one is not specified, this value will be automatically generated. See SelfManagedKafkaEventSourceConfig Syntax.
- ConsumerGroupId string
A Kafka consumer group ID between 1 and 200 characters for use when creating this event source mapping. If one is not specified, this value will be automatically generated. See SelfManagedKafkaEventSourceConfig Syntax.
- consumerGroupId String
A Kafka consumer group ID between 1 and 200 characters for use when creating this event source mapping. If one is not specified, this value will be automatically generated. See SelfManagedKafkaEventSourceConfig Syntax.
- consumerGroupId string
A Kafka consumer group ID between 1 and 200 characters for use when creating this event source mapping. If one is not specified, this value will be automatically generated. See SelfManagedKafkaEventSourceConfig Syntax.
- consumer_group_id str
A Kafka consumer group ID between 1 and 200 characters for use when creating this event source mapping. If one is not specified, this value will be automatically generated. See SelfManagedKafkaEventSourceConfig Syntax.
- consumerGroupId String
A Kafka consumer group ID between 1 and 200 characters for use when creating this event source mapping. If one is not specified, this value will be automatically generated. See SelfManagedKafkaEventSourceConfig Syntax.
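The endpoints map and the consumer group ID come together when configuring a self-managed Kafka source. The TypeScript sketch below (broker hostnames, topic, group ID, and the function ARN are all placeholders) shows where each setting lives; a real mapping would also need sourceAccessConfigurations, covered next.

import * as aws from "@pulumi/aws";

// Placeholder values for illustration only.
const functionArn = "arn:aws:lambda:us-east-1:123456789012:function:example";

const selfManagedKafka = new aws.lambda.EventSourceMapping("self-managed-kafka", {
    functionName: functionArn,
    topics: ["example-topic"],
    startingPosition: "TRIM_HORIZON",
    selfManagedEventSource: {
        // The key must be KAFKA_BOOTSTRAP_SERVERS; the value is a comma-separated broker list.
        endpoints: {
            KAFKA_BOOTSTRAP_SERVERS: "kafka1.example.com:9092,kafka2.example.com:9092",
        },
    },
    selfManagedKafkaEventSourceConfig: {
        // Optional; Lambda generates a consumer group ID when this is omitted.
        consumerGroupId: "example-consumer-group",
    },
});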
EventSourceMappingSourceAccessConfiguration, EventSourceMappingSourceAccessConfigurationArgs
- Type string
The type of this configuration. For Self Managed Kafka you will need to supply blocks for type VPC_SUBNET and VPC_SECURITY_GROUP.
- Uri string
The URI for this configuration. For type VPC_SUBNET the value should be subnet:subnet_id where subnet_id is the value you would find in an aws.ec2.Subnet resource's id attribute. For type VPC_SECURITY_GROUP the value should be security_group:security_group_id where security_group_id is the value you would find in an aws.ec2.SecurityGroup resource's id attribute.
- Type string
The type of this configuration. For Self Managed Kafka you will need to supply blocks for type VPC_SUBNET and VPC_SECURITY_GROUP.
- Uri string
The URI for this configuration. For type VPC_SUBNET the value should be subnet:subnet_id where subnet_id is the value you would find in an aws.ec2.Subnet resource's id attribute. For type VPC_SECURITY_GROUP the value should be security_group:security_group_id where security_group_id is the value you would find in an aws.ec2.SecurityGroup resource's id attribute.
- type String
The type of this configuration. For Self Managed Kafka you will need to supply blocks for type VPC_SUBNET and VPC_SECURITY_GROUP.
- uri String
The URI for this configuration. For type VPC_SUBNET the value should be subnet:subnet_id where subnet_id is the value you would find in an aws.ec2.Subnet resource's id attribute. For type VPC_SECURITY_GROUP the value should be security_group:security_group_id where security_group_id is the value you would find in an aws.ec2.SecurityGroup resource's id attribute.
- type string
The type of this configuration. For Self Managed Kafka you will need to supply blocks for type VPC_SUBNET and VPC_SECURITY_GROUP.
- uri string
The URI for this configuration. For type VPC_SUBNET the value should be subnet:subnet_id where subnet_id is the value you would find in an aws.ec2.Subnet resource's id attribute. For type VPC_SECURITY_GROUP the value should be security_group:security_group_id where security_group_id is the value you would find in an aws.ec2.SecurityGroup resource's id attribute.
- type str
The type of this configuration. For Self Managed Kafka you will need to supply blocks for type VPC_SUBNET and VPC_SECURITY_GROUP.
- uri str
The URI for this configuration. For type VPC_SUBNET the value should be subnet:subnet_id where subnet_id is the value you would find in an aws.ec2.Subnet resource's id attribute. For type VPC_SECURITY_GROUP the value should be security_group:security_group_id where security_group_id is the value you would find in an aws.ec2.SecurityGroup resource's id attribute.
- type String
The type of this configuration. For Self Managed Kafka you will need to supply blocks for type VPC_SUBNET and VPC_SECURITY_GROUP.
- uri String
The URI for this configuration. For type VPC_SUBNET the value should be subnet:subnet_id where subnet_id is the value you would find in an aws.ec2.Subnet resource's id attribute. For type VPC_SECURITY_GROUP the value should be security_group:security_group_id where security_group_id is the value you would find in an aws.ec2.SecurityGroup resource's id attribute.
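To make the subnet:/security_group: URI convention concrete, here is a TypeScript sketch that builds the URIs from the id attributes of hypothetical aws.ec2.Subnet and aws.ec2.SecurityGroup resources (the VPC ID, brokers, topic, and function ARN are placeholders), extending the self-managed Kafka mapping above:

import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";

// Hypothetical network resources; their `id` attributes feed the URIs below.
const subnet = new aws.ec2.Subnet("example-subnet", {
    vpcId: "vpc-0123456789abcdef0", // placeholder VPC
    cidrBlock: "10.0.1.0/24",
});
const securityGroup = new aws.ec2.SecurityGroup("example-sg", {
    vpcId: "vpc-0123456789abcdef0", // placeholder VPC
});

const functionArn = "arn:aws:lambda:us-east-1:123456789012:function:example";

const kafkaInVpc = new aws.lambda.EventSourceMapping("kafka-in-vpc", {
    functionName: functionArn,
    topics: ["example-topic"],
    startingPosition: "TRIM_HORIZON",
    selfManagedEventSource: {
        endpoints: {
            KAFKA_BOOTSTRAP_SERVERS: "kafka1.example.com:9092,kafka2.example.com:9092",
        },
    },
    sourceAccessConfigurations: [
        // URIs follow "subnet:<subnet id>" and "security_group:<security group id>".
        { type: "VPC_SUBNET", uri: pulumi.interpolate`subnet:${subnet.id}` },
        { type: "VPC_SECURITY_GROUP", uri: pulumi.interpolate`security_group:${securityGroup.id}` },
    ],
});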
Import
Using pulumi import, import Lambda event source mappings using the UUID (event source mapping identifier). For example:
$ pulumi import aws:lambda/eventSourceMapping:EventSourceMapping event_source_mapping 12345kxodurf3443
Package Details
- Repository: AWS Classic pulumi/pulumi-aws
- License: Apache-2.0
- Notes: This Pulumi package is based on the aws Terraform Provider.