1. Packages
  2. AWS Classic
  3. API Docs
  4. dms
  5. Endpoint

Try AWS Native preview for resources not in the classic version.

AWS Classic v6.3.0 published on Thursday, Sep 28, 2023 by Pulumi

aws.dms.Endpoint

Explore with Pulumi AI

aws logo

Try AWS Native preview for resources not in the classic version.

AWS Classic v6.3.0 published on Thursday, Sep 28, 2023 by Pulumi

    Provides a DMS (Data Migration Service) endpoint resource. DMS endpoints can be created, updated, deleted, and imported.

    Note: All arguments including the password will be stored in the raw state as plain-text. Read more about sensitive data in state.

    Note: The s3_settings argument is deprecated, may not be maintained, and will be removed in a future version. Use the aws.dms.S3Endpoint resource instead.

    Example Usage

    // C# example: create a DMS source endpoint for an Aurora database.
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Aws = Pulumi.Aws;
    
    return await Deployment.RunAsync(() => 
    {
        // Create a new endpoint
        // NOTE: Password is stored as plain text in the raw Pulumi state —
        // see the sensitive-data note at the top of this page.
        var test = new Aws.Dms.Endpoint("test", new()
        {
            CertificateArn = "arn:aws:acm:us-east-1:123456789012:certificate/12345678-1234-1234-1234-123456789012",
            DatabaseName = "test",
            EndpointId = "test-dms-endpoint-tf",
            EndpointType = "source",
            EngineName = "aurora",
            ExtraConnectionAttributes = "",
            KmsKeyArn = "arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012",
            Password = "test",
            Port = 3306,
            ServerName = "test",
            SslMode = "none",
            Tags = 
            {
                { "Name", "test" },
            },
            Username = "test",
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/dms"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		// Create a new endpoint.
    		// NOTE: Password is stored as plain text in the raw Pulumi state —
    		// see the sensitive-data note at the top of this page.
    		_, err := dms.NewEndpoint(ctx, "test", &dms.EndpointArgs{
    			CertificateArn:            pulumi.String("arn:aws:acm:us-east-1:123456789012:certificate/12345678-1234-1234-1234-123456789012"),
    			DatabaseName:              pulumi.String("test"),
    			EndpointId:                pulumi.String("test-dms-endpoint-tf"),
    			EndpointType:              pulumi.String("source"),
    			EngineName:                pulumi.String("aurora"),
    			ExtraConnectionAttributes: pulumi.String(""),
    			KmsKeyArn:                 pulumi.String("arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012"),
    			Password:                  pulumi.String("test"),
    			Port:                      pulumi.Int(3306),
    			ServerName:                pulumi.String("test"),
    			SslMode:                   pulumi.String("none"),
    			Tags: pulumi.StringMap{
    				"Name": pulumi.String("test"),
    			},
    			Username: pulumi.String("test"),
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.aws.dms.Endpoint;
    import com.pulumi.aws.dms.EndpointArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            // Create a new endpoint.
            // NOTE: password is stored as plain text in the raw Pulumi state —
            // see the sensitive-data note at the top of this page.
            var test = new Endpoint("test", EndpointArgs.builder()        
                .certificateArn("arn:aws:acm:us-east-1:123456789012:certificate/12345678-1234-1234-1234-123456789012")
                .databaseName("test")
                .endpointId("test-dms-endpoint-tf")
                .endpointType("source")
                .engineName("aurora")
                .extraConnectionAttributes("")
                .kmsKeyArn("arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012")
                .password("test")
                .port(3306)
                .serverName("test")
                .sslMode("none")
                .tags(Map.of("Name", "test"))
                .username("test")
                .build());
    
        }
    }
    
    import pulumi
    import pulumi_aws as aws
    
    # Create a new endpoint
    # NOTE: password is stored as plain text in the raw Pulumi state —
    # see the sensitive-data note at the top of this page.
    test = aws.dms.Endpoint("test",
        certificate_arn="arn:aws:acm:us-east-1:123456789012:certificate/12345678-1234-1234-1234-123456789012",
        database_name="test",
        endpoint_id="test-dms-endpoint-tf",
        endpoint_type="source",
        engine_name="aurora",
        extra_connection_attributes="",
        kms_key_arn="arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012",
        password="test",
        port=3306,
        server_name="test",
        ssl_mode="none",
        tags={
            "Name": "test",
        },
        username="test")
    
    import * as pulumi from "@pulumi/pulumi";
    import * as aws from "@pulumi/aws";
    
    // Create a new endpoint
    // NOTE: password is stored as plain text in the raw Pulumi state —
    // see the sensitive-data note at the top of this page.
    const test = new aws.dms.Endpoint("test", {
        certificateArn: "arn:aws:acm:us-east-1:123456789012:certificate/12345678-1234-1234-1234-123456789012",
        databaseName: "test",
        endpointId: "test-dms-endpoint-tf",
        endpointType: "source",
        engineName: "aurora",
        extraConnectionAttributes: "",
        kmsKeyArn: "arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012",
        password: "test",
        port: 3306,
        serverName: "test",
        sslMode: "none",
        tags: {
            Name: "test",
        },
        username: "test",
    });
    
    resources:
      # Create a new endpoint
      test:
        type: aws:dms:Endpoint
        properties:
          certificateArn: arn:aws:acm:us-east-1:123456789012:certificate/12345678-1234-1234-1234-123456789012
          databaseName: test
          endpointId: test-dms-endpoint-tf
          endpointType: source
          engineName: aurora
          # Quoted empty string: a bare empty value would parse as YAML null,
          # which does not match the empty string used by the other language examples.
          extraConnectionAttributes: ""
          kmsKeyArn: arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012
          # NOTE: password is stored as plain text in the raw Pulumi state —
          # see the sensitive-data note at the top of this page.
          password: test
          port: 3306
          serverName: test
          sslMode: none
          tags:
            Name: test
          username: test
    

    Create Endpoint Resource

    new Endpoint(name: string, args: EndpointArgs, opts?: CustomResourceOptions);
    @overload
    def Endpoint(resource_name: str,
                 opts: Optional[ResourceOptions] = None,
                 certificate_arn: Optional[str] = None,
                 database_name: Optional[str] = None,
                 elasticsearch_settings: Optional[EndpointElasticsearchSettingsArgs] = None,
                 endpoint_id: Optional[str] = None,
                 endpoint_type: Optional[str] = None,
                 engine_name: Optional[str] = None,
                 extra_connection_attributes: Optional[str] = None,
                 kafka_settings: Optional[EndpointKafkaSettingsArgs] = None,
                 kinesis_settings: Optional[EndpointKinesisSettingsArgs] = None,
                 kms_key_arn: Optional[str] = None,
                 mongodb_settings: Optional[EndpointMongodbSettingsArgs] = None,
                 password: Optional[str] = None,
                 port: Optional[int] = None,
                 redis_settings: Optional[EndpointRedisSettingsArgs] = None,
                 redshift_settings: Optional[EndpointRedshiftSettingsArgs] = None,
                 s3_settings: Optional[EndpointS3SettingsArgs] = None,
                 secrets_manager_access_role_arn: Optional[str] = None,
                 secrets_manager_arn: Optional[str] = None,
                 server_name: Optional[str] = None,
                 service_access_role: Optional[str] = None,
                 ssl_mode: Optional[str] = None,
                 tags: Optional[Mapping[str, str]] = None,
                 username: Optional[str] = None)
    @overload
    def Endpoint(resource_name: str,
                 args: EndpointArgs,
                 opts: Optional[ResourceOptions] = None)
    func NewEndpoint(ctx *Context, name string, args EndpointArgs, opts ...ResourceOption) (*Endpoint, error)
    public Endpoint(string name, EndpointArgs args, CustomResourceOptions? opts = null)
    public Endpoint(String name, EndpointArgs args)
    public Endpoint(String name, EndpointArgs args, CustomResourceOptions options)
    
    type: aws:dms:Endpoint
    properties: # The arguments to resource properties.
    options: # Bag of options to control resource's behavior.
    
    
    name string
    The unique name of the resource.
    args EndpointArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    resource_name str
    The unique name of the resource.
    args EndpointArgs
    The arguments to resource properties.
    opts ResourceOptions
    Bag of options to control resource's behavior.
    ctx Context
    Context object for the current deployment.
    name string
    The unique name of the resource.
    args EndpointArgs
    The arguments to resource properties.
    opts ResourceOption
    Bag of options to control resource's behavior.
    name string
    The unique name of the resource.
    args EndpointArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    name String
    The unique name of the resource.
    args EndpointArgs
    The arguments to resource properties.
    options CustomResourceOptions
    Bag of options to control resource's behavior.

    Endpoint Resource Properties

    To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

    Inputs

    The Endpoint resource accepts the following input properties:

    EndpointId string

    Database endpoint identifier. Identifiers must contain from 1 to 255 alphanumeric characters or hyphens, begin with a letter, contain only ASCII letters, digits, and hyphens, not end with a hyphen, and not contain two consecutive hyphens.

    EndpointType string

    Type of endpoint. Valid values are source, target.

    EngineName string

    Type of engine for the endpoint. Valid values are aurora, aurora-postgresql, azuredb, azure-sql-managed-instance, babelfish, db2, db2-zos, docdb, dynamodb, elasticsearch, kafka, kinesis, mariadb, mongodb, mysql, opensearch, oracle, postgres, redshift, s3, sqlserver, sybase. Please note that some of engine names are available only for target endpoint type (e.g. redshift).

    CertificateArn string

    ARN for the certificate.

    DatabaseName string

    Name of the endpoint database.

    ElasticsearchSettings EndpointElasticsearchSettings

    Configuration block for OpenSearch settings. See below.

    ExtraConnectionAttributes string

    Additional attributes associated with the connection. For available attributes for a source Endpoint, see Sources for data migration. For available attributes for a target Endpoint, see Targets for data migration.

    KafkaSettings EndpointKafkaSettings

    Configuration block for Kafka settings. See below.

    KinesisSettings EndpointKinesisSettings

    Configuration block for Kinesis settings. See below.

    KmsKeyArn string

    ARN for the KMS key that will be used to encrypt the connection parameters. If you do not specify a value for kms_key_arn, then AWS DMS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region. To encrypt an S3 target with a KMS Key, use the parameter s3_settings.server_side_encryption_kms_key_id. When engine_name is redshift, kms_key_arn is the KMS Key for the Redshift target and the parameter redshift_settings.server_side_encryption_kms_key_id encrypts the S3 intermediate storage.

    The following arguments are optional:

    MongodbSettings EndpointMongodbSettings

    Configuration block for MongoDB settings. See below.

    Password string

    Password to be used to login to the endpoint database.

    Port int

    Port used by the endpoint database.

    RedisSettings EndpointRedisSettings

    Configuration block for Redis settings. See below.

    RedshiftSettings EndpointRedshiftSettings

    Configuration block for Redshift settings. See below.

    S3Settings EndpointS3Settings

    (Deprecated, use the aws.dms.S3Endpoint resource instead) Configuration block for S3 settings. See below.

    SecretsManagerAccessRoleArn string

    ARN of the IAM role that specifies AWS DMS as the trusted entity and has the required permissions to access the value in SecretsManagerSecret.

    SecretsManagerArn string

    Full ARN, partial ARN, or friendly name of the SecretsManagerSecret that contains the endpoint connection details. Supported only when engine_name is aurora, aurora-postgresql, mariadb, mongodb, mysql, oracle, postgres, redshift, or sqlserver.

    ServerName string

    Host name of the server.

    ServiceAccessRole string

    ARN used by the service access IAM role for dynamodb endpoints.

    SslMode string

    SSL mode to use for the connection. Valid values are none, require, verify-ca, verify-full

    Tags Dictionary<string, string>

    Map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.

    Username string

    User name to be used to login to the endpoint database.

    EndpointId string

    Database endpoint identifier. Identifiers must contain from 1 to 255 alphanumeric characters or hyphens, begin with a letter, contain only ASCII letters, digits, and hyphens, not end with a hyphen, and not contain two consecutive hyphens.

    EndpointType string

    Type of endpoint. Valid values are source, target.

    EngineName string

    Type of engine for the endpoint. Valid values are aurora, aurora-postgresql, azuredb, azure-sql-managed-instance, babelfish, db2, db2-zos, docdb, dynamodb, elasticsearch, kafka, kinesis, mariadb, mongodb, mysql, opensearch, oracle, postgres, redshift, s3, sqlserver, sybase. Please note that some of engine names are available only for target endpoint type (e.g. redshift).

    CertificateArn string

    ARN for the certificate.

    DatabaseName string

    Name of the endpoint database.

    ElasticsearchSettings EndpointElasticsearchSettingsArgs

    Configuration block for OpenSearch settings. See below.

    ExtraConnectionAttributes string

    Additional attributes associated with the connection. For available attributes for a source Endpoint, see Sources for data migration. For available attributes for a target Endpoint, see Targets for data migration.

    KafkaSettings EndpointKafkaSettingsArgs

    Configuration block for Kafka settings. See below.

    KinesisSettings EndpointKinesisSettingsArgs

    Configuration block for Kinesis settings. See below.

    KmsKeyArn string

    ARN for the KMS key that will be used to encrypt the connection parameters. If you do not specify a value for kms_key_arn, then AWS DMS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region. To encrypt an S3 target with a KMS Key, use the parameter s3_settings.server_side_encryption_kms_key_id. When engine_name is redshift, kms_key_arn is the KMS Key for the Redshift target and the parameter redshift_settings.server_side_encryption_kms_key_id encrypts the S3 intermediate storage.

    The following arguments are optional:

    MongodbSettings EndpointMongodbSettingsArgs

    Configuration block for MongoDB settings. See below.

    Password string

    Password to be used to login to the endpoint database.

    Port int

    Port used by the endpoint database.

    RedisSettings EndpointRedisSettingsArgs

    Configuration block for Redis settings. See below.

    RedshiftSettings EndpointRedshiftSettingsArgs

    Configuration block for Redshift settings. See below.

    S3Settings EndpointS3SettingsArgs

    (Deprecated, use the aws.dms.S3Endpoint resource instead) Configuration block for S3 settings. See below.

    SecretsManagerAccessRoleArn string

    ARN of the IAM role that specifies AWS DMS as the trusted entity and has the required permissions to access the value in SecretsManagerSecret.

    SecretsManagerArn string

    Full ARN, partial ARN, or friendly name of the SecretsManagerSecret that contains the endpoint connection details. Supported only when engine_name is aurora, aurora-postgresql, mariadb, mongodb, mysql, oracle, postgres, redshift, or sqlserver.

    ServerName string

    Host name of the server.

    ServiceAccessRole string

    ARN used by the service access IAM role for dynamodb endpoints.

    SslMode string

    SSL mode to use for the connection. Valid values are none, require, verify-ca, verify-full

    Tags map[string]string

    Map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.

    Username string

    User name to be used to login to the endpoint database.

    endpointId String

    Database endpoint identifier. Identifiers must contain from 1 to 255 alphanumeric characters or hyphens, begin with a letter, contain only ASCII letters, digits, and hyphens, not end with a hyphen, and not contain two consecutive hyphens.

    endpointType String

    Type of endpoint. Valid values are source, target.

    engineName String

    Type of engine for the endpoint. Valid values are aurora, aurora-postgresql, azuredb, azure-sql-managed-instance, babelfish, db2, db2-zos, docdb, dynamodb, elasticsearch, kafka, kinesis, mariadb, mongodb, mysql, opensearch, oracle, postgres, redshift, s3, sqlserver, sybase. Please note that some of engine names are available only for target endpoint type (e.g. redshift).

    certificateArn String

    ARN for the certificate.

    databaseName String

    Name of the endpoint database.

    elasticsearchSettings EndpointElasticsearchSettings

    Configuration block for OpenSearch settings. See below.

    extraConnectionAttributes String

    Additional attributes associated with the connection. For available attributes for a source Endpoint, see Sources for data migration. For available attributes for a target Endpoint, see Targets for data migration.

    kafkaSettings EndpointKafkaSettings

    Configuration block for Kafka settings. See below.

    kinesisSettings EndpointKinesisSettings

    Configuration block for Kinesis settings. See below.

    kmsKeyArn String

    ARN for the KMS key that will be used to encrypt the connection parameters. If you do not specify a value for kms_key_arn, then AWS DMS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region. To encrypt an S3 target with a KMS Key, use the parameter s3_settings.server_side_encryption_kms_key_id. When engine_name is redshift, kms_key_arn is the KMS Key for the Redshift target and the parameter redshift_settings.server_side_encryption_kms_key_id encrypts the S3 intermediate storage.

    The following arguments are optional:

    mongodbSettings EndpointMongodbSettings

    Configuration block for MongoDB settings. See below.

    password String

    Password to be used to login to the endpoint database.

    port Integer

    Port used by the endpoint database.

    redisSettings EndpointRedisSettings

    Configuration block for Redis settings. See below.

    redshiftSettings EndpointRedshiftSettings

    Configuration block for Redshift settings. See below.

    s3Settings EndpointS3Settings

    (Deprecated, use the aws.dms.S3Endpoint resource instead) Configuration block for S3 settings. See below.

    secretsManagerAccessRoleArn String

    ARN of the IAM role that specifies AWS DMS as the trusted entity and has the required permissions to access the value in SecretsManagerSecret.

    secretsManagerArn String

    Full ARN, partial ARN, or friendly name of the SecretsManagerSecret that contains the endpoint connection details. Supported only when engine_name is aurora, aurora-postgresql, mariadb, mongodb, mysql, oracle, postgres, redshift, or sqlserver.

    serverName String

    Host name of the server.

    serviceAccessRole String

    ARN used by the service access IAM role for dynamodb endpoints.

    sslMode String

    SSL mode to use for the connection. Valid values are none, require, verify-ca, verify-full

    tags Map<String,String>

    Map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.

    username String

    User name to be used to login to the endpoint database.

    endpointId string

    Database endpoint identifier. Identifiers must contain from 1 to 255 alphanumeric characters or hyphens, begin with a letter, contain only ASCII letters, digits, and hyphens, not end with a hyphen, and not contain two consecutive hyphens.

    endpointType string

    Type of endpoint. Valid values are source, target.

    engineName string

    Type of engine for the endpoint. Valid values are aurora, aurora-postgresql, azuredb, azure-sql-managed-instance, babelfish, db2, db2-zos, docdb, dynamodb, elasticsearch, kafka, kinesis, mariadb, mongodb, mysql, opensearch, oracle, postgres, redshift, s3, sqlserver, sybase. Please note that some of engine names are available only for target endpoint type (e.g. redshift).

    certificateArn string

    ARN for the certificate.

    databaseName string

    Name of the endpoint database.

    elasticsearchSettings EndpointElasticsearchSettings

    Configuration block for OpenSearch settings. See below.

    extraConnectionAttributes string

    Additional attributes associated with the connection. For available attributes for a source Endpoint, see Sources for data migration. For available attributes for a target Endpoint, see Targets for data migration.

    kafkaSettings EndpointKafkaSettings

    Configuration block for Kafka settings. See below.

    kinesisSettings EndpointKinesisSettings

    Configuration block for Kinesis settings. See below.

    kmsKeyArn string

    ARN for the KMS key that will be used to encrypt the connection parameters. If you do not specify a value for kms_key_arn, then AWS DMS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region. To encrypt an S3 target with a KMS Key, use the parameter s3_settings.server_side_encryption_kms_key_id. When engine_name is redshift, kms_key_arn is the KMS Key for the Redshift target and the parameter redshift_settings.server_side_encryption_kms_key_id encrypts the S3 intermediate storage.

    The following arguments are optional:

    mongodbSettings EndpointMongodbSettings

    Configuration block for MongoDB settings. See below.

    password string

    Password to be used to login to the endpoint database.

    port number

    Port used by the endpoint database.

    redisSettings EndpointRedisSettings

    Configuration block for Redis settings. See below.

    redshiftSettings EndpointRedshiftSettings

    Configuration block for Redshift settings. See below.

    s3Settings EndpointS3Settings

    (Deprecated, use the aws.dms.S3Endpoint resource instead) Configuration block for S3 settings. See below.

    secretsManagerAccessRoleArn string

    ARN of the IAM role that specifies AWS DMS as the trusted entity and has the required permissions to access the value in SecretsManagerSecret.

    secretsManagerArn string

    Full ARN, partial ARN, or friendly name of the SecretsManagerSecret that contains the endpoint connection details. Supported only when engine_name is aurora, aurora-postgresql, mariadb, mongodb, mysql, oracle, postgres, redshift, or sqlserver.

    serverName string

    Host name of the server.

    serviceAccessRole string

    ARN used by the service access IAM role for dynamodb endpoints.

    sslMode string

    SSL mode to use for the connection. Valid values are none, require, verify-ca, verify-full

    tags {[key: string]: string}

    Map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.

    username string

    User name to be used to login to the endpoint database.

    endpoint_id str

    Database endpoint identifier. Identifiers must contain from 1 to 255 alphanumeric characters or hyphens, begin with a letter, contain only ASCII letters, digits, and hyphens, not end with a hyphen, and not contain two consecutive hyphens.

    endpoint_type str

    Type of endpoint. Valid values are source, target.

    engine_name str

    Type of engine for the endpoint. Valid values are aurora, aurora-postgresql, azuredb, azure-sql-managed-instance, babelfish, db2, db2-zos, docdb, dynamodb, elasticsearch, kafka, kinesis, mariadb, mongodb, mysql, opensearch, oracle, postgres, redshift, s3, sqlserver, sybase. Please note that some of engine names are available only for target endpoint type (e.g. redshift).

    certificate_arn str

    ARN for the certificate.

    database_name str

    Name of the endpoint database.

    elasticsearch_settings EndpointElasticsearchSettingsArgs

    Configuration block for OpenSearch settings. See below.

    extra_connection_attributes str

    Additional attributes associated with the connection. For available attributes for a source Endpoint, see Sources for data migration. For available attributes for a target Endpoint, see Targets for data migration.

    kafka_settings EndpointKafkaSettingsArgs

    Configuration block for Kafka settings. See below.

    kinesis_settings EndpointKinesisSettingsArgs

    Configuration block for Kinesis settings. See below.

    kms_key_arn str

    ARN for the KMS key that will be used to encrypt the connection parameters. If you do not specify a value for kms_key_arn, then AWS DMS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region. To encrypt an S3 target with a KMS Key, use the parameter s3_settings.server_side_encryption_kms_key_id. When engine_name is redshift, kms_key_arn is the KMS Key for the Redshift target and the parameter redshift_settings.server_side_encryption_kms_key_id encrypts the S3 intermediate storage.

    The following arguments are optional:

    mongodb_settings EndpointMongodbSettingsArgs

    Configuration block for MongoDB settings. See below.

    password str

    Password to be used to login to the endpoint database.

    port int

    Port used by the endpoint database.

    redis_settings EndpointRedisSettingsArgs

    Configuration block for Redis settings. See below.

    redshift_settings EndpointRedshiftSettingsArgs

    Configuration block for Redshift settings. See below.

    s3_settings EndpointS3SettingsArgs

    (Deprecated, use the aws.dms.S3Endpoint resource instead) Configuration block for S3 settings. See below.

    secrets_manager_access_role_arn str

    ARN of the IAM role that specifies AWS DMS as the trusted entity and has the required permissions to access the value in SecretsManagerSecret.

    secrets_manager_arn str

    Full ARN, partial ARN, or friendly name of the SecretsManagerSecret that contains the endpoint connection details. Supported only when engine_name is aurora, aurora-postgresql, mariadb, mongodb, mysql, oracle, postgres, redshift, or sqlserver.

    server_name str

    Host name of the server.

    service_access_role str

    ARN used by the service access IAM role for dynamodb endpoints.

    ssl_mode str

    SSL mode to use for the connection. Valid values are none, require, verify-ca, verify-full

    tags Mapping[str, str]

    Map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.

    username str

    User name to be used to login to the endpoint database.

    endpointId String

    Database endpoint identifier. Identifiers must contain from 1 to 255 alphanumeric characters or hyphens, begin with a letter, contain only ASCII letters, digits, and hyphens, not end with a hyphen, and not contain two consecutive hyphens.

    endpointType String

    Type of endpoint. Valid values are source, target.

    engineName String

    Type of engine for the endpoint. Valid values are aurora, aurora-postgresql, azuredb, azure-sql-managed-instance, babelfish, db2, db2-zos, docdb, dynamodb, elasticsearch, kafka, kinesis, mariadb, mongodb, mysql, opensearch, oracle, postgres, redshift, s3, sqlserver, sybase. Please note that some of engine names are available only for target endpoint type (e.g. redshift).

    certificateArn String

    ARN for the certificate.

    databaseName String

    Name of the endpoint database.

    elasticsearchSettings Property Map

    Configuration block for OpenSearch settings. See below.

    extraConnectionAttributes String

    Additional attributes associated with the connection. For available attributes for a source Endpoint, see Sources for data migration. For available attributes for a target Endpoint, see Targets for data migration.

    kafkaSettings Property Map

    Configuration block for Kafka settings. See below.

    kinesisSettings Property Map

    Configuration block for Kinesis settings. See below.

    kmsKeyArn String

    ARN for the KMS key that will be used to encrypt the connection parameters. If you do not specify a value for kms_key_arn, then AWS DMS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region. To encrypt an S3 target with a KMS Key, use the parameter s3_settings.server_side_encryption_kms_key_id. When engine_name is redshift, kms_key_arn is the KMS Key for the Redshift target and the parameter redshift_settings.server_side_encryption_kms_key_id encrypts the S3 intermediate storage.

    The following arguments are optional:

    mongodbSettings Property Map

    Configuration block for MongoDB settings. See below.

    password String

    Password to be used to login to the endpoint database.

    port Number

    Port used by the endpoint database.

    redisSettings Property Map

    Configuration block for Redis settings. See below.

    redshiftSettings Property Map

    Configuration block for Redshift settings. See below.

    s3Settings Property Map

    (Deprecated, use the aws.dms.S3Endpoint resource instead) Configuration block for S3 settings. See below.

    secretsManagerAccessRoleArn String

    ARN of the IAM role that specifies AWS DMS as the trusted entity and has the required permissions to access the value in SecretsManagerSecret.

    secretsManagerArn String

    Full ARN, partial ARN, or friendly name of the SecretsManagerSecret that contains the endpoint connection details. Supported only when engine_name is aurora, aurora-postgresql, mariadb, mongodb, mysql, oracle, postgres, redshift, or sqlserver.

    serverName String

    Host name of the server.

    serviceAccessRole String

    ARN used by the service access IAM role for dynamodb endpoints.

    sslMode String

    SSL mode to use for the connection. Valid values are none, require, verify-ca, verify-full

    tags Map<String>

    Map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.

    username String

    User name to be used to log in to the endpoint database.

    Outputs

    All input properties are implicitly available as output properties. Additionally, the Endpoint resource produces the following output properties:

    EndpointArn string

    ARN for the endpoint.

    Id string

    The provider-assigned unique ID for this managed resource.

    TagsAll Dictionary<string, string>

    Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

    Deprecated:

    Please use tags instead.

    EndpointArn string

    ARN for the endpoint.

    Id string

    The provider-assigned unique ID for this managed resource.

    TagsAll map[string]string

    Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

    Deprecated:

    Please use tags instead.

    endpointArn String

    ARN for the endpoint.

    id String

    The provider-assigned unique ID for this managed resource.

    tagsAll Map<String,String>

    Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

    Deprecated:

    Please use tags instead.

    endpointArn string

    ARN for the endpoint.

    id string

    The provider-assigned unique ID for this managed resource.

    tagsAll {[key: string]: string}

    Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

    Deprecated:

    Please use tags instead.

    endpoint_arn str

    ARN for the endpoint.

    id str

    The provider-assigned unique ID for this managed resource.

    tags_all Mapping[str, str]

    Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

    Deprecated:

    Please use tags instead.

    endpointArn String

    ARN for the endpoint.

    id String

    The provider-assigned unique ID for this managed resource.

    tagsAll Map<String>

    Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

    Deprecated:

    Please use tags instead.

    Look up Existing Endpoint Resource

    Get an existing Endpoint resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

    public static get(name: string, id: Input<ID>, state?: EndpointState, opts?: CustomResourceOptions): Endpoint
    @staticmethod
    def get(resource_name: str,
            id: str,
            opts: Optional[ResourceOptions] = None,
            certificate_arn: Optional[str] = None,
            database_name: Optional[str] = None,
            elasticsearch_settings: Optional[EndpointElasticsearchSettingsArgs] = None,
            endpoint_arn: Optional[str] = None,
            endpoint_id: Optional[str] = None,
            endpoint_type: Optional[str] = None,
            engine_name: Optional[str] = None,
            extra_connection_attributes: Optional[str] = None,
            kafka_settings: Optional[EndpointKafkaSettingsArgs] = None,
            kinesis_settings: Optional[EndpointKinesisSettingsArgs] = None,
            kms_key_arn: Optional[str] = None,
            mongodb_settings: Optional[EndpointMongodbSettingsArgs] = None,
            password: Optional[str] = None,
            port: Optional[int] = None,
            redis_settings: Optional[EndpointRedisSettingsArgs] = None,
            redshift_settings: Optional[EndpointRedshiftSettingsArgs] = None,
            s3_settings: Optional[EndpointS3SettingsArgs] = None,
            secrets_manager_access_role_arn: Optional[str] = None,
            secrets_manager_arn: Optional[str] = None,
            server_name: Optional[str] = None,
            service_access_role: Optional[str] = None,
            ssl_mode: Optional[str] = None,
            tags: Optional[Mapping[str, str]] = None,
            tags_all: Optional[Mapping[str, str]] = None,
            username: Optional[str] = None) -> Endpoint
    func GetEndpoint(ctx *Context, name string, id IDInput, state *EndpointState, opts ...ResourceOption) (*Endpoint, error)
    public static Endpoint Get(string name, Input<string> id, EndpointState? state, CustomResourceOptions? opts = null)
    public static Endpoint get(String name, Output<String> id, EndpointState state, CustomResourceOptions options)
    Resource lookup is not supported in YAML
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    resource_name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    The following state arguments are supported:
    CertificateArn string

    ARN for the certificate.

    DatabaseName string

    Name of the endpoint database.

    ElasticsearchSettings EndpointElasticsearchSettings

    Configuration block for OpenSearch settings. See below.

    EndpointArn string

    ARN for the endpoint.

    EndpointId string

    Database endpoint identifier. Identifiers must contain from 1 to 255 alphanumeric characters or hyphens, begin with a letter, contain only ASCII letters, digits, and hyphens, not end with a hyphen, and not contain two consecutive hyphens.

    EndpointType string

    Type of endpoint. Valid values are source, target.

    EngineName string

    Type of engine for the endpoint. Valid values are aurora, aurora-postgresql, azuredb, azure-sql-managed-instance, babelfish, db2, db2-zos, docdb, dynamodb, elasticsearch, kafka, kinesis, mariadb, mongodb, mysql, opensearch, oracle, postgres, redshift, s3, sqlserver, sybase. Please note that some of the engine names are available only for the target endpoint type (e.g. redshift).

    ExtraConnectionAttributes string

    Additional attributes associated with the connection. For available attributes for a source Endpoint, see Sources for data migration. For available attributes for a target Endpoint, see Targets for data migration.

    KafkaSettings EndpointKafkaSettings

    Configuration block for Kafka settings. See below.

    KinesisSettings EndpointKinesisSettings

    Configuration block for Kinesis settings. See below.

    KmsKeyArn string

    ARN for the KMS key that will be used to encrypt the connection parameters. If you do not specify a value for kms_key_arn, then AWS DMS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region. To encrypt an S3 target with a KMS Key, use the parameter s3_settings.server_side_encryption_kms_key_id. When engine_name is redshift, kms_key_arn is the KMS Key for the Redshift target and the parameter redshift_settings.server_side_encryption_kms_key_id encrypts the S3 intermediate storage.

    The following arguments are optional:

    MongodbSettings EndpointMongodbSettings

    Configuration block for MongoDB settings. See below.

    Password string

    Password to be used to log in to the endpoint database.

    Port int

    Port used by the endpoint database.

    RedisSettings EndpointRedisSettings

    Configuration block for Redis settings. See below.

    RedshiftSettings EndpointRedshiftSettings

    Configuration block for Redshift settings. See below.

    S3Settings EndpointS3Settings

    (Deprecated, use the aws.dms.S3Endpoint resource instead) Configuration block for S3 settings. See below.

    SecretsManagerAccessRoleArn string

    ARN of the IAM role that specifies AWS DMS as the trusted entity and has the required permissions to access the value in SecretsManagerSecret.

    SecretsManagerArn string

    Full ARN, partial ARN, or friendly name of the SecretsManagerSecret that contains the endpoint connection details. Supported only when engine_name is aurora, aurora-postgresql, mariadb, mongodb, mysql, oracle, postgres, redshift, or sqlserver.

    ServerName string

    Host name of the server.

    ServiceAccessRole string

    ARN used by the service access IAM role for dynamodb endpoints.

    SslMode string

    SSL mode to use for the connection. Valid values are none, require, verify-ca, verify-full

    Tags Dictionary<string, string>

    Map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.

    TagsAll Dictionary<string, string>

    Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

    Deprecated:

    Please use tags instead.

    Username string

    User name to be used to log in to the endpoint database.

    CertificateArn string

    ARN for the certificate.

    DatabaseName string

    Name of the endpoint database.

    ElasticsearchSettings EndpointElasticsearchSettingsArgs

    Configuration block for OpenSearch settings. See below.

    EndpointArn string

    ARN for the endpoint.

    EndpointId string

    Database endpoint identifier. Identifiers must contain from 1 to 255 alphanumeric characters or hyphens, begin with a letter, contain only ASCII letters, digits, and hyphens, not end with a hyphen, and not contain two consecutive hyphens.

    EndpointType string

    Type of endpoint. Valid values are source, target.

    EngineName string

    Type of engine for the endpoint. Valid values are aurora, aurora-postgresql, azuredb, azure-sql-managed-instance, babelfish, db2, db2-zos, docdb, dynamodb, elasticsearch, kafka, kinesis, mariadb, mongodb, mysql, opensearch, oracle, postgres, redshift, s3, sqlserver, sybase. Please note that some of the engine names are available only for the target endpoint type (e.g. redshift).

    ExtraConnectionAttributes string

    Additional attributes associated with the connection. For available attributes for a source Endpoint, see Sources for data migration. For available attributes for a target Endpoint, see Targets for data migration.

    KafkaSettings EndpointKafkaSettingsArgs

    Configuration block for Kafka settings. See below.

    KinesisSettings EndpointKinesisSettingsArgs

    Configuration block for Kinesis settings. See below.

    KmsKeyArn string

    ARN for the KMS key that will be used to encrypt the connection parameters. If you do not specify a value for kms_key_arn, then AWS DMS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region. To encrypt an S3 target with a KMS Key, use the parameter s3_settings.server_side_encryption_kms_key_id. When engine_name is redshift, kms_key_arn is the KMS Key for the Redshift target and the parameter redshift_settings.server_side_encryption_kms_key_id encrypts the S3 intermediate storage.

    The following arguments are optional:

    MongodbSettings EndpointMongodbSettingsArgs

    Configuration block for MongoDB settings. See below.

    Password string

    Password to be used to log in to the endpoint database.

    Port int

    Port used by the endpoint database.

    RedisSettings EndpointRedisSettingsArgs

    Configuration block for Redis settings. See below.

    RedshiftSettings EndpointRedshiftSettingsArgs

    Configuration block for Redshift settings. See below.

    S3Settings EndpointS3SettingsArgs

    (Deprecated, use the aws.dms.S3Endpoint resource instead) Configuration block for S3 settings. See below.

    SecretsManagerAccessRoleArn string

    ARN of the IAM role that specifies AWS DMS as the trusted entity and has the required permissions to access the value in SecretsManagerSecret.

    SecretsManagerArn string

    Full ARN, partial ARN, or friendly name of the SecretsManagerSecret that contains the endpoint connection details. Supported only when engine_name is aurora, aurora-postgresql, mariadb, mongodb, mysql, oracle, postgres, redshift, or sqlserver.

    ServerName string

    Host name of the server.

    ServiceAccessRole string

    ARN used by the service access IAM role for dynamodb endpoints.

    SslMode string

    SSL mode to use for the connection. Valid values are none, require, verify-ca, verify-full

    Tags map[string]string

    Map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.

    TagsAll map[string]string

    Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

    Deprecated:

    Please use tags instead.

    Username string

    User name to be used to log in to the endpoint database.

    certificateArn String

    ARN for the certificate.

    databaseName String

    Name of the endpoint database.

    elasticsearchSettings EndpointElasticsearchSettings

    Configuration block for OpenSearch settings. See below.

    endpointArn String

    ARN for the endpoint.

    endpointId String

    Database endpoint identifier. Identifiers must contain from 1 to 255 alphanumeric characters or hyphens, begin with a letter, contain only ASCII letters, digits, and hyphens, not end with a hyphen, and not contain two consecutive hyphens.

    endpointType String

    Type of endpoint. Valid values are source, target.

    engineName String

    Type of engine for the endpoint. Valid values are aurora, aurora-postgresql, azuredb, azure-sql-managed-instance, babelfish, db2, db2-zos, docdb, dynamodb, elasticsearch, kafka, kinesis, mariadb, mongodb, mysql, opensearch, oracle, postgres, redshift, s3, sqlserver, sybase. Please note that some of the engine names are available only for the target endpoint type (e.g. redshift).

    extraConnectionAttributes String

    Additional attributes associated with the connection. For available attributes for a source Endpoint, see Sources for data migration. For available attributes for a target Endpoint, see Targets for data migration.

    kafkaSettings EndpointKafkaSettings

    Configuration block for Kafka settings. See below.

    kinesisSettings EndpointKinesisSettings

    Configuration block for Kinesis settings. See below.

    kmsKeyArn String

    ARN for the KMS key that will be used to encrypt the connection parameters. If you do not specify a value for kms_key_arn, then AWS DMS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region. To encrypt an S3 target with a KMS Key, use the parameter s3_settings.server_side_encryption_kms_key_id. When engine_name is redshift, kms_key_arn is the KMS Key for the Redshift target and the parameter redshift_settings.server_side_encryption_kms_key_id encrypts the S3 intermediate storage.

    The following arguments are optional:

    mongodbSettings EndpointMongodbSettings

    Configuration block for MongoDB settings. See below.

    password String

    Password to be used to log in to the endpoint database.

    port Integer

    Port used by the endpoint database.

    redisSettings EndpointRedisSettings

    Configuration block for Redis settings. See below.

    redshiftSettings EndpointRedshiftSettings

    Configuration block for Redshift settings. See below.

    s3Settings EndpointS3Settings

    (Deprecated, use the aws.dms.S3Endpoint resource instead) Configuration block for S3 settings. See below.

    secretsManagerAccessRoleArn String

    ARN of the IAM role that specifies AWS DMS as the trusted entity and has the required permissions to access the value in SecretsManagerSecret.

    secretsManagerArn String

    Full ARN, partial ARN, or friendly name of the SecretsManagerSecret that contains the endpoint connection details. Supported only when engine_name is aurora, aurora-postgresql, mariadb, mongodb, mysql, oracle, postgres, redshift, or sqlserver.

    serverName String

    Host name of the server.

    serviceAccessRole String

    ARN used by the service access IAM role for dynamodb endpoints.

    sslMode String

    SSL mode to use for the connection. Valid values are none, require, verify-ca, verify-full

    tags Map<String,String>

    Map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.

    tagsAll Map<String,String>

    Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

    Deprecated:

    Please use tags instead.

    username String

    User name to be used to log in to the endpoint database.

    certificateArn string

    ARN for the certificate.

    databaseName string

    Name of the endpoint database.

    elasticsearchSettings EndpointElasticsearchSettings

    Configuration block for OpenSearch settings. See below.

    endpointArn string

    ARN for the endpoint.

    endpointId string

    Database endpoint identifier. Identifiers must contain from 1 to 255 alphanumeric characters or hyphens, begin with a letter, contain only ASCII letters, digits, and hyphens, not end with a hyphen, and not contain two consecutive hyphens.

    endpointType string

    Type of endpoint. Valid values are source, target.

    engineName string

    Type of engine for the endpoint. Valid values are aurora, aurora-postgresql, azuredb, azure-sql-managed-instance, babelfish, db2, db2-zos, docdb, dynamodb, elasticsearch, kafka, kinesis, mariadb, mongodb, mysql, opensearch, oracle, postgres, redshift, s3, sqlserver, sybase. Please note that some of the engine names are available only for the target endpoint type (e.g. redshift).

    extraConnectionAttributes string

    Additional attributes associated with the connection. For available attributes for a source Endpoint, see Sources for data migration. For available attributes for a target Endpoint, see Targets for data migration.

    kafkaSettings EndpointKafkaSettings

    Configuration block for Kafka settings. See below.

    kinesisSettings EndpointKinesisSettings

    Configuration block for Kinesis settings. See below.

    kmsKeyArn string

    ARN for the KMS key that will be used to encrypt the connection parameters. If you do not specify a value for kms_key_arn, then AWS DMS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region. To encrypt an S3 target with a KMS Key, use the parameter s3_settings.server_side_encryption_kms_key_id. When engine_name is redshift, kms_key_arn is the KMS Key for the Redshift target and the parameter redshift_settings.server_side_encryption_kms_key_id encrypts the S3 intermediate storage.

    The following arguments are optional:

    mongodbSettings EndpointMongodbSettings

    Configuration block for MongoDB settings. See below.

    password string

    Password to be used to log in to the endpoint database.

    port number

    Port used by the endpoint database.

    redisSettings EndpointRedisSettings

    Configuration block for Redis settings. See below.

    redshiftSettings EndpointRedshiftSettings

    Configuration block for Redshift settings. See below.

    s3Settings EndpointS3Settings

    (Deprecated, use the aws.dms.S3Endpoint resource instead) Configuration block for S3 settings. See below.

    secretsManagerAccessRoleArn string

    ARN of the IAM role that specifies AWS DMS as the trusted entity and has the required permissions to access the value in SecretsManagerSecret.

    secretsManagerArn string

    Full ARN, partial ARN, or friendly name of the SecretsManagerSecret that contains the endpoint connection details. Supported only when engine_name is aurora, aurora-postgresql, mariadb, mongodb, mysql, oracle, postgres, redshift, or sqlserver.

    serverName string

    Host name of the server.

    serviceAccessRole string

    ARN used by the service access IAM role for dynamodb endpoints.

    sslMode string

    SSL mode to use for the connection. Valid values are none, require, verify-ca, verify-full

    tags {[key: string]: string}

    Map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.

    tagsAll {[key: string]: string}

    Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

    Deprecated:

    Please use tags instead.

    username string

    User name to be used to log in to the endpoint database.

    certificate_arn str

    ARN for the certificate.

    database_name str

    Name of the endpoint database.

    elasticsearch_settings EndpointElasticsearchSettingsArgs

    Configuration block for OpenSearch settings. See below.

    endpoint_arn str

    ARN for the endpoint.

    endpoint_id str

    Database endpoint identifier. Identifiers must contain from 1 to 255 alphanumeric characters or hyphens, begin with a letter, contain only ASCII letters, digits, and hyphens, not end with a hyphen, and not contain two consecutive hyphens.

    endpoint_type str

    Type of endpoint. Valid values are source, target.

    engine_name str

    Type of engine for the endpoint. Valid values are aurora, aurora-postgresql, azuredb, azure-sql-managed-instance, babelfish, db2, db2-zos, docdb, dynamodb, elasticsearch, kafka, kinesis, mariadb, mongodb, mysql, opensearch, oracle, postgres, redshift, s3, sqlserver, sybase. Please note that some of the engine names are available only for the target endpoint type (e.g. redshift).

    extra_connection_attributes str

    Additional attributes associated with the connection. For available attributes for a source Endpoint, see Sources for data migration. For available attributes for a target Endpoint, see Targets for data migration.

    kafka_settings EndpointKafkaSettingsArgs

    Configuration block for Kafka settings. See below.

    kinesis_settings EndpointKinesisSettingsArgs

    Configuration block for Kinesis settings. See below.

    kms_key_arn str

    ARN for the KMS key that will be used to encrypt the connection parameters. If you do not specify a value for kms_key_arn, then AWS DMS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region. To encrypt an S3 target with a KMS Key, use the parameter s3_settings.server_side_encryption_kms_key_id. When engine_name is redshift, kms_key_arn is the KMS Key for the Redshift target and the parameter redshift_settings.server_side_encryption_kms_key_id encrypts the S3 intermediate storage.

    The following arguments are optional:

    mongodb_settings EndpointMongodbSettingsArgs

    Configuration block for MongoDB settings. See below.

    password str

    Password to be used to log in to the endpoint database.

    port int

    Port used by the endpoint database.

    redis_settings EndpointRedisSettingsArgs

    Configuration block for Redis settings. See below.

    redshift_settings EndpointRedshiftSettingsArgs

    Configuration block for Redshift settings. See below.

    s3_settings EndpointS3SettingsArgs

    (Deprecated, use the aws.dms.S3Endpoint resource instead) Configuration block for S3 settings. See below.

    secrets_manager_access_role_arn str

    ARN of the IAM role that specifies AWS DMS as the trusted entity and has the required permissions to access the value in SecretsManagerSecret.

    secrets_manager_arn str

    Full ARN, partial ARN, or friendly name of the SecretsManagerSecret that contains the endpoint connection details. Supported only when engine_name is aurora, aurora-postgresql, mariadb, mongodb, mysql, oracle, postgres, redshift, or sqlserver.

    server_name str

    Host name of the server.

    service_access_role str

    ARN used by the service access IAM role for dynamodb endpoints.

    ssl_mode str

    SSL mode to use for the connection. Valid values are none, require, verify-ca, verify-full

    tags Mapping[str, str]

    Map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.

    tags_all Mapping[str, str]

    Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

    Deprecated:

    Please use tags instead.

    username str

    User name to be used to log in to the endpoint database.

    certificateArn String

    ARN for the certificate.

    databaseName String

    Name of the endpoint database.

    elasticsearchSettings Property Map

    Configuration block for OpenSearch settings. See below.

    endpointArn String

    ARN for the endpoint.

    endpointId String

    Database endpoint identifier. Identifiers must contain from 1 to 255 alphanumeric characters or hyphens, begin with a letter, contain only ASCII letters, digits, and hyphens, not end with a hyphen, and not contain two consecutive hyphens.

    endpointType String

    Type of endpoint. Valid values are source, target.

    engineName String

    Type of engine for the endpoint. Valid values are aurora, aurora-postgresql, azuredb, azure-sql-managed-instance, babelfish, db2, db2-zos, docdb, dynamodb, elasticsearch, kafka, kinesis, mariadb, mongodb, mysql, opensearch, oracle, postgres, redshift, s3, sqlserver, sybase. Please note that some of the engine names are available only for the target endpoint type (e.g. redshift).

    extraConnectionAttributes String

    Additional attributes associated with the connection. For available attributes for a source Endpoint, see Sources for data migration. For available attributes for a target Endpoint, see Targets for data migration.

    kafkaSettings Property Map

    Configuration block for Kafka settings. See below.

    kinesisSettings Property Map

    Configuration block for Kinesis settings. See below.

    kmsKeyArn String

    ARN for the KMS key that will be used to encrypt the connection parameters. If you do not specify a value for kms_key_arn, then AWS DMS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region. To encrypt an S3 target with a KMS Key, use the parameter s3_settings.server_side_encryption_kms_key_id. When engine_name is redshift, kms_key_arn is the KMS Key for the Redshift target and the parameter redshift_settings.server_side_encryption_kms_key_id encrypts the S3 intermediate storage.

    The following arguments are optional:

    mongodbSettings Property Map

    Configuration block for MongoDB settings. See below.

    password String

    Password to be used to log in to the endpoint database.

    port Number

    Port used by the endpoint database.

    redisSettings Property Map

    Configuration block for Redis settings. See below.

    redshiftSettings Property Map

    Configuration block for Redshift settings. See below.

    s3Settings Property Map

    (Deprecated, use the aws.dms.S3Endpoint resource instead) Configuration block for S3 settings. See below.

    secretsManagerAccessRoleArn String

    ARN of the IAM role that specifies AWS DMS as the trusted entity and has the required permissions to access the value in SecretsManagerSecret.

    secretsManagerArn String

    Full ARN, partial ARN, or friendly name of the SecretsManagerSecret that contains the endpoint connection details. Supported only when engine_name is aurora, aurora-postgresql, mariadb, mongodb, mysql, oracle, postgres, redshift, or sqlserver.

    serverName String

    Host name of the server.

    serviceAccessRole String

    ARN used by the service access IAM role for dynamodb endpoints.

    sslMode String

    SSL mode to use for the connection. Valid values are none, require, verify-ca, verify-full

    tags Map<String>

    Map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.

    tagsAll Map<String>

    Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

    Deprecated:

    Please use tags instead.

    username String

    User name to be used to log in to the endpoint database.

    Supporting Types

    EndpointElasticsearchSettings, EndpointElasticsearchSettingsArgs

    EndpointUri string

    Endpoint for the OpenSearch cluster.

    ServiceAccessRoleArn string

    ARN of the IAM Role with permissions to write to the OpenSearch cluster.

    ErrorRetryDuration int

    Maximum number of seconds for which DMS retries failed API requests to the OpenSearch cluster. Default is 300.

    FullLoadErrorPercentage int

    Maximum percentage of records that can fail to be written before a full load operation stops. Default is 10.

    EndpointUri string

    Endpoint for the OpenSearch cluster.

    ServiceAccessRoleArn string

    ARN of the IAM Role with permissions to write to the OpenSearch cluster.

    ErrorRetryDuration int

    Maximum number of seconds for which DMS retries failed API requests to the OpenSearch cluster. Default is 300.

    FullLoadErrorPercentage int

    Maximum percentage of records that can fail to be written before a full load operation stops. Default is 10.

    endpointUri String

    Endpoint for the OpenSearch cluster.

    serviceAccessRoleArn String

    ARN of the IAM Role with permissions to write to the OpenSearch cluster.

    errorRetryDuration Integer

    Maximum number of seconds for which DMS retries failed API requests to the OpenSearch cluster. Default is 300.

    fullLoadErrorPercentage Integer

    Maximum percentage of records that can fail to be written before a full load operation stops. Default is 10.

    endpointUri string

    Endpoint for the OpenSearch cluster.

    serviceAccessRoleArn string

    ARN of the IAM Role with permissions to write to the OpenSearch cluster.

    errorRetryDuration number

    Maximum number of seconds for which DMS retries failed API requests to the OpenSearch cluster. Default is 300.

    fullLoadErrorPercentage number

    Maximum percentage of records that can fail to be written before a full load operation stops. Default is 10.

    endpoint_uri str

    Endpoint for the OpenSearch cluster.

    service_access_role_arn str

    ARN of the IAM Role with permissions to write to the OpenSearch cluster.

    error_retry_duration int

    Maximum number of seconds for which DMS retries failed API requests to the OpenSearch cluster. Default is 300.

    full_load_error_percentage int

    Maximum percentage of records that can fail to be written before a full load operation stops. Default is 10.

    endpointUri String

    Endpoint for the OpenSearch cluster.

    serviceAccessRoleArn String

    ARN of the IAM Role with permissions to write to the OpenSearch cluster.

    errorRetryDuration Number

    Maximum number of seconds for which DMS retries failed API requests to the OpenSearch cluster. Default is 300.

    fullLoadErrorPercentage Number

    Maximum percentage of records that can fail to be written before a full load operation stops. Default is 10.

    EndpointKafkaSettings, EndpointKafkaSettingsArgs

    Broker string

    Kafka broker location. Specify in the form broker-hostname-or-ip:port.

    IncludeControlDetails bool

    Shows detailed control information for table definition, column definition, and table and column changes in the Kafka message output. Default is false.

    IncludeNullAndEmpty bool

    Include NULL and empty columns for records migrated to the endpoint. Default is false.

    IncludePartitionValue bool

    Shows the partition value within the Kafka message output unless the partition type is schema-table-type. Default is false.

    IncludeTableAlterOperations bool

    Includes any data definition language (DDL) operations that change the table in the control data, such as rename-table, drop-table, add-column, drop-column, and rename-column. Default is false.

    IncludeTransactionDetails bool

    Provides detailed transaction information from the source database. This information includes a commit timestamp, a log position, and values for transaction_id, previous transaction_id, and transaction_record_id (the record offset within a transaction). Default is false.

    MessageFormat string

    Output format for the records created on the endpoint. Message format is JSON (default) or JSON_UNFORMATTED (a single line with no tab).

    MessageMaxBytes int

    Maximum size in bytes for records created on the endpoint. Default is 1,000,000.

    NoHexPrefix bool

    Set this optional parameter to true to avoid adding a '0x' prefix to raw data in hexadecimal format. For example, by default, AWS DMS adds a '0x' prefix to the LOB column type in hexadecimal format moving from an Oracle source to a Kafka target. Use the no_hex_prefix endpoint setting to enable migration of RAW data type columns without adding the '0x' prefix.

    PartitionIncludeSchemaTable bool

    Prefixes schema and table names to partition values, when the partition type is primary-key-type. Doing this increases data distribution among Kafka partitions. For example, suppose that a SysBench schema has thousands of tables and each table has only limited range for a primary key. In this case, the same primary key is sent from thousands of tables to the same partition, which causes throttling. Default is false.

    SaslPassword string

    Secure password you created when you first set up your MSK cluster to validate a client identity and make an encrypted connection between server and client using SASL-SSL authentication.

    SaslUsername string

    Secure user name you created when you first set up your MSK cluster to validate a client identity and make an encrypted connection between server and client using SASL-SSL authentication.

    SecurityProtocol string

    Set secure connection to a Kafka target endpoint using Transport Layer Security (TLS). Options include ssl-encryption, ssl-authentication, and sasl-ssl. sasl-ssl requires sasl_username and sasl_password.

    SslCaCertificateArn string

    ARN for the private certificate authority (CA) cert that AWS DMS uses to securely connect to your Kafka target endpoint.

    SslClientCertificateArn string

    ARN of the client certificate used to securely connect to a Kafka target endpoint.

    SslClientKeyArn string

    ARN for the client private key used to securely connect to a Kafka target endpoint.

    SslClientKeyPassword string

    Password for the client private key used to securely connect to a Kafka target endpoint.

    Topic string

    Kafka topic for migration. Default is kafka-default-topic.

    Broker string

    Kafka broker location. Specify in the form broker-hostname-or-ip:port.

    IncludeControlDetails bool

    Shows detailed control information for table definition, column definition, and table and column changes in the Kafka message output. Default is false.

    IncludeNullAndEmpty bool

    Include NULL and empty columns for records migrated to the endpoint. Default is false.

    IncludePartitionValue bool

    Shows the partition value within the Kafka message output unless the partition type is schema-table-type. Default is false.

    IncludeTableAlterOperations bool

    Includes any data definition language (DDL) operations that change the table in the control data, such as rename-table, drop-table, add-column, drop-column, and rename-column. Default is false.

    IncludeTransactionDetails bool

    Provides detailed transaction information from the source database. This information includes a commit timestamp, a log position, and values for transaction_id, previous transaction_id, and transaction_record_id (the record offset within a transaction). Default is false.

    MessageFormat string

    Output format for the records created on the endpoint. Message format is JSON (default) or JSON_UNFORMATTED (a single line with no tab).

    MessageMaxBytes int

    Maximum size in bytes for records created on the endpoint. Default is 1,000,000.

    NoHexPrefix bool

    Set this optional parameter to true to avoid adding a '0x' prefix to raw data in hexadecimal format. For example, by default, AWS DMS adds a '0x' prefix to the LOB column type in hexadecimal format moving from an Oracle source to a Kafka target. Use the no_hex_prefix endpoint setting to enable migration of RAW data type columns without adding the '0x' prefix.

    PartitionIncludeSchemaTable bool

    Prefixes schema and table names to partition values, when the partition type is primary-key-type. Doing this increases data distribution among Kafka partitions. For example, suppose that a SysBench schema has thousands of tables and each table has only limited range for a primary key. In this case, the same primary key is sent from thousands of tables to the same partition, which causes throttling. Default is false.

    SaslPassword string

    Secure password you created when you first set up your MSK cluster to validate a client identity and make an encrypted connection between server and client using SASL-SSL authentication.

    SaslUsername string

    Secure user name you created when you first set up your MSK cluster to validate a client identity and make an encrypted connection between server and client using SASL-SSL authentication.

    SecurityProtocol string

    Set secure connection to a Kafka target endpoint using Transport Layer Security (TLS). Options include ssl-encryption, ssl-authentication, and sasl-ssl. sasl-ssl requires sasl_username and sasl_password.

    SslCaCertificateArn string

    ARN for the private certificate authority (CA) cert that AWS DMS uses to securely connect to your Kafka target endpoint.

    SslClientCertificateArn string

    ARN of the client certificate used to securely connect to a Kafka target endpoint.

    SslClientKeyArn string

    ARN for the client private key used to securely connect to a Kafka target endpoint.

    SslClientKeyPassword string

    Password for the client private key used to securely connect to a Kafka target endpoint.

    Topic string

    Kafka topic for migration. Default is kafka-default-topic.

    broker String

    Kafka broker location. Specify in the form broker-hostname-or-ip:port.

    includeControlDetails Boolean

    Shows detailed control information for table definition, column definition, and table and column changes in the Kafka message output. Default is false.

    includeNullAndEmpty Boolean

    Include NULL and empty columns for records migrated to the endpoint. Default is false.

    includePartitionValue Boolean

    Shows the partition value within the Kafka message output unless the partition type is schema-table-type. Default is false.

    includeTableAlterOperations Boolean

    Includes any data definition language (DDL) operations that change the table in the control data, such as rename-table, drop-table, add-column, drop-column, and rename-column. Default is false.

    includeTransactionDetails Boolean

    Provides detailed transaction information from the source database. This information includes a commit timestamp, a log position, and values for transaction_id, previous transaction_id, and transaction_record_id (the record offset within a transaction). Default is false.

    messageFormat String

    Output format for the records created on the endpoint. Message format is JSON (default) or JSON_UNFORMATTED (a single line with no tab).

    messageMaxBytes Integer

    Maximum size in bytes for records created on the endpoint. Default is 1,000,000.

    noHexPrefix Boolean

    Set this optional parameter to true to avoid adding a '0x' prefix to raw data in hexadecimal format. For example, by default, AWS DMS adds a '0x' prefix to the LOB column type in hexadecimal format moving from an Oracle source to a Kafka target. Use the no_hex_prefix endpoint setting to enable migration of RAW data type columns without adding the '0x' prefix.

    partitionIncludeSchemaTable Boolean

    Prefixes schema and table names to partition values, when the partition type is primary-key-type. Doing this increases data distribution among Kafka partitions. For example, suppose that a SysBench schema has thousands of tables and each table has only limited range for a primary key. In this case, the same primary key is sent from thousands of tables to the same partition, which causes throttling. Default is false.

    saslPassword String

    Secure password you created when you first set up your MSK cluster to validate a client identity and make an encrypted connection between server and client using SASL-SSL authentication.

    saslUsername String

    Secure user name you created when you first set up your MSK cluster to validate a client identity and make an encrypted connection between server and client using SASL-SSL authentication.

    securityProtocol String

    Set secure connection to a Kafka target endpoint using Transport Layer Security (TLS). Options include ssl-encryption, ssl-authentication, and sasl-ssl. sasl-ssl requires sasl_username and sasl_password.

    sslCaCertificateArn String

    ARN for the private certificate authority (CA) cert that AWS DMS uses to securely connect to your Kafka target endpoint.

    sslClientCertificateArn String

    ARN of the client certificate used to securely connect to a Kafka target endpoint.

    sslClientKeyArn String

    ARN for the client private key used to securely connect to a Kafka target endpoint.

    sslClientKeyPassword String

    Password for the client private key used to securely connect to a Kafka target endpoint.

    topic String

    Kafka topic for migration. Default is kafka-default-topic.

    broker string

    Kafka broker location. Specify in the form broker-hostname-or-ip:port.

    includeControlDetails boolean

    Shows detailed control information for table definition, column definition, and table and column changes in the Kafka message output. Default is false.

    includeNullAndEmpty boolean

    Include NULL and empty columns for records migrated to the endpoint. Default is false.

    includePartitionValue boolean

    Shows the partition value within the Kafka message output unless the partition type is schema-table-type. Default is false.

    includeTableAlterOperations boolean

    Includes any data definition language (DDL) operations that change the table in the control data, such as rename-table, drop-table, add-column, drop-column, and rename-column. Default is false.

    includeTransactionDetails boolean

    Provides detailed transaction information from the source database. This information includes a commit timestamp, a log position, and values for transaction_id, previous transaction_id, and transaction_record_id (the record offset within a transaction). Default is false.

    messageFormat string

    Output format for the records created on the endpoint. Message format is JSON (default) or JSON_UNFORMATTED (a single line with no tab).

    messageMaxBytes number

    Maximum size in bytes for records created on the endpoint. Default is 1,000,000.

    noHexPrefix boolean

    Set this optional parameter to true to avoid adding a '0x' prefix to raw data in hexadecimal format. For example, by default, AWS DMS adds a '0x' prefix to the LOB column type in hexadecimal format moving from an Oracle source to a Kafka target. Use the no_hex_prefix endpoint setting to enable migration of RAW data type columns without adding the '0x' prefix.

    partitionIncludeSchemaTable boolean

    Prefixes schema and table names to partition values, when the partition type is primary-key-type. Doing this increases data distribution among Kafka partitions. For example, suppose that a SysBench schema has thousands of tables and each table has only limited range for a primary key. In this case, the same primary key is sent from thousands of tables to the same partition, which causes throttling. Default is false.

    saslPassword string

    Secure password you created when you first set up your MSK cluster to validate a client identity and make an encrypted connection between server and client using SASL-SSL authentication.

    saslUsername string

    Secure user name you created when you first set up your MSK cluster to validate a client identity and make an encrypted connection between server and client using SASL-SSL authentication.

    securityProtocol string

    Set secure connection to a Kafka target endpoint using Transport Layer Security (TLS). Options include ssl-encryption, ssl-authentication, and sasl-ssl. sasl-ssl requires sasl_username and sasl_password.

    sslCaCertificateArn string

    ARN for the private certificate authority (CA) cert that AWS DMS uses to securely connect to your Kafka target endpoint.

    sslClientCertificateArn string

    ARN of the client certificate used to securely connect to a Kafka target endpoint.

    sslClientKeyArn string

    ARN for the client private key used to securely connect to a Kafka target endpoint.

    sslClientKeyPassword string

    Password for the client private key used to securely connect to a Kafka target endpoint.

    topic string

    Kafka topic for migration. Default is kafka-default-topic.

    broker str

    Kafka broker location. Specify in the form broker-hostname-or-ip:port.

    include_control_details bool

    Shows detailed control information for table definition, column definition, and table and column changes in the Kafka message output. Default is false.

    include_null_and_empty bool

    Include NULL and empty columns for records migrated to the endpoint. Default is false.

    include_partition_value bool

    Shows the partition value within the Kafka message output unless the partition type is schema-table-type. Default is false.

    include_table_alter_operations bool

    Includes any data definition language (DDL) operations that change the table in the control data, such as rename-table, drop-table, add-column, drop-column, and rename-column. Default is false.

    include_transaction_details bool

    Provides detailed transaction information from the source database. This information includes a commit timestamp, a log position, and values for transaction_id, previous transaction_id, and transaction_record_id (the record offset within a transaction). Default is false.

    message_format str

    Output format for the records created on the endpoint. Message format is JSON (default) or JSON_UNFORMATTED (a single line with no tab).

    message_max_bytes int

    Maximum size in bytes for records created on the endpoint. Default is 1,000,000.

    no_hex_prefix bool

    Set this optional parameter to true to avoid adding a '0x' prefix to raw data in hexadecimal format. For example, by default, AWS DMS adds a '0x' prefix to the LOB column type in hexadecimal format moving from an Oracle source to a Kafka target. Use the no_hex_prefix endpoint setting to enable migration of RAW data type columns without adding the '0x' prefix.

    partition_include_schema_table bool

    Prefixes schema and table names to partition values, when the partition type is primary-key-type. Doing this increases data distribution among Kafka partitions. For example, suppose that a SysBench schema has thousands of tables and each table has only limited range for a primary key. In this case, the same primary key is sent from thousands of tables to the same partition, which causes throttling. Default is false.

    sasl_password str

    Secure password you created when you first set up your MSK cluster to validate a client identity and make an encrypted connection between server and client using SASL-SSL authentication.

    sasl_username str

    Secure user name you created when you first set up your MSK cluster to validate a client identity and make an encrypted connection between server and client using SASL-SSL authentication.

    security_protocol str

    Set secure connection to a Kafka target endpoint using Transport Layer Security (TLS). Options include ssl-encryption, ssl-authentication, and sasl-ssl. sasl-ssl requires sasl_username and sasl_password.

    ssl_ca_certificate_arn str

    ARN for the private certificate authority (CA) cert that AWS DMS uses to securely connect to your Kafka target endpoint.

    ssl_client_certificate_arn str

    ARN of the client certificate used to securely connect to a Kafka target endpoint.

    ssl_client_key_arn str

    ARN for the client private key used to securely connect to a Kafka target endpoint.

    ssl_client_key_password str

    Password for the client private key used to securely connect to a Kafka target endpoint.

    topic str

    Kafka topic for migration. Default is kafka-default-topic.

    broker String

    Kafka broker location. Specify in the form broker-hostname-or-ip:port.

    includeControlDetails Boolean

    Shows detailed control information for table definition, column definition, and table and column changes in the Kafka message output. Default is false.

    includeNullAndEmpty Boolean

    Include NULL and empty columns for records migrated to the endpoint. Default is false.

    includePartitionValue Boolean

    Shows the partition value within the Kafka message output unless the partition type is schema-table-type. Default is false.

    includeTableAlterOperations Boolean

    Includes any data definition language (DDL) operations that change the table in the control data, such as rename-table, drop-table, add-column, drop-column, and rename-column. Default is false.

    includeTransactionDetails Boolean

    Provides detailed transaction information from the source database. This information includes a commit timestamp, a log position, and values for transaction_id, previous transaction_id, and transaction_record_id (the record offset within a transaction). Default is false.

    messageFormat String

    Output format for the records created on the endpoint. Message format is JSON (default) or JSON_UNFORMATTED (a single line with no tab).

    messageMaxBytes Number

    Maximum size in bytes for records created on the endpoint. Default is 1,000,000.

    noHexPrefix Boolean

    Set this optional parameter to true to avoid adding a '0x' prefix to raw data in hexadecimal format. For example, by default, AWS DMS adds a '0x' prefix to the LOB column type in hexadecimal format moving from an Oracle source to a Kafka target. Use the no_hex_prefix endpoint setting to enable migration of RAW data type columns without adding the '0x' prefix.

    partitionIncludeSchemaTable Boolean

    Prefixes schema and table names to partition values, when the partition type is primary-key-type. Doing this increases data distribution among Kafka partitions. For example, suppose that a SysBench schema has thousands of tables and each table has only limited range for a primary key. In this case, the same primary key is sent from thousands of tables to the same partition, which causes throttling. Default is false.

    saslPassword String

    Secure password you created when you first set up your MSK cluster to validate a client identity and make an encrypted connection between server and client using SASL-SSL authentication.

    saslUsername String

    Secure user name you created when you first set up your MSK cluster to validate a client identity and make an encrypted connection between server and client using SASL-SSL authentication.

    securityProtocol String

    Set secure connection to a Kafka target endpoint using Transport Layer Security (TLS). Options include ssl-encryption, ssl-authentication, and sasl-ssl. sasl-ssl requires sasl_username and sasl_password.

    sslCaCertificateArn String

    ARN for the private certificate authority (CA) cert that AWS DMS uses to securely connect to your Kafka target endpoint.

    sslClientCertificateArn String

    ARN of the client certificate used to securely connect to a Kafka target endpoint.

    sslClientKeyArn String

    ARN for the client private key used to securely connect to a Kafka target endpoint.

    sslClientKeyPassword String

    Password for the client private key used to securely connect to a Kafka target endpoint.

    topic String

    Kafka topic for migration. Default is kafka-default-topic.

    EndpointKinesisSettings, EndpointKinesisSettingsArgs

    IncludeControlDetails bool

    Shows detailed control information for table definition, column definition, and table and column changes in the Kinesis message output. Default is false.

    IncludeNullAndEmpty bool

    Include NULL and empty columns in the target. Default is false.

    IncludePartitionValue bool

    Shows the partition value within the Kinesis message output, unless the partition type is schema-table-type. Default is false.

    IncludeTableAlterOperations bool

    Includes any data definition language (DDL) operations that change the table in the control data. Default is false.

    IncludeTransactionDetails bool

    Provides detailed transaction information from the source database. Default is false.

    MessageFormat string

    Output format for the records created. Default is json. Valid values are json and json-unformatted (a single line with no tab).

    PartitionIncludeSchemaTable bool

    Prefixes schema and table names to partition values, when the partition type is primary-key-type. Default is false.

    ServiceAccessRoleArn string

    ARN of the IAM Role with permissions to write to the Kinesis data stream.

    StreamArn string

    ARN of the Kinesis data stream.

    IncludeControlDetails bool

    Shows detailed control information for table definition, column definition, and table and column changes in the Kinesis message output. Default is false.

    IncludeNullAndEmpty bool

    Include NULL and empty columns in the target. Default is false.

    IncludePartitionValue bool

    Shows the partition value within the Kinesis message output, unless the partition type is schema-table-type. Default is false.

    IncludeTableAlterOperations bool

    Includes any data definition language (DDL) operations that change the table in the control data. Default is false.

    IncludeTransactionDetails bool

    Provides detailed transaction information from the source database. Default is false.

    MessageFormat string

    Output format for the records created. Default is json. Valid values are json and json-unformatted (a single line with no tab).

    PartitionIncludeSchemaTable bool

    Prefixes schema and table names to partition values, when the partition type is primary-key-type. Default is false.

    ServiceAccessRoleArn string

    ARN of the IAM Role with permissions to write to the Kinesis data stream.

    StreamArn string

    ARN of the Kinesis data stream.

    includeControlDetails Boolean

    Shows detailed control information for table definition, column definition, and table and column changes in the Kinesis message output. Default is false.

    includeNullAndEmpty Boolean

    Include NULL and empty columns in the target. Default is false.

    includePartitionValue Boolean

    Shows the partition value within the Kinesis message output, unless the partition type is schema-table-type. Default is false.

    includeTableAlterOperations Boolean

    Includes any data definition language (DDL) operations that change the table in the control data. Default is false.

    includeTransactionDetails Boolean

    Provides detailed transaction information from the source database. Default is false.

    messageFormat String

    Output format for the records created. Default is json. Valid values are json and json-unformatted (a single line with no tab).

    partitionIncludeSchemaTable Boolean

    Prefixes schema and table names to partition values, when the partition type is primary-key-type. Default is false.

    serviceAccessRoleArn String

    ARN of the IAM Role with permissions to write to the Kinesis data stream.

    streamArn String

    ARN of the Kinesis data stream.

    includeControlDetails boolean

    Shows detailed control information for table definition, column definition, and table and column changes in the Kinesis message output. Default is false.

    includeNullAndEmpty boolean

    Include NULL and empty columns in the target. Default is false.

    includePartitionValue boolean

    Shows the partition value within the Kinesis message output, unless the partition type is schema-table-type. Default is false.

    includeTableAlterOperations boolean

    Includes any data definition language (DDL) operations that change the table in the control data. Default is false.

    includeTransactionDetails boolean

    Provides detailed transaction information from the source database. Default is false.

    messageFormat string

    Output format for the records created. Default is json. Valid values are json and json-unformatted (a single line with no tab).

    partitionIncludeSchemaTable boolean

    Prefixes schema and table names to partition values, when the partition type is primary-key-type. Default is false.

    serviceAccessRoleArn string

    ARN of the IAM Role with permissions to write to the Kinesis data stream.

    streamArn string

    ARN of the Kinesis data stream.

    include_control_details bool

    Shows detailed control information for table definition, column definition, and table and column changes in the Kinesis message output. Default is false.

    include_null_and_empty bool

    Include NULL and empty columns in the target. Default is false.

    include_partition_value bool

    Shows the partition value within the Kinesis message output, unless the partition type is schema-table-type. Default is false.

    include_table_alter_operations bool

    Includes any data definition language (DDL) operations that change the table in the control data. Default is false.

    include_transaction_details bool

    Provides detailed transaction information from the source database. Default is false.

    message_format str

    Output format for the records created. Default is json. Valid values are json and json-unformatted (a single line with no tab).

    partition_include_schema_table bool

    Prefixes schema and table names to partition values, when the partition type is primary-key-type. Default is false.

    service_access_role_arn str

    ARN of the IAM Role with permissions to write to the Kinesis data stream.

    stream_arn str

    ARN of the Kinesis data stream.

    includeControlDetails Boolean

    Shows detailed control information for table definition, column definition, and table and column changes in the Kinesis message output. Default is false.

    includeNullAndEmpty Boolean

    Include NULL and empty columns in the target. Default is false.

    includePartitionValue Boolean

    Shows the partition value within the Kinesis message output, unless the partition type is schema-table-type. Default is false.

    includeTableAlterOperations Boolean

    Includes any data definition language (DDL) operations that change the table in the control data. Default is false.

    includeTransactionDetails Boolean

    Provides detailed transaction information from the source database. Default is false.

    messageFormat String

    Output format for the records created. Default is json. Valid values are json and json-unformatted (a single line with no tab).

    partitionIncludeSchemaTable Boolean

    Prefixes schema and table names to partition values, when the partition type is primary-key-type. Default is false.

    serviceAccessRoleArn String

    ARN of the IAM Role with permissions to write to the Kinesis data stream.

    streamArn String

    ARN of the Kinesis data stream.

    EndpointMongodbSettings, EndpointMongodbSettingsArgs

    AuthMechanism string

    Authentication mechanism to access the MongoDB source endpoint. Default is default.

    AuthSource string

    Authentication database name. Not used when auth_type is no. Default is admin.

    AuthType string

    Authentication type to access the MongoDB source endpoint. Default is password.

    DocsToInvestigate string

    Number of documents to preview to determine the document organization. Use this setting when nesting_level is set to one. Default is 1000.

    ExtractDocId string

    Document ID. Use this setting when nesting_level is set to none. Default is false.

    NestingLevel string

    Specifies either document or table mode. Default is none. Valid values are one (table mode) and none (document mode).

    AuthMechanism string

    Authentication mechanism to access the MongoDB source endpoint. Default is default.

    AuthSource string

    Authentication database name. Not used when auth_type is no. Default is admin.

    AuthType string

    Authentication type to access the MongoDB source endpoint. Default is password.

    DocsToInvestigate string

    Number of documents to preview to determine the document organization. Use this setting when nesting_level is set to one. Default is 1000.

    ExtractDocId string

    Document ID. Use this setting when nesting_level is set to none. Default is false.

    NestingLevel string

    Specifies either document or table mode. Default is none. Valid values are one (table mode) and none (document mode).

    authMechanism String

    Authentication mechanism to access the MongoDB source endpoint. Default is default.

    authSource String

    Authentication database name. Not used when auth_type is no. Default is admin.

    authType String

    Authentication type to access the MongoDB source endpoint. Default is password.

    docsToInvestigate String

    Number of documents to preview to determine the document organization. Use this setting when nesting_level is set to one. Default is 1000.

    extractDocId String

    Document ID. Use this setting when nesting_level is set to none. Default is false.

    nestingLevel String

    Specifies either document or table mode. Default is none. Valid values are one (table mode) and none (document mode).

    authMechanism string

    Authentication mechanism to access the MongoDB source endpoint. Default is default.

    authSource string

    Authentication database name. Not used when auth_type is no. Default is admin.

    authType string

    Authentication type to access the MongoDB source endpoint. Default is password.

    docsToInvestigate string

    Number of documents to preview to determine the document organization. Use this setting when nesting_level is set to one. Default is 1000.

    extractDocId string

    Document ID. Use this setting when nesting_level is set to none. Default is false.

    nestingLevel string

    Specifies either document or table mode. Default is none. Valid values are one (table mode) and none (document mode).

    auth_mechanism str

    Authentication mechanism to access the MongoDB source endpoint. Default is default.

    auth_source str

    Authentication database name. Not used when auth_type is no. Default is admin.

    auth_type str

    Authentication type to access the MongoDB source endpoint. Default is password.

    docs_to_investigate str

    Number of documents to preview to determine the document organization. Use this setting when nesting_level is set to one. Default is 1000.

    extract_doc_id str

    Document ID. Use this setting when nesting_level is set to none. Default is false.

    nesting_level str

    Specifies either document or table mode. Default is none. Valid values are one (table mode) and none (document mode).

    authMechanism String

    Authentication mechanism to access the MongoDB source endpoint. Default is default.

    authSource String

    Authentication database name. Not used when auth_type is no. Default is admin.

    authType String

    Authentication type to access the MongoDB source endpoint. Default is password.

    docsToInvestigate String

    Number of documents to preview to determine the document organization. Use this setting when nesting_level is set to one. Default is 1000.

    extractDocId String

    Document ID. Use this setting when nesting_level is set to none. Default is false.

    nestingLevel String

    Specifies either document or table mode. Default is none. Valid values are one (table mode) and none (document mode).

    EndpointRedisSettings, EndpointRedisSettingsArgs

    AuthType string

    The type of authentication to perform when connecting to a Redis target. Options include none, auth-token, and auth-role. The auth-token option requires an auth_password value to be provided. The auth-role option requires auth_user_name and auth_password values to be provided.

    Port int

    Transmission Control Protocol (TCP) port for the endpoint.

    ServerName string

    Fully qualified domain name of the endpoint.

    AuthPassword string

    The password provided with the auth-role and auth-token options of the AuthType setting for a Redis target endpoint.

    AuthUserName string

    The username provided with the auth-role option of the AuthType setting for a Redis target endpoint.

    SslCaCertificateArn string

    The Amazon Resource Name (ARN) for the certificate authority (CA) that DMS uses to connect to your Redis target endpoint.

    SslSecurityProtocol string

    The plaintext option doesn't provide Transport Layer Security (TLS) encryption for traffic between endpoint and database. Options include plaintext, ssl-encryption. The default is ssl-encryption.

    AuthType string

    The type of authentication to perform when connecting to a Redis target. Options include none, auth-token, and auth-role. The auth-token option requires an auth_password value to be provided. The auth-role option requires auth_user_name and auth_password values to be provided.

    Port int

    Transmission Control Protocol (TCP) port for the endpoint.

    ServerName string

    Fully qualified domain name of the endpoint.

    AuthPassword string

    The password provided with the auth-role and auth-token options of the AuthType setting for a Redis target endpoint.

    AuthUserName string

    The username provided with the auth-role option of the AuthType setting for a Redis target endpoint.

    SslCaCertificateArn string

    The Amazon Resource Name (ARN) for the certificate authority (CA) that DMS uses to connect to your Redis target endpoint.

    SslSecurityProtocol string

    The plaintext option doesn't provide Transport Layer Security (TLS) encryption for traffic between endpoint and database. Options include plaintext, ssl-encryption. The default is ssl-encryption.

    authType String

    The type of authentication to perform when connecting to a Redis target. Options include none, auth-token, and auth-role. The auth-token option requires an auth_password value to be provided. The auth-role option requires auth_user_name and auth_password values to be provided.

    port Integer

    Transmission Control Protocol (TCP) port for the endpoint.

    serverName String

    Fully qualified domain name of the endpoint.

    authPassword String

    The password provided with the auth-role and auth-token options of the AuthType setting for a Redis target endpoint.

    authUserName String

    The username provided with the auth-role option of the AuthType setting for a Redis target endpoint.

    sslCaCertificateArn String

    The Amazon Resource Name (ARN) for the certificate authority (CA) that DMS uses to connect to your Redis target endpoint.

    sslSecurityProtocol String

    The plaintext option doesn't provide Transport Layer Security (TLS) encryption for traffic between endpoint and database. Options include plaintext, ssl-encryption. The default is ssl-encryption.

    authType string

    The type of authentication to perform when connecting to a Redis target. Options include none, auth-token, and auth-role. The auth-token option requires an auth_password value to be provided. The auth-role option requires auth_user_name and auth_password values to be provided.

    port number

    Transmission Control Protocol (TCP) port for the endpoint.

    serverName string

    Fully qualified domain name of the endpoint.

    authPassword string

    The password provided with the auth-role and auth-token options of the AuthType setting for a Redis target endpoint.

    authUserName string

    The username provided with the auth-role option of the AuthType setting for a Redis target endpoint.

    sslCaCertificateArn string

    The Amazon Resource Name (ARN) for the certificate authority (CA) that DMS uses to connect to your Redis target endpoint.

    sslSecurityProtocol string

    The plaintext option doesn't provide Transport Layer Security (TLS) encryption for traffic between endpoint and database. Options include plaintext, ssl-encryption. The default is ssl-encryption.

    auth_type str

    The type of authentication to perform when connecting to a Redis target. Options include none, auth-token, and auth-role. The auth-token option requires an auth_password value to be provided. The auth-role option requires auth_user_name and auth_password values to be provided.

    port int

    Transmission Control Protocol (TCP) port for the endpoint.

    server_name str

    Fully qualified domain name of the endpoint.

    auth_password str

    The password provided with the auth-role and auth-token options of the AuthType setting for a Redis target endpoint.

    auth_user_name str

    The username provided with the auth-role option of the AuthType setting for a Redis target endpoint.

    ssl_ca_certificate_arn str

    The Amazon Resource Name (ARN) for the certificate authority (CA) that DMS uses to connect to your Redis target endpoint.

    ssl_security_protocol str

    The plaintext option doesn't provide Transport Layer Security (TLS) encryption for traffic between endpoint and database. Options include plaintext, ssl-encryption. The default is ssl-encryption.

    authType String

    The type of authentication to perform when connecting to a Redis target. Options include none, auth-token, and auth-role. The auth-token option requires an auth_password value to be provided. The auth-role option requires auth_user_name and auth_password values to be provided.

    port Number

    Transmission Control Protocol (TCP) port for the endpoint.

    serverName String

    Fully qualified domain name of the endpoint.

    authPassword String

    The password provided with the auth-role and auth-token options of the AuthType setting for a Redis target endpoint.

    authUserName String

    The username provided with the auth-role option of the AuthType setting for a Redis target endpoint.

    sslCaCertificateArn String

    The Amazon Resource Name (ARN) for the certificate authority (CA) that DMS uses to connect to your Redis target endpoint.

    sslSecurityProtocol String

    The plaintext option doesn't provide Transport Layer Security (TLS) encryption for traffic between endpoint and database. Options include plaintext, ssl-encryption. The default is ssl-encryption.

    EndpointRedshiftSettings, EndpointRedshiftSettingsArgs

    BucketFolder string

    Custom S3 Bucket Object prefix for intermediate storage.

    BucketName string

    Custom S3 Bucket name for intermediate storage.

    EncryptionMode string

    The server-side encryption mode that you want to encrypt your intermediate .csv object files copied to S3. Defaults to SSE_S3. Valid values are SSE_S3 and SSE_KMS.

    ServerSideEncryptionKmsKeyId string

    ARN or Id of KMS Key to use when encryption_mode is SSE_KMS.

    ServiceAccessRoleArn string

    Amazon Resource Name (ARN) of the IAM Role with permissions to read from or write to the S3 Bucket for intermediate storage.

    BucketFolder string

    Custom S3 Bucket Object prefix for intermediate storage.

    BucketName string

    Custom S3 Bucket name for intermediate storage.

    EncryptionMode string

    The server-side encryption mode that you want to encrypt your intermediate .csv object files copied to S3. Defaults to SSE_S3. Valid values are SSE_S3 and SSE_KMS.

    ServerSideEncryptionKmsKeyId string

    ARN or Id of KMS Key to use when encryption_mode is SSE_KMS.

    ServiceAccessRoleArn string

    Amazon Resource Name (ARN) of the IAM Role with permissions to read from or write to the S3 Bucket for intermediate storage.

    bucketFolder String

    Custom S3 Bucket Object prefix for intermediate storage.

    bucketName String

    Custom S3 Bucket name for intermediate storage.

    encryptionMode String

    The server-side encryption mode that you want to encrypt your intermediate .csv object files copied to S3. Defaults to SSE_S3. Valid values are SSE_S3 and SSE_KMS.

    serverSideEncryptionKmsKeyId String

    ARN or Id of KMS Key to use when encryption_mode is SSE_KMS.

    serviceAccessRoleArn String

    Amazon Resource Name (ARN) of the IAM Role with permissions to read from or write to the S3 Bucket for intermediate storage.

    bucketFolder string

    Custom S3 Bucket Object prefix for intermediate storage.

    bucketName string

    Custom S3 Bucket name for intermediate storage.

    encryptionMode string

    The server-side encryption mode that you want to encrypt your intermediate .csv object files copied to S3. Defaults to SSE_S3. Valid values are SSE_S3 and SSE_KMS.

    serverSideEncryptionKmsKeyId string

    ARN or Id of KMS Key to use when encryption_mode is SSE_KMS.

    serviceAccessRoleArn string

    Amazon Resource Name (ARN) of the IAM Role with permissions to read from or write to the S3 Bucket for intermediate storage.

    bucket_folder str

    Custom S3 Bucket Object prefix for intermediate storage.

    bucket_name str

    Custom S3 Bucket name for intermediate storage.

    encryption_mode str

    The server-side encryption mode that you want to encrypt your intermediate .csv object files copied to S3. Defaults to SSE_S3. Valid values are SSE_S3 and SSE_KMS.

    server_side_encryption_kms_key_id str

    ARN or Id of KMS Key to use when encryption_mode is SSE_KMS.

    service_access_role_arn str

    Amazon Resource Name (ARN) of the IAM Role with permissions to read from or write to the S3 Bucket for intermediate storage.

    bucketFolder String

    Custom S3 Bucket Object prefix for intermediate storage.

    bucketName String

    Custom S3 Bucket name for intermediate storage.

    encryptionMode String

    The server-side encryption mode that you want to encrypt your intermediate .csv object files copied to S3. Defaults to SSE_S3. Valid values are SSE_S3 and SSE_KMS.

    serverSideEncryptionKmsKeyId String

    ARN or Id of KMS Key to use when encryption_mode is SSE_KMS.

    serviceAccessRoleArn String

    Amazon Resource Name (ARN) of the IAM Role with permissions to read from or write to the S3 Bucket for intermediate storage.

    EndpointS3Settings, EndpointS3SettingsArgs

    AddColumnName bool

    Whether to add column name information to the .csv output file. Default is false.

    BucketFolder string

    Custom S3 Bucket Object prefix for intermediate storage.

    BucketName string

    Custom S3 Bucket name for intermediate storage.

    CannedAclForObjects string

    Predefined (canned) access control list for objects created in an S3 bucket. Valid values include none, private, public-read, public-read-write, authenticated-read, aws-exec-read, bucket-owner-read, and bucket-owner-full-control. Default is none.

    CdcInsertsAndUpdates bool

    Whether to write insert and update operations to .csv or .parquet output files. Default is false.

    CdcInsertsOnly bool

    Whether to write insert operations to .csv or .parquet output files. Default is false.

    CdcMaxBatchInterval int

    Maximum length of the interval, defined in seconds, after which to output a file to Amazon S3. Default is 60.

    CdcMinFileSize int

    Minimum file size condition as defined in kilobytes to output a file to Amazon S3. Default is 32000. NOTE: Previously, this setting was measured in megabytes but now represents kilobytes. Update configurations accordingly.

    CdcPath string

    Folder path of CDC files. For an S3 source, this setting is required if a task captures change data; otherwise, it's optional. If cdc_path is set, AWS DMS reads CDC files from this path and replicates the data changes to the target endpoint. Supported in AWS DMS versions 3.4.2 and later.

    CompressionType string

    Set to compress target files. Default is NONE. Valid values are GZIP and NONE.

    CsvDelimiter string

    Delimiter used to separate columns in the source files. Default is ,.

    CsvNoSupValue string

    String to use for all columns not included in the supplemental log.

    CsvNullValue string

    String to use as null when writing to the target.

    CsvRowDelimiter string

    Delimiter used to separate rows in the source files. Default is \n.

    DataFormat string

    Output format for the files that AWS DMS uses to create S3 objects. Valid values are csv and parquet. Default is csv.

    DataPageSize int

    Size of one data page in bytes. Default is 1048576 (1 MiB).

    DatePartitionDelimiter string

    Date separating delimiter to use during folder partitioning. Valid values are SLASH, UNDERSCORE, DASH, and NONE. Default is SLASH.

    DatePartitionEnabled bool

    Partition S3 bucket folders based on transaction commit dates. Default is false.

    DatePartitionSequence string

    Date format to use during folder partitioning. Use this parameter when date_partition_enabled is set to true. Valid values are YYYYMMDD, YYYYMMDDHH, YYYYMM, MMYYYYDD, and DDMMYYYY. Default is YYYYMMDD.

    DictPageSizeLimit int

    Maximum size in bytes of an encoded dictionary page of a column. Default is 1048576 (1 MiB).

    EnableStatistics bool

    Whether to enable statistics for Parquet pages and row groups. Default is true.

    EncodingType string

    Type of encoding to use. Valid values are rle_dictionary, plain, and plain_dictionary. Default is rle_dictionary.

    EncryptionMode string

    The server-side encryption mode that you want to encrypt your intermediate .csv object files copied to S3. Defaults to SSE_S3. Valid values are SSE_S3 and SSE_KMS.

    ExternalTableDefinition string

    JSON document that describes how AWS DMS should interpret the data.

    IgnoreHeaderRows int

    When this value is set to 1, DMS ignores the first row header in a .csv file. Default is 0.

    IncludeOpForFullLoad bool

    Whether to enable a full load to write INSERT operations to the .csv output files only to indicate how the rows were added to the source database. Default is false.

    MaxFileSize int

    Maximum size (in KB) of any .csv file to be created while migrating to an S3 target during full load. Valid values are from 1 to 1048576. Default is 1048576 (1 GB).

    ParquetTimestampInMillisecond bool

    Specifies the precision of any TIMESTAMP column values written to an S3 object file in .parquet format. Default is false.

    ParquetVersion string

    Version of the .parquet file format. Default is parquet-1-0. Valid values are parquet-1-0 and parquet-2-0.

    PreserveTransactions bool

    Whether DMS saves the transaction order for a CDC load on the S3 target specified by cdc_path. Default is false.

    Rfc4180 bool

    For an S3 source, whether each leading double quotation mark has to be followed by an ending double quotation mark. Default is true.

    RowGroupLength int

    Number of rows in a row group. Default is 10000.

    ServerSideEncryptionKmsKeyId string

    ARN or Id of KMS Key to use when encryption_mode is SSE_KMS.

    ServiceAccessRoleArn string

    ARN of the IAM Role with permissions to read from or write to the S3 Bucket.

    TimestampColumnName string

    Column to add with timestamp information to the endpoint data for an Amazon S3 target.

    UseCsvNoSupValue bool

    Whether to use csv_no_sup_value for columns not included in the supplemental log.

    UseTaskStartTimeForFullLoadTimestamp bool

    When set to true, uses the task start time as the timestamp column value instead of the time data is written to target. For full load, when set to true, each row of the timestamp column contains the task start time. For CDC loads, each row of the timestamp column contains the transaction commit time. When set to false, the full load timestamp in the timestamp column increments with the time data arrives at the target. Default is false.

    AddColumnName bool

    Whether to add column name information to the .csv output file. Default is false.

    BucketFolder string

    Custom S3 Bucket Object prefix for intermediate storage.

    BucketName string

    Custom S3 Bucket name for intermediate storage.

    CannedAclForObjects string

    Predefined (canned) access control list for objects created in an S3 bucket. Valid values include none, private, public-read, public-read-write, authenticated-read, aws-exec-read, bucket-owner-read, and bucket-owner-full-control. Default is none.

    CdcInsertsAndUpdates bool

    Whether to write insert and update operations to .csv or .parquet output files. Default is false.

    CdcInsertsOnly bool

    Whether to write insert operations to .csv or .parquet output files. Default is false.

    CdcMaxBatchInterval int

    Maximum length of the interval, defined in seconds, after which to output a file to Amazon S3. Default is 60.

    CdcMinFileSize int

    Minimum file size condition as defined in kilobytes to output a file to Amazon S3. Default is 32000. NOTE: Previously, this setting was measured in megabytes but now represents kilobytes. Update configurations accordingly.

    CdcPath string

    Folder path of CDC files. For an S3 source, this setting is required if a task captures change data; otherwise, it's optional. If cdc_path is set, AWS DMS reads CDC files from this path and replicates the data changes to the target endpoint. Supported in AWS DMS versions 3.4.2 and later.

    CompressionType string

    Set to compress target files. Default is NONE. Valid values are GZIP and NONE.

    CsvDelimiter string

    Delimiter used to separate columns in the source files. Default is ,.

    CsvNoSupValue string

    String to use for all columns not included in the supplemental log.

    CsvNullValue string

    String to use as null when writing to the target.

    CsvRowDelimiter string

    Delimiter used to separate rows in the source files. Default is \n.

    DataFormat string

    Output format for the files that AWS DMS uses to create S3 objects. Valid values are csv and parquet. Default is csv.

    DataPageSize int

    Size of one data page in bytes. Default is 1048576 (1 MiB).

    DatePartitionDelimiter string

    Date separating delimiter to use during folder partitioning. Valid values are SLASH, UNDERSCORE, DASH, and NONE. Default is SLASH.

    DatePartitionEnabled bool

    Partition S3 bucket folders based on transaction commit dates. Default is false.

    DatePartitionSequence string

    Date format to use during folder partitioning. Use this parameter when date_partition_enabled is set to true. Valid values are YYYYMMDD, YYYYMMDDHH, YYYYMM, MMYYYYDD, and DDMMYYYY. Default is YYYYMMDD.

    DictPageSizeLimit int

    Maximum size in bytes of an encoded dictionary page of a column. Default is 1048576 (1 MiB).

    EnableStatistics bool

    Whether to enable statistics for Parquet pages and row groups. Default is true.

    EncodingType string

    Type of encoding to use. Valid values are rle_dictionary, plain, and plain_dictionary. Default is rle_dictionary.

    EncryptionMode string

    The server-side encryption mode that you want to encrypt your intermediate .csv object files copied to S3. Defaults to SSE_S3. Valid values are SSE_S3 and SSE_KMS.

    ExternalTableDefinition string

    JSON document that describes how AWS DMS should interpret the data.

    IgnoreHeaderRows int

    When this value is set to 1, DMS ignores the first row header in a .csv file. Default is 0.

    IncludeOpForFullLoad bool

    Whether to enable a full load to write INSERT operations to the .csv output files only to indicate how the rows were added to the source database. Default is false.

    MaxFileSize int

    Maximum size (in KB) of any .csv file to be created while migrating to an S3 target during full load. Valid values are from 1 to 1048576. Default is 1048576 (1 GB).

    ParquetTimestampInMillisecond bool

    Specifies the precision of any TIMESTAMP column values written to an S3 object file in .parquet format. Default is false.

    ParquetVersion string

    Version of the .parquet file format. Default is parquet-1-0. Valid values are parquet-1-0 and parquet-2-0.

    PreserveTransactions bool

    Whether DMS saves the transaction order for a CDC load on the S3 target specified by cdc_path. Default is false.

    Rfc4180 bool

    For an S3 source, whether each leading double quotation mark has to be followed by an ending double quotation mark. Default is true.

    RowGroupLength int

    Number of rows in a row group. Default is 10000.

    ServerSideEncryptionKmsKeyId string

    ARN or Id of KMS Key to use when encryption_mode is SSE_KMS.

    ServiceAccessRoleArn string

    ARN of the IAM Role with permissions to read from or write to the S3 Bucket.

    TimestampColumnName string

    Column to add with timestamp information to the endpoint data for an Amazon S3 target.

    UseCsvNoSupValue bool

    Whether to use csv_no_sup_value for columns not included in the supplemental log.

    UseTaskStartTimeForFullLoadTimestamp bool

    When set to true, uses the task start time as the timestamp column value instead of the time data is written to target. For full load, when set to true, each row of the timestamp column contains the task start time. For CDC loads, each row of the timestamp column contains the transaction commit time. When set to false, the full load timestamp in the timestamp column increments with the time data arrives at the target. Default is false.

    addColumnName Boolean

    Whether to add column name information to the .csv output file. Default is false.

    bucketFolder String

    Custom S3 Bucket Object prefix for intermediate storage.

    bucketName String

    Custom S3 Bucket name for intermediate storage.

    cannedAclForObjects String

    Predefined (canned) access control list for objects created in an S3 bucket. Valid values include none, private, public-read, public-read-write, authenticated-read, aws-exec-read, bucket-owner-read, and bucket-owner-full-control. Default is none.

    cdcInsertsAndUpdates Boolean

    Whether to write insert and update operations to .csv or .parquet output files. Default is false.

    cdcInsertsOnly Boolean

    Whether to write insert operations to .csv or .parquet output files. Default is false.

    cdcMaxBatchInterval Integer

    Maximum length of the interval, defined in seconds, after which to output a file to Amazon S3. Default is 60.

    cdcMinFileSize Integer

    Minimum file size condition as defined in kilobytes to output a file to Amazon S3. Default is 32000. NOTE: Previously, this setting was measured in megabytes but now represents kilobytes. Update configurations accordingly.

    cdcPath String

    Folder path of CDC files. For an S3 source, this setting is required if a task captures change data; otherwise, it's optional. If cdc_path is set, AWS DMS reads CDC files from this path and replicates the data changes to the target endpoint. Supported in AWS DMS versions 3.4.2 and later.

    compressionType String

    Set to compress target files. Default is NONE. Valid values are GZIP and NONE.

    csvDelimiter String

    Delimiter used to separate columns in the source files. Default is ,.

    csvNoSupValue String

    String to use for all columns not included in the supplemental log.

    csvNullValue String

    String to use as null when writing to the target.

    csvRowDelimiter String

    Delimiter used to separate rows in the source files. Default is \n.

    dataFormat String

    Output format for the files that AWS DMS uses to create S3 objects. Valid values are csv and parquet. Default is csv.

    dataPageSize Integer

    Size of one data page in bytes. Default is 1048576 (1 MiB).

    datePartitionDelimiter String

    Date separating delimiter to use during folder partitioning. Valid values are SLASH, UNDERSCORE, DASH, and NONE. Default is SLASH.

    datePartitionEnabled Boolean

    Partition S3 bucket folders based on transaction commit dates. Default is false.

    datePartitionSequence String

    Date format to use during folder partitioning. Use this parameter when date_partition_enabled is set to true. Valid values are YYYYMMDD, YYYYMMDDHH, YYYYMM, MMYYYYDD, and DDMMYYYY. Default is YYYYMMDD.

    dictPageSizeLimit Integer

    Maximum size in bytes of an encoded dictionary page of a column. Default is 1048576 (1 MiB).

    enableStatistics Boolean

    Whether to enable statistics for Parquet pages and row groups. Default is true.

    encodingType String

    Type of encoding to use. Valid values are rle_dictionary, plain, and plain_dictionary. Default is rle_dictionary.

    encryptionMode String

    The server-side encryption mode that you want to encrypt your intermediate .csv object files copied to S3. Defaults to SSE_S3. Valid values are SSE_S3 and SSE_KMS.

    externalTableDefinition String

    JSON document that describes how AWS DMS should interpret the data.

    ignoreHeaderRows Integer

    When this value is set to 1, DMS ignores the first row header in a .csv file. Default is 0.

    includeOpForFullLoad Boolean

    Whether to enable a full load to write INSERT operations to the .csv output files only to indicate how the rows were added to the source database. Default is false.

    maxFileSize Integer

    Maximum size (in KB) of any .csv file to be created while migrating to an S3 target during full load. Valid values are from 1 to 1048576. Default is 1048576 (1 GB).

    parquetTimestampInMillisecond Boolean

    Specifies the precision of any TIMESTAMP column values written to an S3 object file in .parquet format. Default is false.

    parquetVersion String

    Version of the .parquet file format. Default is parquet-1-0. Valid values are parquet-1-0 and parquet-2-0.

    preserveTransactions Boolean

    Whether DMS saves the transaction order for a CDC load on the S3 target specified by cdc_path. Default is false.

    rfc4180 Boolean

    For an S3 source, whether each leading double quotation mark has to be followed by an ending double quotation mark. Default is true.

    rowGroupLength Integer

    Number of rows in a row group. Default is 10000.

    serverSideEncryptionKmsKeyId String

    ARN or Id of KMS Key to use when encryption_mode is SSE_KMS.

    serviceAccessRoleArn String

    ARN of the IAM Role with permissions to read from or write to the S3 Bucket.

    timestampColumnName String

    Column to add with timestamp information to the endpoint data for an Amazon S3 target.

    useCsvNoSupValue Boolean

    Whether to use csv_no_sup_value for columns not included in the supplemental log.

    useTaskStartTimeForFullLoadTimestamp Boolean

    When set to true, uses the task start time as the timestamp column value instead of the time data is written to target. For full load, when set to true, each row of the timestamp column contains the task start time. For CDC loads, each row of the timestamp column contains the transaction commit time. When set to false, the full load timestamp in the timestamp column increments with the time data arrives at the target. Default is false.

    addColumnName boolean

    Whether to add column name information to the .csv output file. Default is false.

    bucketFolder string

    Custom S3 Bucket Object prefix for intermediate storage.

    bucketName string

    Custom S3 Bucket name for intermediate storage.

    cannedAclForObjects string

    Predefined (canned) access control list for objects created in an S3 bucket. Valid values include none, private, public-read, public-read-write, authenticated-read, aws-exec-read, bucket-owner-read, and bucket-owner-full-control. Default is none.

    cdcInsertsAndUpdates boolean

    Whether to write insert and update operations to .csv or .parquet output files. Default is false.

    cdcInsertsOnly boolean

    Whether to write insert operations to .csv or .parquet output files. Default is false.

    cdcMaxBatchInterval number

    Maximum length of the interval, defined in seconds, after which to output a file to Amazon S3. Default is 60.

    cdcMinFileSize number

    Minimum file size condition as defined in kilobytes to output a file to Amazon S3. Default is 32000. NOTE: Previously, this setting was measured in megabytes but now represents kilobytes. Update configurations accordingly.

    cdcPath string

    Folder path of CDC files. For an S3 source, this setting is required if a task captures change data; otherwise, it's optional. If cdc_path is set, AWS DMS reads CDC files from this path and replicates the data changes to the target endpoint. Supported in AWS DMS versions 3.4.2 and later.

    compressionType string

    Set to compress target files. Default is NONE. Valid values are GZIP and NONE.

    csvDelimiter string

    Delimiter used to separate columns in the source files. Default is ,.

    csvNoSupValue string

    String to use for all columns not included in the supplemental log.

    csvNullValue string

    String to use as null when writing to the target.

    csvRowDelimiter string

    Delimiter used to separate rows in the source files. Default is \n.

    dataFormat string

    Output format for the files that AWS DMS uses to create S3 objects. Valid values are csv and parquet. Default is csv.

    dataPageSize number

    Size of one data page in bytes. Default is 1048576 (1 MiB).

    datePartitionDelimiter string

    Date separating delimiter to use during folder partitioning. Valid values are SLASH, UNDERSCORE, DASH, and NONE. Default is SLASH.

    datePartitionEnabled boolean

    Partition S3 bucket folders based on transaction commit dates. Default is false.

    datePartitionSequence string

    Date format to use during folder partitioning. Use this parameter when date_partition_enabled is set to true. Valid values are YYYYMMDD, YYYYMMDDHH, YYYYMM, MMYYYYDD, and DDMMYYYY. Default is YYYYMMDD.

    dictPageSizeLimit number

    Maximum size in bytes of an encoded dictionary page of a column. Default is 1048576 (1 MiB).

    enableStatistics boolean

    Whether to enable statistics for Parquet pages and row groups. Default is true.

    encodingType string

    Type of encoding to use. Valid values are rle_dictionary, plain, and plain_dictionary. Default is rle_dictionary.

    encryptionMode string

    The server-side encryption mode that you want to use to encrypt your intermediate .csv object files copied to S3. Defaults to SSE_S3. Valid values are SSE_S3 and SSE_KMS.

    externalTableDefinition string

    JSON document that describes how AWS DMS should interpret the data.

    ignoreHeaderRows number

    When this value is set to 1, DMS ignores the first row header in a .csv file. Default is 0.

    includeOpForFullLoad boolean

    Whether to enable a full load to write INSERT operations to the .csv output files only to indicate how the rows were added to the source database. Default is false.

    maxFileSize number

    Maximum size (in KB) of any .csv file to be created while migrating to an S3 target during full load. Valid values are from 1 to 1048576. Default is 1048576 (1 GB).

    parquetTimestampInMillisecond boolean

    Specifies the precision of any TIMESTAMP column values written to an S3 object file in .parquet format. Default is false.

    parquetVersion string

    Version of the .parquet file format. Default is parquet-1-0. Valid values are parquet-1-0 and parquet-2-0.

    preserveTransactions boolean

    Whether DMS saves the transaction order for a CDC load on the S3 target specified by cdc_path. Default is false.

    rfc4180 boolean

    For an S3 source, whether each leading double quotation mark has to be followed by an ending double quotation mark. Default is true.

    rowGroupLength number

    Number of rows in a row group. Default is 10000.

    serverSideEncryptionKmsKeyId string

    ARN or Id of KMS Key to use when encryption_mode is SSE_KMS.

    serviceAccessRoleArn string

    ARN of the IAM Role with permissions to read from or write to the S3 Bucket.

    timestampColumnName string

    Column to add with timestamp information to the endpoint data for an Amazon S3 target.

    useCsvNoSupValue boolean

    Whether to use csv_no_sup_value for columns not included in the supplemental log.

    useTaskStartTimeForFullLoadTimestamp boolean

    When set to true, uses the task start time as the timestamp column value instead of the time data is written to target. For full load, when set to true, each row of the timestamp column contains the task start time. For CDC loads, each row of the timestamp column contains the transaction commit time. When set to false, the full load timestamp in the timestamp column increments with the time data arrives at the target. Default is false.

    add_column_name bool

    Whether to add column name information to the .csv output file. Default is false.

    bucket_folder str

    Custom S3 Bucket Object prefix for intermediate storage.

    bucket_name str

    Custom S3 Bucket name for intermediate storage.

    canned_acl_for_objects str

    Predefined (canned) access control list for objects created in an S3 bucket. Valid values include none, private, public-read, public-read-write, authenticated-read, aws-exec-read, bucket-owner-read, and bucket-owner-full-control. Default is none.

    cdc_inserts_and_updates bool

    Whether to write insert and update operations to .csv or .parquet output files. Default is false.

    cdc_inserts_only bool

    Whether to write insert operations to .csv or .parquet output files. Default is false.

    cdc_max_batch_interval int

    Maximum length of the interval, defined in seconds, after which to output a file to Amazon S3. Default is 60.

    cdc_min_file_size int

    Minimum file size condition as defined in kilobytes to output a file to Amazon S3. Default is 32000. NOTE: Previously, this setting was measured in megabytes but now represents kilobytes. Update configurations accordingly.

    cdc_path str

    Folder path of CDC files. For an S3 source, this setting is required if a task captures change data; otherwise, it's optional. If cdc_path is set, AWS DMS reads CDC files from this path and replicates the data changes to the target endpoint. Supported in AWS DMS versions 3.4.2 and later.

    compression_type str

    Set to compress target files. Default is NONE. Valid values are GZIP and NONE.

    csv_delimiter str

    Delimiter used to separate columns in the source files. Default is ,.

    csv_no_sup_value str

    String to use for all columns not included in the supplemental log.

    csv_null_value str

    String to use as null when writing to the target.

    csv_row_delimiter str

    Delimiter used to separate rows in the source files. Default is \n.

    data_format str

    Output format for the files that AWS DMS uses to create S3 objects. Valid values are csv and parquet. Default is csv.

    data_page_size int

    Size of one data page in bytes. Default is 1048576 (1 MiB).

    date_partition_delimiter str

    Date separating delimiter to use during folder partitioning. Valid values are SLASH, UNDERSCORE, DASH, and NONE. Default is SLASH.

    date_partition_enabled bool

    Partition S3 bucket folders based on transaction commit dates. Default is false.

    date_partition_sequence str

    Date format to use during folder partitioning. Use this parameter when date_partition_enabled is set to true. Valid values are YYYYMMDD, YYYYMMDDHH, YYYYMM, MMYYYYDD, and DDMMYYYY. Default is YYYYMMDD.

    dict_page_size_limit int

    Maximum size in bytes of an encoded dictionary page of a column. Default is 1048576 (1 MiB).

    enable_statistics bool

    Whether to enable statistics for Parquet pages and row groups. Default is true.

    encoding_type str

    Type of encoding to use. Valid values are rle_dictionary, plain, and plain_dictionary. Default is rle_dictionary.

    encryption_mode str

    The server-side encryption mode that you want to use to encrypt your intermediate .csv object files copied to S3. Defaults to SSE_S3. Valid values are SSE_S3 and SSE_KMS.

    external_table_definition str

    JSON document that describes how AWS DMS should interpret the data.

    ignore_header_rows int

    When this value is set to 1, DMS ignores the first row header in a .csv file. Default is 0.

    include_op_for_full_load bool

    Whether to enable a full load to write INSERT operations to the .csv output files only to indicate how the rows were added to the source database. Default is false.

    max_file_size int

    Maximum size (in KB) of any .csv file to be created while migrating to an S3 target during full load. Valid values are from 1 to 1048576. Default is 1048576 (1 GB).

    parquet_timestamp_in_millisecond bool

    Specifies the precision of any TIMESTAMP column values written to an S3 object file in .parquet format. Default is false.

    parquet_version str

    Version of the .parquet file format. Default is parquet-1-0. Valid values are parquet-1-0 and parquet-2-0.

    preserve_transactions bool

    Whether DMS saves the transaction order for a CDC load on the S3 target specified by cdc_path. Default is false.

    rfc4180 bool

    For an S3 source, whether each leading double quotation mark has to be followed by an ending double quotation mark. Default is true.

    row_group_length int

    Number of rows in a row group. Default is 10000.

    server_side_encryption_kms_key_id str

    ARN or Id of KMS Key to use when encryption_mode is SSE_KMS.

    service_access_role_arn str

    ARN of the IAM Role with permissions to read from or write to the S3 Bucket.

    timestamp_column_name str

    Column to add with timestamp information to the endpoint data for an Amazon S3 target.

    use_csv_no_sup_value bool

    Whether to use csv_no_sup_value for columns not included in the supplemental log.

    use_task_start_time_for_full_load_timestamp bool

    When set to true, uses the task start time as the timestamp column value instead of the time data is written to target. For full load, when set to true, each row of the timestamp column contains the task start time. For CDC loads, each row of the timestamp column contains the transaction commit time. When set to false, the full load timestamp in the timestamp column increments with the time data arrives at the target. Default is false.

    addColumnName Boolean

    Whether to add column name information to the .csv output file. Default is false.

    bucketFolder String

    Custom S3 Bucket Object prefix for intermediate storage.

    bucketName String

    Custom S3 Bucket name for intermediate storage.

    cannedAclForObjects String

    Predefined (canned) access control list for objects created in an S3 bucket. Valid values include none, private, public-read, public-read-write, authenticated-read, aws-exec-read, bucket-owner-read, and bucket-owner-full-control. Default is none.

    cdcInsertsAndUpdates Boolean

    Whether to write insert and update operations to .csv or .parquet output files. Default is false.

    cdcInsertsOnly Boolean

    Whether to write insert operations to .csv or .parquet output files. Default is false.

    cdcMaxBatchInterval Number

    Maximum length of the interval, defined in seconds, after which to output a file to Amazon S3. Default is 60.

    cdcMinFileSize Number

    Minimum file size condition as defined in kilobytes to output a file to Amazon S3. Default is 32000. NOTE: Previously, this setting was measured in megabytes but now represents kilobytes. Update configurations accordingly.

    cdcPath String

    Folder path of CDC files. For an S3 source, this setting is required if a task captures change data; otherwise, it's optional. If cdc_path is set, AWS DMS reads CDC files from this path and replicates the data changes to the target endpoint. Supported in AWS DMS versions 3.4.2 and later.

    compressionType String

    Set to compress target files. Default is NONE. Valid values are GZIP and NONE.

    csvDelimiter String

    Delimiter used to separate columns in the source files. Default is ,.

    csvNoSupValue String

    String to use for all columns not included in the supplemental log.

    csvNullValue String

    String to use as null when writing to the target.

    csvRowDelimiter String

    Delimiter used to separate rows in the source files. Default is \n.

    dataFormat String

    Output format for the files that AWS DMS uses to create S3 objects. Valid values are csv and parquet. Default is csv.

    dataPageSize Number

    Size of one data page in bytes. Default is 1048576 (1 MiB).

    datePartitionDelimiter String

    Date separating delimiter to use during folder partitioning. Valid values are SLASH, UNDERSCORE, DASH, and NONE. Default is SLASH.

    datePartitionEnabled Boolean

    Partition S3 bucket folders based on transaction commit dates. Default is false.

    datePartitionSequence String

    Date format to use during folder partitioning. Use this parameter when date_partition_enabled is set to true. Valid values are YYYYMMDD, YYYYMMDDHH, YYYYMM, MMYYYYDD, and DDMMYYYY. Default is YYYYMMDD.

    dictPageSizeLimit Number

    Maximum size in bytes of an encoded dictionary page of a column. Default is 1048576 (1 MiB).

    enableStatistics Boolean

    Whether to enable statistics for Parquet pages and row groups. Default is true.

    encodingType String

    Type of encoding to use. Valid values are rle_dictionary, plain, and plain_dictionary. Default is rle_dictionary.

    encryptionMode String

    The server-side encryption mode that you want to use to encrypt your intermediate .csv object files copied to S3. Defaults to SSE_S3. Valid values are SSE_S3 and SSE_KMS.

    externalTableDefinition String

    JSON document that describes how AWS DMS should interpret the data.

    ignoreHeaderRows Number

    When this value is set to 1, DMS ignores the first row header in a .csv file. Default is 0.

    includeOpForFullLoad Boolean

    Whether to enable a full load to write INSERT operations to the .csv output files only to indicate how the rows were added to the source database. Default is false.

    maxFileSize Number

    Maximum size (in KB) of any .csv file to be created while migrating to an S3 target during full load. Valid values are from 1 to 1048576. Default is 1048576 (1 GB).

    parquetTimestampInMillisecond Boolean

    Specifies the precision of any TIMESTAMP column values written to an S3 object file in .parquet format. Default is false.

    parquetVersion String

    Version of the .parquet file format. Default is parquet-1-0. Valid values are parquet-1-0 and parquet-2-0.

    preserveTransactions Boolean

    Whether DMS saves the transaction order for a CDC load on the S3 target specified by cdc_path. Default is false.

    rfc4180 Boolean

    For an S3 source, whether each leading double quotation mark has to be followed by an ending double quotation mark. Default is true.

    rowGroupLength Number

    Number of rows in a row group. Default is 10000.

    serverSideEncryptionKmsKeyId String

    ARN or Id of KMS Key to use when encryption_mode is SSE_KMS.

    serviceAccessRoleArn String

    ARN of the IAM Role with permissions to read from or write to the S3 Bucket.

    timestampColumnName String

    Column to add with timestamp information to the endpoint data for an Amazon S3 target.

    useCsvNoSupValue Boolean

    Whether to use csv_no_sup_value for columns not included in the supplemental log.

    useTaskStartTimeForFullLoadTimestamp Boolean

    When set to true, uses the task start time as the timestamp column value instead of the time data is written to target. For full load, when set to true, each row of the timestamp column contains the task start time. For CDC loads, each row of the timestamp column contains the transaction commit time. When set to false, the full load timestamp in the timestamp column increments with the time data arrives at the target. Default is false.

    Import

    Using pulumi import, import endpoints using the endpoint_id. For example:

     $ pulumi import aws:dms/endpoint:Endpoint test test-dms-endpoint-tf
    

    Package Details

    Repository
    AWS Classic pulumi/pulumi-aws
    License
    Apache-2.0
    Notes

    This Pulumi package is based on the aws Terraform Provider.

    aws logo

    Try AWS Native preview for resources not in the classic version.

    AWS Classic v6.3.0 published on Thursday, Sep 28, 2023 by Pulumi