
aws.dms.S3Endpoint

AWS v6.64.0 published on Friday, Dec 6, 2024 by Pulumi

    Provides a DMS (Database Migration Service) S3 endpoint resource. DMS S3 endpoints can be created, updated, deleted, and imported.

    Note: AWS is deprecating extra_connection_attributes, such as those used with aws.dms.Endpoint. This resource is an alternative to aws.dms.Endpoint and does not use extra_connection_attributes. (AWS currently includes extra_connection_attributes in the raw responses to AWS Provider requests, so they may be visible in the logs.)

    Note: Some of this resource’s arguments have default values that come from the AWS Provider. Other default values are provided by AWS and are subject to change without notice. When relying on AWS defaults, the provider state will often show a zero value. For example, the AWS Provider does not provide a default for cdc_max_batch_interval, but the AWS default is 60 (seconds). However, the provider state will show 0, since this is the value returned by AWS when no value is present. Below, we aim to flag the defaults that come from AWS (e.g., “AWS default…”).
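
    To remove the ambiguity between the provider's zero value and the AWS-applied default, you can set such arguments explicitly. A minimal Python sketch (the role ARN and bucket name are placeholders) that pins cdc_max_batch_interval to the AWS default of 60 seconds so the stored state matches the effective value:

    import pulumi_aws as aws

    # Placeholder values: the role ARN and bucket name are illustrative only.
    example = aws.dms.S3Endpoint("example",
        endpoint_id="donnedtipi",
        endpoint_type="target",
        bucket_name="bucket_name",
        service_access_role_arn="arn:aws:iam::123456789012:role/dms-s3-access",
        # Explicitly set to the AWS default (60 seconds) so the provider state
        # records 60 instead of 0.
        cdc_max_batch_interval=60)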

    Example Usage

    Minimal Configuration

    This is the minimal configuration for an aws.dms.S3Endpoint. This endpoint will rely on the AWS Provider and AWS defaults.

    import * as pulumi from "@pulumi/pulumi";
    import * as aws from "@pulumi/aws";
    
    const example = new aws.dms.S3Endpoint("example", {
        endpointId: "donnedtipi",
        endpointType: "target",
        bucketName: "beckut_name",
        serviceAccessRoleArn: exampleAwsIamRole.arn,
    }, {
        dependsOn: [exampleAwsIamRolePolicy],
    });
    
    import pulumi
    import pulumi_aws as aws
    
    example = aws.dms.S3Endpoint("example",
        endpoint_id="donnedtipi",
        endpoint_type="target",
        bucket_name="beckut_name",
        service_access_role_arn=example_aws_iam_role["arn"],
        opts = pulumi.ResourceOptions(depends_on=[example_aws_iam_role_policy]))
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/dms"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := dms.NewS3Endpoint(ctx, "example", &dms.S3EndpointArgs{
    			EndpointId:           pulumi.String("donnedtipi"),
    			EndpointType:         pulumi.String("target"),
    			BucketName:           pulumi.String("bucket_name"),
    			ServiceAccessRoleArn: pulumi.Any(exampleAwsIamRole.Arn),
    		}, pulumi.DependsOn([]pulumi.Resource{
    			exampleAwsIamRolePolicy,
    		}))
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Aws = Pulumi.Aws;
    
    return await Deployment.RunAsync(() => 
    {
        var example = new Aws.Dms.S3Endpoint("example", new()
        {
            EndpointId = "donnedtipi",
            EndpointType = "target",
            BucketName = "beckut_name",
            ServiceAccessRoleArn = exampleAwsIamRole.Arn,
        }, new CustomResourceOptions
        {
            DependsOn =
            {
                exampleAwsIamRolePolicy,
            },
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.aws.dms.S3Endpoint;
    import com.pulumi.aws.dms.S3EndpointArgs;
    import com.pulumi.resources.CustomResourceOptions;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var example = new S3Endpoint("example", S3EndpointArgs.builder()
                .endpointId("donnedtipi")
                .endpointType("target")
                .bucketName("beckut_name")
                .serviceAccessRoleArn(exampleAwsIamRole.arn())
                .build(), CustomResourceOptions.builder()
                    .dependsOn(exampleAwsIamRolePolicy)
                    .build());
    
        }
    }
    
    resources:
      example:
        type: aws:dms:S3Endpoint
        properties:
          endpointId: donnedtipi
          endpointType: target
          bucketName: bucket_name
          serviceAccessRoleArn: ${exampleAwsIamRole.arn}
        options:
          dependsOn:
            - ${exampleAwsIamRolePolicy}
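
    The examples above reference exampleAwsIamRole and exampleAwsIamRolePolicy without defining them. A minimal Python sketch of what those supporting resources might look like, assuming a role trusted by dms.amazonaws.com with read/write access to the target bucket (the bucket ARN and policy scope are illustrative):

    import json
    import pulumi_aws as aws

    # IAM role that DMS assumes in order to write to the S3 bucket.
    example_aws_iam_role = aws.iam.Role("example",
        assume_role_policy=json.dumps({
            "Version": "2012-10-17",
            "Statement": [{
                "Effect": "Allow",
                "Principal": {"Service": "dms.amazonaws.com"},
                "Action": "sts:AssumeRole",
            }],
        }))

    # Policy granting the role access to the target bucket and its objects.
    example_aws_iam_role_policy = aws.iam.RolePolicy("example",
        role=example_aws_iam_role.id,
        policy=json.dumps({
            "Version": "2012-10-17",
            "Statement": [{
                "Effect": "Allow",
                "Action": [
                    "s3:PutObject",
                    "s3:DeleteObject",
                    "s3:GetObject",
                    "s3:ListBucket",
                ],
                "Resource": [
                    "arn:aws:s3:::bucket_name",
                    "arn:aws:s3:::bucket_name/*",
                ],
            }],
        }))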
    

    Complete Configuration

    import * as pulumi from "@pulumi/pulumi";
    import * as aws from "@pulumi/aws";
    
    const example = new aws.dms.S3Endpoint("example", {
        endpointId: "donnedtipi",
        endpointType: "target",
        sslMode: "none",
        tags: {
            Name: "donnedtipi",
            Update: "to-update",
            Remove: "to-remove",
        },
        addColumnName: true,
        addTrailingPaddingCharacter: false,
        bucketFolder: "folder",
        bucketName: "bucket_name",
        cannedAclForObjects: "private",
        cdcInsertsAndUpdates: true,
        cdcInsertsOnly: false,
        cdcMaxBatchInterval: 100,
        cdcMinFileSize: 16,
        cdcPath: "cdc/path",
        compressionType: "GZIP",
        csvDelimiter: ";",
        csvNoSupValue: "x",
        csvNullValue: "?",
        csvRowDelimiter: "\\r\\n",
        dataFormat: "parquet",
        dataPageSize: 1100000,
        datePartitionDelimiter: "UNDERSCORE",
        datePartitionEnabled: true,
        datePartitionSequence: "yyyymmddhh",
        datePartitionTimezone: "Asia/Seoul",
        dictPageSizeLimit: 1000000,
        enableStatistics: false,
        encodingType: "plain",
        encryptionMode: "SSE_S3",
        expectedBucketOwner: current.accountId,
        externalTableDefinition: "etd",
        ignoreHeaderRows: 1,
        includeOpForFullLoad: true,
        maxFileSize: 1000000,
        parquetTimestampInMillisecond: true,
        parquetVersion: "parquet-2-0",
        preserveTransactions: false,
        rfc4180: false,
        rowGroupLength: 11000,
        serverSideEncryptionKmsKeyId: exampleAwsKmsKey.arn,
        serviceAccessRoleArn: exampleAwsIamRole.arn,
        timestampColumnName: "tx_commit_time",
        useCsvNoSupValue: false,
        useTaskStartTimeForFullLoadTimestamp: true,
        glueCatalogGeneration: true,
    }, {
        dependsOn: [exampleAwsIamRolePolicy],
    });
    
    import pulumi
    import pulumi_aws as aws
    
    example = aws.dms.S3Endpoint("example",
        endpoint_id="donnedtipi",
        endpoint_type="target",
        ssl_mode="none",
        tags={
            "Name": "donnedtipi",
            "Update": "to-update",
            "Remove": "to-remove",
        },
        add_column_name=True,
        add_trailing_padding_character=False,
        bucket_folder="folder",
        bucket_name="bucket_name",
        canned_acl_for_objects="private",
        cdc_inserts_and_updates=True,
        cdc_inserts_only=False,
        cdc_max_batch_interval=100,
        cdc_min_file_size=16,
        cdc_path="cdc/path",
        compression_type="GZIP",
        csv_delimiter=";",
        csv_no_sup_value="x",
        csv_null_value="?",
        csv_row_delimiter="\\r\\n",
        data_format="parquet",
        data_page_size=1100000,
        date_partition_delimiter="UNDERSCORE",
        date_partition_enabled=True,
        date_partition_sequence="yyyymmddhh",
        date_partition_timezone="Asia/Seoul",
        dict_page_size_limit=1000000,
        enable_statistics=False,
        encoding_type="plain",
        encryption_mode="SSE_S3",
        expected_bucket_owner=current["accountId"],
        external_table_definition="etd",
        ignore_header_rows=1,
        include_op_for_full_load=True,
        max_file_size=1000000,
        parquet_timestamp_in_millisecond=True,
        parquet_version="parquet-2-0",
        preserve_transactions=False,
        rfc4180=False,
        row_group_length=11000,
        server_side_encryption_kms_key_id=example_aws_kms_key["arn"],
        service_access_role_arn=example_aws_iam_role["arn"],
        timestamp_column_name="tx_commit_time",
        use_csv_no_sup_value=False,
        use_task_start_time_for_full_load_timestamp=True,
        glue_catalog_generation=True,
        opts = pulumi.ResourceOptions(depends_on=[example_aws_iam_role_policy]))
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/dms"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := dms.NewS3Endpoint(ctx, "example", &dms.S3EndpointArgs{
    			EndpointId:   pulumi.String("donnedtipi"),
    			EndpointType: pulumi.String("target"),
    			SslMode:      pulumi.String("none"),
    			Tags: pulumi.StringMap{
    				"Name":   pulumi.String("donnedtipi"),
    				"Update": pulumi.String("to-update"),
    				"Remove": pulumi.String("to-remove"),
    			},
    			AddColumnName:                        pulumi.Bool(true),
    			AddTrailingPaddingCharacter:          pulumi.Bool(false),
    			BucketFolder:                         pulumi.String("folder"),
    			BucketName:                           pulumi.String("bucket_name"),
    			CannedAclForObjects:                  pulumi.String("private"),
    			CdcInsertsAndUpdates:                 pulumi.Bool(true),
    			CdcInsertsOnly:                       pulumi.Bool(false),
    			CdcMaxBatchInterval:                  pulumi.Int(100),
    			CdcMinFileSize:                       pulumi.Int(16),
    			CdcPath:                              pulumi.String("cdc/path"),
    			CompressionType:                      pulumi.String("GZIP"),
    			CsvDelimiter:                         pulumi.String(";"),
    			CsvNoSupValue:                        pulumi.String("x"),
    			CsvNullValue:                         pulumi.String("?"),
    			CsvRowDelimiter:                      pulumi.String("\\r\\n"),
    			DataFormat:                           pulumi.String("parquet"),
    			DataPageSize:                         pulumi.Int(1100000),
    			DatePartitionDelimiter:               pulumi.String("UNDERSCORE"),
    			DatePartitionEnabled:                 pulumi.Bool(true),
    			DatePartitionSequence:                pulumi.String("yyyymmddhh"),
    			DatePartitionTimezone:                pulumi.String("Asia/Seoul"),
    			DictPageSizeLimit:                    pulumi.Int(1000000),
    			EnableStatistics:                     pulumi.Bool(false),
    			EncodingType:                         pulumi.String("plain"),
    			EncryptionMode:                       pulumi.String("SSE_S3"),
    			ExpectedBucketOwner:                  pulumi.Any(current.AccountId),
    			ExternalTableDefinition:              pulumi.String("etd"),
    			IgnoreHeaderRows:                     pulumi.Int(1),
    			IncludeOpForFullLoad:                 pulumi.Bool(true),
    			MaxFileSize:                          pulumi.Int(1000000),
    			ParquetTimestampInMillisecond:        pulumi.Bool(true),
    			ParquetVersion:                       pulumi.String("parquet-2-0"),
    			PreserveTransactions:                 pulumi.Bool(false),
    			Rfc4180:                              pulumi.Bool(false),
    			RowGroupLength:                       pulumi.Int(11000),
    			ServerSideEncryptionKmsKeyId:         pulumi.Any(exampleAwsKmsKey.Arn),
    			ServiceAccessRoleArn:                 pulumi.Any(exampleAwsIamRole.Arn),
    			TimestampColumnName:                  pulumi.String("tx_commit_time"),
    			UseCsvNoSupValue:                     pulumi.Bool(false),
    			UseTaskStartTimeForFullLoadTimestamp: pulumi.Bool(true),
    			GlueCatalogGeneration:                pulumi.Bool(true),
    		}, pulumi.DependsOn([]pulumi.Resource{
    			exampleAwsIamRolePolicy,
    		}))
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Aws = Pulumi.Aws;
    
    return await Deployment.RunAsync(() => 
    {
        var example = new Aws.Dms.S3Endpoint("example", new()
        {
            EndpointId = "donnedtipi",
            EndpointType = "target",
            SslMode = "none",
            Tags = 
            {
                { "Name", "donnedtipi" },
                { "Update", "to-update" },
                { "Remove", "to-remove" },
            },
            AddColumnName = true,
            AddTrailingPaddingCharacter = false,
            BucketFolder = "folder",
            BucketName = "bucket_name",
            CannedAclForObjects = "private",
            CdcInsertsAndUpdates = true,
            CdcInsertsOnly = false,
            CdcMaxBatchInterval = 100,
            CdcMinFileSize = 16,
            CdcPath = "cdc/path",
            CompressionType = "GZIP",
            CsvDelimiter = ";",
            CsvNoSupValue = "x",
            CsvNullValue = "?",
            CsvRowDelimiter = "\\r\\n",
            DataFormat = "parquet",
            DataPageSize = 1100000,
            DatePartitionDelimiter = "UNDERSCORE",
            DatePartitionEnabled = true,
            DatePartitionSequence = "yyyymmddhh",
            DatePartitionTimezone = "Asia/Seoul",
            DictPageSizeLimit = 1000000,
            EnableStatistics = false,
            EncodingType = "plain",
            EncryptionMode = "SSE_S3",
            ExpectedBucketOwner = current.AccountId,
            ExternalTableDefinition = "etd",
            IgnoreHeaderRows = 1,
            IncludeOpForFullLoad = true,
            MaxFileSize = 1000000,
            ParquetTimestampInMillisecond = true,
            ParquetVersion = "parquet-2-0",
            PreserveTransactions = false,
            Rfc4180 = false,
            RowGroupLength = 11000,
            ServerSideEncryptionKmsKeyId = exampleAwsKmsKey.Arn,
            ServiceAccessRoleArn = exampleAwsIamRole.Arn,
            TimestampColumnName = "tx_commit_time",
            UseCsvNoSupValue = false,
            UseTaskStartTimeForFullLoadTimestamp = true,
            GlueCatalogGeneration = true,
        }, new CustomResourceOptions
        {
            DependsOn =
            {
                exampleAwsIamRolePolicy,
            },
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.aws.dms.S3Endpoint;
    import com.pulumi.aws.dms.S3EndpointArgs;
    import com.pulumi.resources.CustomResourceOptions;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var example = new S3Endpoint("example", S3EndpointArgs.builder()
                .endpointId("donnedtipi")
                .endpointType("target")
                .sslMode("none")
                .tags(Map.ofEntries(
                    Map.entry("Name", "donnedtipi"),
                    Map.entry("Update", "to-update"),
                    Map.entry("Remove", "to-remove")
                ))
                .addColumnName(true)
                .addTrailingPaddingCharacter(false)
                .bucketFolder("folder")
                .bucketName("bucket_name")
                .cannedAclForObjects("private")
                .cdcInsertsAndUpdates(true)
                .cdcInsertsOnly(false)
                .cdcMaxBatchInterval(100)
                .cdcMinFileSize(16)
                .cdcPath("cdc/path")
                .compressionType("GZIP")
                .csvDelimiter(";")
                .csvNoSupValue("x")
                .csvNullValue("?")
                .csvRowDelimiter("\\r\\n")
                .dataFormat("parquet")
                .dataPageSize(1100000)
                .datePartitionDelimiter("UNDERSCORE")
                .datePartitionEnabled(true)
                .datePartitionSequence("yyyymmddhh")
                .datePartitionTimezone("Asia/Seoul")
                .dictPageSizeLimit(1000000)
                .enableStatistics(false)
                .encodingType("plain")
                .encryptionMode("SSE_S3")
                .expectedBucketOwner(current.accountId())
                .externalTableDefinition("etd")
                .ignoreHeaderRows(1)
                .includeOpForFullLoad(true)
                .maxFileSize(1000000)
                .parquetTimestampInMillisecond(true)
                .parquetVersion("parquet-2-0")
                .preserveTransactions(false)
                .rfc4180(false)
                .rowGroupLength(11000)
                .serverSideEncryptionKmsKeyId(exampleAwsKmsKey.arn())
                .serviceAccessRoleArn(exampleAwsIamRole.arn())
                .timestampColumnName("tx_commit_time")
                .useCsvNoSupValue(false)
                .useTaskStartTimeForFullLoadTimestamp(true)
                .glueCatalogGeneration(true)
                .build(), CustomResourceOptions.builder()
                    .dependsOn(exampleAwsIamRolePolicy)
                    .build());
    
        }
    }
    
    resources:
      example:
        type: aws:dms:S3Endpoint
        properties:
          endpointId: donnedtipi
          endpointType: target
          sslMode: none
          tags:
            Name: donnedtipi
            Update: to-update
            Remove: to-remove
          addColumnName: true
          addTrailingPaddingCharacter: false
          bucketFolder: folder
          bucketName: bucket_name
          cannedAclForObjects: private
          cdcInsertsAndUpdates: true
          cdcInsertsOnly: false
          cdcMaxBatchInterval: 100
          cdcMinFileSize: 16
          cdcPath: cdc/path
          compressionType: GZIP
          csvDelimiter: ;
          csvNoSupValue: x
          csvNullValue: '?'
          csvRowDelimiter: \r\n
          dataFormat: parquet
          dataPageSize: 1100000
          datePartitionDelimiter: UNDERSCORE
          datePartitionEnabled: true
          datePartitionSequence: yyyymmddhh
          datePartitionTimezone: Asia/Seoul
          dictPageSizeLimit: 1000000
          enableStatistics: false
          encodingType: plain
          encryptionMode: SSE_S3
          expectedBucketOwner: ${current.accountId}
          externalTableDefinition: etd
          ignoreHeaderRows: 1
          includeOpForFullLoad: true
          maxFileSize: 1000000
          parquetTimestampInMillisecond: true
          parquetVersion: parquet-2-0
          preserveTransactions: false
          rfc4180: false
          rowGroupLength: 11000
          serverSideEncryptionKmsKeyId: ${exampleAwsKmsKey.arn}
          serviceAccessRoleArn: ${exampleAwsIamRole.arn}
          timestampColumnName: tx_commit_time
          useCsvNoSupValue: false
          useTaskStartTimeForFullLoadTimestamp: true
          glueCatalogGeneration: true
        options:
          dependsOn:
            - ${exampleAwsIamRolePolicy}
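
    Once the endpoint exists, its exported endpoint_arn is typically wired in as the target of a DMS replication task. A minimal Python sketch, assuming a replication instance (instance) and a source endpoint (source) that are hypothetical and defined elsewhere:

    import json
    import pulumi_aws as aws

    # Hypothetical wiring: "instance" and "source" stand in for existing
    # aws.dms.ReplicationInstance and aws.dms.Endpoint resources defined elsewhere.
    task = aws.dms.ReplicationTask("example",
        replication_task_id="example-task",
        migration_type="full-load-and-cdc",
        replication_instance_arn=instance.replication_instance_arn,
        source_endpoint_arn=source.endpoint_arn,
        target_endpoint_arn=example.endpoint_arn,  # ARN exported by the S3 endpoint above
        table_mappings=json.dumps({
            "rules": [{
                "rule-type": "selection",
                "rule-id": "1",
                "rule-name": "1",
                "object-locator": {"schema-name": "%", "table-name": "%"},
                "rule-action": "include",
            }],
        }))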
    

    Create S3Endpoint Resource

    Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.

    Constructor syntax

    new S3Endpoint(name: string, args: S3EndpointArgs, opts?: CustomResourceOptions);
    @overload
    def S3Endpoint(resource_name: str,
                   args: S3EndpointArgs,
                   opts: Optional[ResourceOptions] = None)
    
    @overload
    def S3Endpoint(resource_name: str,
                   opts: Optional[ResourceOptions] = None,
                   endpoint_id: Optional[str] = None,
                   service_access_role_arn: Optional[str] = None,
                   endpoint_type: Optional[str] = None,
                   bucket_name: Optional[str] = None,
                   enable_statistics: Optional[bool] = None,
                   cdc_max_batch_interval: Optional[int] = None,
                   cdc_inserts_only: Optional[bool] = None,
                   encoding_type: Optional[str] = None,
                   cdc_min_file_size: Optional[int] = None,
                   cdc_path: Optional[str] = None,
                   certificate_arn: Optional[str] = None,
                   compression_type: Optional[str] = None,
                   csv_delimiter: Optional[str] = None,
                   csv_no_sup_value: Optional[str] = None,
                   add_column_name: Optional[bool] = None,
                   csv_row_delimiter: Optional[str] = None,
                   encryption_mode: Optional[str] = None,
                   data_page_size: Optional[int] = None,
                   date_partition_delimiter: Optional[str] = None,
                   date_partition_enabled: Optional[bool] = None,
                   date_partition_sequence: Optional[str] = None,
                   date_partition_timezone: Optional[str] = None,
                   detach_target_on_lob_lookup_failure_parquet: Optional[bool] = None,
                   dict_page_size_limit: Optional[int] = None,
                   csv_null_value: Optional[str] = None,
                   cdc_inserts_and_updates: Optional[bool] = None,
                   data_format: Optional[str] = None,
                   canned_acl_for_objects: Optional[str] = None,
                   bucket_folder: Optional[str] = None,
                   expected_bucket_owner: Optional[str] = None,
                   external_table_definition: Optional[str] = None,
                   glue_catalog_generation: Optional[bool] = None,
                   ignore_header_rows: Optional[int] = None,
                   include_op_for_full_load: Optional[bool] = None,
                   kms_key_arn: Optional[str] = None,
                   max_file_size: Optional[int] = None,
                   parquet_timestamp_in_millisecond: Optional[bool] = None,
                   parquet_version: Optional[str] = None,
                   preserve_transactions: Optional[bool] = None,
                   rfc4180: Optional[bool] = None,
                   row_group_length: Optional[int] = None,
                   server_side_encryption_kms_key_id: Optional[str] = None,
                   add_trailing_padding_character: Optional[bool] = None,
                   ssl_mode: Optional[str] = None,
                   tags: Optional[Mapping[str, str]] = None,
                   timestamp_column_name: Optional[str] = None,
                   use_csv_no_sup_value: Optional[bool] = None,
                   use_task_start_time_for_full_load_timestamp: Optional[bool] = None)
    func NewS3Endpoint(ctx *Context, name string, args S3EndpointArgs, opts ...ResourceOption) (*S3Endpoint, error)
    public S3Endpoint(string name, S3EndpointArgs args, CustomResourceOptions? opts = null)
    public S3Endpoint(String name, S3EndpointArgs args)
    public S3Endpoint(String name, S3EndpointArgs args, CustomResourceOptions options)
    
    type: aws:dms:S3Endpoint
    properties: # The arguments to resource properties.
    options: # Bag of options to control resource's behavior.
    
    

    Parameters

    name string
    The unique name of the resource.
    args S3EndpointArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    resource_name str
    The unique name of the resource.
    args S3EndpointArgs
    The arguments to resource properties.
    opts ResourceOptions
    Bag of options to control resource's behavior.
    ctx Context
    Context object for the current deployment.
    name string
    The unique name of the resource.
    args S3EndpointArgs
    The arguments to resource properties.
    opts ResourceOption
    Bag of options to control resource's behavior.
    name string
    The unique name of the resource.
    args S3EndpointArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    name String
    The unique name of the resource.
    args S3EndpointArgs
    The arguments to resource properties.
    options CustomResourceOptions
    Bag of options to control resource's behavior.

    Constructor example

    The following reference example uses placeholder values for all input properties.

    var s3endpointResource = new Aws.Dms.S3Endpoint("s3endpointResource", new()
    {
        EndpointId = "string",
        ServiceAccessRoleArn = "string",
        EndpointType = "string",
        BucketName = "string",
        EnableStatistics = false,
        CdcMaxBatchInterval = 0,
        CdcInsertsOnly = false,
        EncodingType = "string",
        CdcMinFileSize = 0,
        CdcPath = "string",
        CertificateArn = "string",
        CompressionType = "string",
        CsvDelimiter = "string",
        CsvNoSupValue = "string",
        AddColumnName = false,
        CsvRowDelimiter = "string",
        EncryptionMode = "string",
        DataPageSize = 0,
        DatePartitionDelimiter = "string",
        DatePartitionEnabled = false,
        DatePartitionSequence = "string",
        DatePartitionTimezone = "string",
        DetachTargetOnLobLookupFailureParquet = false,
        DictPageSizeLimit = 0,
        CsvNullValue = "string",
        CdcInsertsAndUpdates = false,
        DataFormat = "string",
        CannedAclForObjects = "string",
        BucketFolder = "string",
        ExpectedBucketOwner = "string",
        ExternalTableDefinition = "string",
        GlueCatalogGeneration = false,
        IgnoreHeaderRows = 0,
        IncludeOpForFullLoad = false,
        KmsKeyArn = "string",
        MaxFileSize = 0,
        ParquetTimestampInMillisecond = false,
        ParquetVersion = "string",
        PreserveTransactions = false,
        Rfc4180 = false,
        RowGroupLength = 0,
        ServerSideEncryptionKmsKeyId = "string",
        AddTrailingPaddingCharacter = false,
        SslMode = "string",
        Tags = 
        {
            { "string", "string" },
        },
        TimestampColumnName = "string",
        UseCsvNoSupValue = false,
        UseTaskStartTimeForFullLoadTimestamp = false,
    });
    
    example, err := dms.NewS3Endpoint(ctx, "s3endpointResource", &dms.S3EndpointArgs{
    	EndpointId:                            pulumi.String("string"),
    	ServiceAccessRoleArn:                  pulumi.String("string"),
    	EndpointType:                          pulumi.String("string"),
    	BucketName:                            pulumi.String("string"),
    	EnableStatistics:                      pulumi.Bool(false),
    	CdcMaxBatchInterval:                   pulumi.Int(0),
    	CdcInsertsOnly:                        pulumi.Bool(false),
    	EncodingType:                          pulumi.String("string"),
    	CdcMinFileSize:                        pulumi.Int(0),
    	CdcPath:                               pulumi.String("string"),
    	CertificateArn:                        pulumi.String("string"),
    	CompressionType:                       pulumi.String("string"),
    	CsvDelimiter:                          pulumi.String("string"),
    	CsvNoSupValue:                         pulumi.String("string"),
    	AddColumnName:                         pulumi.Bool(false),
    	CsvRowDelimiter:                       pulumi.String("string"),
    	EncryptionMode:                        pulumi.String("string"),
    	DataPageSize:                          pulumi.Int(0),
    	DatePartitionDelimiter:                pulumi.String("string"),
    	DatePartitionEnabled:                  pulumi.Bool(false),
    	DatePartitionSequence:                 pulumi.String("string"),
    	DatePartitionTimezone:                 pulumi.String("string"),
    	DetachTargetOnLobLookupFailureParquet: pulumi.Bool(false),
    	DictPageSizeLimit:                     pulumi.Int(0),
    	CsvNullValue:                          pulumi.String("string"),
    	CdcInsertsAndUpdates:                  pulumi.Bool(false),
    	DataFormat:                            pulumi.String("string"),
    	CannedAclForObjects:                   pulumi.String("string"),
    	BucketFolder:                          pulumi.String("string"),
    	ExpectedBucketOwner:                   pulumi.String("string"),
    	ExternalTableDefinition:               pulumi.String("string"),
    	GlueCatalogGeneration:                 pulumi.Bool(false),
    	IgnoreHeaderRows:                      pulumi.Int(0),
    	IncludeOpForFullLoad:                  pulumi.Bool(false),
    	KmsKeyArn:                             pulumi.String("string"),
    	MaxFileSize:                           pulumi.Int(0),
    	ParquetTimestampInMillisecond:         pulumi.Bool(false),
    	ParquetVersion:                        pulumi.String("string"),
    	PreserveTransactions:                  pulumi.Bool(false),
    	Rfc4180:                               pulumi.Bool(false),
    	RowGroupLength:                        pulumi.Int(0),
    	ServerSideEncryptionKmsKeyId:          pulumi.String("string"),
    	AddTrailingPaddingCharacter:           pulumi.Bool(false),
    	SslMode:                               pulumi.String("string"),
    	Tags: pulumi.StringMap{
    		"string": pulumi.String("string"),
    	},
    	TimestampColumnName:                  pulumi.String("string"),
    	UseCsvNoSupValue:                     pulumi.Bool(false),
    	UseTaskStartTimeForFullLoadTimestamp: pulumi.Bool(false),
    })
    
    var s3endpointResource = new S3Endpoint("s3endpointResource", S3EndpointArgs.builder()
        .endpointId("string")
        .serviceAccessRoleArn("string")
        .endpointType("string")
        .bucketName("string")
        .enableStatistics(false)
        .cdcMaxBatchInterval(0)
        .cdcInsertsOnly(false)
        .encodingType("string")
        .cdcMinFileSize(0)
        .cdcPath("string")
        .certificateArn("string")
        .compressionType("string")
        .csvDelimiter("string")
        .csvNoSupValue("string")
        .addColumnName(false)
        .csvRowDelimiter("string")
        .encryptionMode("string")
        .dataPageSize(0)
        .datePartitionDelimiter("string")
        .datePartitionEnabled(false)
        .datePartitionSequence("string")
        .datePartitionTimezone("string")
        .detachTargetOnLobLookupFailureParquet(false)
        .dictPageSizeLimit(0)
        .csvNullValue("string")
        .cdcInsertsAndUpdates(false)
        .dataFormat("string")
        .cannedAclForObjects("string")
        .bucketFolder("string")
        .expectedBucketOwner("string")
        .externalTableDefinition("string")
        .glueCatalogGeneration(false)
        .ignoreHeaderRows(0)
        .includeOpForFullLoad(false)
        .kmsKeyArn("string")
        .maxFileSize(0)
        .parquetTimestampInMillisecond(false)
        .parquetVersion("string")
        .preserveTransactions(false)
        .rfc4180(false)
        .rowGroupLength(0)
        .serverSideEncryptionKmsKeyId("string")
        .addTrailingPaddingCharacter(false)
        .sslMode("string")
        .tags(Map.of("string", "string"))
        .timestampColumnName("string")
        .useCsvNoSupValue(false)
        .useTaskStartTimeForFullLoadTimestamp(false)
        .build());
    
    s3endpoint_resource = aws.dms.S3Endpoint("s3endpointResource",
        endpoint_id="string",
        service_access_role_arn="string",
        endpoint_type="string",
        bucket_name="string",
        enable_statistics=False,
        cdc_max_batch_interval=0,
        cdc_inserts_only=False,
        encoding_type="string",
        cdc_min_file_size=0,
        cdc_path="string",
        certificate_arn="string",
        compression_type="string",
        csv_delimiter="string",
        csv_no_sup_value="string",
        add_column_name=False,
        csv_row_delimiter="string",
        encryption_mode="string",
        data_page_size=0,
        date_partition_delimiter="string",
        date_partition_enabled=False,
        date_partition_sequence="string",
        date_partition_timezone="string",
        detach_target_on_lob_lookup_failure_parquet=False,
        dict_page_size_limit=0,
        csv_null_value="string",
        cdc_inserts_and_updates=False,
        data_format="string",
        canned_acl_for_objects="string",
        bucket_folder="string",
        expected_bucket_owner="string",
        external_table_definition="string",
        glue_catalog_generation=False,
        ignore_header_rows=0,
        include_op_for_full_load=False,
        kms_key_arn="string",
        max_file_size=0,
        parquet_timestamp_in_millisecond=False,
        parquet_version="string",
        preserve_transactions=False,
        rfc4180=False,
        row_group_length=0,
        server_side_encryption_kms_key_id="string",
        add_trailing_padding_character=False,
        ssl_mode="string",
        tags={
            "string": "string",
        },
        timestamp_column_name="string",
        use_csv_no_sup_value=False,
        use_task_start_time_for_full_load_timestamp=False)
    
    const s3endpointResource = new aws.dms.S3Endpoint("s3endpointResource", {
        endpointId: "string",
        serviceAccessRoleArn: "string",
        endpointType: "string",
        bucketName: "string",
        enableStatistics: false,
        cdcMaxBatchInterval: 0,
        cdcInsertsOnly: false,
        encodingType: "string",
        cdcMinFileSize: 0,
        cdcPath: "string",
        certificateArn: "string",
        compressionType: "string",
        csvDelimiter: "string",
        csvNoSupValue: "string",
        addColumnName: false,
        csvRowDelimiter: "string",
        encryptionMode: "string",
        dataPageSize: 0,
        datePartitionDelimiter: "string",
        datePartitionEnabled: false,
        datePartitionSequence: "string",
        datePartitionTimezone: "string",
        detachTargetOnLobLookupFailureParquet: false,
        dictPageSizeLimit: 0,
        csvNullValue: "string",
        cdcInsertsAndUpdates: false,
        dataFormat: "string",
        cannedAclForObjects: "string",
        bucketFolder: "string",
        expectedBucketOwner: "string",
        externalTableDefinition: "string",
        glueCatalogGeneration: false,
        ignoreHeaderRows: 0,
        includeOpForFullLoad: false,
        kmsKeyArn: "string",
        maxFileSize: 0,
        parquetTimestampInMillisecond: false,
        parquetVersion: "string",
        preserveTransactions: false,
        rfc4180: false,
        rowGroupLength: 0,
        serverSideEncryptionKmsKeyId: "string",
        addTrailingPaddingCharacter: false,
        sslMode: "string",
        tags: {
            string: "string",
        },
        timestampColumnName: "string",
        useCsvNoSupValue: false,
        useTaskStartTimeForFullLoadTimestamp: false,
    });
    
    type: aws:dms:S3Endpoint
    properties:
        addColumnName: false
        addTrailingPaddingCharacter: false
        bucketFolder: string
        bucketName: string
        cannedAclForObjects: string
        cdcInsertsAndUpdates: false
        cdcInsertsOnly: false
        cdcMaxBatchInterval: 0
        cdcMinFileSize: 0
        cdcPath: string
        certificateArn: string
        compressionType: string
        csvDelimiter: string
        csvNoSupValue: string
        csvNullValue: string
        csvRowDelimiter: string
        dataFormat: string
        dataPageSize: 0
        datePartitionDelimiter: string
        datePartitionEnabled: false
        datePartitionSequence: string
        datePartitionTimezone: string
        detachTargetOnLobLookupFailureParquet: false
        dictPageSizeLimit: 0
        enableStatistics: false
        encodingType: string
        encryptionMode: string
        endpointId: string
        endpointType: string
        expectedBucketOwner: string
        externalTableDefinition: string
        glueCatalogGeneration: false
        ignoreHeaderRows: 0
        includeOpForFullLoad: false
        kmsKeyArn: string
        maxFileSize: 0
        parquetTimestampInMillisecond: false
        parquetVersion: string
        preserveTransactions: false
        rfc4180: false
        rowGroupLength: 0
        serverSideEncryptionKmsKeyId: string
        serviceAccessRoleArn: string
        sslMode: string
        tags:
            string: string
        timestampColumnName: string
        useCsvNoSupValue: false
        useTaskStartTimeForFullLoadTimestamp: false
    

    S3Endpoint Resource Properties

    To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

    Inputs

    In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.

    The S3Endpoint resource accepts the following input properties:

    BucketName string
    S3 bucket name.
    EndpointId string
    Database endpoint identifier. Identifiers must contain from 1 to 255 alphanumeric characters or hyphens, begin with a letter, contain only ASCII letters, digits, and hyphens, not end with a hyphen, and not contain two consecutive hyphens.
    EndpointType string
    Type of endpoint. Valid values are source, target.
    ServiceAccessRoleArn string

    ARN of the IAM role with permissions to the S3 Bucket.

    The following arguments are optional:

    AddColumnName bool
    Whether to add column name information to the .csv output file. Default is false.
    AddTrailingPaddingCharacter bool
    Whether to add padding. Default is false. (Ignored for source endpoints.)
    BucketFolder string
    S3 object prefix.
    CannedAclForObjects string
    Predefined (canned) access control list for objects created in an S3 bucket. Valid values include none, private, public-read, public-read-write, authenticated-read, aws-exec-read, bucket-owner-read, and bucket-owner-full-control. Default is none.
    CdcInsertsAndUpdates bool
    Whether to write insert and update operations to .csv or .parquet output files. Default is false.
    CdcInsertsOnly bool
    Whether to write insert operations to .csv or .parquet output files. Default is false.
    CdcMaxBatchInterval int
    Maximum length of the interval, defined in seconds, after which to output a file to Amazon S3. (AWS default is 60.)
    CdcMinFileSize int
    Minimum file size condition as defined in kilobytes to output a file to Amazon S3. (AWS default is 32000 KB.)
    CdcPath string
    Folder path of CDC files. If cdc_path is set, AWS DMS reads CDC files from this path and replicates the data changes to the target endpoint. Supported in AWS DMS versions 3.4.2 and later.
    CertificateArn string
    ARN for the certificate.
    CompressionType string
    Set to compress target files. Valid values are GZIP and NONE. Default is NONE. (Ignored for source endpoints.)
    CsvDelimiter string
    Delimiter used to separate columns in the source files. Default is ,.
    CsvNoSupValue string
    Only applies if output files for a CDC load are written in .csv format. If use_csv_no_sup_value is set to true, string to use for all columns not included in the supplemental log. If you do not specify a string value, DMS uses the null value for these columns regardless of use_csv_no_sup_value. (Ignored for source endpoints.)
    CsvNullValue string
    String to use as null when writing to the target. (AWS default is NULL.)
    CsvRowDelimiter string
    Delimiter used to separate rows in the source files. Default is newline (i.e., \n).
    DataFormat string
    Output format for the files that AWS DMS uses to create S3 objects. Valid values are csv and parquet. (Ignored for source endpoints -- only csv is valid.)
    DataPageSize int
    Size of one data page in bytes. (AWS default is 1 MiB, i.e., 1048576.)
    DatePartitionDelimiter string
    Date separating delimiter to use during folder partitioning. Valid values are SLASH, UNDERSCORE, DASH, and NONE. (AWS default is SLASH.) (Ignored for source endpoints.)
    DatePartitionEnabled bool
    Partition S3 bucket folders based on transaction commit dates. Default is false. (Ignored for source endpoints.)
    DatePartitionSequence string
    Date format to use during folder partitioning. Use this parameter when date_partition_enabled is set to true. Valid values are YYYYMMDD, YYYYMMDDHH, YYYYMM, MMYYYYDD, and DDMMYYYY. (AWS default is YYYYMMDD.) (Ignored for source endpoints.)
    DatePartitionTimezone string
    Convert the current UTC time to a timezone. The conversion occurs when a date partition folder is created and a CDC filename is generated. The timezone format is Area/Location (e.g., Europe/Paris). Use this when date_partition_enabled is true. (Ignored for source endpoints.)
    DetachTargetOnLobLookupFailureParquet bool
    Undocumented argument for use as directed by AWS Support.
    DictPageSizeLimit int
    Maximum size in bytes of an encoded dictionary page of a column. (AWS default is 1 MiB, i.e., 1048576.)
    EnableStatistics bool
    Whether to enable statistics for Parquet pages and row groups. Default is true.
    EncodingType string
    Type of encoding to use. Valid values are rle_dictionary, plain, and plain_dictionary. (AWS default is rle_dictionary.)
    EncryptionMode string
    Server-side encryption mode that you want to encrypt your .csv or .parquet object files copied to S3. Valid values are SSE_S3 and SSE_KMS. (AWS default is SSE_S3.) (Ignored for source endpoints -- only SSE_S3 is valid.)
    ExpectedBucketOwner string
    Bucket owner to prevent sniping. Value is an AWS account ID.
    ExternalTableDefinition string
    JSON document that describes how AWS DMS should interpret the data.
    GlueCatalogGeneration bool
    Whether to integrate AWS Glue Data Catalog with an Amazon S3 target. See Using AWS Glue Data Catalog with an Amazon S3 target for AWS DMS for more information. Default is false.
    IgnoreHeaderRows int
    When this value is set to 1, DMS ignores the first row header in a .csv file. (AWS default is 0.)
    IncludeOpForFullLoad bool
    Whether to enable a full load to write INSERT operations to the .csv output files only to indicate how the rows were added to the source database. Default is false.
    KmsKeyArn string
    ARN for the KMS key that will be used to encrypt the connection parameters. If you do not specify a value for kms_key_arn, then AWS DMS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region.
    MaxFileSize int
    Maximum size (in KB) of any .csv file to be created while migrating to an S3 target during full load. Valid values are from 1 to 1048576. (AWS default is 1 GB, i.e., 1048576.)
    ParquetTimestampInMillisecond bool
    Specifies the precision of any TIMESTAMP column values written to an S3 object file in .parquet format. Default is false. (Ignored for source endpoints.)
    ParquetVersion string
    Version of the .parquet file format. Valid values are parquet-1-0 and parquet-2-0. (AWS default is parquet-1-0.) (Ignored for source endpoints.)
    PreserveTransactions bool
    Whether DMS saves the transaction order for a CDC load on the S3 target specified by cdc_path. Default is false. (Ignored for source endpoints.)
    Rfc4180 bool
    For an S3 source, whether each leading double quotation mark has to be followed by an ending double quotation mark. Default is true.
    RowGroupLength int
    Number of rows in a row group. (AWS default is 10000.)
    ServerSideEncryptionKmsKeyId string
    When encryption_mode is SSE_KMS, ARN for the AWS KMS key. (Ignored for source endpoints -- only SSE_S3 encryption_mode is valid.)
    SslMode string
    SSL mode to use for the connection. Valid values are none, require, verify-ca, verify-full. (AWS default is none.)
    Tags Dictionary<string, string>
    Map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
    TimestampColumnName string
    Column to add with timestamp information to the endpoint data for an Amazon S3 target.
    UseCsvNoSupValue bool
    Whether to use csv_no_sup_value for columns not included in the supplemental log. (Ignored for source endpoints.)
    UseTaskStartTimeForFullLoadTimestamp bool
    When set to true, uses the task start time as the timestamp column value instead of the time the data is written to the target. For full load, when set to true, each row of the timestamp column contains the task start time. For CDC loads, each row of the timestamp column contains the transaction commit time. When set to false, the full load timestamp in the timestamp column increments with the time the data arrives at the target. Default is false.
    BucketName string
    S3 bucket name.
    EndpointId string
    Database endpoint identifier. Identifiers must contain from 1 to 255 alphanumeric characters or hyphens, begin with a letter, contain only ASCII letters, digits, and hyphens, not end with a hyphen, and not contain two consecutive hyphens.
    EndpointType string
    Type of endpoint. Valid values are source, target.
    ServiceAccessRoleArn string

    ARN of the IAM role with permissions to the S3 Bucket.

    The following arguments are optional:

    AddColumnName bool
    Whether to add column name information to the .csv output file. Default is false.
    AddTrailingPaddingCharacter bool
    Whether to add padding. Default is false. (Ignored for source endpoints.)
    BucketFolder string
    S3 object prefix.
    CannedAclForObjects string
    Predefined (canned) access control list for objects created in an S3 bucket. Valid values include none, private, public-read, public-read-write, authenticated-read, aws-exec-read, bucket-owner-read, and bucket-owner-full-control. Default is none.
    CdcInsertsAndUpdates bool
    Whether to write insert and update operations to .csv or .parquet output files. Default is false.
    CdcInsertsOnly bool
    Whether to write insert operations to .csv or .parquet output files. Default is false.
    CdcMaxBatchInterval int
    Maximum length of the interval, defined in seconds, after which to output a file to Amazon S3. (AWS default is 60.)
    CdcMinFileSize int
    Minimum file size condition as defined in kilobytes to output a file to Amazon S3. (AWS default is 32000 KB.)
    CdcPath string
    Folder path of CDC files. If cdc_path is set, AWS DMS reads CDC files from this path and replicates the data changes to the target endpoint. Supported in AWS DMS versions 3.4.2 and later.
    CertificateArn string
    ARN for the certificate.
    CompressionType string
    Set to compress target files. Valid values are GZIP and NONE. Default is NONE. (Ignored for source endpoints.)
    CsvDelimiter string
    Delimiter used to separate columns in the source files. Default is ,.
    CsvNoSupValue string
    Only applies if output files for a CDC load are written in .csv format. If use_csv_no_sup_value is set to true, string to use for all columns not included in the supplemental log. If you do not specify a string value, DMS uses the null value for these columns regardless of use_csv_no_sup_value. (Ignored for source endpoints.)
    CsvNullValue string
    String to use as null when writing to the target. (AWS default is NULL.)
    CsvRowDelimiter string
    Delimiter used to separate rows in the source files. Default is newline (i.e., \n).
    DataFormat string
    Output format for the files that AWS DMS uses to create S3 objects. Valid values are csv and parquet. (Ignored for source endpoints -- only csv is valid.)
    DataPageSize int
    Size of one data page in bytes. (AWS default is 1 MiB, i.e., 1048576.)
    DatePartitionDelimiter string
    Date separating delimiter to use during folder partitioning. Valid values are SLASH, UNDERSCORE, DASH, and NONE. (AWS default is SLASH.) (Ignored for source endpoints.)
    DatePartitionEnabled bool
    Partition S3 bucket folders based on transaction commit dates. Default is false. (Ignored for source endpoints.)
    DatePartitionSequence string
    Date format to use during folder partitioning. Use this parameter when date_partition_enabled is set to true. Valid values are YYYYMMDD, YYYYMMDDHH, YYYYMM, MMYYYYDD, and DDMMYYYY. (AWS default is YYYYMMDD.) (Ignored for source endpoints.)
    DatePartitionTimezone string
    Convert the current UTC time to a timezone. The conversion occurs when a date partition folder is created and a CDC filename is generated. The timezone format is Area/Location (e.g., Europe/Paris). Use this when date_partition_enabled is true. (Ignored for source endpoints.)
    DetachTargetOnLobLookupFailureParquet bool
    Undocumented argument for use as directed by AWS Support.
    DictPageSizeLimit int
    Maximum size in bytes of an encoded dictionary page of a column. (AWS default is 1 MiB, i.e., 1048576.)
    EnableStatistics bool
    Whether to enable statistics for Parquet pages and row groups. Default is true.
    EncodingType string
    Type of encoding to use. Valid values are rle_dictionary, plain, and plain_dictionary. (AWS default is rle_dictionary.)
    EncryptionMode string
    Server-side encryption mode that you want to encrypt your .csv or .parquet object files copied to S3. Valid values are SSE_S3 and SSE_KMS. (AWS default is SSE_S3.) (Ignored for source endpoints -- only SSE_S3 is valid.)
    ExpectedBucketOwner string
    Bucket owner to prevent sniping. Value is an AWS account ID.
    ExternalTableDefinition string
    JSON document that describes how AWS DMS should interpret the data.
    GlueCatalogGeneration bool
    Whether to integrate AWS Glue Data Catalog with an Amazon S3 target. See Using AWS Glue Data Catalog with an Amazon S3 target for AWS DMS for more information. Default is false.
    IgnoreHeaderRows int
    When this value is set to 1, DMS ignores the first row header in a .csv file. (AWS default is 0.)
    IncludeOpForFullLoad bool
    Whether to enable a full load to write INSERT operations to the .csv output files only to indicate how the rows were added to the source database. Default is false.
    KmsKeyArn string
    ARN for the KMS key that will be used to encrypt the connection parameters. If you do not specify a value for kms_key_arn, then AWS DMS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region.
    MaxFileSize int
    Maximum size (in KB) of any .csv file to be created while migrating to an S3 target during full load. Valid values are from 1 to 1048576. (AWS default is 1 GB, i.e., 1048576.)
    ParquetTimestampInMillisecond bool
    Specifies the precision of any TIMESTAMP column values written to an S3 object file in .parquet format. Default is false. (Ignored for source endpoints.)
    ParquetVersion string
    Version of the .parquet file format. Valid values are parquet-1-0 and parquet-2-0. (AWS default is parquet-1-0.) (Ignored for source endpoints.)
    PreserveTransactions bool
    Whether DMS saves the transaction order for a CDC load on the S3 target specified by cdc_path. Default is false. (Ignored for source endpoints.)
    Rfc4180 bool
    For an S3 source, whether each leading double quotation mark has to be followed by an ending double quotation mark. Default is true.
    RowGroupLength int
    Number of rows in a row group. (AWS default is 10000.)
    ServerSideEncryptionKmsKeyId string
    When encryption_mode is SSE_KMS, ARN for the AWS KMS key. (Ignored for source endpoints -- only SSE_S3 encryption_mode is valid.)
    SslMode string
    SSL mode to use for the connection. Valid values are none, require, verify-ca, verify-full. (AWS default is none.)
    Tags map[string]string
    Map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
    TimestampColumnName string
    Column to add with timestamp information to the endpoint data for an Amazon S3 target.
    UseCsvNoSupValue bool
    Whether to use csv_no_sup_value for columns not included in the supplemental log. (Ignored for source endpoints.)
    UseTaskStartTimeForFullLoadTimestamp bool
    When set to true, uses the task start time as the timestamp column value instead of the time the data is written to the target. For full load, when set to true, each row of the timestamp column contains the task start time. For CDC loads, each row of the timestamp column contains the transaction commit time. When set to false, the full load timestamp in the timestamp column increments with the time the data arrives at the target. Default is false.
    bucketName String
    S3 bucket name.
    endpointId String
    Database endpoint identifier. Identifiers must contain from 1 to 255 alphanumeric characters or hyphens, begin with a letter, contain only ASCII letters, digits, and hyphens, not end with a hyphen, and not contain two consecutive hyphens.
    endpointType String
    Type of endpoint. Valid values are source, target.
    serviceAccessRoleArn String

    ARN of the IAM role with permissions to the S3 Bucket.

    The following arguments are optional:

    addColumnName Boolean
    Whether to add column name information to the .csv output file. Default is false.
    addTrailingPaddingCharacter Boolean
    Whether to add padding. Default is false. (Ignored for source endpoints.)
    bucketFolder String
    S3 object prefix.
    cannedAclForObjects String
    Predefined (canned) access control list for objects created in an S3 bucket. Valid values include none, private, public-read, public-read-write, authenticated-read, aws-exec-read, bucket-owner-read, and bucket-owner-full-control. Default is none.
    cdcInsertsAndUpdates Boolean
    Whether to write insert and update operations to .csv or .parquet output files. Default is false.
    cdcInsertsOnly Boolean
    Whether to write insert operations to .csv or .parquet output files. Default is false.
    cdcMaxBatchInterval Integer
    Maximum length of the interval, defined in seconds, after which to output a file to Amazon S3. (AWS default is 60.)
    cdcMinFileSize Integer
    Minimum file size condition as defined in kilobytes to output a file to Amazon S3. (AWS default is 32000 KB.)
    cdcPath String
    Folder path of CDC files. If cdc_path is set, AWS DMS reads CDC files from this path and replicates the data changes to the target endpoint. Supported in AWS DMS versions 3.4.2 and later.
    certificateArn String
    ARN for the certificate.
    compressionType String
    Set to compress target files. Valid values are GZIP and NONE. Default is NONE. (Ignored for source endpoints.)
    csvDelimiter String
    Delimiter used to separate columns in the source files. Default is ,.
    csvNoSupValue String
    Only applies if output files for a CDC load are written in .csv format. If use_csv_no_sup_value is set to true, string to use for all columns not included in the supplemental log. If you do not specify a string value, DMS uses the null value for these columns regardless of use_csv_no_sup_value. (Ignored for source endpoints.)
    csvNullValue String
    String to use as null when writing to the target. (AWS default is NULL.)
    csvRowDelimiter String
    Delimiter used to separate rows in the source files. Default is newline (i.e., \n).
    dataFormat String
    Output format for the files that AWS DMS uses to create S3 objects. Valid values are csv and parquet. (Ignored for source endpoints -- only csv is valid.)
    dataPageSize Integer
    Size of one data page in bytes. (AWS default is 1 MiB, i.e., 1048576.)
    datePartitionDelimiter String
    Date separating delimiter to use during folder partitioning. Valid values are SLASH, UNDERSCORE, DASH, and NONE. (AWS default is SLASH.) (Ignored for source endpoints.)
    datePartitionEnabled Boolean
    Partition S3 bucket folders based on transaction commit dates. Default is false. (Ignored for source endpoints.)
    datePartitionSequence String
    Date format to use during folder partitioning. Use this parameter when date_partition_enabled is set to true. Valid values are YYYYMMDD, YYYYMMDDHH, YYYYMM, MMYYYYDD, and DDMMYYYY. (AWS default is YYYYMMDD.) (Ignored for source endpoints.)
    datePartitionTimezone String
    Convert the current UTC time to a timezone. The conversion occurs when a date partition folder is created and a CDC filename is generated. The timezone format is Area/Location (e.g., Europe/Paris). Use this when date_partition_enabled is true. (Ignored for source endpoints.)
    detachTargetOnLobLookupFailureParquet Boolean
    Undocumented argument for use as directed by AWS Support.
    dictPageSizeLimit Integer
    Maximum size in bytes of an encoded dictionary page of a column. (AWS default is 1 MiB, i.e., 1048576.)
    enableStatistics Boolean
    Whether to enable statistics for Parquet pages and row groups. Default is true.
    encodingType String
    Type of encoding to use. Valid values are rle_dictionary, plain, and plain_dictionary. (AWS default is rle_dictionary.)
    encryptionMode String
    Server-side encryption mode that you want to encrypt your .csv or .parquet object files copied to S3. Valid values are SSE_S3 and SSE_KMS. (AWS default is SSE_S3.) (Ignored for source endpoints -- only SSE_S3 is valid.)
    expectedBucketOwner String
    Bucket owner to prevent sniping. Value is an AWS account ID.
    externalTableDefinition String
    JSON document that describes how AWS DMS should interpret the data.
    glueCatalogGeneration Boolean
    Whether to integrate AWS Glue Data Catalog with an Amazon S3 target. See Using AWS Glue Data Catalog with an Amazon S3 target for AWS DMS for more information. Default is false.
    ignoreHeaderRows Integer
    When this value is set to 1, DMS ignores the first row header in a .csv file. (AWS default is 0.)
    includeOpForFullLoad Boolean
    Whether to enable a full load to write INSERT operations to the .csv output files only to indicate how the rows were added to the source database. Default is false.
    kmsKeyArn String
    ARN for the KMS key that will be used to encrypt the connection parameters. If you do not specify a value for kms_key_arn, then AWS DMS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region.
    maxFileSize Integer
    Maximum size (in KB) of any .csv file to be created while migrating to an S3 target during full load. Valid values are from 1 to 1048576. (AWS default is 1 GB, i.e., 1048576.)
    parquetTimestampInMillisecond Boolean
    Specifies the precision of any TIMESTAMP column values written to an S3 object file in .parquet format. Default is false. (Ignored for source endpoints.)
    parquetVersion String
    Version of the .parquet file format. Valid values are parquet-1-0 and parquet-2-0. (AWS default is parquet-1-0.) (Ignored for source endpoints.)
    preserveTransactions Boolean
    Whether DMS saves the transaction order for a CDC load on the S3 target specified by cdc_path. Default is false. (Ignored for source endpoints.)
    rfc4180 Boolean
    For an S3 source, whether each leading double quotation mark has to be followed by an ending double quotation mark. Default is true.
    rowGroupLength Integer
    Number of rows in a row group. (AWS default is 10000.)
    serverSideEncryptionKmsKeyId String
    When encryption_mode is SSE_KMS, ARN for the AWS KMS key. (Ignored for source endpoints -- only SSE_S3 encryption_mode is valid.)
    sslMode String
    SSL mode to use for the connection. Valid values are none, require, verify-ca, verify-full. (AWS default is none.)
    tags Map<String,String>
    Map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
    timestampColumnName String
    Column to add with timestamp information to the endpoint data for an Amazon S3 target.
    useCsvNoSupValue Boolean
    Whether to use csv_no_sup_value for columns not included in the supplemental log. (Ignored for source endpoints.)
    useTaskStartTimeForFullLoadTimestamp Boolean
    When set to true, DMS uses the task start time as the timestamp column value instead of the time the data is written to the target. For full load, when set to true, each row of the timestamp column contains the task start time. For CDC loads, each row of the timestamp column contains the transaction commit time. When set to false, the full load timestamp in the timestamp column increments with the time the data arrives at the target. Default is false.
    bucketName string
    S3 bucket name.
    endpointId string
    Database endpoint identifier. Identifiers must contain from 1 to 255 alphanumeric characters or hyphens, begin with a letter, contain only ASCII letters, digits, and hyphens, not end with a hyphen, and not contain two consecutive hyphens.
    endpointType string
    Type of endpoint. Valid values are source, target.
    serviceAccessRoleArn string

    ARN of the IAM role with permissions to the S3 Bucket.

    The following arguments are optional:

    addColumnName boolean
    Whether to add column name information to the .csv output file. Default is false.
    addTrailingPaddingCharacter boolean
    Whether to add padding. Default is false. (Ignored for source endpoints.)
    bucketFolder string
    S3 object prefix.
    cannedAclForObjects string
    Predefined (canned) access control list for objects created in an S3 bucket. Valid values include none, private, public-read, public-read-write, authenticated-read, aws-exec-read, bucket-owner-read, and bucket-owner-full-control. Default is none.
    cdcInsertsAndUpdates boolean
    Whether to write insert and update operations to .csv or .parquet output files. Default is false.
    cdcInsertsOnly boolean
    Whether to write insert operations to .csv or .parquet output files. Default is false.
    cdcMaxBatchInterval number
    Maximum length of the interval, defined in seconds, after which to output a file to Amazon S3. (AWS default is 60.)
    cdcMinFileSize number
    Minimum file size condition as defined in kilobytes to output a file to Amazon S3. (AWS default is 32000 KB.)
    cdcPath string
    Folder path of CDC files. If cdc_path is set, AWS DMS reads CDC files from this path and replicates the data changes to the target endpoint. Supported in AWS DMS versions 3.4.2 and later.
    certificateArn string
    ARN for the certificate.
    compressionType string
    Set to compress target files. Valid values are GZIP and NONE. Default is NONE. (Ignored for source endpoints.)
    csvDelimiter string
    Delimiter used to separate columns in the source files. Default is ,.
    csvNoSupValue string
    Only applies if output files for a CDC load are written in .csv format. If use_csv_no_sup_value is set to true, string to use for all columns not included in the supplemental log. If you do not specify a string value, DMS uses the null value for these columns regardless of use_csv_no_sup_value. (Ignored for source endpoints.)
    csvNullValue string
    String to use as null when writing to the target. (AWS default is NULL.)
    csvRowDelimiter string
    Delimiter used to separate rows in the source files. Default is newline (i.e., \n).
    dataFormat string
    Output format for the files that AWS DMS uses to create S3 objects. Valid values are csv and parquet. (Ignored for source endpoints -- only csv is valid.)
    dataPageSize number
    Size of one data page in bytes. (AWS default is 1 MiB, i.e., 1048576.)
    datePartitionDelimiter string
    Date separating delimiter to use during folder partitioning. Valid values are SLASH, UNDERSCORE, DASH, and NONE. (AWS default is SLASH.) (Ignored for source endpoints.)
    datePartitionEnabled boolean
    Partition S3 bucket folders based on transaction commit dates. Default is false. (Ignored for source endpoints.)
    datePartitionSequence string
    Date format to use during folder partitioning. Use this parameter when date_partition_enabled is set to true. Valid values are YYYYMMDD, YYYYMMDDHH, YYYYMM, MMYYYYDD, and DDMMYYYY. (AWS default is YYYYMMDD.) (Ignored for source endpoints.)
    datePartitionTimezone string
    Convert the current UTC time to a timezone. The conversion occurs when a date partition folder is created and a CDC filename is generated. The timezone format is Area/Location (e.g., Europe/Paris). Use this when date_partition_enabled is true. (Ignored for source endpoints.)
    detachTargetOnLobLookupFailureParquet boolean
    Undocumented argument for use as directed by AWS Support.
    dictPageSizeLimit number
    Maximum size in bytes of an encoded dictionary page of a column. (AWS default is 1 MiB, i.e., 1048576.)
    enableStatistics boolean
    Whether to enable statistics for Parquet pages and row groups. Default is true.
    encodingType string
    Type of encoding to use. Valid values are rle_dictionary, plain, and plain_dictionary. (AWS default is rle_dictionary.)
    encryptionMode string
    Server-side encryption mode that you want to encrypt your .csv or .parquet object files copied to S3. Valid values are SSE_S3 and SSE_KMS. (AWS default is SSE_S3.) (Ignored for source endpoints -- only SSE_S3 is valid.)
    expectedBucketOwner string
    Bucket owner to prevent sniping. Value is an AWS account ID.
    externalTableDefinition string
    JSON document that describes how AWS DMS should interpret the data.
    glueCatalogGeneration boolean
    Whether to integrate AWS Glue Data Catalog with an Amazon S3 target. See Using AWS Glue Data Catalog with an Amazon S3 target for AWS DMS for more information. Default is false.
    ignoreHeaderRows number
    When this value is set to 1, DMS ignores the first row header in a .csv file. (AWS default is 0.)
    includeOpForFullLoad boolean
    Whether to enable a full load to write INSERT operations to the .csv output files only to indicate how the rows were added to the source database. Default is false.
    kmsKeyArn string
    ARN for the KMS key that will be used to encrypt the connection parameters. If you do not specify a value for kms_key_arn, then AWS DMS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region.
    maxFileSize number
    Maximum size (in KB) of any .csv file to be created while migrating to an S3 target during full load. Valid values are from 1 to 1048576. (AWS default is 1 GB, i.e., 1048576.)
    parquetTimestampInMillisecond boolean
    Specifies the precision of any TIMESTAMP column values written to an S3 object file in .parquet format. Default is false. (Ignored for source endpoints.)
    parquetVersion string
    Version of the .parquet file format. Valid values are parquet-1-0 and parquet-2-0. (AWS default is parquet-1-0.) (Ignored for source endpoints.)
    preserveTransactions boolean
    Whether DMS saves the transaction order for a CDC load on the S3 target specified by cdc_path. Default is false. (Ignored for source endpoints.)
    rfc4180 boolean
    For an S3 source, whether each leading double quotation mark has to be followed by an ending double quotation mark. Default is true.
    rowGroupLength number
    Number of rows in a row group. (AWS default is 10000.)
    serverSideEncryptionKmsKeyId string
    When encryption_mode is SSE_KMS, ARN for the AWS KMS key. (Ignored for source endpoints -- only SSE_S3 encryption_mode is valid.)
    sslMode string
    SSL mode to use for the connection. Valid values are none, require, verify-ca, verify-full. (AWS default is none.)
    tags {[key: string]: string}
    Map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
    timestampColumnName string
    Column to add with timestamp information to the endpoint data for an Amazon S3 target.
    useCsvNoSupValue boolean
    Whether to use csv_no_sup_value for columns not included in the supplemental log. (Ignored for source endpoints.)
    useTaskStartTimeForFullLoadTimestamp boolean
    When set to true, DMS uses the task start time as the timestamp column value instead of the time the data is written to the target. For full load, when set to true, each row of the timestamp column contains the task start time. For CDC loads, each row of the timestamp column contains the transaction commit time. When set to false, the full load timestamp in the timestamp column increments with the time the data arrives at the target. Default is false.
    bucket_name str
    S3 bucket name.
    endpoint_id str
    Database endpoint identifier. Identifiers must contain from 1 to 255 alphanumeric characters or hyphens, begin with a letter, contain only ASCII letters, digits, and hyphens, not end with a hyphen, and not contain two consecutive hyphens.
    endpoint_type str
    Type of endpoint. Valid values are source, target.
    service_access_role_arn str

    ARN of the IAM role with permissions to the S3 Bucket.

    The following arguments are optional:

    add_column_name bool
    Whether to add column name information to the .csv output file. Default is false.
    add_trailing_padding_character bool
    Whether to add padding. Default is false. (Ignored for source endpoints.)
    bucket_folder str
    S3 object prefix.
    canned_acl_for_objects str
    Predefined (canned) access control list for objects created in an S3 bucket. Valid values include none, private, public-read, public-read-write, authenticated-read, aws-exec-read, bucket-owner-read, and bucket-owner-full-control. Default is none.
    cdc_inserts_and_updates bool
    Whether to write insert and update operations to .csv or .parquet output files. Default is false.
    cdc_inserts_only bool
    Whether to write insert operations to .csv or .parquet output files. Default is false.
    cdc_max_batch_interval int
    Maximum length of the interval, defined in seconds, after which to output a file to Amazon S3. (AWS default is 60.)
    cdc_min_file_size int
    Minimum file size condition as defined in kilobytes to output a file to Amazon S3. (AWS default is 32000 KB.)
    cdc_path str
    Folder path of CDC files. If cdc_path is set, AWS DMS reads CDC files from this path and replicates the data changes to the target endpoint. Supported in AWS DMS versions 3.4.2 and later.
    certificate_arn str
    ARN for the certificate.
    compression_type str
    Set to compress target files. Valid values are GZIP and NONE. Default is NONE. (Ignored for source endpoints.)
    csv_delimiter str
    Delimiter used to separate columns in the source files. Default is ,.
    csv_no_sup_value str
    Only applies if output files for a CDC load are written in .csv format. If use_csv_no_sup_value is set to true, string to use for all columns not included in the supplemental log. If you do not specify a string value, DMS uses the null value for these columns regardless of use_csv_no_sup_value. (Ignored for source endpoints.)
    csv_null_value str
    String to use as null when writing to the target. (AWS default is NULL.)
    csv_row_delimiter str
    Delimiter used to separate rows in the source files. Default is newline (i.e., \n).
    data_format str
    Output format for the files that AWS DMS uses to create S3 objects. Valid values are csv and parquet. (Ignored for source endpoints -- only csv is valid.)
    data_page_size int
    Size of one data page in bytes. (AWS default is 1 MiB, i.e., 1048576.)
    date_partition_delimiter str
    Date separating delimiter to use during folder partitioning. Valid values are SLASH, UNDERSCORE, DASH, and NONE. (AWS default is SLASH.) (Ignored for source endpoints.)
    date_partition_enabled bool
    Partition S3 bucket folders based on transaction commit dates. Default is false. (Ignored for source endpoints.)
    date_partition_sequence str
    Date format to use during folder partitioning. Use this parameter when date_partition_enabled is set to true. Valid values are YYYYMMDD, YYYYMMDDHH, YYYYMM, MMYYYYDD, and DDMMYYYY. (AWS default is YYYYMMDD.) (Ignored for source endpoints.)
    date_partition_timezone str
    Convert the current UTC time to a timezone. The conversion occurs when a date partition folder is created and a CDC filename is generated. The timezone format is Area/Location (e.g., Europe/Paris). Use this when date_partition_enabled is true. (Ignored for source endpoints.)
    detach_target_on_lob_lookup_failure_parquet bool
    Undocumented argument for use as directed by AWS Support.
    dict_page_size_limit int
    Maximum size in bytes of an encoded dictionary page of a column. (AWS default is 1 MiB, i.e., 1048576.)
    enable_statistics bool
    Whether to enable statistics for Parquet pages and row groups. Default is true.
    encoding_type str
    Type of encoding to use. Valid values are rle_dictionary, plain, and plain_dictionary. (AWS default is rle_dictionary.)
    encryption_mode str
    Server-side encryption mode that you want to encrypt your .csv or .parquet object files copied to S3. Valid values are SSE_S3 and SSE_KMS. (AWS default is SSE_S3.) (Ignored for source endpoints -- only SSE_S3 is valid.)
    expected_bucket_owner str
    Bucket owner to prevent sniping. Value is an AWS account ID.
    external_table_definition str
    JSON document that describes how AWS DMS should interpret the data.
    glue_catalog_generation bool
    Whether to integrate AWS Glue Data Catalog with an Amazon S3 target. See Using AWS Glue Data Catalog with an Amazon S3 target for AWS DMS for more information. Default is false.
    ignore_header_rows int
    When this value is set to 1, DMS ignores the first row header in a .csv file. (AWS default is 0.)
    include_op_for_full_load bool
    Whether to enable a full load to write INSERT operations to the .csv output files only to indicate how the rows were added to the source database. Default is false.
    kms_key_arn str
    ARN for the KMS key that will be used to encrypt the connection parameters. If you do not specify a value for kms_key_arn, then AWS DMS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region.
    max_file_size int
    Maximum size (in KB) of any .csv file to be created while migrating to an S3 target during full load. Valid values are from 1 to 1048576. (AWS default is 1 GB, i.e., 1048576.)
    parquet_timestamp_in_millisecond bool
    Specifies the precision of any TIMESTAMP column values written to an S3 object file in .parquet format. Default is false. (Ignored for source endpoints.)
    parquet_version str
    Version of the .parquet file format. Valid values are parquet-1-0 and parquet-2-0. (AWS default is parquet-1-0.) (Ignored for source endpoints.)
    preserve_transactions bool
    Whether DMS saves the transaction order for a CDC load on the S3 target specified by cdc_path. Default is false. (Ignored for source endpoints.)
    rfc4180 bool
    For an S3 source, whether each leading double quotation mark has to be followed by an ending double quotation mark. Default is true.
    row_group_length int
    Number of rows in a row group. (AWS default is 10000.)
    server_side_encryption_kms_key_id str
    When encryption_mode is SSE_KMS, ARN for the AWS KMS key. (Ignored for source endpoints -- only SSE_S3 encryption_mode is valid.)
    ssl_mode str
    SSL mode to use for the connection. Valid values are none, require, verify-ca, verify-full. (AWS default is none.)
    tags Mapping[str, str]
    Map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
    timestamp_column_name str
    Column to add with timestamp information to the endpoint data for an Amazon S3 target.
    use_csv_no_sup_value bool
    Whether to use csv_no_sup_value for columns not included in the supplemental log. (Ignored for source endpoints.)
    use_task_start_time_for_full_load_timestamp bool
    When set to true, DMS uses the task start time as the timestamp column value instead of the time the data is written to the target. For full load, when set to true, each row of the timestamp column contains the task start time. For CDC loads, each row of the timestamp column contains the transaction commit time. When set to false, the full load timestamp in the timestamp column increments with the time the data arrives at the target. Default is false.
    bucketName String
    S3 bucket name.
    endpointId String
    Database endpoint identifier. Identifiers must contain from 1 to 255 alphanumeric characters or hyphens, begin with a letter, contain only ASCII letters, digits, and hyphens, not end with a hyphen, and not contain two consecutive hyphens.
    endpointType String
    Type of endpoint. Valid values are source, target.
    serviceAccessRoleArn String

    ARN of the IAM role with permissions to the S3 Bucket.

    The following arguments are optional:

    addColumnName Boolean
    Whether to add column name information to the .csv output file. Default is false.
    addTrailingPaddingCharacter Boolean
    Whether to add padding. Default is false. (Ignored for source endpoints.)
    bucketFolder String
    S3 object prefix.
    cannedAclForObjects String
    Predefined (canned) access control list for objects created in an S3 bucket. Valid values include none, private, public-read, public-read-write, authenticated-read, aws-exec-read, bucket-owner-read, and bucket-owner-full-control. Default is none.
    cdcInsertsAndUpdates Boolean
    Whether to write insert and update operations to .csv or .parquet output files. Default is false.
    cdcInsertsOnly Boolean
    Whether to write insert operations to .csv or .parquet output files. Default is false.
    cdcMaxBatchInterval Number
    Maximum length of the interval, defined in seconds, after which to output a file to Amazon S3. (AWS default is 60.)
    cdcMinFileSize Number
    Minimum file size condition as defined in kilobytes to output a file to Amazon S3. (AWS default is 32000 KB.)
    cdcPath String
    Folder path of CDC files. If cdc_path is set, AWS DMS reads CDC files from this path and replicates the data changes to the target endpoint. Supported in AWS DMS versions 3.4.2 and later.
    certificateArn String
    ARN for the certificate.
    compressionType String
    Set to compress target files. Valid values are GZIP and NONE. Default is NONE. (Ignored for source endpoints.)
    csvDelimiter String
    Delimiter used to separate columns in the source files. Default is ,.
    csvNoSupValue String
    Only applies if output files for a CDC load are written in .csv format. If use_csv_no_sup_value is set to true, string to use for all columns not included in the supplemental log. If you do not specify a string value, DMS uses the null value for these columns regardless of use_csv_no_sup_value. (Ignored for source endpoints.)
    csvNullValue String
    String to use as null when writing to the target. (AWS default is NULL.)
    csvRowDelimiter String
    Delimiter used to separate rows in the source files. Default is newline (i.e., \n).
    dataFormat String
    Output format for the files that AWS DMS uses to create S3 objects. Valid values are csv and parquet. (Ignored for source endpoints -- only csv is valid.)
    dataPageSize Number
    Size of one data page in bytes. (AWS default is 1 MiB, i.e., 1048576.)
    datePartitionDelimiter String
    Date separating delimiter to use during folder partitioning. Valid values are SLASH, UNDERSCORE, DASH, and NONE. (AWS default is SLASH.) (Ignored for source endpoints.)
    datePartitionEnabled Boolean
    Partition S3 bucket folders based on transaction commit dates. Default is false. (Ignored for source endpoints.)
    datePartitionSequence String
    Date format to use during folder partitioning. Use this parameter when date_partition_enabled is set to true. Valid values are YYYYMMDD, YYYYMMDDHH, YYYYMM, MMYYYYDD, and DDMMYYYY. (AWS default is YYYYMMDD.) (Ignored for source endpoints.)
    datePartitionTimezone String
    Convert the current UTC time to a timezone. The conversion occurs when a date partition folder is created and a CDC filename is generated. The timezone format is Area/Location (e.g., Europe/Paris). Use this when date_partition_enabled is true. (Ignored for source endpoints.)
    detachTargetOnLobLookupFailureParquet Boolean
    Undocumented argument for use as directed by AWS Support.
    dictPageSizeLimit Number
    Maximum size in bytes of an encoded dictionary page of a column. (AWS default is 1 MiB, i.e., 1048576.)
    enableStatistics Boolean
    Whether to enable statistics for Parquet pages and row groups. Default is true.
    encodingType String
    Type of encoding to use. Valid values are rle_dictionary, plain, and plain_dictionary. (AWS default is rle_dictionary.)
    encryptionMode String
    Server-side encryption mode that you want to encrypt your .csv or .parquet object files copied to S3. Valid values are SSE_S3 and SSE_KMS. (AWS default is SSE_S3.) (Ignored for source endpoints -- only SSE_S3 is valid.)
    expectedBucketOwner String
    Bucket owner to prevent sniping. Value is an AWS account ID.
    externalTableDefinition String
    JSON document that describes how AWS DMS should interpret the data.
    glueCatalogGeneration Boolean
    Whether to integrate AWS Glue Data Catalog with an Amazon S3 target. See Using AWS Glue Data Catalog with an Amazon S3 target for AWS DMS for more information. Default is false.
    ignoreHeaderRows Number
    When this value is set to 1, DMS ignores the first row header in a .csv file. (AWS default is 0.)
    includeOpForFullLoad Boolean
    Whether to enable a full load to write INSERT operations to the .csv output files only to indicate how the rows were added to the source database. Default is false.
    kmsKeyArn String
    ARN for the KMS key that will be used to encrypt the connection parameters. If you do not specify a value for kms_key_arn, then AWS DMS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region.
    maxFileSize Number
    Maximum size (in KB) of any .csv file to be created while migrating to an S3 target during full load. Valid values are from 1 to 1048576. (AWS default is 1 GB, i.e., 1048576.)
    parquetTimestampInMillisecond Boolean
    Specifies the precision of any TIMESTAMP column values written to an S3 object file in .parquet format. Default is false. (Ignored for source endpoints.)
    parquetVersion String
    Version of the .parquet file format. Valid values are parquet-1-0 and parquet-2-0. (AWS default is parquet-1-0.) (Ignored for source endpoints.)
    preserveTransactions Boolean
    Whether DMS saves the transaction order for a CDC load on the S3 target specified by cdc_path. Default is false. (Ignored for source endpoints.)
    rfc4180 Boolean
    For an S3 source, whether each leading double quotation mark has to be followed by an ending double quotation mark. Default is true.
    rowGroupLength Number
    Number of rows in a row group. (AWS default is 10000.)
    serverSideEncryptionKmsKeyId String
    When encryption_mode is SSE_KMS, ARN for the AWS KMS key. (Ignored for source endpoints -- only SSE_S3 encryption_mode is valid.)
    sslMode String
    SSL mode to use for the connection. Valid values are none, require, verify-ca, verify-full. (AWS default is none.)
    tags Map<String>
    Map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
    timestampColumnName String
    Column to add with timestamp information to the endpoint data for an Amazon S3 target.
    useCsvNoSupValue Boolean
    Whether to use csv_no_sup_value for columns not included in the supplemental log. (Ignored for source endpoints.)
    useTaskStartTimeForFullLoadTimestamp Boolean
    When set to true, DMS uses the task start time as the timestamp column value instead of the time the data is written to the target. For full load, when set to true, each row of the timestamp column contains the task start time. For CDC loads, each row of the timestamp column contains the transaction commit time. When set to false, the full load timestamp in the timestamp column increments with the time the data arrives at the target. Default is false.
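
    For example, a target endpoint that writes partitioned, KMS-encrypted Parquet output combines several of the optional arguments above. The following TypeScript snippet is an illustrative sketch only; the bucket name, role ARN, and KMS key ARN are placeholders to replace with values from your own stack.

    import * as aws from "@pulumi/aws";

    // Placeholder ARNs -- substitute resources from your own deployment.
    const serviceRoleArn = "arn:aws:iam::123456789012:role/dms-s3-access";
    const kmsKeyArn = "arn:aws:kms:us-east-1:123456789012:key/11111111-2222-3333-4444-555555555555";

    const parquetTarget = new aws.dms.S3Endpoint("parquetTarget", {
        endpointId: "parquet-target",
        endpointType: "target",
        bucketName: "example-dms-target-bucket", // placeholder bucket
        bucketFolder: "cdc",                     // S3 object prefix
        serviceAccessRoleArn: serviceRoleArn,

        // Write Parquet instead of the default CSV output.
        dataFormat: "parquet",
        parquetVersion: "parquet-2-0",
        enableStatistics: true,

        // Partition output folders by transaction commit date (e.g., .../2024/12/06/).
        datePartitionEnabled: true,
        datePartitionSequence: "YYYYMMDD",
        datePartitionDelimiter: "SLASH",

        // Encrypt objects with a customer-managed KMS key.
        encryptionMode: "SSE_KMS",
        serverSideEncryptionKmsKeyId: kmsKeyArn,

        // Output a CDC file after 120 seconds or once 64,000 KB accumulates, whichever comes first.
        cdcMaxBatchInterval: 120,
        cdcMinFileSize: 64000,
    });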

    Outputs

    All input properties are implicitly available as output properties. Additionally, the S3Endpoint resource produces the following output properties:

    EndpointArn string
    ARN for the endpoint.
    EngineDisplayName string
    Expanded name for the engine name.
    ExternalId string
    Can be used for cross-account validation. Use it in another account with aws.dms.S3Endpoint to create the endpoint cross-account.
    Id string
    The provider-assigned unique ID for this managed resource.
    Status string
    Status of the endpoint.
    TagsAll Dictionary<string, string>
    Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

    Deprecated: Please use tags instead.

    EndpointArn string
    ARN for the endpoint.
    EngineDisplayName string
    Expanded name for the engine name.
    ExternalId string
    Can be used for cross-account validation. Use it in another account with aws.dms.S3Endpoint to create the endpoint cross-account.
    Id string
    The provider-assigned unique ID for this managed resource.
    Status string
    Status of the endpoint.
    TagsAll map[string]string
    Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

    Deprecated: Please use tags instead.

    endpointArn String
    ARN for the endpoint.
    engineDisplayName String
    Expanded name for the engine name.
    externalId String
    Can be used for cross-account validation. Use it in another account with aws.dms.S3Endpoint to create the endpoint cross-account.
    id String
    The provider-assigned unique ID for this managed resource.
    status String
    Status of the endpoint.
    tagsAll Map<String,String>
    Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

    Deprecated: Please use tags instead.

    endpointArn string
    ARN for the endpoint.
    engineDisplayName string
    Expanded name for the engine name.
    externalId string
    Can be used for cross-account validation. Use it in another account with aws.dms.S3Endpoint to create the endpoint cross-account.
    id string
    The provider-assigned unique ID for this managed resource.
    status string
    Status of the endpoint.
    tagsAll {[key: string]: string}
    Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

    Deprecated: Please use tags instead.

    endpoint_arn str
    ARN for the endpoint.
    engine_display_name str
    Expanded name for the engine name.
    external_id str
    Can be used for cross-account validation. Use it in another account with aws.dms.S3Endpoint to create the endpoint cross-account.
    id str
    The provider-assigned unique ID for this managed resource.
    status str
    Status of the endpoint.
    tags_all Mapping[str, str]
    Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

    Deprecated: Please use tags instead.

    endpointArn String
    ARN for the endpoint.
    engineDisplayName String
    Expanded name for the engine name.
    externalId String
    Can be used for cross-account validation. Use it in another account with aws.dms.S3Endpoint to create the endpoint cross-account.
    id String
    The provider-assigned unique ID for this managed resource.
    status String
    Status of the endpoint.
    tagsAll Map<String>
    Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

    Deprecated: Please use tags instead.
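
    In TypeScript, for instance, these outputs can be exported from the stack or passed to other resources once the endpoint is created. This is a minimal sketch; the endpoint identifier, bucket name, and role ARN below are placeholders.

    import * as aws from "@pulumi/aws";

    const target = new aws.dms.S3Endpoint("target", {
        endpointId: "orders-target",
        endpointType: "target",
        bucketName: "example-dms-target-bucket",                              // placeholder
        serviceAccessRoleArn: "arn:aws:iam::123456789012:role/dms-s3-access", // placeholder
    });

    // endpointArn and status are output properties produced by the resource.
    export const targetEndpointArn = target.endpointArn;
    export const targetEndpointStatus = target.status;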

    Look up Existing S3Endpoint Resource

    Get an existing S3Endpoint resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

    public static get(name: string, id: Input<ID>, state?: S3EndpointState, opts?: CustomResourceOptions): S3Endpoint
    @staticmethod
    def get(resource_name: str,
            id: str,
            opts: Optional[ResourceOptions] = None,
            add_column_name: Optional[bool] = None,
            add_trailing_padding_character: Optional[bool] = None,
            bucket_folder: Optional[str] = None,
            bucket_name: Optional[str] = None,
            canned_acl_for_objects: Optional[str] = None,
            cdc_inserts_and_updates: Optional[bool] = None,
            cdc_inserts_only: Optional[bool] = None,
            cdc_max_batch_interval: Optional[int] = None,
            cdc_min_file_size: Optional[int] = None,
            cdc_path: Optional[str] = None,
            certificate_arn: Optional[str] = None,
            compression_type: Optional[str] = None,
            csv_delimiter: Optional[str] = None,
            csv_no_sup_value: Optional[str] = None,
            csv_null_value: Optional[str] = None,
            csv_row_delimiter: Optional[str] = None,
            data_format: Optional[str] = None,
            data_page_size: Optional[int] = None,
            date_partition_delimiter: Optional[str] = None,
            date_partition_enabled: Optional[bool] = None,
            date_partition_sequence: Optional[str] = None,
            date_partition_timezone: Optional[str] = None,
            detach_target_on_lob_lookup_failure_parquet: Optional[bool] = None,
            dict_page_size_limit: Optional[int] = None,
            enable_statistics: Optional[bool] = None,
            encoding_type: Optional[str] = None,
            encryption_mode: Optional[str] = None,
            endpoint_arn: Optional[str] = None,
            endpoint_id: Optional[str] = None,
            endpoint_type: Optional[str] = None,
            engine_display_name: Optional[str] = None,
            expected_bucket_owner: Optional[str] = None,
            external_id: Optional[str] = None,
            external_table_definition: Optional[str] = None,
            glue_catalog_generation: Optional[bool] = None,
            ignore_header_rows: Optional[int] = None,
            include_op_for_full_load: Optional[bool] = None,
            kms_key_arn: Optional[str] = None,
            max_file_size: Optional[int] = None,
            parquet_timestamp_in_millisecond: Optional[bool] = None,
            parquet_version: Optional[str] = None,
            preserve_transactions: Optional[bool] = None,
            rfc4180: Optional[bool] = None,
            row_group_length: Optional[int] = None,
            server_side_encryption_kms_key_id: Optional[str] = None,
            service_access_role_arn: Optional[str] = None,
            ssl_mode: Optional[str] = None,
            status: Optional[str] = None,
            tags: Optional[Mapping[str, str]] = None,
            tags_all: Optional[Mapping[str, str]] = None,
            timestamp_column_name: Optional[str] = None,
            use_csv_no_sup_value: Optional[bool] = None,
            use_task_start_time_for_full_load_timestamp: Optional[bool] = None) -> S3Endpoint
    func GetS3Endpoint(ctx *Context, name string, id IDInput, state *S3EndpointState, opts ...ResourceOption) (*S3Endpoint, error)
    public static S3Endpoint Get(string name, Input<string> id, S3EndpointState? state, CustomResourceOptions? opts = null)
    public static S3Endpoint get(String name, Output<String> id, S3EndpointState state, CustomResourceOptions options)
    Resource lookup is not supported in YAML
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    resource_name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
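
    As a sketch of this lookup in TypeScript: the resource name and ID below are placeholders, and the ID is assumed to be the endpoint identifier used when the endpoint was created.

    import * as aws from "@pulumi/aws";

    // Look up an endpoint already tracked in this stack's state. The name and ID
    // are placeholders; the ID is assumed to be the endpoint identifier (endpoint_id).
    const existing = aws.dms.S3Endpoint.get("existing", "example-dms-endpoint-id");

    // The looked-up resource exposes the same output properties as a newly created one.
    export const existingEndpointArn = existing.endpointArn;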
    The following state arguments are supported:
    AddColumnName bool
    Whether to add column name information to the .csv output file. Default is false.
    AddTrailingPaddingCharacter bool
    Whether to add padding. Default is false. (Ignored for source endpoints.)
    BucketFolder string
    S3 object prefix.
    BucketName string
    S3 bucket name.
    CannedAclForObjects string
    Predefined (canned) access control list for objects created in an S3 bucket. Valid values include none, private, public-read, public-read-write, authenticated-read, aws-exec-read, bucket-owner-read, and bucket-owner-full-control. Default is none.
    CdcInsertsAndUpdates bool
    Whether to write insert and update operations to .csv or .parquet output files. Default is false.
    CdcInsertsOnly bool
    Whether to write insert operations to .csv or .parquet output files. Default is false.
    CdcMaxBatchInterval int
    Maximum length of the interval, defined in seconds, after which to output a file to Amazon S3. (AWS default is 60.)
    CdcMinFileSize int
    Minimum file size condition as defined in kilobytes to output a file to Amazon S3. (AWS default is 32000 KB.)
    CdcPath string
    Folder path of CDC files. If cdc_path is set, AWS DMS reads CDC files from this path and replicates the data changes to the target endpoint. Supported in AWS DMS versions 3.4.2 and later.
    CertificateArn string
    ARN for the certificate.
    CompressionType string
    Set to compress target files. Valid values are GZIP and NONE. Default is NONE. (Ignored for source endpoints.)
    CsvDelimiter string
    Delimiter used to separate columns in the source files. Default is ,.
    CsvNoSupValue string
    Only applies if output files for a CDC load are written in .csv format. If use_csv_no_sup_value is set to true, string to use for all columns not included in the supplemental log. If you do not specify a string value, DMS uses the null value for these columns regardless of use_csv_no_sup_value. (Ignored for source endpoints.)
    CsvNullValue string
    String to use as null when writing to the target. (AWS default is NULL.)
    CsvRowDelimiter string
    Delimiter used to separate rows in the source files. Default is newline (i.e., \n).
    DataFormat string
    Output format for the files that AWS DMS uses to create S3 objects. Valid values are csv and parquet. (Ignored for source endpoints -- only csv is valid.)
    DataPageSize int
    Size of one data page in bytes. (AWS default is 1 MiB, i.e., 1048576.)
    DatePartitionDelimiter string
    Date separating delimiter to use during folder partitioning. Valid values are SLASH, UNDERSCORE, DASH, and NONE. (AWS default is SLASH.) (Ignored for source endpoints.)
    DatePartitionEnabled bool
    Partition S3 bucket folders based on transaction commit dates. Default is false. (Ignored for source endpoints.)
    DatePartitionSequence string
    Date format to use during folder partitioning. Use this parameter when date_partition_enabled is set to true. Valid values are YYYYMMDD, YYYYMMDDHH, YYYYMM, MMYYYYDD, and DDMMYYYY. (AWS default is YYYYMMDD.) (Ignored for source endpoints.)
    DatePartitionTimezone string
    Convert the current UTC time to a timezone. The conversion occurs when a date partition folder is created and a CDC filename is generated. The timezone format is Area/Location (e.g., Europe/Paris). Use this when date_partition_enabled is true. (Ignored for source endpoints.)
    DetachTargetOnLobLookupFailureParquet bool
    Undocumented argument for use as directed by AWS Support.
    DictPageSizeLimit int
    Maximum size in bytes of an encoded dictionary page of a column. (AWS default is 1 MiB, i.e., 1048576.)
    EnableStatistics bool
    Whether to enable statistics for Parquet pages and row groups. Default is true.
    EncodingType string
    Type of encoding to use. Valid values are rle_dictionary, plain, and plain_dictionary. (AWS default is rle_dictionary.)
    EncryptionMode string
    Server-side encryption mode that you want to encrypt your .csv or .parquet object files copied to S3. Valid values are SSE_S3 and SSE_KMS. (AWS default is SSE_S3.) (Ignored for source endpoints -- only SSE_S3 is valid.)
    EndpointArn string
    ARN for the endpoint.
    EndpointId string
    Database endpoint identifier. Identifiers must contain from 1 to 255 alphanumeric characters or hyphens, begin with a letter, contain only ASCII letters, digits, and hyphens, not end with a hyphen, and not contain two consecutive hyphens.
    EndpointType string
    Type of endpoint. Valid values are source, target.
    EngineDisplayName string
    Expanded name for the engine name.
    ExpectedBucketOwner string
    Bucket owner to prevent sniping. Value is an AWS account ID.
    ExternalId string
    Can be used for cross-account validation. Use it in another account with aws.dms.S3Endpoint to create the endpoint cross-account.
    ExternalTableDefinition string
    JSON document that describes how AWS DMS should interpret the data.
    GlueCatalogGeneration bool
    Whether to integrate AWS Glue Data Catalog with an Amazon S3 target. See Using AWS Glue Data Catalog with an Amazon S3 target for AWS DMS for more information. Default is false.
    IgnoreHeaderRows int
    When this value is set to 1, DMS ignores the first row header in a .csv file. (AWS default is 0.)
    IncludeOpForFullLoad bool
    Whether to enable a full load to write INSERT operations to the .csv output files only to indicate how the rows were added to the source database. Default is false.
    KmsKeyArn string
    ARN for the KMS key that will be used to encrypt the connection parameters. If you do not specify a value for kms_key_arn, then AWS DMS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region.
    MaxFileSize int
    Maximum size (in KB) of any .csv file to be created while migrating to an S3 target during full load. Valid values are from 1 to 1048576. (AWS default is 1 GB, i.e., 1048576.)
    ParquetTimestampInMillisecond bool
    Specifies the precision of any TIMESTAMP column values written to an S3 object file in .parquet format. Default is false. (Ignored for source endpoints.)
    ParquetVersion string
    Version of the .parquet file format. Valid values are parquet-1-0 and parquet-2-0. (AWS default is parquet-1-0.) (Ignored for source endpoints.)
    PreserveTransactions bool
    Whether DMS saves the transaction order for a CDC load on the S3 target specified by cdc_path. Default is false. (Ignored for source endpoints.)
    Rfc4180 bool
    For an S3 source, whether each leading double quotation mark has to be followed by an ending double quotation mark. Default is true.
    RowGroupLength int
    Number of rows in a row group. (AWS default is 10000.)
    ServerSideEncryptionKmsKeyId string
    When encryption_mode is SSE_KMS, ARN for the AWS KMS key. (Ignored for source endpoints -- only SSE_S3 encryption_mode is valid.)
    ServiceAccessRoleArn string

    ARN of the IAM role with permissions to the S3 Bucket.

    The following arguments are optional:

    SslMode string
    SSL mode to use for the connection. Valid values are none, require, verify-ca, verify-full. (AWS default is none.)
    Status string
    Status of the endpoint.
    Tags Dictionary<string, string>
    Map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
    TagsAll Dictionary<string, string>
    Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

    Deprecated: Please use tags instead.

    TimestampColumnName string
    Column to add with timestamp information to the endpoint data for an Amazon S3 target.
    UseCsvNoSupValue bool
    Whether to use csv_no_sup_value for columns not included in the supplemental log. (Ignored for source endpoints.)
    UseTaskStartTimeForFullLoadTimestamp bool
    When set to true, DMS uses the task start time as the timestamp column value instead of the time the data is written to the target. For full load, when set to true, each row of the timestamp column contains the task start time. For CDC loads, each row of the timestamp column contains the transaction commit time. When set to false, the full load timestamp in the timestamp column increments with the time the data arrives at the target. Default is false.
    AddColumnName bool
    Whether to add column name information to the .csv output file. Default is false.
    AddTrailingPaddingCharacter bool
    Whether to add padding. Default is false. (Ignored for source endpoints.)
    BucketFolder string
    S3 object prefix.
    BucketName string
    S3 bucket name.
    CannedAclForObjects string
    Predefined (canned) access control list for objects created in an S3 bucket. Valid values include none, private, public-read, public-read-write, authenticated-read, aws-exec-read, bucket-owner-read, and bucket-owner-full-control. Default is none.
    CdcInsertsAndUpdates bool
    Whether to write insert and update operations to .csv or .parquet output files. Default is false.
    CdcInsertsOnly bool
    Whether to write insert operations to .csv or .parquet output files. Default is false.
    CdcMaxBatchInterval int
    Maximum length of the interval, defined in seconds, after which to output a file to Amazon S3. (AWS default is 60.)
    CdcMinFileSize int
    Minimum file size condition as defined in kilobytes to output a file to Amazon S3. (AWS default is 32000 KB.)
    CdcPath string
    Folder path of CDC files. If cdc_path is set, AWS DMS reads CDC files from this path and replicates the data changes to the target endpoint. Supported in AWS DMS versions 3.4.2 and later.
    CertificateArn string
    ARN for the certificate.
    CompressionType string
    Set to compress target files. Valid values are GZIP and NONE. Default is NONE. (Ignored for source endpoints.)
    CsvDelimiter string
    Delimiter used to separate columns in the source files. Default is ,.
    CsvNoSupValue string
    Only applies if output files for a CDC load are written in .csv format. If use_csv_no_sup_value is set to true, string to use for all columns not included in the supplemental log. If you do not specify a string value, DMS uses the null value for these columns regardless of use_csv_no_sup_value. (Ignored for source endpoints.)
    CsvNullValue string
    String to use as null when writing to the target. (AWS default is NULL.)
    CsvRowDelimiter string
    Delimiter used to separate rows in the source files. Default is newline (i.e., \n).
    DataFormat string
    Output format for the files that AWS DMS uses to create S3 objects. Valid values are csv and parquet. (Ignored for source endpoints -- only csv is valid.)
    DataPageSize int
    Size of one data page in bytes. (AWS default is 1 MiB, i.e., 1048576.)
    DatePartitionDelimiter string
    Date separating delimiter to use during folder partitioning. Valid values are SLASH, UNDERSCORE, DASH, and NONE. (AWS default is SLASH.) (Ignored for source endpoints.)
    DatePartitionEnabled bool
    Partition S3 bucket folders based on transaction commit dates. Default is false. (Ignored for source endpoints.)
    DatePartitionSequence string
    Date format to use during folder partitioning. Use this parameter when date_partition_enabled is set to true. Valid values are YYYYMMDD, YYYYMMDDHH, YYYYMM, MMYYYYDD, and DDMMYYYY. (AWS default is YYYYMMDD.) (Ignored for source endpoints.)
    DatePartitionTimezone string
    Convert the current UTC time to a timezone. The conversion occurs when a date partition folder is created and a CDC filename is generated. The timezone format is Area/Location (e.g., Europe/Paris). Use this when date_partition_enabled is true. (Ignored for source endpoints.)
    DetachTargetOnLobLookupFailureParquet bool
    Undocumented argument for use as directed by AWS Support.
    DictPageSizeLimit int
    Maximum size in bytes of an encoded dictionary page of a column. (AWS default is 1 MiB, i.e., 1048576.)
    EnableStatistics bool
    Whether to enable statistics for Parquet pages and row groups. Default is true.
    EncodingType string
    Type of encoding to use. Valid values are rle_dictionary, plain, and plain_dictionary. (AWS default is rle_dictionary.)
    EncryptionMode string
    Server-side encryption mode that you want to encrypt your .csv or .parquet object files copied to S3. Valid values are SSE_S3 and SSE_KMS. (AWS default is SSE_S3.) (Ignored for source endpoints -- only SSE_S3 is valid.)
    EndpointArn string
    ARN for the endpoint.
    EndpointId string
    Database endpoint identifier. Identifiers must contain from 1 to 255 alphanumeric characters or hyphens, begin with a letter, contain only ASCII letters, digits, and hyphens, not end with a hyphen, and not contain two consecutive hyphens.
    EndpointType string
    Type of endpoint. Valid values are source, target.
    EngineDisplayName string
    Expanded name for the engine name.
    ExpectedBucketOwner string
    Bucket owner to prevent sniping. Value is an AWS account ID.
    ExternalId string
    Can be used for cross-account validation. Use it in another account with aws.dms.S3Endpoint to create the endpoint cross-account.
    ExternalTableDefinition string
    JSON document that describes how AWS DMS should interpret the data.
    GlueCatalogGeneration bool
    Whether to integrate AWS Glue Data Catalog with an Amazon S3 target. See Using AWS Glue Data Catalog with an Amazon S3 target for AWS DMS for more information. Default is false.
    IgnoreHeaderRows int
    When this value is set to 1, DMS ignores the first row header in a .csv file. (AWS default is 0.)
    IncludeOpForFullLoad bool
    Whether to enable a full load to write INSERT operations to the .csv output files only to indicate how the rows were added to the source database. Default is false.
    KmsKeyArn string
    ARN for the KMS key that will be used to encrypt the connection parameters. If you do not specify a value for kms_key_arn, then AWS DMS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region.
    MaxFileSize int
    Maximum size (in KB) of any .csv file to be created while migrating to an S3 target during full load. Valid values are from 1 to 1048576. (AWS default is 1 GB, i.e., 1048576.)
    ParquetTimestampInMillisecond bool
    Specifies the precision of any TIMESTAMP column values written to an S3 object file in .parquet format. Default is false. (Ignored for source endpoints.)
    ParquetVersion string
    Version of the .parquet file format. Valid values are parquet-1-0 and parquet-2-0. (AWS default is parquet-1-0.) (Ignored for source endpoints.)
    PreserveTransactions bool
    Whether DMS saves the transaction order for a CDC load on the S3 target specified by cdc_path. Default is false. (Ignored for source endpoints.)
    Rfc4180 bool
    For an S3 source, whether each leading double quotation mark has to be followed by an ending double quotation mark. Default is true.
    RowGroupLength int
    Number of rows in a row group. (AWS default is 10000.)
    ServerSideEncryptionKmsKeyId string
    When encryption_mode is SSE_KMS, ARN for the AWS KMS key. (Ignored for source endpoints -- only SSE_S3 encryption_mode is valid.)
    ServiceAccessRoleArn string

    ARN of the IAM role with permissions to the S3 Bucket.

    The following arguments are optional:

    SslMode string
    SSL mode to use for the connection. Valid values are none, require, verify-ca, verify-full. (AWS default is none.)
    Status string
    Status of the endpoint.
    Tags map[string]string
    Map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
    TagsAll map[string]string
    Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

    Deprecated: Please use tags instead.

    TimestampColumnName string
    Column to add with timestamp information to the endpoint data for an Amazon S3 target.
    UseCsvNoSupValue bool
    Whether to use csv_no_sup_value for columns not included in the supplemental log. (Ignored for source endpoints.)
    UseTaskStartTimeForFullLoadTimestamp bool
    When set to true, DMS uses the task start time as the timestamp column value instead of the time the data is written to the target. For full load, when set to true, each row of the timestamp column contains the task start time. For CDC loads, each row of the timestamp column contains the transaction commit time. When set to false, the full load timestamp in the timestamp column increments with the time the data arrives at the target. Default is false.
    addColumnName Boolean
    Whether to add column name information to the .csv output file. Default is false.
    addTrailingPaddingCharacter Boolean
    Whether to add padding. Default is false. (Ignored for source endpoints.)
    bucketFolder String
    S3 object prefix.
    bucketName String
    S3 bucket name.
    cannedAclForObjects String
    Predefined (canned) access control list for objects created in an S3 bucket. Valid values include none, private, public-read, public-read-write, authenticated-read, aws-exec-read, bucket-owner-read, and bucket-owner-full-control. Default is none.
    cdcInsertsAndUpdates Boolean
    Whether to write insert and update operations to .csv or .parquet output files. Default is false.
    cdcInsertsOnly Boolean
    Whether to write insert operations to .csv or .parquet output files. Default is false.
    cdcMaxBatchInterval Integer
    Maximum length of the interval, defined in seconds, after which to output a file to Amazon S3. (AWS default is 60.)
    cdcMinFileSize Integer
    Minimum file size condition as defined in kilobytes to output a file to Amazon S3. (AWS default is 32000 KB.)
    cdcPath String
    Folder path of CDC files. If cdc_path is set, AWS DMS reads CDC files from this path and replicates the data changes to the target endpoint. Supported in AWS DMS versions 3.4.2 and later.
    certificateArn String
    ARN for the certificate.
    compressionType String
    Set to compress target files. Valid values are GZIP and NONE. Default is NONE. (Ignored for source endpoints.)
    csvDelimiter String
    Delimiter used to separate columns in the source files. Default is ,.
    csvNoSupValue String
    Only applies if output files for a CDC load are written in .csv format. If use_csv_no_sup_value is set to true, string to use for all columns not included in the supplemental log. If you do not specify a string value, DMS uses the null value for these columns regardless of use_csv_no_sup_value. (Ignored for source endpoints.)
    csvNullValue String
    String to use as null when writing to the target. (AWS default is NULL.)
    csvRowDelimiter String
    Delimiter used to separate rows in the source files. Default is newline (i.e., \n).
    dataFormat String
    Output format for the files that AWS DMS uses to create S3 objects. Valid values are csv and parquet. (Ignored for source endpoints -- only csv is valid.)
    dataPageSize Integer
    Size of one data page in bytes. (AWS default is 1 MiB, i.e., 1048576.)
    datePartitionDelimiter String
    Date separating delimiter to use during folder partitioning. Valid values are SLASH, UNDERSCORE, DASH, and NONE. (AWS default is SLASH.) (Ignored for source endpoints.)
    datePartitionEnabled Boolean
    Partition S3 bucket folders based on transaction commit dates. Default is false. (Ignored for source endpoints.)
    datePartitionSequence String
    Date format to use during folder partitioning. Use this parameter when date_partition_enabled is set to true. Valid values are YYYYMMDD, YYYYMMDDHH, YYYYMM, MMYYYYDD, and DDMMYYYY. (AWS default is YYYYMMDD.) (Ignored for source endpoints.)
    datePartitionTimezone String
    Convert the current UTC time to a timezone. The conversion occurs when a date partition folder is created and a CDC filename is generated. The timezone format is Area/Location (e.g., Europe/Paris). Use this when date_partition_enabled is true. (Ignored for source endpoints.)
    detachTargetOnLobLookupFailureParquet Boolean
    Undocumented argument for use as directed by AWS Support.
    dictPageSizeLimit Integer
    Maximum size in bytes of an encoded dictionary page of a column. (AWS default is 1 MiB, i.e., 1048576.)
    enableStatistics Boolean
    Whether to enable statistics for Parquet pages and row groups. Default is true.
    encodingType String
    Type of encoding to use. Valid values are rle_dictionary, plain, and plain_dictionary. (AWS default is rle_dictionary.)
    encryptionMode String
    Server-side encryption mode that you want to encrypt your .csv or .parquet object files copied to S3. Valid values are SSE_S3 and SSE_KMS. (AWS default is SSE_S3.) (Ignored for source endpoints -- only SSE_S3 is valid.)
    endpointArn String
    ARN for the endpoint.
    endpointId String
    Database endpoint identifier. Identifiers must contain from 1 to 255 alphanumeric characters or hyphens, begin with a letter, contain only ASCII letters, digits, and hyphens, not end with a hyphen, and not contain two consecutive hyphens.
    endpointType String
    Type of endpoint. Valid values are source, target.
    engineDisplayName String
    Expanded name for the engine name.
    expectedBucketOwner String
    Bucket owner to prevent sniping. Value is an AWS account ID.
    externalId String
    Can be used for cross-account validation. Use it in another account with aws.dms.S3Endpoint to create the endpoint cross-account.
    externalTableDefinition String
    JSON document that describes how AWS DMS should interpret the data.
    glueCatalogGeneration Boolean
    Whether to integrate AWS Glue Data Catalog with an Amazon S3 target. See Using AWS Glue Data Catalog with an Amazon S3 target for AWS DMS for more information. Default is false.
    ignoreHeaderRows Integer
    When this value is set to 1, DMS ignores the first row header in a .csv file. (AWS default is 0.)
    includeOpForFullLoad Boolean
    Whether to enable a full load to write INSERT operations to the .csv output files only to indicate how the rows were added to the source database. Default is false.
    kmsKeyArn String
    ARN for the KMS key that will be used to encrypt the connection parameters. If you do not specify a value for kms_key_arn, then AWS DMS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region.
    maxFileSize Integer
    Maximum size (in KB) of any .csv file to be created while migrating to an S3 target during full load. Valid values are from 1 to 1048576. (AWS default is 1 GB, i.e., 1048576.)
    parquetTimestampInMillisecond Boolean
    Specifies the precision of any TIMESTAMP column values written to an S3 object file in .parquet format. Default is false. (Ignored for source endpoints.)
    parquetVersion String
    Version of the .parquet file format. Valid values are parquet-1-0 and parquet-2-0. (AWS default is parquet-1-0.) (Ignored for source endpoints.)
    preserveTransactions Boolean
    Whether DMS saves the transaction order for a CDC load on the S3 target specified by cdc_path. Default is false. (Ignored for source endpoints.)
    rfc4180 Boolean
    For an S3 source, whether each leading double quotation mark has to be followed by an ending double quotation mark. Default is true.
    rowGroupLength Integer
    Number of rows in a row group. (AWS default is 10000.)
    serverSideEncryptionKmsKeyId String
    When encryption_mode is SSE_KMS, ARN for the AWS KMS key. (Ignored for source endpoints -- only SSE_S3 encryption_mode is valid.)
    serviceAccessRoleArn String
    ARN of the IAM role with permissions to the S3 bucket.
    sslMode String
    SSL mode to use for the connection. Valid values are none, require, verify-ca, verify-full. (AWS default is none.)
    status String
    Status of the endpoint.
    tags Map<String,String>
    Map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
    tagsAll Map<String,String>
    Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

    Deprecated: Please use tags instead.

    timestampColumnName String
    Column to add with timestamp information to the endpoint data for an Amazon S3 target.
    useCsvNoSupValue Boolean
    Whether to use csv_no_sup_value for columns not included in the supplemental log. (Ignored for source endpoints.)
    useTaskStartTimeForFullLoadTimestamp Boolean
    When set to true, uses the task start time as the timestamp column value instead of the time the data is written to the target. For full load, when set to true, each row of the timestamp column contains the task start time. For CDC loads, each row of the timestamp column contains the transaction commit time. When set to false, the full load timestamp in the timestamp column increments with the time the data arrives at the target. Default is false.
    addColumnName boolean
    Whether to add column name information to the .csv output file. Default is false.
    addTrailingPaddingCharacter boolean
    Whether to add padding. Default is false. (Ignored for source endpoints.)
    bucketFolder string
    S3 object prefix.
    bucketName string
    S3 bucket name.
    cannedAclForObjects string
    Predefined (canned) access control list for objects created in an S3 bucket. Valid values include none, private, public-read, public-read-write, authenticated-read, aws-exec-read, bucket-owner-read, and bucket-owner-full-control. Default is none.
    cdcInsertsAndUpdates boolean
    Whether to write insert and update operations to .csv or .parquet output files. Default is false.
    cdcInsertsOnly boolean
    Whether to write insert operations to .csv or .parquet output files. Default is false.
    cdcMaxBatchInterval number
    Maximum length of the interval, defined in seconds, after which to output a file to Amazon S3. (AWS default is 60.)
    cdcMinFileSize number
    Minimum file size condition as defined in kilobytes to output a file to Amazon S3. (AWS default is 32000 KB.)
    cdcPath string
    Folder path of CDC files. If cdc_path is set, AWS DMS reads CDC files from this path and replicates the data changes to the target endpoint. Supported in AWS DMS versions 3.4.2 and later.
    certificateArn string
    ARN for the certificate.
    compressionType string
    Set to compress target files. Valid values are GZIP and NONE. Default is NONE. (Ignored for source endpoints.)
    csvDelimiter string
    Delimiter used to separate columns in the source files. Default is ,.
    csvNoSupValue string
    Only applies if output files for a CDC load are written in .csv format. If use_csv_no_sup_value is set to true, string to use for all columns not included in the supplemental log. If you do not specify a string value, DMS uses the null value for these columns regardless of use_csv_no_sup_value. (Ignored for source endpoints.)
    csvNullValue string
    String to use as null when writing to the target. (AWS default is NULL.)
    csvRowDelimiter string
    Delimiter used to separate rows in the source files. Default is newline (i.e., \n).
    dataFormat string
    Output format for the files that AWS DMS uses to create S3 objects. Valid values are csv and parquet. (Ignored for source endpoints -- only csv is valid.)
    dataPageSize number
    Size of one data page in bytes. (AWS default is 1 MiB, i.e., 1048576.)
    datePartitionDelimiter string
    Date separating delimiter to use during folder partitioning. Valid values are SLASH, UNDERSCORE, DASH, and NONE. (AWS default is SLASH.) (Ignored for source endpoints.)
    datePartitionEnabled boolean
    Partition S3 bucket folders based on transaction commit dates. Default is false. (Ignored for source endpoints.)
    datePartitionSequence string
    Date format to use during folder partitioning. Use this parameter when date_partition_enabled is set to true. Valid values are YYYYMMDD, YYYYMMDDHH, YYYYMM, MMYYYYDD, and DDMMYYYY. (AWS default is YYYYMMDD.) (Ignored for source endpoints.)
    datePartitionTimezone string
    Convert the current UTC time to a timezone. The conversion occurs when a date partition folder is created and a CDC filename is generated. The timezone format is Area/Location (e.g., Europe/Paris). Use this when date_partition_enabled is true. (Ignored for source endpoints.)
    detachTargetOnLobLookupFailureParquet boolean
    Undocumented argument for use as directed by AWS Support.
    dictPageSizeLimit number
    Maximum size in bytes of an encoded dictionary page of a column. (AWS default is 1 MiB, i.e., 1048576.)
    enableStatistics boolean
    Whether to enable statistics for Parquet pages and row groups. Default is true.
    encodingType string
    Type of encoding to use. Valid values are rle_dictionary, plain, and plain_dictionary. (AWS default is rle_dictionary.)
    encryptionMode string
    Server-side encryption mode that you want to encrypt your .csv or .parquet object files copied to S3. Valid values are SSE_S3 and SSE_KMS. (AWS default is SSE_S3.) (Ignored for source endpoints -- only SSE_S3 is valid.)
    endpointArn string
    ARN for the endpoint.
    endpointId string
    Database endpoint identifier. Identifiers must contain from 1 to 255 alphanumeric characters or hyphens, begin with a letter, contain only ASCII letters, digits, and hyphens, not end with a hyphen, and not contain two consecutive hyphens.
    endpointType string
    Type of endpoint. Valid values are source, target.
    engineDisplayName string
    Expanded name for the engine name.
    expectedBucketOwner string
    Bucket owner to prevent sniping. Value is an AWS account ID.
    externalId string
    Can be used for cross-account validation. Use it in another account with aws.dms.S3Endpoint to create the endpoint cross-account.
    externalTableDefinition string
    JSON document that describes how AWS DMS should interpret the data.
    glueCatalogGeneration boolean
    Whether to integrate AWS Glue Data Catalog with an Amazon S3 target. See Using AWS Glue Data Catalog with an Amazon S3 target for AWS DMS for more information. Default is false.
    ignoreHeaderRows number
    When this value is set to 1, DMS ignores the first row header in a .csv file. (AWS default is 0.)
    includeOpForFullLoad boolean
    Whether to enable a full load to write INSERT operations to the .csv output files only to indicate how the rows were added to the source database. Default is false.
    kmsKeyArn string
    ARN for the KMS key that will be used to encrypt the connection parameters. If you do not specify a value for kms_key_arn, then AWS DMS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region.
    maxFileSize number
    Maximum size (in KB) of any .csv file to be created while migrating to an S3 target during full load. Valid values are from 1 to 1048576. (AWS default is 1 GB, i.e., 1048576.)
    parquetTimestampInMillisecond boolean
    Specifies the precision of any TIMESTAMP column values written to an S3 object file in .parquet format. Default is false. (Ignored for source endpoints.)
    parquetVersion string
    Version of the .parquet file format. Valid values are parquet-1-0 and parquet-2-0. (AWS default is parquet-1-0.) (Ignored for source endpoints.)
    preserveTransactions boolean
    Whether DMS saves the transaction order for a CDC load on the S3 target specified by cdc_path. Default is false. (Ignored for source endpoints.)
    rfc4180 boolean
    For an S3 source, whether each leading double quotation mark has to be followed by an ending double quotation mark. Default is true.
    rowGroupLength number
    Number of rows in a row group. (AWS default is 10000.)
    serverSideEncryptionKmsKeyId string
    When encryption_mode is SSE_KMS, ARN for the AWS KMS key. (Ignored for source endpoints -- only SSE_S3 encryption_mode is valid.)
    serviceAccessRoleArn string
    ARN of the IAM role with permissions to the S3 bucket.
    sslMode string
    SSL mode to use for the connection. Valid values are none, require, verify-ca, verify-full. (AWS default is none.)
    status string
    Status of the endpoint.
    tags {[key: string]: string}
    Map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
    tagsAll {[key: string]: string}
    Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

    Deprecated: Please use tags instead.

    timestampColumnName string
    Column to add with timestamp information to the endpoint data for an Amazon S3 target.
    useCsvNoSupValue boolean
    Whether to use csv_no_sup_value for columns not included in the supplemental log. (Ignored for source endpoints.)
    useTaskStartTimeForFullLoadTimestamp boolean
    When set to true, uses the task start time as the timestamp column value instead of the time the data is written to the target. For full load, when set to true, each row of the timestamp column contains the task start time. For CDC loads, each row of the timestamp column contains the transaction commit time. When set to false, the full load timestamp in the timestamp column increments with the time the data arrives at the target. Default is false.
    add_column_name bool
    Whether to add column name information to the .csv output file. Default is false.
    add_trailing_padding_character bool
    Whether to add padding. Default is false. (Ignored for source endpoints.)
    bucket_folder str
    S3 object prefix.
    bucket_name str
    S3 bucket name.
    canned_acl_for_objects str
    Predefined (canned) access control list for objects created in an S3 bucket. Valid values include none, private, public-read, public-read-write, authenticated-read, aws-exec-read, bucket-owner-read, and bucket-owner-full-control. Default is none.
    cdc_inserts_and_updates bool
    Whether to write insert and update operations to .csv or .parquet output files. Default is false.
    cdc_inserts_only bool
    Whether to write insert operations to .csv or .parquet output files. Default is false.
    cdc_max_batch_interval int
    Maximum length of the interval, defined in seconds, after which to output a file to Amazon S3. (AWS default is 60.)
    cdc_min_file_size int
    Minimum file size condition as defined in kilobytes to output a file to Amazon S3. (AWS default is 32000 KB.)
    cdc_path str
    Folder path of CDC files. If cdc_path is set, AWS DMS reads CDC files from this path and replicates the data changes to the target endpoint. Supported in AWS DMS versions 3.4.2 and later.
    certificate_arn str
    ARN for the certificate.
    compression_type str
    Set to compress target files. Valid values are GZIP and NONE. Default is NONE. (Ignored for source endpoints.)
    csv_delimiter str
    Delimiter used to separate columns in the source files. Default is ,.
    csv_no_sup_value str
    Only applies if output files for a CDC load are written in .csv format. If use_csv_no_sup_value is set to true, string to use for all columns not included in the supplemental log. If you do not specify a string value, DMS uses the null value for these columns regardless of use_csv_no_sup_value. (Ignored for source endpoints.)
    csv_null_value str
    String to use as null when writing to the target. (AWS default is NULL.)
    csv_row_delimiter str
    Delimiter used to separate rows in the source files. Default is newline (i.e., \n).
    data_format str
    Output format for the files that AWS DMS uses to create S3 objects. Valid values are csv and parquet. (Ignored for source endpoints -- only csv is valid.)
    data_page_size int
    Size of one data page in bytes. (AWS default is 1 MiB, i.e., 1048576.)
    date_partition_delimiter str
    Date separating delimiter to use during folder partitioning. Valid values are SLASH, UNDERSCORE, DASH, and NONE. (AWS default is SLASH.) (Ignored for source endpoints.)
    date_partition_enabled bool
    Partition S3 bucket folders based on transaction commit dates. Default is false. (Ignored for source endpoints.)
    date_partition_sequence str
    Date format to use during folder partitioning. Use this parameter when date_partition_enabled is set to true. Valid values are YYYYMMDD, YYYYMMDDHH, YYYYMM, MMYYYYDD, and DDMMYYYY. (AWS default is YYYYMMDD.) (Ignored for source endpoints.)
    date_partition_timezone str
    Convert the current UTC time to a timezone. The conversion occurs when a date partition folder is created and a CDC filename is generated. The timezone format is Area/Location (e.g., Europe/Paris). Use this when date_partition_enabled is true. (Ignored for source endpoints.)
    detach_target_on_lob_lookup_failure_parquet bool
    Undocumented argument for use as directed by AWS Support.
    dict_page_size_limit int
    Maximum size in bytes of an encoded dictionary page of a column. (AWS default is 1 MiB, i.e., 1048576.)
    enable_statistics bool
    Whether to enable statistics for Parquet pages and row groups. Default is true.
    encoding_type str
    Type of encoding to use. Valid values are rle_dictionary, plain, and plain_dictionary. (AWS default is rle_dictionary.)
    encryption_mode str
    Server-side encryption mode that you want to encrypt your .csv or .parquet object files copied to S3. Valid values are SSE_S3 and SSE_KMS. (AWS default is SSE_S3.) (Ignored for source endpoints -- only SSE_S3 is valid.)
    endpoint_arn str
    ARN for the endpoint.
    endpoint_id str
    Database endpoint identifier. Identifiers must contain from 1 to 255 alphanumeric characters or hyphens, begin with a letter, contain only ASCII letters, digits, and hyphens, not end with a hyphen, and not contain two consecutive hyphens.
    endpoint_type str
    Type of endpoint. Valid values are source, target.
    engine_display_name str
    Expanded name for the engine name.
    expected_bucket_owner str
    Bucket owner to prevent sniping. Value is an AWS account ID.
    external_id str
    Can be used for cross-account validation. Use it in another account with aws.dms.S3Endpoint to create the endpoint cross-account.
    external_table_definition str
    JSON document that describes how AWS DMS should interpret the data.
    glue_catalog_generation bool
    Whether to integrate AWS Glue Data Catalog with an Amazon S3 target. See Using AWS Glue Data Catalog with an Amazon S3 target for AWS DMS for more information. Default is false.
    ignore_header_rows int
    When this value is set to 1, DMS ignores the first row header in a .csv file. (AWS default is 0.)
    include_op_for_full_load bool
    Whether to enable a full load to write INSERT operations to the .csv output files only to indicate how the rows were added to the source database. Default is false.
    kms_key_arn str
    ARN for the KMS key that will be used to encrypt the connection parameters. If you do not specify a value for kms_key_arn, then AWS DMS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region.
    max_file_size int
    Maximum size (in KB) of any .csv file to be created while migrating to an S3 target during full load. Valid values are from 1 to 1048576. (AWS default is 1 GB, i.e., 1048576.)
    parquet_timestamp_in_millisecond bool
    Specifies the precision of any TIMESTAMP column values written to an S3 object file in .parquet format. Default is false. (Ignored for source endpoints.)
    parquet_version str
    Version of the .parquet file format. Valid values are parquet-1-0 and parquet-2-0. (AWS default is parquet-1-0.) (Ignored for source endpoints.)
    preserve_transactions bool
    Whether DMS saves the transaction order for a CDC load on the S3 target specified by cdc_path. Default is false. (Ignored for source endpoints.)
    rfc4180 bool
    For an S3 source, whether each leading double quotation mark has to be followed by an ending double quotation mark. Default is true.
    row_group_length int
    Number of rows in a row group. (AWS default is 10000.)
    server_side_encryption_kms_key_id str
    When encryption_mode is SSE_KMS, ARN for the AWS KMS key. (Ignored for source endpoints -- only SSE_S3 encryption_mode is valid.)
    service_access_role_arn str
    ARN of the IAM role with permissions to the S3 bucket.
    ssl_mode str
    SSL mode to use for the connection. Valid values are none, require, verify-ca, verify-full. (AWS default is none.)
    status str
    Status of the endpoint.
    tags Mapping[str, str]
    Map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
    tags_all Mapping[str, str]
    Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

    Deprecated: Please use tags instead.

    timestamp_column_name str
    Column to add with timestamp information to the endpoint data for an Amazon S3 target.
    use_csv_no_sup_value bool
    Whether to use csv_no_sup_value for columns not included in the supplemental log. (Ignored for source endpoints.)
    use_task_start_time_for_full_load_timestamp bool
    When set to true, uses the task start time as the timestamp column value instead of the time the data is written to the target. For full load, when set to true, each row of the timestamp column contains the task start time. For CDC loads, each row of the timestamp column contains the transaction commit time. When set to false, the full load timestamp in the timestamp column increments with the time the data arrives at the target. Default is false.
    addColumnName Boolean
    Whether to add column name information to the .csv output file. Default is false.
    addTrailingPaddingCharacter Boolean
    Whether to add padding. Default is false. (Ignored for source endpoints.)
    bucketFolder String
    S3 object prefix.
    bucketName String
    S3 bucket name.
    cannedAclForObjects String
    Predefined (canned) access control list for objects created in an S3 bucket. Valid values include none, private, public-read, public-read-write, authenticated-read, aws-exec-read, bucket-owner-read, and bucket-owner-full-control. Default is none.
    cdcInsertsAndUpdates Boolean
    Whether to write insert and update operations to .csv or .parquet output files. Default is false.
    cdcInsertsOnly Boolean
    Whether to write insert operations to .csv or .parquet output files. Default is false.
    cdcMaxBatchInterval Number
    Maximum length of the interval, defined in seconds, after which to output a file to Amazon S3. (AWS default is 60.)
    cdcMinFileSize Number
    Minimum file size condition as defined in kilobytes to output a file to Amazon S3. (AWS default is 32000 KB.)
    cdcPath String
    Folder path of CDC files. If cdc_path is set, AWS DMS reads CDC files from this path and replicates the data changes to the target endpoint. Supported in AWS DMS versions 3.4.2 and later.
    certificateArn String
    ARN for the certificate.
    compressionType String
    Set to compress target files. Valid values are GZIP and NONE. Default is NONE. (Ignored for source endpoints.)
    csvDelimiter String
    Delimiter used to separate columns in the source files. Default is ,.
    csvNoSupValue String
    Only applies if output files for a CDC load are written in .csv format. If use_csv_no_sup_value is set to true, string to use for all columns not included in the supplemental log. If you do not specify a string value, DMS uses the null value for these columns regardless of use_csv_no_sup_value. (Ignored for source endpoints.)
    csvNullValue String
    String to use as null when writing to the target. (AWS default is NULL.)
    csvRowDelimiter String
    Delimiter used to separate rows in the source files. Default is newline (i.e., \n).
    dataFormat String
    Output format for the files that AWS DMS uses to create S3 objects. Valid values are csv and parquet. (Ignored for source endpoints -- only csv is valid.)
    dataPageSize Number
    Size of one data page in bytes. (AWS default is 1 MiB, i.e., 1048576.)
    datePartitionDelimiter String
    Date separating delimiter to use during folder partitioning. Valid values are SLASH, UNDERSCORE, DASH, and NONE. (AWS default is SLASH.) (Ignored for source endpoints.)
    datePartitionEnabled Boolean
    Partition S3 bucket folders based on transaction commit dates. Default is false. (Ignored for source endpoints.)
    datePartitionSequence String
    Date format to use during folder partitioning. Use this parameter when date_partition_enabled is set to true. Valid values are YYYYMMDD, YYYYMMDDHH, YYYYMM, MMYYYYDD, and DDMMYYYY. (AWS default is YYYYMMDD.) (Ignored for source endpoints.)
    datePartitionTimezone String
    Convert the current UTC time to a timezone. The conversion occurs when a date partition folder is created and a CDC filename is generated. The timezone format is Area/Location (e.g., Europe/Paris). Use this when date_partition_enabled is true. (Ignored for source endpoints.)
    detachTargetOnLobLookupFailureParquet Boolean
    Undocumented argument for use as directed by AWS Support.
    dictPageSizeLimit Number
    Maximum size in bytes of an encoded dictionary page of a column. (AWS default is 1 MiB, i.e., 1048576.)
    enableStatistics Boolean
    Whether to enable statistics for Parquet pages and row groups. Default is true.
    encodingType String
    Type of encoding to use. Valid values are rle_dictionary, plain, and plain_dictionary. (AWS default is rle_dictionary.)
    encryptionMode String
    Server-side encryption mode that you want to encrypt your .csv or .parquet object files copied to S3. Valid values are SSE_S3 and SSE_KMS. (AWS default is SSE_S3.) (Ignored for source endpoints -- only SSE_S3 is valid.)
    endpointArn String
    ARN for the endpoint.
    endpointId String
    Database endpoint identifier. Identifiers must contain from 1 to 255 alphanumeric characters or hyphens, begin with a letter, contain only ASCII letters, digits, and hyphens, not end with a hyphen, and not contain two consecutive hyphens.
    endpointType String
    Type of endpoint. Valid values are source, target.
    engineDisplayName String
    Expanded name for the engine name.
    expectedBucketOwner String
    Bucket owner to prevent sniping. Value is an AWS account ID.
    externalId String
    Can be used for cross-account validation. Use it in another account with aws.dms.S3Endpoint to create the endpoint cross-account.
    externalTableDefinition String
    JSON document that describes how AWS DMS should interpret the data.
    glueCatalogGeneration Boolean
    Whether to integrate AWS Glue Data Catalog with an Amazon S3 target. See Using AWS Glue Data Catalog with an Amazon S3 target for AWS DMS for more information. Default is false.
    ignoreHeaderRows Number
    When this value is set to 1, DMS ignores the first row header in a .csv file. (AWS default is 0.)
    includeOpForFullLoad Boolean
    Whether to enable a full load to write INSERT operations to the .csv output files only to indicate how the rows were added to the source database. Default is false.
    kmsKeyArn String
    ARN for the KMS key that will be used to encrypt the connection parameters. If you do not specify a value for kms_key_arn, then AWS DMS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region.
    maxFileSize Number
    Maximum size (in KB) of any .csv file to be created while migrating to an S3 target during full load. Valid values are from 1 to 1048576. (AWS default is 1 GB, i.e., 1048576.)
    parquetTimestampInMillisecond Boolean
    Specifies the precision of any TIMESTAMP column values written to an S3 object file in .parquet format. Default is false. (Ignored for source endpoints.)
    parquetVersion String
    Version of the .parquet file format. Valid values are parquet-1-0 and parquet-2-0. (AWS default is parquet-1-0.) (Ignored for source endpoints.)
    preserveTransactions Boolean
    Whether DMS saves the transaction order for a CDC load on the S3 target specified by cdc_path. Default is false. (Ignored for source endpoints.)
    rfc4180 Boolean
    For an S3 source, whether each leading double quotation mark has to be followed by an ending double quotation mark. Default is true.
    rowGroupLength Number
    Number of rows in a row group. (AWS default is 10000.)
    serverSideEncryptionKmsKeyId String
    When encryption_mode is SSE_KMS, ARN for the AWS KMS key. (Ignored for source endpoints -- only SSE_S3 encryption_mode is valid.)
    serviceAccessRoleArn String
    ARN of the IAM role with permissions to the S3 bucket.
    sslMode String
    SSL mode to use for the connection. Valid values are none, require, verify-ca, verify-full. (AWS default is none.)
    status String
    Status of the endpoint.
    tags Map<String>
    Map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
    tagsAll Map<String>
    Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

    Deprecated: Please use tags instead.

    timestampColumnName String
    Column to add with timestamp information to the endpoint data for an Amazon S3 target.
    useCsvNoSupValue Boolean
    Whether to use csv_no_sup_value for columns not included in the supplemental log. (Ignored for source endpoints.)
    useTaskStartTimeForFullLoadTimestamp Boolean
    When set to true, uses the task start time as the timestamp column value instead of the time the data is written to the target. For full load, when set to true, each row of the timestamp column contains the task start time. For CDC loads, each row of the timestamp column contains the transaction commit time. When set to false, the full load timestamp in the timestamp column increments with the time the data arrives at the target. Default is false.
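
    Tying several of the arguments above together, the TypeScript sketch below configures a target endpoint that writes date-partitioned .parquet files encrypted with a customer-managed KMS key. It is illustrative only: the bucket name is a placeholder, and exampleRole and exampleKey stand in for an aws.iam.Role and an aws.kms.Key defined elsewhere in your program; only the argument names and valid values are taken from the reference above.

    import * as pulumi from "@pulumi/pulumi";
    import * as aws from "@pulumi/aws";

    // Illustrative only: exampleRole and exampleKey are assumed to be
    // an aws.iam.Role and aws.kms.Key defined elsewhere in the program.
    const partitionedTarget = new aws.dms.S3Endpoint("partitioned-target", {
        endpointId: "parquet-target",            // letters, digits, and hyphens; must begin with a letter
        endpointType: "target",
        bucketName: "example-dms-target-bucket", // placeholder bucket name
        bucketFolder: "cdc",                     // S3 object prefix
        serviceAccessRoleArn: exampleRole.arn,   // IAM role with access to the bucket

        // Output format (target-only arguments)
        dataFormat: "parquet",
        parquetVersion: "parquet-2-0",
        timestampColumnName: "commit_ts",

        // Server-side encryption with a customer-managed KMS key
        encryptionMode: "SSE_KMS",
        serverSideEncryptionKmsKeyId: exampleKey.arn,

        // Partition S3 folders by transaction commit date, down to the hour
        datePartitionEnabled: true,
        datePartitionSequence: "YYYYMMDDHH",
        datePartitionDelimiter: "SLASH",

        // CDC output batching: emit a file after ~2 minutes or ~64 MB of changes
        cdcMaxBatchInterval: 120,
        cdcMinFileSize: 64000,
    });

    Per the descriptions above, cdcMaxBatchInterval (seconds) and cdcMinFileSize (kilobytes) bound how often CDC files are written; in this sketch a file is emitted after roughly two minutes or once about 64 MB has accumulated, whichever condition is met first.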

    Import

    Using pulumi import, import endpoints using the endpoint_id. For example:

    $ pulumi import aws:dms/s3Endpoint:S3Endpoint example example-dms-endpoint-tf
    

    To learn more about importing existing cloud resources, see Importing resources.

    Package Details

    Repository
    AWS Classic pulumi/pulumi-aws
    License
    Apache-2.0
    Notes
    This Pulumi package is based on the aws Terraform Provider.