Export AWS DynamoDB Tables

The aws:dynamodb/tableExport:TableExport resource, part of the Pulumi AWS provider, creates point-in-time exports of DynamoDB table data to S3, either as full snapshots or incremental change sets. This guide focuses on three capabilities: full table snapshots, historical point-in-time exports, and incremental change capture.

Table exports require Point-in-time Recovery (PITR) to be enabled on the source table, plus an S3 bucket with appropriate permissions. The examples are intentionally small; combine them with your own table and bucket infrastructure. Note that exports are immutable: Pulumi removes them from state on destroy but does not delete the exported data from S3.

Export a table snapshot to S3

Analytics pipelines often extract DynamoDB data for processing in data lakes or warehouses. Table exports create point-in-time snapshots without impacting production traffic.

import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";

// Destination bucket for the exported data. forceDestroy allows the bucket
// to be deleted even while it still contains exported objects.
const example = new aws.s3.Bucket("example", {
    bucketPrefix: "example",
    forceDestroy: true,
});
// Source table. Point-in-time Recovery must be enabled before an export
// can be created.
const exampleTable = new aws.dynamodb.Table("example", {
    name: "example-table-1",
    billingMode: "PAY_PER_REQUEST",
    hashKey: "user_id",
    attributes: [{
        name: "user_id",
        type: "S",
    }],
    pointInTimeRecovery: {
        enabled: true,
    },
});
// Full snapshot export of the table's current state into the bucket above.
const exampleTableExport = new aws.dynamodb.TableExport("example", {
    tableArn: exampleTable.arn,
    s3Bucket: example.id,
});
import pulumi
import pulumi_aws as aws

# Destination bucket for the exported data. force_destroy allows the bucket
# to be deleted even while it still contains exported objects.
example = aws.s3.Bucket("example",
    bucket_prefix="example",
    force_destroy=True)
# Source table. Point-in-time Recovery must be enabled before an export
# can be created.
example_table = aws.dynamodb.Table("example",
    name="example-table-1",
    billing_mode="PAY_PER_REQUEST",
    hash_key="user_id",
    attributes=[{
        "name": "user_id",
        "type": "S",
    }],
    point_in_time_recovery={
        "enabled": True,
    })
# Full snapshot export of the table's current state into the bucket above.
example_table_export = aws.dynamodb.TableExport("example",
    table_arn=example_table.arn,
    s3_bucket=example.id)
package main

import (
	"github.com/pulumi/pulumi-aws/sdk/v7/go/aws/dynamodb"
	"github.com/pulumi/pulumi-aws/sdk/v7/go/aws/s3"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Destination bucket for the exported data. ForceDestroy allows the
		// bucket to be deleted even while it still contains exported objects.
		example, err := s3.NewBucket(ctx, "example", &s3.BucketArgs{
			BucketPrefix: pulumi.String("example"),
			ForceDestroy: pulumi.Bool(true),
		})
		if err != nil {
			return err
		}
		// Source table. Point-in-time Recovery must be enabled before an
		// export can be created.
		exampleTable, err := dynamodb.NewTable(ctx, "example", &dynamodb.TableArgs{
			Name:        pulumi.String("example-table-1"),
			BillingMode: pulumi.String("PAY_PER_REQUEST"),
			HashKey:     pulumi.String("user_id"),
			Attributes: dynamodb.TableAttributeArray{
				&dynamodb.TableAttributeArgs{
					Name: pulumi.String("user_id"),
					Type: pulumi.String("S"),
				},
			},
			PointInTimeRecovery: &dynamodb.TablePointInTimeRecoveryArgs{
				Enabled: pulumi.Bool(true),
			},
		})
		if err != nil {
			return err
		}
		// Full snapshot export of the table's current state into the bucket.
		_, err = dynamodb.NewTableExport(ctx, "example", &dynamodb.TableExportArgs{
			TableArn: exampleTable.Arn,
			S3Bucket: example.ID(),
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aws = Pulumi.Aws;

return await Deployment.RunAsync(() => 
{
    // Destination bucket for the exported data. ForceDestroy allows the
    // bucket to be deleted even while it still contains exported objects.
    var example = new Aws.S3.Bucket("example", new()
    {
        BucketPrefix = "example",
        ForceDestroy = true,
    });

    // Source table. Point-in-time Recovery must be enabled before an export
    // can be created.
    var exampleTable = new Aws.DynamoDB.Table("example", new()
    {
        Name = "example-table-1",
        BillingMode = "PAY_PER_REQUEST",
        HashKey = "user_id",
        Attributes = new[]
        {
            new Aws.DynamoDB.Inputs.TableAttributeArgs
            {
                Name = "user_id",
                Type = "S",
            },
        },
        PointInTimeRecovery = new Aws.DynamoDB.Inputs.TablePointInTimeRecoveryArgs
        {
            Enabled = true,
        },
    });

    // Full snapshot export of the table's current state into the bucket above.
    var exampleTableExport = new Aws.DynamoDB.TableExport("example", new()
    {
        TableArn = exampleTable.Arn,
        S3Bucket = example.Id,
    });

});
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.s3.Bucket;
import com.pulumi.aws.s3.BucketArgs;
import com.pulumi.aws.dynamodb.Table;
import com.pulumi.aws.dynamodb.TableArgs;
import com.pulumi.aws.dynamodb.inputs.TableAttributeArgs;
import com.pulumi.aws.dynamodb.inputs.TablePointInTimeRecoveryArgs;
import com.pulumi.aws.dynamodb.TableExport;
import com.pulumi.aws.dynamodb.TableExportArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        // Destination bucket for the exported data. forceDestroy allows the
        // bucket to be deleted even while it still contains exported objects.
        var example = new Bucket("example", BucketArgs.builder()
            .bucketPrefix("example")
            .forceDestroy(true)
            .build());

        // Source table. Point-in-time Recovery must be enabled before an
        // export can be created.
        var exampleTable = new Table("exampleTable", TableArgs.builder()
            .name("example-table-1")
            .billingMode("PAY_PER_REQUEST")
            .hashKey("user_id")
            .attributes(TableAttributeArgs.builder()
                .name("user_id")
                .type("S")
                .build())
            .pointInTimeRecovery(TablePointInTimeRecoveryArgs.builder()
                .enabled(true)
                .build())
            .build());

        // Full snapshot export of the table's current state into the bucket.
        var exampleTableExport = new TableExport("exampleTableExport", TableExportArgs.builder()
            .tableArn(exampleTable.arn())
            .s3Bucket(example.id())
            .build());

    }
}
resources:
  # Destination bucket for the exported data. forceDestroy allows the bucket
  # to be deleted even while it still contains exported objects.
  example:
    type: aws:s3:Bucket
    properties:
      bucketPrefix: example
      forceDestroy: true
  # Source table. Point-in-time Recovery must be enabled before an export
  # can be created.
  exampleTable:
    type: aws:dynamodb:Table
    name: example
    properties:
      name: example-table-1
      billingMode: PAY_PER_REQUEST
      hashKey: user_id
      attributes:
        - name: user_id
          type: S
      pointInTimeRecovery:
        enabled: true
  # Full snapshot export of the table's current state into the bucket above.
  exampleTableExport:
    type: aws:dynamodb:TableExport
    name: example
    properties:
      tableArn: ${exampleTable.arn}
      s3Bucket: ${example.id}

The export captures the table’s current state and writes it to S3. The tableArn identifies the source table, which must have pointInTimeRecovery enabled. The s3Bucket receives the exported data. Pulumi waits until the export reaches COMPLETED or FAILED status before proceeding.

Export from a specific point in time

Compliance or debugging workflows may require exporting table state from a specific moment, such as before a deployment or during an incident window.

import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";

// Export the table's state as of a fixed RFC3339 timestamp. The referenced
// bucket and table (exampleAwsS3Bucket / exampleAwsDynamodbTable) are assumed
// to be defined elsewhere in the program.
const example = new aws.dynamodb.TableExport("example", {
    exportTime: "2023-04-02T11:30:13+01:00",
    s3Bucket: exampleAwsS3Bucket.id,
    tableArn: exampleAwsDynamodbTable.arn,
});
import pulumi
import pulumi_aws as aws

# Export the table's state as of a fixed RFC3339 timestamp. The referenced
# bucket and table (example_aws_s3_bucket / example_aws_dynamodb_table) are
# assumed to be defined elsewhere in the program.
example = aws.dynamodb.TableExport("example",
    export_time="2023-04-02T11:30:13+01:00",
    s3_bucket=example_aws_s3_bucket["id"],
    table_arn=example_aws_dynamodb_table["arn"])
package main

import (
	"github.com/pulumi/pulumi-aws/sdk/v7/go/aws/dynamodb"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Export the table's state as of a fixed RFC3339 timestamp. The
		// referenced bucket and table (exampleAwsS3Bucket /
		// exampleAwsDynamodbTable) are assumed to be defined elsewhere.
		_, err := dynamodb.NewTableExport(ctx, "example", &dynamodb.TableExportArgs{
			ExportTime: pulumi.String("2023-04-02T11:30:13+01:00"),
			S3Bucket:   pulumi.Any(exampleAwsS3Bucket.Id),
			TableArn:   pulumi.Any(exampleAwsDynamodbTable.Arn),
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aws = Pulumi.Aws;

return await Deployment.RunAsync(() => 
{
    // Export the table's state as of a fixed RFC3339 timestamp. The
    // referenced bucket and table (exampleAwsS3Bucket /
    // exampleAwsDynamodbTable) are assumed to be defined elsewhere.
    var example = new Aws.DynamoDB.TableExport("example", new()
    {
        ExportTime = "2023-04-02T11:30:13+01:00",
        S3Bucket = exampleAwsS3Bucket.Id,
        TableArn = exampleAwsDynamodbTable.Arn,
    });

});
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.dynamodb.TableExport;
import com.pulumi.aws.dynamodb.TableExportArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        // Export the table's state as of a fixed RFC3339 timestamp. The
        // referenced bucket and table (exampleAwsS3Bucket /
        // exampleAwsDynamodbTable) are assumed to be defined elsewhere.
        var example = new TableExport("example", TableExportArgs.builder()
            .exportTime("2023-04-02T11:30:13+01:00")
            .s3Bucket(exampleAwsS3Bucket.id())
            .tableArn(exampleAwsDynamodbTable.arn())
            .build());

    }
}
resources:
  # Export the table's state as of a fixed RFC3339 timestamp. The referenced
  # bucket and table (exampleAwsS3Bucket / exampleAwsDynamodbTable) are
  # assumed to be defined elsewhere in the program.
  example:
    type: aws:dynamodb:TableExport
    properties:
      exportTime: 2023-04-02T11:30:13+01:00
      s3Bucket: ${exampleAwsS3Bucket.id}
      tableArn: ${exampleAwsDynamodbTable.arn}

The exportTime property specifies an RFC3339 timestamp to export from. The export captures the table’s state at that exact moment, provided Point-in-time Recovery was enabled at that time. Omitting exportTime defaults to the current time.

Export changes within a time window

Change data capture pipelines need to extract only modifications that occurred during a specific period, avoiding the cost and time of full table exports.

import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";

// Incremental export: captures only changes made between exportFromTime and
// exportToTime. The referenced bucket and table are assumed to be defined
// elsewhere in the program.
const example = new aws.dynamodb.TableExport("example", {
    exportType: "INCREMENTAL_EXPORT",
    s3Bucket: exampleAwsS3Bucket.id,
    tableArn: exampleAwsDynamodbTable.arn,
    incrementalExportSpecification: {
        exportFromTime: "2025-02-09T12:00:00+01:00",
        exportToTime: "2025-02-09T13:00:00+01:00",
    },
});
import pulumi
import pulumi_aws as aws

# Incremental export: captures only changes made between export_from_time and
# export_to_time. The referenced bucket and table are assumed to be defined
# elsewhere in the program.
example = aws.dynamodb.TableExport("example",
    export_type="INCREMENTAL_EXPORT",
    s3_bucket=example_aws_s3_bucket["id"],
    table_arn=example_aws_dynamodb_table["arn"],
    incremental_export_specification={
        "export_from_time": "2025-02-09T12:00:00+01:00",
        "export_to_time": "2025-02-09T13:00:00+01:00",
    })
package main

import (
	"github.com/pulumi/pulumi-aws/sdk/v7/go/aws/dynamodb"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Incremental export: captures only changes made between
		// ExportFromTime and ExportToTime. The referenced bucket and table
		// are assumed to be defined elsewhere in the program.
		_, err := dynamodb.NewTableExport(ctx, "example", &dynamodb.TableExportArgs{
			ExportType: pulumi.String("INCREMENTAL_EXPORT"),
			S3Bucket:   pulumi.Any(exampleAwsS3Bucket.Id),
			TableArn:   pulumi.Any(exampleAwsDynamodbTable.Arn),
			IncrementalExportSpecification: &dynamodb.TableExportIncrementalExportSpecificationArgs{
				ExportFromTime: pulumi.String("2025-02-09T12:00:00+01:00"),
				ExportToTime:   pulumi.String("2025-02-09T13:00:00+01:00"),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aws = Pulumi.Aws;

return await Deployment.RunAsync(() => 
{
    // Incremental export: captures only changes made between ExportFromTime
    // and ExportToTime. The referenced bucket and table are assumed to be
    // defined elsewhere in the program.
    var example = new Aws.DynamoDB.TableExport("example", new()
    {
        ExportType = "INCREMENTAL_EXPORT",
        S3Bucket = exampleAwsS3Bucket.Id,
        TableArn = exampleAwsDynamodbTable.Arn,
        IncrementalExportSpecification = new Aws.DynamoDB.Inputs.TableExportIncrementalExportSpecificationArgs
        {
            ExportFromTime = "2025-02-09T12:00:00+01:00",
            ExportToTime = "2025-02-09T13:00:00+01:00",
        },
    });

});
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.dynamodb.TableExport;
import com.pulumi.aws.dynamodb.TableExportArgs;
import com.pulumi.aws.dynamodb.inputs.TableExportIncrementalExportSpecificationArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        // Incremental export: captures only changes made between
        // exportFromTime and exportToTime. The referenced bucket and table
        // are assumed to be defined elsewhere in the program.
        var example = new TableExport("example", TableExportArgs.builder()
            .exportType("INCREMENTAL_EXPORT")
            .s3Bucket(exampleAwsS3Bucket.id())
            .tableArn(exampleAwsDynamodbTable.arn())
            .incrementalExportSpecification(TableExportIncrementalExportSpecificationArgs.builder()
                .exportFromTime("2025-02-09T12:00:00+01:00")
                .exportToTime("2025-02-09T13:00:00+01:00")
                .build())
            .build());

    }
}
resources:
  # Incremental export: captures only changes made between exportFromTime and
  # exportToTime. The referenced bucket and table are assumed to be defined
  # elsewhere in the program.
  example:
    type: aws:dynamodb:TableExport
    properties:
      exportType: INCREMENTAL_EXPORT
      s3Bucket: ${exampleAwsS3Bucket.id}
      tableArn: ${exampleAwsDynamodbTable.arn}
      incrementalExportSpecification:
        exportFromTime: 2025-02-09T12:00:00+01:00
        exportToTime: 2025-02-09T13:00:00+01:00

Setting exportType to INCREMENTAL_EXPORT captures only changes between exportFromTime and exportToTime. The incrementalExportSpecification block defines the time window. This approach reduces export size and cost for pipelines that process changes incrementally.

Beyond these examples

These snippets focus on specific table export features: full and incremental exports, and point-in-time snapshots. They’re intentionally minimal rather than complete data pipeline configurations.

The examples reference pre-existing infrastructure such as DynamoDB tables with Point-in-time Recovery enabled, and S3 buckets with appropriate permissions. They focus on export configuration rather than provisioning the surrounding infrastructure.

To keep things focused, common export patterns are omitted, including:

  • Export format selection (exportFormat: DYNAMODB_JSON vs ION)
  • S3 path organization (s3Prefix)
  • Encryption configuration (s3SseAlgorithm, s3SseKmsKeyId)
  • Cross-account exports (s3BucketOwner)

These omissions are intentional: the goal is to illustrate how each export feature is wired, not provide drop-in data pipeline modules. See the DynamoDB TableExport resource reference for all available configuration options.

Let's export AWS DynamoDB Tables

Get started with Pulumi Cloud, then follow our quick setup guide to deploy this infrastructure.

Try Pulumi Cloud for FREE

Frequently Asked Questions

Prerequisites & Setup
Why do I need Point-in-time Recovery enabled on my DynamoDB table?
Table exports require Point-in-time Recovery to be enabled on the source table. Configure pointInTimeRecovery.enabled = true on your table before creating an export.
Export Configuration
What's the difference between full and incremental exports?
Full exports (FULL_EXPORT, the default) capture the entire table at a point in time. Incremental exports (INCREMENTAL_EXPORT) capture only changes between two timestamps and require incrementalExportSpecification.
How do I export data from a specific point in time?
Set exportTime to an RFC3339 timestamp like 2023-04-02T11:30:13+01:00. Omitting this property exports a snapshot from the current time.
How do I create an incremental export?
Set exportType to INCREMENTAL_EXPORT and provide incrementalExportSpecification with exportFromTime and exportToTime timestamps.
What export formats are available?
You can export in DYNAMODB_JSON (default) or ION format using the exportFormat property.
Lifecycle & Immutability
What properties are immutable after creation?
All major properties are immutable: exportTime, exportType, tableArn, s3Bucket, s3Prefix, s3SseAlgorithm, s3BucketOwner, exportFormat, and incrementalExportSpecification. Any changes require creating a new export.
What happens to my exported data when I destroy the resource?
Destroying the resource removes it from Pulumi state but doesn’t delete the exported data from S3. You must manually delete S3 objects if needed.
How long does a table export take?
Pulumi waits until the export reaches COMPLETED or FAILED status. Duration depends on table size and AWS processing time.
S3 & Storage
What encryption options are available for exported data?
Use s3SseAlgorithm to specify AES256 or KMS encryption. For KMS, provide the key ID via s3SseKmsKeyId.

Using a different cloud?

Explore database guides for other cloud providers: