Export AWS DynamoDB Tables

The aws:dynamodb/tableExport:TableExport resource, part of the Pulumi AWS provider, exports DynamoDB table data to S3 as either full snapshots or incremental change sets. This guide focuses on three capabilities: full table snapshots, snapshots from a specific point in time, and incremental change exports.

Table exports require DynamoDB tables with point-in-time recovery enabled and S3 buckets with appropriate permissions configured. The examples are intentionally small. Combine them with your own table and bucket infrastructure.
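For the exporting principal, "appropriate permissions" typically means permission to call the export API on the table and to write objects into the destination bucket. Here is a minimal sketch of such an IAM policy in TypeScript; the table and bucket ARNs are placeholders, and you should verify the exact action list against the AWS export documentation.

import * as aws from "@pulumi/aws";

// Hypothetical policy for whoever runs the export: export permission on the
// table plus write access to the destination bucket. ARNs are placeholders.
const exportPolicy = new aws.iam.Policy("table-export-policy", {
    policy: JSON.stringify({
        Version: "2012-10-17",
        Statement: [
            {
                Effect: "Allow",
                Action: ["dynamodb:ExportTableToPointInTime"],
                Resource: "arn:aws:dynamodb:us-east-1:123456789012:table/example-table-1",
            },
            {
                Effect: "Allow",
                Action: ["s3:PutObject", "s3:AbortMultipartUpload"],
                Resource: "arn:aws:s3:::example-bucket/*",
            },
        ],
    }),
});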

Export a table snapshot to S3

Data pipelines often export DynamoDB tables to S3 for analytics, archival, or cross-region replication.

TypeScript:

import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";

const example = new aws.s3.Bucket("example", {
    bucketPrefix: "example",
    forceDestroy: true,
});
const exampleTable = new aws.dynamodb.Table("example", {
    name: "example-table-1",
    billingMode: "PAY_PER_REQUEST",
    hashKey: "user_id",
    attributes: [{
        name: "user_id",
        type: "S",
    }],
    pointInTimeRecovery: {
        enabled: true,
    },
});
const exampleTableExport = new aws.dynamodb.TableExport("example", {
    tableArn: exampleTable.arn,
    s3Bucket: example.id,
});

Python:

import pulumi
import pulumi_aws as aws

example = aws.s3.Bucket("example",
    bucket_prefix="example",
    force_destroy=True)
example_table = aws.dynamodb.Table("example",
    name="example-table-1",
    billing_mode="PAY_PER_REQUEST",
    hash_key="user_id",
    attributes=[{
        "name": "user_id",
        "type": "S",
    }],
    point_in_time_recovery={
        "enabled": True,
    })
example_table_export = aws.dynamodb.TableExport("example",
    table_arn=example_table.arn,
    s3_bucket=example.id)

Go:

package main

import (
	"github.com/pulumi/pulumi-aws/sdk/v7/go/aws/dynamodb"
	"github.com/pulumi/pulumi-aws/sdk/v7/go/aws/s3"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		example, err := s3.NewBucket(ctx, "example", &s3.BucketArgs{
			BucketPrefix: pulumi.String("example"),
			ForceDestroy: pulumi.Bool(true),
		})
		if err != nil {
			return err
		}
		exampleTable, err := dynamodb.NewTable(ctx, "example", &dynamodb.TableArgs{
			Name:        pulumi.String("example-table-1"),
			BillingMode: pulumi.String("PAY_PER_REQUEST"),
			HashKey:     pulumi.String("user_id"),
			Attributes: dynamodb.TableAttributeArray{
				&dynamodb.TableAttributeArgs{
					Name: pulumi.String("user_id"),
					Type: pulumi.String("S"),
				},
			},
			PointInTimeRecovery: &dynamodb.TablePointInTimeRecoveryArgs{
				Enabled: pulumi.Bool(true),
			},
		})
		if err != nil {
			return err
		}
		_, err = dynamodb.NewTableExport(ctx, "example", &dynamodb.TableExportArgs{
			TableArn: exampleTable.Arn,
			S3Bucket: example.ID(),
		})
		if err != nil {
			return err
		}
		return nil
	})
}

C#:

using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aws = Pulumi.Aws;

return await Deployment.RunAsync(() => 
{
    var example = new Aws.S3.Bucket("example", new()
    {
        BucketPrefix = "example",
        ForceDestroy = true,
    });

    var exampleTable = new Aws.DynamoDB.Table("example", new()
    {
        Name = "example-table-1",
        BillingMode = "PAY_PER_REQUEST",
        HashKey = "user_id",
        Attributes = new[]
        {
            new Aws.DynamoDB.Inputs.TableAttributeArgs
            {
                Name = "user_id",
                Type = "S",
            },
        },
        PointInTimeRecovery = new Aws.DynamoDB.Inputs.TablePointInTimeRecoveryArgs
        {
            Enabled = true,
        },
    });

    var exampleTableExport = new Aws.DynamoDB.TableExport("example", new()
    {
        TableArn = exampleTable.Arn,
        S3Bucket = example.Id,
    });

});

Java:

package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.s3.Bucket;
import com.pulumi.aws.s3.BucketArgs;
import com.pulumi.aws.dynamodb.Table;
import com.pulumi.aws.dynamodb.TableArgs;
import com.pulumi.aws.dynamodb.inputs.TableAttributeArgs;
import com.pulumi.aws.dynamodb.inputs.TablePointInTimeRecoveryArgs;
import com.pulumi.aws.dynamodb.TableExport;
import com.pulumi.aws.dynamodb.TableExportArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var example = new Bucket("example", BucketArgs.builder()
            .bucketPrefix("example")
            .forceDestroy(true)
            .build());

        var exampleTable = new Table("exampleTable", TableArgs.builder()
            .name("example-table-1")
            .billingMode("PAY_PER_REQUEST")
            .hashKey("user_id")
            .attributes(TableAttributeArgs.builder()
                .name("user_id")
                .type("S")
                .build())
            .pointInTimeRecovery(TablePointInTimeRecoveryArgs.builder()
                .enabled(true)
                .build())
            .build());

        var exampleTableExport = new TableExport("exampleTableExport", TableExportArgs.builder()
            .tableArn(exampleTable.arn())
            .s3Bucket(example.id())
            .build());

    }
}

YAML:

resources:
  example:
    type: aws:s3:Bucket
    properties:
      bucketPrefix: example
      forceDestroy: true
  exampleTable:
    type: aws:dynamodb:Table
    name: example
    properties:
      name: example-table-1
      billingMode: PAY_PER_REQUEST
      hashKey: user_id
      attributes:
        - name: user_id
          type: S
      pointInTimeRecovery:
        enabled: true
  exampleTableExport:
    type: aws:dynamodb:TableExport
    name: example
    properties:
      tableArn: ${exampleTable.arn}
      s3Bucket: ${example.id}

When you create a TableExport without specifying exportTime, DynamoDB captures the current table state and writes it to the specified S3 bucket. The tableArn identifies the source table, and s3Bucket specifies the destination. Point-in-time recovery must be enabled on the table (shown in the pointInTimeRecovery block) for exports to work.
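The export also surfaces useful outputs once it completes. A small sketch, continuing the TypeScript example above, that exposes a few of them as stack outputs:

// Surface export metadata as stack outputs. exportStatus reports
// IN_PROGRESS, COMPLETED, or FAILED; manifestFilesS3Key locates the
// manifest describing the exported files.
export const exportStatus = exampleTableExport.exportStatus;
export const manifestKey = exampleTableExport.manifestFilesS3Key;
export const exportedItemCount = exampleTableExport.itemCount;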

Export from a specific point in time

When investigating issues or auditing historical data, you can export table state from a specific moment. The snippets below reference a pre-existing table and bucket (exampleAwsDynamodbTable, exampleAwsS3Bucket) rather than declaring them.

TypeScript:

import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";

const example = new aws.dynamodb.TableExport("example", {
    exportTime: "2023-04-02T11:30:13+01:00",
    s3Bucket: exampleAwsS3Bucket.id,
    tableArn: exampleAwsDynamodbTable.arn,
});

Python:

import pulumi
import pulumi_aws as aws

example = aws.dynamodb.TableExport("example",
    export_time="2023-04-02T11:30:13+01:00",
    s3_bucket=example_aws_s3_bucket["id"],
    table_arn=example_aws_dynamodb_table["arn"])

Go:

package main

import (
	"github.com/pulumi/pulumi-aws/sdk/v7/go/aws/dynamodb"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := dynamodb.NewTableExport(ctx, "example", &dynamodb.TableExportArgs{
			ExportTime: pulumi.String("2023-04-02T11:30:13+01:00"),
			S3Bucket:   pulumi.Any(exampleAwsS3Bucket.Id),
			TableArn:   pulumi.Any(exampleAwsDynamodbTable.Arn),
		})
		if err != nil {
			return err
		}
		return nil
	})
}

C#:

using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aws = Pulumi.Aws;

return await Deployment.RunAsync(() => 
{
    var example = new Aws.DynamoDB.TableExport("example", new()
    {
        ExportTime = "2023-04-02T11:30:13+01:00",
        S3Bucket = exampleAwsS3Bucket.Id,
        TableArn = exampleAwsDynamodbTable.Arn,
    });

});

Java:

package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.dynamodb.TableExport;
import com.pulumi.aws.dynamodb.TableExportArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var example = new TableExport("example", TableExportArgs.builder()
            .exportTime("2023-04-02T11:30:13+01:00")
            .s3Bucket(exampleAwsS3Bucket.id())
            .tableArn(exampleAwsDynamodbTable.arn())
            .build());

    }
}

YAML:

resources:
  example:
    type: aws:dynamodb:TableExport
    properties:
      exportTime: 2023-04-02T11:30:13+01:00
      s3Bucket: ${exampleAwsS3Bucket.id}
      tableArn: ${exampleAwsDynamodbTable.arn}

The exportTime property accepts an RFC3339 timestamp that specifies which point in the table’s history to export. DynamoDB uses point-in-time recovery data to reconstruct the table state at that moment. This is useful for compliance audits, or for recovering a known-good state from before data corruption occurred.
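If you want to derive the timestamp rather than hard-code it, here is a hedged sketch in TypeScript, assuming the same pre-existing table and bucket references as above:

import * as aws from "@pulumi/aws";

// Export the table state from one hour ago. toISOString() yields an
// RFC3339-compatible UTC timestamp. The chosen time must fall within the
// table's point-in-time recovery window, and because this value changes on
// every run, pin a fixed timestamp when you need repeatable deployments.
const oneHourAgo = new Date(Date.now() - 60 * 60 * 1000).toISOString();

const historicalExport = new aws.dynamodb.TableExport("historical", {
    exportTime: oneHourAgo,
    s3Bucket: exampleAwsS3Bucket.id,
    tableArn: exampleAwsDynamodbTable.arn,
});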

Export changes within a time range

Incremental exports capture only the changes between two timestamps, reducing data transfer for pipelines that process updates continuously.

TypeScript:

import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";

const example = new aws.dynamodb.TableExport("example", {
    exportType: "INCREMENTAL_EXPORT",
    s3Bucket: exampleAwsS3Bucket.id,
    tableArn: exampleAwsDynamodbTable.arn,
    incrementalExportSpecification: {
        exportFromTime: "2025-02-09T12:00:00+01:00",
        exportToTime: "2025-02-09T13:00:00+01:00",
    },
});

Python:

import pulumi
import pulumi_aws as aws

example = aws.dynamodb.TableExport("example",
    export_type="INCREMENTAL_EXPORT",
    s3_bucket=example_aws_s3_bucket["id"],
    table_arn=example_aws_dynamodb_table["arn"],
    incremental_export_specification={
        "export_from_time": "2025-02-09T12:00:00+01:00",
        "export_to_time": "2025-02-09T13:00:00+01:00",
    })

Go:

package main

import (
	"github.com/pulumi/pulumi-aws/sdk/v7/go/aws/dynamodb"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := dynamodb.NewTableExport(ctx, "example", &dynamodb.TableExportArgs{
			ExportType: pulumi.String("INCREMENTAL_EXPORT"),
			S3Bucket:   pulumi.Any(exampleAwsS3Bucket.Id),
			TableArn:   pulumi.Any(exampleAwsDynamodbTable.Arn),
			IncrementalExportSpecification: &dynamodb.TableExportIncrementalExportSpecificationArgs{
				ExportFromTime: pulumi.String("2025-02-09T12:00:00+01:00"),
				ExportToTime:   pulumi.String("2025-02-09T13:00:00+01:00"),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}

C#:

using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aws = Pulumi.Aws;

return await Deployment.RunAsync(() => 
{
    var example = new Aws.DynamoDB.TableExport("example", new()
    {
        ExportType = "INCREMENTAL_EXPORT",
        S3Bucket = exampleAwsS3Bucket.Id,
        TableArn = exampleAwsDynamodbTable.Arn,
        IncrementalExportSpecification = new Aws.DynamoDB.Inputs.TableExportIncrementalExportSpecificationArgs
        {
            ExportFromTime = "2025-02-09T12:00:00+01:00",
            ExportToTime = "2025-02-09T13:00:00+01:00",
        },
    });

});

Java:

package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.dynamodb.TableExport;
import com.pulumi.aws.dynamodb.TableExportArgs;
import com.pulumi.aws.dynamodb.inputs.TableExportIncrementalExportSpecificationArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var example = new TableExport("example", TableExportArgs.builder()
            .exportType("INCREMENTAL_EXPORT")
            .s3Bucket(exampleAwsS3Bucket.id())
            .tableArn(exampleAwsDynamodbTable.arn())
            .incrementalExportSpecification(TableExportIncrementalExportSpecificationArgs.builder()
                .exportFromTime("2025-02-09T12:00:00+01:00")
                .exportToTime("2025-02-09T13:00:00+01:00")
                .build())
            .build());

    }
}

YAML:

resources:
  example:
    type: aws:dynamodb:TableExport
    properties:
      exportType: INCREMENTAL_EXPORT
      s3Bucket: ${exampleAwsS3Bucket.id}
      tableArn: ${exampleAwsDynamodbTable.arn}
      incrementalExportSpecification:
        exportFromTime: 2025-02-09T12:00:00+01:00
        exportToTime: 2025-02-09T13:00:00+01:00

Setting exportType to INCREMENTAL_EXPORT switches from full snapshots to change-only exports. The incrementalExportSpecification block defines the time window: exportFromTime and exportToTime specify the start and end of the range. DynamoDB exports only items that changed during this period, making it efficient for streaming pipelines that process updates in batches.
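Because each TableExport is immutable, a pipeline that processes hourly batches would create one export resource per window rather than updating a single one. A minimal sketch, assuming the same pre-existing table and bucket:

import * as aws from "@pulumi/aws";

// One incremental export per hourly window; append windows to the list as
// the pipeline advances. Timestamps are illustrative.
const windows = [
    { from: "2025-02-09T12:00:00+01:00", to: "2025-02-09T13:00:00+01:00" },
    { from: "2025-02-09T13:00:00+01:00", to: "2025-02-09T14:00:00+01:00" },
];

const incrementalExports = windows.map((w, i) =>
    new aws.dynamodb.TableExport(`incremental-${i}`, {
        exportType: "INCREMENTAL_EXPORT",
        s3Bucket: exampleAwsS3Bucket.id,
        tableArn: exampleAwsDynamodbTable.arn,
        incrementalExportSpecification: {
            exportFromTime: w.from,
            exportToTime: w.to,
        },
    }));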

Beyond these examples

These snippets focus on specific export features: full and incremental export types, and point-in-time snapshots. They’re intentionally minimal rather than complete data pipeline configurations.

The examples reference pre-existing infrastructure such as DynamoDB tables with point-in-time recovery enabled, and S3 buckets with export permissions configured. They focus on export configuration rather than provisioning the surrounding infrastructure.

To keep things focused, common export patterns are omitted, including the following (see the combined sketch after this list):

  • Export format selection (exportFormat: DYNAMODB_JSON vs ION)
  • S3 path organization (s3Prefix)
  • Encryption configuration (s3SseAlgorithm, s3SseKmsKeyId)
  • Cross-account exports (s3BucketOwner)
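For orientation, here is a hedged sketch that wires several of these options together; the bucket owner account, prefix, and KMS key reference (exampleAwsKmsKey) are placeholders:

import * as aws from "@pulumi/aws";

// Illustrative combination of the options listed above; confirm each
// property against the TableExport resource reference before relying on it.
const fullOptionsExport = new aws.dynamodb.TableExport("full-options", {
    tableArn: exampleAwsDynamodbTable.arn,
    s3Bucket: exampleAwsS3Bucket.id,
    s3BucketOwner: "123456789012",       // cross-account destination owner (placeholder)
    s3Prefix: "exports/example-table",   // object key prefix inside the bucket
    exportFormat: "ION",                 // or DYNAMODB_JSON (the default)
    s3SseAlgorithm: "KMS",
    s3SseKmsKeyId: exampleAwsKmsKey.arn, // required when s3SseAlgorithm is KMS
});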

These omissions are intentional: the goal is to illustrate how each export feature is wired, not to provide drop-in data pipeline modules. See the DynamoDB TableExport resource reference for all available configuration options.

Frequently Asked Questions

Export Configuration & Types
What's the difference between full and incremental exports?
Full exports (FULL_EXPORT, the default) snapshot the entire table at a point in time. Incremental exports (INCREMENTAL_EXPORT) export data within a specific time range and require incrementalExportSpecification with exportFromTime and exportToTime.
Can I modify an export after creation?
No, nearly all properties (exportTime, exportType, s3Bucket, tableArn, etc.) are immutable and force resource recreation. Plan your export configuration carefully before creation.
How do I export data from a specific time range?
Set exportType to INCREMENTAL_EXPORT and provide incrementalExportSpecification with exportFromTime and exportToTime in RFC3339 format.
How do I export a table snapshot from a specific point in time?
Set exportTime to an RFC3339 timestamp. Omitting exportTime results in a snapshot from the current time.
Does my DynamoDB table need point-in-time recovery enabled?
Yes. Exports rely on point-in-time recovery data, so PITR must be enabled on the source table before you can export, as shown in the pointInTimeRecovery block in the basic usage example.
S3 & Storage Configuration
What encryption options can I use for exported data?
Use s3SseAlgorithm with either AES256 or KMS. If using KMS, provide s3SseKmsKeyId with your KMS key ID.
How do I configure S3 bucket permissions for exports?
Ensure the exporting principal can write to the destination bucket (for example, s3:PutObject and s3:AbortMultipartUpload), and for cross-account buckets add a bucket policy allowing those writes. See the AWS documentation for the exact requirements.
Export Formats & Output
What export formats are available?
You can export in DYNAMODB_JSON (default) or ION format using the exportFormat property. See AWS Documentation for details on each format’s structure.
How do I track the progress of my export?
Check the exportStatus output property, which shows IN_PROGRESS, COMPLETED, or FAILED. Additional outputs include startTime, endTime, itemCount, and billedSizeInBytes.
What is the manifest file and where can I find it?
The manifest file describes the export’s structure and location. Its S3 key is available in the manifestFilesS3Key output property after export completion.
