Create GCP Regional Persistent Disks

The gcp:compute/regionDisk:RegionDisk resource, part of the Pulumi GCP provider, provisions regional persistent disks that replicate data across two zones within a region for high availability. This guide focuses on three capabilities: snapshot-based disk creation, cross-region asynchronous replication, and multi-instance write access with Hyperdisk.

Regional disks may reference existing snapshots or primary disks for replication, and require two replica zones within the target region. The examples are intentionally small; combine them with your own compute instances, encryption keys, and performance tuning.

Create a regional disk from a snapshot

A common starting point is creating a regional disk from a snapshot, which restores a point-in-time copy of existing data and replicates it across two zones for high availability.

import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";

const disk = new gcp.compute.Disk("disk", {
    name: "my-disk",
    image: "debian-cloud/debian-11",
    size: 50,
    type: "pd-ssd",
    zone: "us-central1-a",
});
const snapdisk = new gcp.compute.Snapshot("snapdisk", {
    name: "my-snapshot",
    sourceDisk: disk.name,
    zone: "us-central1-a",
});
const regiondisk = new gcp.compute.RegionDisk("regiondisk", {
    name: "my-region-disk",
    snapshot: snapdisk.id,
    type: "pd-ssd",
    region: "us-central1",
    physicalBlockSizeBytes: 4096,
    replicaZones: [
        "us-central1-a",
        "us-central1-f",
    ],
});
import pulumi
import pulumi_gcp as gcp

disk = gcp.compute.Disk("disk",
    name="my-disk",
    image="debian-cloud/debian-11",
    size=50,
    type="pd-ssd",
    zone="us-central1-a")
snapdisk = gcp.compute.Snapshot("snapdisk",
    name="my-snapshot",
    source_disk=disk.name,
    zone="us-central1-a")
regiondisk = gcp.compute.RegionDisk("regiondisk",
    name="my-region-disk",
    snapshot=snapdisk.id,
    type="pd-ssd",
    region="us-central1",
    physical_block_size_bytes=4096,
    replica_zones=[
        "us-central1-a",
        "us-central1-f",
    ])
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/compute"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		disk, err := compute.NewDisk(ctx, "disk", &compute.DiskArgs{
			Name:  pulumi.String("my-disk"),
			Image: pulumi.String("debian-cloud/debian-11"),
			Size:  pulumi.Int(50),
			Type:  pulumi.String("pd-ssd"),
			Zone:  pulumi.String("us-central1-a"),
		})
		if err != nil {
			return err
		}
		snapdisk, err := compute.NewSnapshot(ctx, "snapdisk", &compute.SnapshotArgs{
			Name:       pulumi.String("my-snapshot"),
			SourceDisk: disk.Name,
			Zone:       pulumi.String("us-central1-a"),
		})
		if err != nil {
			return err
		}
		_, err = compute.NewRegionDisk(ctx, "regiondisk", &compute.RegionDiskArgs{
			Name:                   pulumi.String("my-region-disk"),
			Snapshot:               snapdisk.ID(),
			Type:                   pulumi.String("pd-ssd"),
			Region:                 pulumi.String("us-central1"),
			PhysicalBlockSizeBytes: pulumi.Int(4096),
			ReplicaZones: pulumi.StringArray{
				pulumi.String("us-central1-a"),
				pulumi.String("us-central1-f"),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;

return await Deployment.RunAsync(() => 
{
    var disk = new Gcp.Compute.Disk("disk", new()
    {
        Name = "my-disk",
        Image = "debian-cloud/debian-11",
        Size = 50,
        Type = "pd-ssd",
        Zone = "us-central1-a",
    });

    var snapdisk = new Gcp.Compute.Snapshot("snapdisk", new()
    {
        Name = "my-snapshot",
        SourceDisk = disk.Name,
        Zone = "us-central1-a",
    });

    var regiondisk = new Gcp.Compute.RegionDisk("regiondisk", new()
    {
        Name = "my-region-disk",
        Snapshot = snapdisk.Id,
        Type = "pd-ssd",
        Region = "us-central1",
        PhysicalBlockSizeBytes = 4096,
        ReplicaZones = new[]
        {
            "us-central1-a",
            "us-central1-f",
        },
    });

});
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.compute.Disk;
import com.pulumi.gcp.compute.DiskArgs;
import com.pulumi.gcp.compute.Snapshot;
import com.pulumi.gcp.compute.SnapshotArgs;
import com.pulumi.gcp.compute.RegionDisk;
import com.pulumi.gcp.compute.RegionDiskArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var disk = new Disk("disk", DiskArgs.builder()
            .name("my-disk")
            .image("debian-cloud/debian-11")
            .size(50)
            .type("pd-ssd")
            .zone("us-central1-a")
            .build());

        var snapdisk = new Snapshot("snapdisk", SnapshotArgs.builder()
            .name("my-snapshot")
            .sourceDisk(disk.name())
            .zone("us-central1-a")
            .build());

        var regiondisk = new RegionDisk("regiondisk", RegionDiskArgs.builder()
            .name("my-region-disk")
            .snapshot(snapdisk.id())
            .type("pd-ssd")
            .region("us-central1")
            .physicalBlockSizeBytes(4096)
            .replicaZones(
                "us-central1-a",
                "us-central1-f")
            .build());

    }
}
resources:
  regiondisk:
    type: gcp:compute:RegionDisk
    properties:
      name: my-region-disk
      snapshot: ${snapdisk.id}
      type: pd-ssd
      region: us-central1
      physicalBlockSizeBytes: 4096
      replicaZones:
        - us-central1-a
        - us-central1-f
  disk:
    type: gcp:compute:Disk
    properties:
      name: my-disk
      image: debian-cloud/debian-11
      size: 50
      type: pd-ssd
      zone: us-central1-a
  snapdisk:
    type: gcp:compute:Snapshot
    properties:
      name: my-snapshot
      sourceDisk: ${disk.name}
      zone: us-central1-a

The snapshot property references the snapshot created above, and Compute Engine restores its data into both zones listed in replicaZones. The type property sets the disk performance tier (pd-ssd for SSD-backed persistent disks), while physicalBlockSizeBytes controls the block size for I/O operations. Regional disks automatically replicate writes across both zones, so if one zone fails, your data remains accessible from the other.
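
To put the disk to work, attach it to a compute instance in one of the replica zones. The TypeScript sketch below continues from the example above and is an illustrative assumption rather than a verified recipe: the instance name, machine type, and network are placeholders, and it assumes a regional disk can be attached through attachedDisks by self link.

const vm = new gcp.compute.Instance("vm", {
    name: "my-vm",
    machineType: "e2-medium",
    // The instance's zone must be one of the disk's replica zones.
    zone: "us-central1-a",
    bootDisk: {
        initializeParams: {
            image: "debian-cloud/debian-11",
        },
    },
    attachedDisks: [{
        // Attach the regional disk by its self link.
        source: regiondisk.selfLink,
    }],
    networkInterfaces: [{
        network: "default",
    }],
});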

Replicate disks across regions asynchronously

Disaster recovery strategies often require replicating data asynchronously to a second geographic region, accepting a small replication lag in exchange for geographic redundancy.

import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";

const primary = new gcp.compute.RegionDisk("primary", {
    name: "primary-region-disk",
    type: "pd-ssd",
    region: "us-central1",
    physicalBlockSizeBytes: 4096,
    replicaZones: [
        "us-central1-a",
        "us-central1-f",
    ],
});
const secondary = new gcp.compute.RegionDisk("secondary", {
    name: "secondary-region-disk",
    type: "pd-ssd",
    region: "us-east1",
    physicalBlockSizeBytes: 4096,
    asyncPrimaryDisk: {
        disk: primary.id,
    },
    replicaZones: [
        "us-east1-b",
        "us-east1-c",
    ],
});
import pulumi
import pulumi_gcp as gcp

primary = gcp.compute.RegionDisk("primary",
    name="primary-region-disk",
    type="pd-ssd",
    region="us-central1",
    physical_block_size_bytes=4096,
    replica_zones=[
        "us-central1-a",
        "us-central1-f",
    ])
secondary = gcp.compute.RegionDisk("secondary",
    name="secondary-region-disk",
    type="pd-ssd",
    region="us-east1",
    physical_block_size_bytes=4096,
    async_primary_disk={
        "disk": primary.id,
    },
    replica_zones=[
        "us-east1-b",
        "us-east1-c",
    ])
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/compute"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		primary, err := compute.NewRegionDisk(ctx, "primary", &compute.RegionDiskArgs{
			Name:                   pulumi.String("primary-region-disk"),
			Type:                   pulumi.String("pd-ssd"),
			Region:                 pulumi.String("us-central1"),
			PhysicalBlockSizeBytes: pulumi.Int(4096),
			ReplicaZones: pulumi.StringArray{
				pulumi.String("us-central1-a"),
				pulumi.String("us-central1-f"),
			},
		})
		if err != nil {
			return err
		}
		_, err = compute.NewRegionDisk(ctx, "secondary", &compute.RegionDiskArgs{
			Name:                   pulumi.String("secondary-region-disk"),
			Type:                   pulumi.String("pd-ssd"),
			Region:                 pulumi.String("us-east1"),
			PhysicalBlockSizeBytes: pulumi.Int(4096),
			AsyncPrimaryDisk: &compute.RegionDiskAsyncPrimaryDiskArgs{
				Disk: primary.ID(),
			},
			ReplicaZones: pulumi.StringArray{
				pulumi.String("us-east1-b"),
				pulumi.String("us-east1-c"),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;

return await Deployment.RunAsync(() => 
{
    var primary = new Gcp.Compute.RegionDisk("primary", new()
    {
        Name = "primary-region-disk",
        Type = "pd-ssd",
        Region = "us-central1",
        PhysicalBlockSizeBytes = 4096,
        ReplicaZones = new[]
        {
            "us-central1-a",
            "us-central1-f",
        },
    });

    var secondary = new Gcp.Compute.RegionDisk("secondary", new()
    {
        Name = "secondary-region-disk",
        Type = "pd-ssd",
        Region = "us-east1",
        PhysicalBlockSizeBytes = 4096,
        AsyncPrimaryDisk = new Gcp.Compute.Inputs.RegionDiskAsyncPrimaryDiskArgs
        {
            Disk = primary.Id,
        },
        ReplicaZones = new[]
        {
            "us-east1-b",
            "us-east1-c",
        },
    });

});
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.compute.RegionDisk;
import com.pulumi.gcp.compute.RegionDiskArgs;
import com.pulumi.gcp.compute.inputs.RegionDiskAsyncPrimaryDiskArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var primary = new RegionDisk("primary", RegionDiskArgs.builder()
            .name("primary-region-disk")
            .type("pd-ssd")
            .region("us-central1")
            .physicalBlockSizeBytes(4096)
            .replicaZones(
                "us-central1-a",
                "us-central1-f")
            .build());

        var secondary = new RegionDisk("secondary", RegionDiskArgs.builder()
            .name("secondary-region-disk")
            .type("pd-ssd")
            .region("us-east1")
            .physicalBlockSizeBytes(4096)
            .asyncPrimaryDisk(RegionDiskAsyncPrimaryDiskArgs.builder()
                .disk(primary.id())
                .build())
            .replicaZones(
                "us-east1-b",
                "us-east1-c")
            .build());

    }
}
resources:
  primary:
    type: gcp:compute:RegionDisk
    properties:
      name: primary-region-disk
      type: pd-ssd
      region: us-central1
      physicalBlockSizeBytes: 4096
      replicaZones:
        - us-central1-a
        - us-central1-f
  secondary:
    type: gcp:compute:RegionDisk
    properties:
      name: secondary-region-disk
      type: pd-ssd
      region: us-east1
      physicalBlockSizeBytes: 4096
      asyncPrimaryDisk:
        disk: ${primary.id}
      replicaZones:
        - us-east1-b
        - us-east1-c

The asyncPrimaryDisk property links the secondary disk to a primary disk in a different region. Compute Engine continuously replicates changes from us-central1 to us-east1, providing geographic redundancy. Each disk maintains its own replicaZones for zone-level high availability within its region.
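
Because the secondary disk consumes primary.id, Pulumi waits for the primary disk before creating the secondary; no explicit dependency is needed. If you deploy this pattern across several stacks, you can lift the region pair into stack configuration. A minimal TypeScript sketch, where the config keys primaryRegion and secondaryRegion are illustrative names rather than provider settings:

const config = new pulumi.Config();
// Fall back to the regions from the example when no config value is set.
const primaryRegion = config.get("primaryRegion") ?? "us-central1";
const secondaryRegion = config.get("secondaryRegion") ?? "us-east1";

Pass primaryRegion and secondaryRegion into the RegionDisk arguments in place of the hard-coded strings.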

Enable multi-instance write access with Hyperdisk

Shared storage workloads like clustered databases require multiple instances to write to the same disk simultaneously.

import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";

const primary = new gcp.compute.RegionDisk("primary", {
    name: "my-region-hyperdisk",
    type: "hyperdisk-balanced-high-availability",
    region: "us-central1",
    replicaZones: [
        "us-central1-a",
        "us-central1-f",
    ],
    accessMode: "READ_WRITE_MANY",
});
import pulumi
import pulumi_gcp as gcp

primary = gcp.compute.RegionDisk("primary",
    name="my-region-hyperdisk",
    type="hyperdisk-balanced-high-availability",
    region="us-central1",
    replica_zones=[
        "us-central1-a",
        "us-central1-f",
    ],
    access_mode="READ_WRITE_MANY")
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/compute"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := compute.NewRegionDisk(ctx, "primary", &compute.RegionDiskArgs{
			Name:   pulumi.String("my-region-hyperdisk"),
			Type:   pulumi.String("hyperdisk-balanced-high-availability"),
			Region: pulumi.String("us-central1"),
			ReplicaZones: pulumi.StringArray{
				pulumi.String("us-central1-a"),
				pulumi.String("us-central1-f"),
			},
			AccessMode: pulumi.String("READ_WRITE_MANY"),
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;

return await Deployment.RunAsync(() => 
{
    var primary = new Gcp.Compute.RegionDisk("primary", new()
    {
        Name = "my-region-hyperdisk",
        Type = "hyperdisk-balanced-high-availability",
        Region = "us-central1",
        ReplicaZones = new[]
        {
            "us-central1-a",
            "us-central1-f",
        },
        AccessMode = "READ_WRITE_MANY",
    });

});
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.compute.RegionDisk;
import com.pulumi.gcp.compute.RegionDiskArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var primary = new RegionDisk("primary", RegionDiskArgs.builder()
            .name("my-region-hyperdisk")
            .type("hyperdisk-balanced-high-availability")
            .region("us-central1")
            .replicaZones(
                "us-central1-a",
                "us-central1-f")
            .accessMode("READ_WRITE_MANY")
            .build());

    }
}
resources:
  primary:
    type: gcp:compute:RegionDisk
    properties:
      name: my-region-hyperdisk
      type: hyperdisk-balanced-high-availability
      region: us-central1
      replicaZones:
        - us-central1-a
        - us-central1-f
      accessMode: READ_WRITE_MANY

The accessMode property set to READ_WRITE_MANY allows multiple instances to attach and write concurrently. This mode is only available with Hyperdisk types like hyperdisk-balanced-high-availability. The disk still replicates across zones for high availability, but now supports shared access patterns required by clustered applications.
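
Hyperdisk types also accept provisioned performance settings on this resource (see the FAQ below for ranges). A minimal TypeScript sketch with illustrative values; check the limits for your disk type and size before using them:

const tuned = new gcp.compute.RegionDisk("tuned", {
    name: "my-tuned-hyperdisk",
    type: "hyperdisk-balanced-high-availability",
    region: "us-central1",
    replicaZones: [
        "us-central1-a",
        "us-central1-f",
    ],
    size: 100,
    // Illustrative values; valid ranges depend on disk type and size.
    provisionedIops: 3000,
    provisionedThroughput: 140,
    accessMode: "READ_WRITE_MANY",
});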

Beyond these examples

These snippets focus on specific regional disk features: snapshot-based disk creation, cross-region async replication, and multi-instance write access. They’re intentionally minimal rather than full storage solutions.

The examples may reference pre-existing infrastructure such as zonal disks and snapshots for snapshot-based creation, and primary regional disks for async replication. They focus on configuring the regional disk rather than provisioning everything around it.

To keep things focused, common regional disk patterns are omitted, including:

  • Customer-managed encryption keys (diskEncryptionKey; see the sketch below)
  • Guest OS features and licenses for bootable disks
  • Performance tuning (provisionedIops, provisionedThroughput)
  • Snapshot lifecycle management (createSnapshotBeforeDestroy)

These omissions are intentional: the goal is to illustrate how each regional disk feature is wired, not to provide drop-in storage modules. See the RegionDisk resource reference for all available configuration options.
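
As one example, here is a minimal sketch of the first omitted option, wiring a customer-managed KMS key through diskEncryptionKey. The key path is a placeholder, and the Compute Engine service agent must have encrypt/decrypt access to the key:

const encrypted = new gcp.compute.RegionDisk("encrypted", {
    name: "my-encrypted-region-disk",
    type: "pd-ssd",
    region: "us-central1",
    replicaZones: [
        "us-central1-a",
        "us-central1-f",
    ],
    diskEncryptionKey: {
        // Placeholder key path; supply your own KMS key.
        kmsKeyName: "projects/my-project/locations/us-central1/keyRings/my-ring/cryptoKeys/my-key",
    },
});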


Frequently Asked Questions

Security & Encryption
Are my customer-supplied encryption keys stored securely in Pulumi state?
No, customer-supplied encryption keys (diskEncryptionKey.rawKey and diskEncryptionKey.rsaEncryptedKey) are stored in plain text in the Pulumi state file. Ensure your state file is properly secured when using customer-supplied encryption keys.
Disk Configuration & Immutability
What properties can't I change after creating a regional disk?
The following properties are immutable: name, region, replicaZones, type, physicalBlockSizeBytes, snapshot, sourceDisk, asyncPrimaryDisk, diskEncryptionKey, guestOsFeatures, licenses, description, and project. You must recreate the disk to change any of these.
What block sizes are supported for regional disks?
Regional disks support physical block sizes of 4096 bytes and 16384 bytes. This property is immutable after creation.
Access Modes & Multi-Instance Attachment
Can I attach a regional disk to multiple instances simultaneously?
Yes, but only with Hyperdisk types. Set accessMode to READ_WRITE_MANY (multiple instances with read/write) or READ_ONLY_MANY (multiple instances with read-only). The default READ_WRITE_SINGLE allows only single-instance attachment.
What disk types support multi-instance attachment?
Only Hyperdisk types (such as hyperdisk-balanced-high-availability) support the READ_WRITE_MANY access mode required for multi-instance attachment.
Replication & High Availability
How do I set up cross-region disk replication?
Create a secondary disk in a different region with asyncPrimaryDisk.disk pointing to the primary disk’s ID. The secondary disk will asynchronously replicate data from the primary.
How many zones must I specify for replicaZones?
You must specify exactly two zones in the replicaZones array; Compute Engine replicates the disk between them. For example, ["us-central1-a", "us-central1-f"].
Performance & Sizing
How do I configure IOPS for high-performance workloads?
Use provisionedIops to set IOPS for Extreme persistent disks. Values must be between 10,000 and 120,000.
What's the minimum disk size when creating from a snapshot or image?
The size value must not be less than the size of the source snapshot or source image you’re using to create the disk.
How do I configure disk throughput?
Set provisionedThroughput to specify throughput in MB per second. Values must be greater than or equal to 1.
Lifecycle Management
Can I automatically create a snapshot before deleting a disk?
Yes, set createSnapshotBeforeDestroy to true. The snapshot will be named {{disk-name}}-YYYYMMDD-HHmm by default (customizable with createSnapshotBeforeDestroyPrefix). Customer-managed encryption keys are automatically reused for the snapshot.
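A minimal TypeScript sketch of enabling this behavior:

const disk = new gcp.compute.RegionDisk("disk", {
    name: "my-region-disk",
    type: "pd-ssd",
    region: "us-central1",
    replicaZones: ["us-central1-a", "us-central1-f"],
    // Snapshot the disk automatically before it is destroyed.
    createSnapshotBeforeDestroy: true,
});
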
Should I still configure the interface property?
No, the interface property is deprecated and will be removed in a future release. Disk interfaces are now automatically determined on attachment, so you can safely remove this property from your configurations.

Using a different cloud?

Explore storage guides for other cloud providers: