The gcp:compute/regionDisk:RegionDisk resource, part of the Pulumi GCP provider, provisions regional persistent disks that replicate data across multiple zones within a region for high availability. This guide focuses on four capabilities: snapshot-based disk creation, cross-region async replication, guest OS features and licensing, and multi-instance write access with Hyperdisk.
Regional disks may reference existing snapshots or primary disks for replication, and require zone availability within the target region. The examples are intentionally small. Combine them with your own VM instances, encryption keys, and performance tuning.
Create a regional disk from a snapshot
Most deployments start by creating a disk from a snapshot, which provides a point-in-time copy of data replicated across zones for high availability.
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
// Zonal source disk: holds the data we will snapshot and restore regionally.
const disk = new gcp.compute.Disk("disk", {
name: "my-disk",
image: "debian-cloud/debian-11",
size: 50,
type: "pd-ssd",
zone: "us-central1-a",
});
// Point-in-time snapshot of the zonal disk.
const snapdisk = new gcp.compute.Snapshot("snapdisk", {
name: "my-snapshot",
sourceDisk: disk.name,
zone: "us-central1-a",
});
// Regional disk restored from the snapshot, replicated across two zones
// in us-central1 for zone-level redundancy.
const regiondisk = new gcp.compute.RegionDisk("regiondisk", {
name: "my-region-disk",
snapshot: snapdisk.id,
type: "pd-ssd",
region: "us-central1",
physicalBlockSizeBytes: 4096,
replicaZones: [
"us-central1-a",
"us-central1-f",
],
});
import pulumi
import pulumi_gcp as gcp
# Zonal source disk: holds the data we will snapshot and restore regionally.
disk = gcp.compute.Disk("disk",
name="my-disk",
image="debian-cloud/debian-11",
size=50,
type="pd-ssd",
zone="us-central1-a")
# Point-in-time snapshot of the zonal disk.
snapdisk = gcp.compute.Snapshot("snapdisk",
name="my-snapshot",
source_disk=disk.name,
zone="us-central1-a")
# Regional disk restored from the snapshot, replicated across two zones
# in us-central1 for zone-level redundancy.
regiondisk = gcp.compute.RegionDisk("regiondisk",
name="my-region-disk",
snapshot=snapdisk.id,
type="pd-ssd",
region="us-central1",
physical_block_size_bytes=4096,
replica_zones=[
"us-central1-a",
"us-central1-f",
])
package main
import (
"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/compute"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
// Zonal source disk: holds the data we will snapshot and restore regionally.
disk, err := compute.NewDisk(ctx, "disk", &compute.DiskArgs{
Name: pulumi.String("my-disk"),
Image: pulumi.String("debian-cloud/debian-11"),
Size: pulumi.Int(50),
Type: pulumi.String("pd-ssd"),
Zone: pulumi.String("us-central1-a"),
})
if err != nil {
return err
}
// Point-in-time snapshot of the zonal disk.
snapdisk, err := compute.NewSnapshot(ctx, "snapdisk", &compute.SnapshotArgs{
Name: pulumi.String("my-snapshot"),
SourceDisk: disk.Name,
Zone: pulumi.String("us-central1-a"),
})
if err != nil {
return err
}
// Regional disk restored from the snapshot, replicated across two zones
// in us-central1 for zone-level redundancy.
_, err = compute.NewRegionDisk(ctx, "regiondisk", &compute.RegionDiskArgs{
Name: pulumi.String("my-region-disk"),
Snapshot: snapdisk.ID(),
Type: pulumi.String("pd-ssd"),
Region: pulumi.String("us-central1"),
PhysicalBlockSizeBytes: pulumi.Int(4096),
ReplicaZones: pulumi.StringArray{
pulumi.String("us-central1-a"),
pulumi.String("us-central1-f"),
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() =>
{
// Zonal source disk: holds the data we will snapshot and restore regionally.
var disk = new Gcp.Compute.Disk("disk", new()
{
Name = "my-disk",
Image = "debian-cloud/debian-11",
Size = 50,
Type = "pd-ssd",
Zone = "us-central1-a",
});
// Point-in-time snapshot of the zonal disk.
var snapdisk = new Gcp.Compute.Snapshot("snapdisk", new()
{
Name = "my-snapshot",
SourceDisk = disk.Name,
Zone = "us-central1-a",
});
// Regional disk restored from the snapshot, replicated across two zones
// in us-central1 for zone-level redundancy.
var regiondisk = new Gcp.Compute.RegionDisk("regiondisk", new()
{
Name = "my-region-disk",
Snapshot = snapdisk.Id,
Type = "pd-ssd",
Region = "us-central1",
PhysicalBlockSizeBytes = 4096,
ReplicaZones = new[]
{
"us-central1-a",
"us-central1-f",
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.compute.Disk;
import com.pulumi.gcp.compute.DiskArgs;
import com.pulumi.gcp.compute.Snapshot;
import com.pulumi.gcp.compute.SnapshotArgs;
import com.pulumi.gcp.compute.RegionDisk;
import com.pulumi.gcp.compute.RegionDiskArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
// Zonal source disk: holds the data we will snapshot and restore regionally.
var disk = new Disk("disk", DiskArgs.builder()
.name("my-disk")
.image("debian-cloud/debian-11")
.size(50)
.type("pd-ssd")
.zone("us-central1-a")
.build());
// Point-in-time snapshot of the zonal disk.
var snapdisk = new Snapshot("snapdisk", SnapshotArgs.builder()
.name("my-snapshot")
.sourceDisk(disk.name())
.zone("us-central1-a")
.build());
// Regional disk restored from the snapshot, replicated across two zones
// in us-central1 for zone-level redundancy.
var regiondisk = new RegionDisk("regiondisk", RegionDiskArgs.builder()
.name("my-region-disk")
.snapshot(snapdisk.id())
.type("pd-ssd")
.region("us-central1")
.physicalBlockSizeBytes(4096)
.replicaZones(
"us-central1-a",
"us-central1-f")
.build());
}
}
resources:
# Regional disk restored from the snapshot, replicated across two zones
# in us-central1 for zone-level redundancy.
regiondisk:
type: gcp:compute:RegionDisk
properties:
name: my-region-disk
snapshot: ${snapdisk.id}
type: pd-ssd
region: us-central1
physicalBlockSizeBytes: 4096
replicaZones:
- us-central1-a
- us-central1-f
# Zonal source disk: holds the data that is snapshotted and restored regionally.
disk:
type: gcp:compute:Disk
properties:
name: my-disk
image: debian-cloud/debian-11
size: 50
type: pd-ssd
zone: us-central1-a
# Point-in-time snapshot of the zonal disk.
snapdisk:
type: gcp:compute:Snapshot
properties:
name: my-snapshot
sourceDisk: ${disk.name}
zone: us-central1-a
The snapshot property references an existing snapshot, which the regional disk restores across the specified replicaZones. The type property determines performance characteristics (pd-ssd for solid-state drives), while physicalBlockSizeBytes sets the block size for I/O operations. Data is automatically replicated between us-central1-a and us-central1-f, providing zone-level redundancy.
Replicate disks across regions asynchronously
Disaster recovery strategies often require data replication across geographic regions. Asynchronous replication creates a secondary disk that continuously receives updates from the primary.
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
// Primary regional disk in us-central1 (the replication source).
const primary = new gcp.compute.RegionDisk("primary", {
name: "primary-region-disk",
type: "pd-ssd",
region: "us-central1",
physicalBlockSizeBytes: 4096,
replicaZones: [
"us-central1-a",
"us-central1-f",
],
});
// Secondary disk in us-east1; asyncPrimaryDisk links it to the primary so
// updates flow cross-region asynchronously (disaster recovery).
const secondary = new gcp.compute.RegionDisk("secondary", {
name: "secondary-region-disk",
type: "pd-ssd",
region: "us-east1",
physicalBlockSizeBytes: 4096,
asyncPrimaryDisk: {
disk: primary.id,
},
replicaZones: [
"us-east1-b",
"us-east1-c",
],
});
import pulumi
import pulumi_gcp as gcp
# Primary regional disk in us-central1 (the replication source).
primary = gcp.compute.RegionDisk("primary",
name="primary-region-disk",
type="pd-ssd",
region="us-central1",
physical_block_size_bytes=4096,
replica_zones=[
"us-central1-a",
"us-central1-f",
])
# Secondary disk in us-east1; async_primary_disk links it to the primary so
# updates flow cross-region asynchronously (disaster recovery).
secondary = gcp.compute.RegionDisk("secondary",
name="secondary-region-disk",
type="pd-ssd",
region="us-east1",
physical_block_size_bytes=4096,
async_primary_disk={
"disk": primary.id,
},
replica_zones=[
"us-east1-b",
"us-east1-c",
])
package main
import (
"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/compute"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
// Primary regional disk in us-central1 (the replication source).
primary, err := compute.NewRegionDisk(ctx, "primary", &compute.RegionDiskArgs{
Name: pulumi.String("primary-region-disk"),
Type: pulumi.String("pd-ssd"),
Region: pulumi.String("us-central1"),
PhysicalBlockSizeBytes: pulumi.Int(4096),
ReplicaZones: pulumi.StringArray{
pulumi.String("us-central1-a"),
pulumi.String("us-central1-f"),
},
})
if err != nil {
return err
}
// Secondary disk in us-east1; AsyncPrimaryDisk links it to the primary so
// updates flow cross-region asynchronously (disaster recovery).
_, err = compute.NewRegionDisk(ctx, "secondary", &compute.RegionDiskArgs{
Name: pulumi.String("secondary-region-disk"),
Type: pulumi.String("pd-ssd"),
Region: pulumi.String("us-east1"),
PhysicalBlockSizeBytes: pulumi.Int(4096),
AsyncPrimaryDisk: &compute.RegionDiskAsyncPrimaryDiskArgs{
Disk: primary.ID(),
},
ReplicaZones: pulumi.StringArray{
pulumi.String("us-east1-b"),
pulumi.String("us-east1-c"),
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() =>
{
// Primary regional disk in us-central1 (the replication source).
var primary = new Gcp.Compute.RegionDisk("primary", new()
{
Name = "primary-region-disk",
Type = "pd-ssd",
Region = "us-central1",
PhysicalBlockSizeBytes = 4096,
ReplicaZones = new[]
{
"us-central1-a",
"us-central1-f",
},
});
// Secondary disk in us-east1; AsyncPrimaryDisk links it to the primary so
// updates flow cross-region asynchronously (disaster recovery).
var secondary = new Gcp.Compute.RegionDisk("secondary", new()
{
Name = "secondary-region-disk",
Type = "pd-ssd",
Region = "us-east1",
PhysicalBlockSizeBytes = 4096,
AsyncPrimaryDisk = new Gcp.Compute.Inputs.RegionDiskAsyncPrimaryDiskArgs
{
Disk = primary.Id,
},
ReplicaZones = new[]
{
"us-east1-b",
"us-east1-c",
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.compute.RegionDisk;
import com.pulumi.gcp.compute.RegionDiskArgs;
import com.pulumi.gcp.compute.inputs.RegionDiskAsyncPrimaryDiskArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
// Primary regional disk in us-central1 (the replication source).
var primary = new RegionDisk("primary", RegionDiskArgs.builder()
.name("primary-region-disk")
.type("pd-ssd")
.region("us-central1")
.physicalBlockSizeBytes(4096)
.replicaZones(
"us-central1-a",
"us-central1-f")
.build());
// Secondary disk in us-east1; asyncPrimaryDisk links it to the primary so
// updates flow cross-region asynchronously (disaster recovery).
var secondary = new RegionDisk("secondary", RegionDiskArgs.builder()
.name("secondary-region-disk")
.type("pd-ssd")
.region("us-east1")
.physicalBlockSizeBytes(4096)
.asyncPrimaryDisk(RegionDiskAsyncPrimaryDiskArgs.builder()
.disk(primary.id())
.build())
.replicaZones(
"us-east1-b",
"us-east1-c")
.build());
}
}
resources:
# Primary regional disk in us-central1 (the replication source).
primary:
type: gcp:compute:RegionDisk
properties:
name: primary-region-disk
type: pd-ssd
region: us-central1
physicalBlockSizeBytes: 4096
replicaZones:
- us-central1-a
- us-central1-f
# Secondary disk in us-east1; asyncPrimaryDisk links it to the primary so
# updates flow cross-region asynchronously (disaster recovery).
secondary:
type: gcp:compute:RegionDisk
properties:
name: secondary-region-disk
type: pd-ssd
region: us-east1
physicalBlockSizeBytes: 4096
asyncPrimaryDisk:
disk: ${primary.id}
replicaZones:
- us-east1-b
- us-east1-c
The asyncPrimaryDisk property links the secondary disk to a primary disk in a different region. Updates flow from the primary (us-central1) to the secondary (us-east1) asynchronously, providing cross-region disaster recovery. Each disk maintains its own replicaZones for zone-level redundancy within its region.
Configure guest OS features and licenses
Bootable disks for Windows or specialized workloads require specific guest OS features and license declarations.
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
// Bootable regional disk with guest OS features (Secure Boot, multi-IP
// subnet, Windows) and a Windows Server license. These settings are
// immutable after the disk is created.
const regiondisk = new gcp.compute.RegionDisk("regiondisk", {
name: "my-region-features-disk",
type: "pd-ssd",
region: "us-central1",
physicalBlockSizeBytes: 4096,
guestOsFeatures: [
{
type: "SECURE_BOOT",
},
{
type: "MULTI_IP_SUBNET",
},
{
type: "WINDOWS",
},
],
licenses: ["https://www.googleapis.com/compute/v1/projects/windows-cloud/global/licenses/windows-server-core"],
replicaZones: [
"us-central1-a",
"us-central1-f",
],
});
import pulumi
import pulumi_gcp as gcp
# Bootable regional disk with guest OS features (Secure Boot, multi-IP
# subnet, Windows) and a Windows Server license. These settings are
# immutable after the disk is created.
regiondisk = gcp.compute.RegionDisk("regiondisk",
name="my-region-features-disk",
type="pd-ssd",
region="us-central1",
physical_block_size_bytes=4096,
guest_os_features=[
{
"type": "SECURE_BOOT",
},
{
"type": "MULTI_IP_SUBNET",
},
{
"type": "WINDOWS",
},
],
licenses=["https://www.googleapis.com/compute/v1/projects/windows-cloud/global/licenses/windows-server-core"],
replica_zones=[
"us-central1-a",
"us-central1-f",
])
package main
import (
"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/compute"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
// Bootable regional disk with guest OS features (Secure Boot, multi-IP
// subnet, Windows) and a Windows Server license. These settings are
// immutable after the disk is created.
_, err := compute.NewRegionDisk(ctx, "regiondisk", &compute.RegionDiskArgs{
Name: pulumi.String("my-region-features-disk"),
Type: pulumi.String("pd-ssd"),
Region: pulumi.String("us-central1"),
PhysicalBlockSizeBytes: pulumi.Int(4096),
GuestOsFeatures: compute.RegionDiskGuestOsFeatureArray{
&compute.RegionDiskGuestOsFeatureArgs{
Type: pulumi.String("SECURE_BOOT"),
},
&compute.RegionDiskGuestOsFeatureArgs{
Type: pulumi.String("MULTI_IP_SUBNET"),
},
&compute.RegionDiskGuestOsFeatureArgs{
Type: pulumi.String("WINDOWS"),
},
},
Licenses: pulumi.StringArray{
pulumi.String("https://www.googleapis.com/compute/v1/projects/windows-cloud/global/licenses/windows-server-core"),
},
ReplicaZones: pulumi.StringArray{
pulumi.String("us-central1-a"),
pulumi.String("us-central1-f"),
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() =>
{
// Bootable regional disk with guest OS features (Secure Boot, multi-IP
// subnet, Windows) and a Windows Server license. These settings are
// immutable after the disk is created.
var regiondisk = new Gcp.Compute.RegionDisk("regiondisk", new()
{
Name = "my-region-features-disk",
Type = "pd-ssd",
Region = "us-central1",
PhysicalBlockSizeBytes = 4096,
GuestOsFeatures = new[]
{
new Gcp.Compute.Inputs.RegionDiskGuestOsFeatureArgs
{
Type = "SECURE_BOOT",
},
new Gcp.Compute.Inputs.RegionDiskGuestOsFeatureArgs
{
Type = "MULTI_IP_SUBNET",
},
new Gcp.Compute.Inputs.RegionDiskGuestOsFeatureArgs
{
Type = "WINDOWS",
},
},
Licenses = new[]
{
"https://www.googleapis.com/compute/v1/projects/windows-cloud/global/licenses/windows-server-core",
},
ReplicaZones = new[]
{
"us-central1-a",
"us-central1-f",
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.compute.RegionDisk;
import com.pulumi.gcp.compute.RegionDiskArgs;
import com.pulumi.gcp.compute.inputs.RegionDiskGuestOsFeatureArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
// Bootable regional disk with guest OS features (Secure Boot, multi-IP
// subnet, Windows) and a Windows Server license. These settings are
// immutable after the disk is created.
var regiondisk = new RegionDisk("regiondisk", RegionDiskArgs.builder()
.name("my-region-features-disk")
.type("pd-ssd")
.region("us-central1")
.physicalBlockSizeBytes(4096)
.guestOsFeatures(
RegionDiskGuestOsFeatureArgs.builder()
.type("SECURE_BOOT")
.build(),
RegionDiskGuestOsFeatureArgs.builder()
.type("MULTI_IP_SUBNET")
.build(),
RegionDiskGuestOsFeatureArgs.builder()
.type("WINDOWS")
.build())
.licenses("https://www.googleapis.com/compute/v1/projects/windows-cloud/global/licenses/windows-server-core")
.replicaZones(
"us-central1-a",
"us-central1-f")
.build());
}
}
resources:
# Bootable regional disk with guest OS features (Secure Boot, multi-IP
# subnet, Windows) and a Windows Server license. These settings are
# immutable after the disk is created.
regiondisk:
type: gcp:compute:RegionDisk
properties:
name: my-region-features-disk
type: pd-ssd
region: us-central1
physicalBlockSizeBytes: 4096
guestOsFeatures:
- type: SECURE_BOOT
- type: MULTI_IP_SUBNET
- type: WINDOWS
licenses:
- https://www.googleapis.com/compute/v1/projects/windows-cloud/global/licenses/windows-server-core
replicaZones:
- us-central1-a
- us-central1-f
The guestOsFeatures array enables capabilities like SECURE_BOOT, MULTI_IP_SUBNET, and WINDOWS support. The licenses property declares Windows Server licensing, which GCP validates during instance boot. These settings are immutable after disk creation and apply only to bootable disks.
Enable multi-instance write access with Hyperdisk
Shared storage scenarios like clustered databases require multiple instances to write to the same disk simultaneously.
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
// Hyperdisk regional disk; accessMode READ_WRITE_MANY lets multiple VM
// instances attach and write concurrently (valid only for Hyperdisk types).
const primary = new gcp.compute.RegionDisk("primary", {
name: "my-region-hyperdisk",
type: "hyperdisk-balanced-high-availability",
region: "us-central1",
replicaZones: [
"us-central1-a",
"us-central1-f",
],
accessMode: "READ_WRITE_MANY",
});
import pulumi
import pulumi_gcp as gcp
# Hyperdisk regional disk; access_mode READ_WRITE_MANY lets multiple VM
# instances attach and write concurrently (valid only for Hyperdisk types).
primary = gcp.compute.RegionDisk("primary",
name="my-region-hyperdisk",
type="hyperdisk-balanced-high-availability",
region="us-central1",
replica_zones=[
"us-central1-a",
"us-central1-f",
],
access_mode="READ_WRITE_MANY")
package main
import (
"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/compute"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
// Hyperdisk regional disk; AccessMode READ_WRITE_MANY lets multiple VM
// instances attach and write concurrently (valid only for Hyperdisk types).
_, err := compute.NewRegionDisk(ctx, "primary", &compute.RegionDiskArgs{
Name: pulumi.String("my-region-hyperdisk"),
Type: pulumi.String("hyperdisk-balanced-high-availability"),
Region: pulumi.String("us-central1"),
ReplicaZones: pulumi.StringArray{
pulumi.String("us-central1-a"),
pulumi.String("us-central1-f"),
},
AccessMode: pulumi.String("READ_WRITE_MANY"),
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() =>
{
// Hyperdisk regional disk; AccessMode READ_WRITE_MANY lets multiple VM
// instances attach and write concurrently (valid only for Hyperdisk types).
var primary = new Gcp.Compute.RegionDisk("primary", new()
{
Name = "my-region-hyperdisk",
Type = "hyperdisk-balanced-high-availability",
Region = "us-central1",
ReplicaZones = new[]
{
"us-central1-a",
"us-central1-f",
},
AccessMode = "READ_WRITE_MANY",
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.compute.RegionDisk;
import com.pulumi.gcp.compute.RegionDiskArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
// Hyperdisk regional disk; accessMode READ_WRITE_MANY lets multiple VM
// instances attach and write concurrently (valid only for Hyperdisk types).
var primary = new RegionDisk("primary", RegionDiskArgs.builder()
.name("my-region-hyperdisk")
.type("hyperdisk-balanced-high-availability")
.region("us-central1")
.replicaZones(
"us-central1-a",
"us-central1-f")
.accessMode("READ_WRITE_MANY")
.build());
}
}
resources:
# Hyperdisk regional disk; accessMode READ_WRITE_MANY lets multiple VM
# instances attach and write concurrently (valid only for Hyperdisk types).
primary:
type: gcp:compute:RegionDisk
properties:
name: my-region-hyperdisk
type: hyperdisk-balanced-high-availability
region: us-central1
replicaZones:
- us-central1-a
- us-central1-f
accessMode: READ_WRITE_MANY
The accessMode property set to READ_WRITE_MANY allows multiple VM instances to mount and write to the disk concurrently. This requires the hyperdisk-balanced-high-availability disk type, which provides the necessary consistency guarantees for shared write access. Standard disk types only support single-instance attachment.
Beyond these examples
These snippets focus on specific regional disk features: snapshot-based disk creation, cross-region async replication, guest OS features and licensing, and multi-instance write access. They’re intentionally minimal rather than full storage solutions.
The examples may reference pre-existing infrastructure such as zonal disks and snapshots for snapshot-based creation, and primary regional disks for async replication. They focus on configuring the regional disk rather than provisioning the surrounding infrastructure.
To keep things focused, common regional disk patterns are omitted, including:
- Encryption with customer-managed keys (diskEncryptionKey)
- Performance tuning (provisionedIops, provisionedThroughput)
- Disk resizing and capacity management
- Snapshot creation before disk deletion (createSnapshotBeforeDestroy)
These omissions are intentional: the goal is to illustrate how each regional disk feature is wired, not provide drop-in storage modules. See the RegionDisk resource reference for all available configuration options.
Let's create GCP Regional Persistent Disks
Get started with Pulumi Cloud, then follow our quick setup guide to deploy this infrastructure.
Try Pulumi Cloud for FREE
Frequently Asked Questions
Configuration & Immutability
Which properties are immutable? The following properties force replacement when changed: name, region, replicaZones, physicalBlockSizeBytes, project, type, guestOsFeatures, licenses, snapshot, sourceDisk, asyncPrimaryDisk, description, diskEncryptionKey, and sourceSnapshotEncryptionKey. Plan these carefully during initial creation.
What are the naming rules? The name must match the regular expression [a-z]([-a-z0-9]*[a-z0-9])?: the first character must be a lowercase letter, followed by dashes, lowercase letters, or digits, and the last character cannot be a dash.
Is the interface field still supported? The interface field is deprecated and will be removed in a future release. Disk interfaces are automatically determined on attachment, so you can safely remove this field from your configuration.
Replication & High Availability
How do I choose replica zones? Specify the zones within the region in replicaZones. Examples show configurations like ["us-central1-a", "us-central1-f"] for high availability within a region.
How do I replicate across regions? Create a secondary regional disk with asyncPrimaryDisk.disk pointing to the primary disk's ID.
What access modes are available? READ_WRITE_SINGLE (default) allows one instance to attach in read-write mode. READ_WRITE_MANY allows multiple instances to attach in read-write mode. READ_ONLY_SINGLE allows multiple instances in read-only mode. Access modes are only valid for Hyperdisk disk types.
How do I enable multi-writer access? Set accessMode to READ_WRITE_MANY and use a Hyperdisk type like hyperdisk-balanced-high-availability.
Performance & Sizing
What are the IOPS limits? provisionedIops must be between 10,000 and 120,000.
What size constraints apply? If you specify size along with sourceImage or sourceSnapshot, the size must not be less than the source's size.
Data Protection & Snapshots
How do I protect data before destroying a disk? Set createSnapshotBeforeDestroy to true. A snapshot will be created before the disk is destroyed, with the default name format {{disk-name}}-YYYYMMDD-HHmm. Customer-managed encryption keys will be reused for the snapshot.
How do I create a disk from a snapshot? Set the snapshot property to the snapshot ID or URL. You can provide partial or full URLs like global/snapshots/snapshot or projects/project/global/snapshots/snapshot.
Labels & Metadata
How are labels managed? The labels field is non-authoritative and only manages labels present in your configuration. Use effectiveLabels to see all labels on the resource, including those set by other clients and services.