Configure GCP Compute Resource Policies

The gcp:compute/resourcePolicy:ResourcePolicy resource, part of the Pulumi GCP provider, defines policies that can be attached to compute resources to schedule snapshots, control instance lifecycles, or optimize VM placement. This guide focuses on four capabilities: snapshot scheduling with retention, instance start/stop automation, VM placement for low-latency workloads, and GPU topology configuration.

Resource policies are created independently but must be attached to persistent disks or VM instances to take effect. The examples are intentionally small. Combine them with your own disk and instance resources to apply these policies.

Schedule daily snapshots at a fixed time

Most snapshot policies start with a simple daily schedule that captures disk state at a consistent time, providing baseline backup coverage.

import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";

// Regional policy: snapshot attached disks every day at the given 24-hour start time.
const foo = new gcp.compute.ResourcePolicy("foo", {
    name: "gce-policy",
    region: "us-central1",
    snapshotSchedulePolicy: {
        schedule: {
            dailySchedule: {
                daysInCycle: 1, // run every day
                startTime: "04:00", // 24-hour format
            },
        },
    },
});
import pulumi
import pulumi_gcp as gcp

# Regional policy: snapshot attached disks every day (days_in_cycle=1)
# at the given 24-hour start time.
foo = gcp.compute.ResourcePolicy("foo",
    name="gce-policy",
    region="us-central1",
    snapshot_schedule_policy={
        "schedule": {
            "daily_schedule": {
                "days_in_cycle": 1,
                "start_time": "04:00",
            },
        },
    })
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/compute"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

// Creates a regional resource policy that snapshots attached disks
// every day (DaysInCycle: 1) at the 24-hour StartTime.
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := compute.NewResourcePolicy(ctx, "foo", &compute.ResourcePolicyArgs{
			Name:   pulumi.String("gce-policy"),
			Region: pulumi.String("us-central1"),
			SnapshotSchedulePolicy: &compute.ResourcePolicySnapshotSchedulePolicyArgs{
				Schedule: &compute.ResourcePolicySnapshotSchedulePolicyScheduleArgs{
					DailySchedule: &compute.ResourcePolicySnapshotSchedulePolicyScheduleDailyScheduleArgs{
						DaysInCycle: pulumi.Int(1),
						StartTime:   pulumi.String("04:00"),
					},
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;

return await Deployment.RunAsync(() => 
{
    // Regional policy: snapshot attached disks every day at the 24-hour StartTime.
    var foo = new Gcp.Compute.ResourcePolicy("foo", new()
    {
        Name = "gce-policy",
        Region = "us-central1",
        SnapshotSchedulePolicy = new Gcp.Compute.Inputs.ResourcePolicySnapshotSchedulePolicyArgs
        {
            Schedule = new Gcp.Compute.Inputs.ResourcePolicySnapshotSchedulePolicyScheduleArgs
            {
                DailySchedule = new Gcp.Compute.Inputs.ResourcePolicySnapshotSchedulePolicyScheduleDailyScheduleArgs
                {
                    DaysInCycle = 1,
                    StartTime = "04:00",
                },
            },
        },
    });

});
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.compute.ResourcePolicy;
import com.pulumi.gcp.compute.ResourcePolicyArgs;
import com.pulumi.gcp.compute.inputs.ResourcePolicySnapshotSchedulePolicyArgs;
import com.pulumi.gcp.compute.inputs.ResourcePolicySnapshotSchedulePolicyScheduleArgs;
import com.pulumi.gcp.compute.inputs.ResourcePolicySnapshotSchedulePolicyScheduleDailyScheduleArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        // Regional policy: snapshot attached disks every day at the 24-hour startTime.
        var foo = new ResourcePolicy("foo", ResourcePolicyArgs.builder()
            .name("gce-policy")
            .region("us-central1")
            .snapshotSchedulePolicy(ResourcePolicySnapshotSchedulePolicyArgs.builder()
                .schedule(ResourcePolicySnapshotSchedulePolicyScheduleArgs.builder()
                    .dailySchedule(ResourcePolicySnapshotSchedulePolicyScheduleDailyScheduleArgs.builder()
                        .daysInCycle(1)
                        .startTime("04:00")
                        .build())
                    .build())
                .build())
            .build());

    }
}
resources:
  foo:
    type: gcp:compute:ResourcePolicy
    properties:
      name: gce-policy
      region: us-central1
      snapshotSchedulePolicy:
        schedule:
          dailySchedule:
            daysInCycle: 1
            # Quoted so YAML parsers cannot read the time as a sexagesimal
            # number instead of the intended "HH:MM" string.
            startTime: "04:00"

The snapshotSchedulePolicy defines when snapshots are created. The dailySchedule runs every day (daysInCycle: 1) at the specified startTime in 24-hour format. Attach this policy to a persistent disk to begin automated snapshots.

Configure snapshot retention and storage location

Production snapshot policies typically add retention limits to control storage costs and specify storage locations for compliance requirements.

import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";

// Snapshot policy with an hourly cycle plus retention and snapshot properties.
const bar = new gcp.compute.ResourcePolicy("bar", {
    name: "gce-policy",
    region: "us-central1",
    snapshotSchedulePolicy: {
        schedule: {
            hourlySchedule: {
                hoursInCycle: 20, // snapshot every 20 hours
                startTime: "23:00",
            },
        },
        retentionPolicy: {
            maxRetentionDays: 10,
            // Keep auto-created snapshots even if the source disk is deleted.
            onSourceDiskDelete: "KEEP_AUTO_SNAPSHOTS",
        },
        snapshotProperties: {
            labels: {
                my_label: "value",
            },
            storageLocations: "us", // multi-region storage location
            guestFlush: true, // application-consistent snapshots where supported
        },
    },
});
import pulumi
import pulumi_gcp as gcp

# Snapshot policy with an hourly cycle plus retention and snapshot properties.
bar = gcp.compute.ResourcePolicy("bar",
    name="gce-policy",
    region="us-central1",
    snapshot_schedule_policy={
        "schedule": {
            "hourly_schedule": {
                "hours_in_cycle": 20,  # snapshot every 20 hours
                "start_time": "23:00",
            },
        },
        "retention_policy": {
            "max_retention_days": 10,
            # Keep auto-created snapshots even if the source disk is deleted.
            "on_source_disk_delete": "KEEP_AUTO_SNAPSHOTS",
        },
        "snapshot_properties": {
            "labels": {
                "my_label": "value",
            },
            "storage_locations": "us",
            "guest_flush": True,
        },
    })
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/compute"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

// Creates a snapshot schedule policy with an hourly cycle, a 10-day
// retention limit, and snapshot properties (labels, location, guest flush).
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := compute.NewResourcePolicy(ctx, "bar", &compute.ResourcePolicyArgs{
			Name:   pulumi.String("gce-policy"),
			Region: pulumi.String("us-central1"),
			SnapshotSchedulePolicy: &compute.ResourcePolicySnapshotSchedulePolicyArgs{
				Schedule: &compute.ResourcePolicySnapshotSchedulePolicyScheduleArgs{
					HourlySchedule: &compute.ResourcePolicySnapshotSchedulePolicyScheduleHourlyScheduleArgs{
						HoursInCycle: pulumi.Int(20),
						StartTime:    pulumi.String("23:00"),
					},
				},
				RetentionPolicy: &compute.ResourcePolicySnapshotSchedulePolicyRetentionPolicyArgs{
					MaxRetentionDays:   pulumi.Int(10),
					OnSourceDiskDelete: pulumi.String("KEEP_AUTO_SNAPSHOTS"),
				},
				SnapshotProperties: &compute.ResourcePolicySnapshotSchedulePolicySnapshotPropertiesArgs{
					Labels: pulumi.StringMap{
						"my_label": pulumi.String("value"),
					},
					StorageLocations: pulumi.String("us"),
					GuestFlush:       pulumi.Bool(true),
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;

return await Deployment.RunAsync(() => 
{
    // Snapshot policy with an hourly cycle plus retention and snapshot properties.
    var bar = new Gcp.Compute.ResourcePolicy("bar", new()
    {
        Name = "gce-policy",
        Region = "us-central1",
        SnapshotSchedulePolicy = new Gcp.Compute.Inputs.ResourcePolicySnapshotSchedulePolicyArgs
        {
            Schedule = new Gcp.Compute.Inputs.ResourcePolicySnapshotSchedulePolicyScheduleArgs
            {
                HourlySchedule = new Gcp.Compute.Inputs.ResourcePolicySnapshotSchedulePolicyScheduleHourlyScheduleArgs
                {
                    HoursInCycle = 20,
                    StartTime = "23:00",
                },
            },
            RetentionPolicy = new Gcp.Compute.Inputs.ResourcePolicySnapshotSchedulePolicyRetentionPolicyArgs
            {
                MaxRetentionDays = 10,
                OnSourceDiskDelete = "KEEP_AUTO_SNAPSHOTS",
            },
            SnapshotProperties = new Gcp.Compute.Inputs.ResourcePolicySnapshotSchedulePolicySnapshotPropertiesArgs
            {
                Labels = 
                {
                    { "my_label", "value" },
                },
                StorageLocations = "us",
                GuestFlush = true,
            },
        },
    });

});
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.compute.ResourcePolicy;
import com.pulumi.gcp.compute.ResourcePolicyArgs;
import com.pulumi.gcp.compute.inputs.ResourcePolicySnapshotSchedulePolicyArgs;
import com.pulumi.gcp.compute.inputs.ResourcePolicySnapshotSchedulePolicyScheduleArgs;
import com.pulumi.gcp.compute.inputs.ResourcePolicySnapshotSchedulePolicyScheduleHourlyScheduleArgs;
import com.pulumi.gcp.compute.inputs.ResourcePolicySnapshotSchedulePolicyRetentionPolicyArgs;
import com.pulumi.gcp.compute.inputs.ResourcePolicySnapshotSchedulePolicySnapshotPropertiesArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        // Snapshot policy with an hourly cycle plus retention and snapshot properties.
        var bar = new ResourcePolicy("bar", ResourcePolicyArgs.builder()
            .name("gce-policy")
            .region("us-central1")
            .snapshotSchedulePolicy(ResourcePolicySnapshotSchedulePolicyArgs.builder()
                .schedule(ResourcePolicySnapshotSchedulePolicyScheduleArgs.builder()
                    .hourlySchedule(ResourcePolicySnapshotSchedulePolicyScheduleHourlyScheduleArgs.builder()
                        .hoursInCycle(20)
                        .startTime("23:00")
                        .build())
                    .build())
                .retentionPolicy(ResourcePolicySnapshotSchedulePolicyRetentionPolicyArgs.builder()
                    .maxRetentionDays(10)
                    .onSourceDiskDelete("KEEP_AUTO_SNAPSHOTS")
                    .build())
                .snapshotProperties(ResourcePolicySnapshotSchedulePolicySnapshotPropertiesArgs.builder()
                    .labels(Map.of("my_label", "value"))
                    .storageLocations("us")
                    .guestFlush(true)
                    .build())
                .build())
            .build());

    }
}
resources:
  bar:
    type: gcp:compute:ResourcePolicy
    properties:
      name: gce-policy
      region: us-central1
      snapshotSchedulePolicy:
        schedule:
          hourlySchedule:
            hoursInCycle: 20
            # Quoted: an unquoted 23:00 is a sexagesimal integer (1380)
            # under YAML 1.1 rules, not the intended "HH:MM" string.
            startTime: "23:00"
        retentionPolicy:
          maxRetentionDays: 10
          onSourceDiskDelete: KEEP_AUTO_SNAPSHOTS
        snapshotProperties:
          labels:
            my_label: value
          storageLocations: us
          guestFlush: true

The hourlySchedule creates snapshots every 20 hours starting at 23:00. The retentionPolicy limits storage to 10 days and keeps snapshots even if the source disk is deleted (onSourceDiskDelete: "KEEP_AUTO_SNAPSHOTS"). The snapshotProperties block adds labels for organization, sets the storage location to "us", and enables guestFlush for application-consistent snapshots on supported operating systems.

Start and stop instances on a schedule

Development and test environments often run on schedules to reduce costs by shutting down instances outside business hours.

import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";

// Instance schedule: cron-driven start/stop interpreted in the given time zone.
const hourly = new gcp.compute.ResourcePolicy("hourly", {
    name: "gce-policy",
    region: "us-central1",
    description: "Start and stop instances",
    instanceSchedulePolicy: {
        vmStartSchedule: {
            schedule: "0 * * * *", // start at the top of every hour
        },
        vmStopSchedule: {
            schedule: "15 * * * *", // stop 15 minutes past every hour
        },
        timeZone: "US/Central",
    },
});
import pulumi
import pulumi_gcp as gcp

# Instance schedule: cron-driven start/stop interpreted in the given time zone.
hourly = gcp.compute.ResourcePolicy("hourly",
    name="gce-policy",
    region="us-central1",
    description="Start and stop instances",
    instance_schedule_policy={
        "vm_start_schedule": {
            "schedule": "0 * * * *",  # start at the top of every hour
        },
        "vm_stop_schedule": {
            "schedule": "15 * * * *",  # stop 15 minutes past every hour
        },
        "time_zone": "US/Central",
    })
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/compute"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

// Creates an instance schedule policy: cron expressions start instances on
// the hour and stop them 15 minutes later, evaluated in the given TimeZone.
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := compute.NewResourcePolicy(ctx, "hourly", &compute.ResourcePolicyArgs{
			Name:        pulumi.String("gce-policy"),
			Region:      pulumi.String("us-central1"),
			Description: pulumi.String("Start and stop instances"),
			InstanceSchedulePolicy: &compute.ResourcePolicyInstanceSchedulePolicyArgs{
				VmStartSchedule: &compute.ResourcePolicyInstanceSchedulePolicyVmStartScheduleArgs{
					Schedule: pulumi.String("0 * * * *"),
				},
				VmStopSchedule: &compute.ResourcePolicyInstanceSchedulePolicyVmStopScheduleArgs{
					Schedule: pulumi.String("15 * * * *"),
				},
				TimeZone: pulumi.String("US/Central"),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;

return await Deployment.RunAsync(() => 
{
    // Instance schedule: cron-driven start/stop interpreted in the given TimeZone.
    var hourly = new Gcp.Compute.ResourcePolicy("hourly", new()
    {
        Name = "gce-policy",
        Region = "us-central1",
        Description = "Start and stop instances",
        InstanceSchedulePolicy = new Gcp.Compute.Inputs.ResourcePolicyInstanceSchedulePolicyArgs
        {
            VmStartSchedule = new Gcp.Compute.Inputs.ResourcePolicyInstanceSchedulePolicyVmStartScheduleArgs
            {
                Schedule = "0 * * * *",
            },
            VmStopSchedule = new Gcp.Compute.Inputs.ResourcePolicyInstanceSchedulePolicyVmStopScheduleArgs
            {
                Schedule = "15 * * * *",
            },
            TimeZone = "US/Central",
        },
    });

});
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.compute.ResourcePolicy;
import com.pulumi.gcp.compute.ResourcePolicyArgs;
import com.pulumi.gcp.compute.inputs.ResourcePolicyInstanceSchedulePolicyArgs;
import com.pulumi.gcp.compute.inputs.ResourcePolicyInstanceSchedulePolicyVmStartScheduleArgs;
import com.pulumi.gcp.compute.inputs.ResourcePolicyInstanceSchedulePolicyVmStopScheduleArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        // Instance schedule: cron-driven start/stop interpreted in the given time zone.
        var hourly = new ResourcePolicy("hourly", ResourcePolicyArgs.builder()
            .name("gce-policy")
            .region("us-central1")
            .description("Start and stop instances")
            .instanceSchedulePolicy(ResourcePolicyInstanceSchedulePolicyArgs.builder()
                .vmStartSchedule(ResourcePolicyInstanceSchedulePolicyVmStartScheduleArgs.builder()
                    .schedule("0 * * * *")
                    .build())
                .vmStopSchedule(ResourcePolicyInstanceSchedulePolicyVmStopScheduleArgs.builder()
                    .schedule("15 * * * *")
                    .build())
                .timeZone("US/Central")
                .build())
            .build());

    }
}
resources:
  hourly:
    type: gcp:compute:ResourcePolicy
    properties:
      name: gce-policy
      region: us-central1
      description: Start and stop instances
      instanceSchedulePolicy:
        vmStartSchedule:
          schedule: 0 * * * * # cron: start at the top of every hour
        vmStopSchedule:
          schedule: 15 * * * * # cron: stop 15 minutes past every hour
        timeZone: US/Central

The instanceSchedulePolicy defines two cron expressions: vmStartSchedule starts instances at the top of each hour, and vmStopSchedule stops them 15 minutes later. The timeZone property ensures schedules run in the correct local time. Attach this policy to VM instances to automate their lifecycle.

Colocate VMs for low-latency communication

High-performance computing workloads that require low-latency inter-VM communication benefit from placement policies that keep instances physically close.

import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";

// Group placement policy that colocates two VMs on nearby hardware.
const baz = new gcp.compute.ResourcePolicy("baz", {
    name: "gce-policy",
    region: "us-central1",
    groupPlacementPolicy: {
        vmCount: 2,
        collocation: "COLLOCATED",
    },
});
import pulumi
import pulumi_gcp as gcp

# Group placement policy that colocates two VMs on nearby hardware.
baz = gcp.compute.ResourcePolicy("baz",
    name="gce-policy",
    region="us-central1",
    group_placement_policy={
        "vm_count": 2,
        "collocation": "COLLOCATED",
    })
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/compute"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

// Creates a group placement policy that colocates two VMs (VmCount: 2)
// on nearby hardware (Collocation: COLLOCATED).
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := compute.NewResourcePolicy(ctx, "baz", &compute.ResourcePolicyArgs{
			Name:   pulumi.String("gce-policy"),
			Region: pulumi.String("us-central1"),
			GroupPlacementPolicy: &compute.ResourcePolicyGroupPlacementPolicyArgs{
				VmCount:     pulumi.Int(2),
				Collocation: pulumi.String("COLLOCATED"),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;

return await Deployment.RunAsync(() => 
{
    // Group placement policy that colocates two VMs on nearby hardware.
    var baz = new Gcp.Compute.ResourcePolicy("baz", new()
    {
        Name = "gce-policy",
        Region = "us-central1",
        GroupPlacementPolicy = new Gcp.Compute.Inputs.ResourcePolicyGroupPlacementPolicyArgs
        {
            VmCount = 2,
            Collocation = "COLLOCATED",
        },
    });

});
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.compute.ResourcePolicy;
import com.pulumi.gcp.compute.ResourcePolicyArgs;
import com.pulumi.gcp.compute.inputs.ResourcePolicyGroupPlacementPolicyArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        // Group placement policy that colocates two VMs on nearby hardware.
        var baz = new ResourcePolicy("baz", ResourcePolicyArgs.builder()
            .name("gce-policy")
            .region("us-central1")
            .groupPlacementPolicy(ResourcePolicyGroupPlacementPolicyArgs.builder()
                .vmCount(2)
                .collocation("COLLOCATED")
                .build())
            .build());

    }
}
resources:
  baz:
    type: gcp:compute:ResourcePolicy
    properties:
      name: gce-policy
      region: us-central1
      # Colocate two VMs on nearby hardware to minimize inter-VM latency.
      groupPlacementPolicy:
        vmCount: 2
        collocation: COLLOCATED

The groupPlacementPolicy places the specified number of VMs (vmCount: 2) on the same physical hardware (collocation: "COLLOCATED"). This minimizes network latency between instances. Attach this policy to VM instances at creation time to control their physical placement.

Define GPU topology for ML training workloads

Machine learning training jobs that use multiple GPUs require specific topology configurations to optimize inter-GPU communication bandwidth.

import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";

// Collocated placement policy with an explicit GPU topology string.
const baz = new gcp.compute.ResourcePolicy("baz", {
    name: "gce-policy",
    region: "europe-west9",
    groupPlacementPolicy: {
        collocation: "COLLOCATED",
        gpuTopology: "1x72", // GPU layout expressed as a dimension string
    },
});
import pulumi
import pulumi_gcp as gcp

# Collocated placement policy with an explicit GPU topology string.
baz = gcp.compute.ResourcePolicy("baz",
    name="gce-policy",
    region="europe-west9",
    group_placement_policy={
        "collocation": "COLLOCATED",
        "gpu_topology": "1x72",  # GPU layout expressed as a dimension string
    })
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/compute"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

// Creates a collocated placement policy with an explicit GPU topology string.
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := compute.NewResourcePolicy(ctx, "baz", &compute.ResourcePolicyArgs{
			Name:   pulumi.String("gce-policy"),
			Region: pulumi.String("europe-west9"),
			GroupPlacementPolicy: &compute.ResourcePolicyGroupPlacementPolicyArgs{
				Collocation: pulumi.String("COLLOCATED"),
				GpuTopology: pulumi.String("1x72"),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;

return await Deployment.RunAsync(() => 
{
    // Collocated placement policy with an explicit GPU topology string.
    var baz = new Gcp.Compute.ResourcePolicy("baz", new()
    {
        Name = "gce-policy",
        Region = "europe-west9",
        GroupPlacementPolicy = new Gcp.Compute.Inputs.ResourcePolicyGroupPlacementPolicyArgs
        {
            Collocation = "COLLOCATED",
            GpuTopology = "1x72",
        },
    });

});
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.compute.ResourcePolicy;
import com.pulumi.gcp.compute.ResourcePolicyArgs;
import com.pulumi.gcp.compute.inputs.ResourcePolicyGroupPlacementPolicyArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        // Collocated placement policy with an explicit GPU topology string.
        var baz = new ResourcePolicy("baz", ResourcePolicyArgs.builder()
            .name("gce-policy")
            .region("europe-west9")
            .groupPlacementPolicy(ResourcePolicyGroupPlacementPolicyArgs.builder()
                .collocation("COLLOCATED")
                .gpuTopology("1x72")
                .build())
            .build());

    }
}
resources:
  baz:
    type: gcp:compute:ResourcePolicy
    properties:
      name: gce-policy
      region: europe-west9
      # Collocated placement with an explicit GPU topology string.
      groupPlacementPolicy:
        collocation: COLLOCATED
        gpuTopology: 1x72

The gpuTopology property defines the GPU layout as a string (e.g., "1x72" for 72 GPUs in a single dimension). This works with collocation to ensure GPUs are arranged for optimal communication. Use this for distributed training workloads that span multiple GPU-equipped VMs.

Enable crash-consistent snapshots across disks

Applications that span multiple disks need consistency groups to ensure snapshots capture a coherent state across all volumes at the same point in time.

import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";

// Consistency-group policy for coordinated snapshots across multiple disks.
const cgroup = new gcp.compute.ResourcePolicy("cgroup", {
    name: "gce-policy",
    region: "europe-west1",
    diskConsistencyGroupPolicy: {
        enabled: true,
    },
});
import pulumi
import pulumi_gcp as gcp

# Consistency-group policy for coordinated snapshots across multiple disks.
cgroup = gcp.compute.ResourcePolicy("cgroup",
    name="gce-policy",
    region="europe-west1",
    disk_consistency_group_policy={
        "enabled": True,
    })
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/compute"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

// Creates a disk consistency group policy for coordinated snapshots
// across multiple disks.
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := compute.NewResourcePolicy(ctx, "cgroup", &compute.ResourcePolicyArgs{
			Name:   pulumi.String("gce-policy"),
			Region: pulumi.String("europe-west1"),
			DiskConsistencyGroupPolicy: &compute.ResourcePolicyDiskConsistencyGroupPolicyArgs{
				Enabled: pulumi.Bool(true),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;

return await Deployment.RunAsync(() => 
{
    // Consistency-group policy for coordinated snapshots across multiple disks.
    var cgroup = new Gcp.Compute.ResourcePolicy("cgroup", new()
    {
        Name = "gce-policy",
        Region = "europe-west1",
        DiskConsistencyGroupPolicy = new Gcp.Compute.Inputs.ResourcePolicyDiskConsistencyGroupPolicyArgs
        {
            Enabled = true,
        },
    });

});
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.compute.ResourcePolicy;
import com.pulumi.gcp.compute.ResourcePolicyArgs;
import com.pulumi.gcp.compute.inputs.ResourcePolicyDiskConsistencyGroupPolicyArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        // Consistency-group policy for coordinated snapshots across multiple disks.
        var cgroup = new ResourcePolicy("cgroup", ResourcePolicyArgs.builder()
            .name("gce-policy")
            .region("europe-west1")
            .diskConsistencyGroupPolicy(ResourcePolicyDiskConsistencyGroupPolicyArgs.builder()
                .enabled(true)
                .build())
            .build());

    }
}
resources:
  cgroup:
    type: gcp:compute:ResourcePolicy
    properties:
      name: gce-policy
      region: europe-west1
      # Coordinated (crash-consistent) snapshots across the disks in the group.
      diskConsistencyGroupPolicy:
        enabled: true

The diskConsistencyGroupPolicy enables crash-consistent snapshots across multiple disks. When enabled, snapshots of all disks in the group are coordinated to represent the same point in time. Attach this policy to multiple persistent disks that belong to the same application.

Beyond these examples

These snippets focus on specific resource policy features: snapshot scheduling and retention, instance start/stop automation, and VM placement and hardware topology. They’re intentionally minimal rather than full infrastructure deployments.

The examples assume pre-existing infrastructure such as persistent disks to attach snapshot policies, and VM instances to attach placement or schedule policies. They focus on defining the policy rather than attaching it to resources.

To keep things focused, common resource policy patterns are omitted, including:

  • Policy attachment to resources (done via disk or instance resources)
  • Workload policy types beyond HIGH_AVAILABILITY and HIGH_THROUGHPUT
  • TPU topology configuration (tpuTopology)
  • Snapshot chain naming (chainName)

These omissions are intentional: the goal is to illustrate how each policy type is configured, not provide drop-in automation modules. See the Resource Policy resource reference for all available configuration options.

Let's configure GCP Compute Resource Policies

Get started with Pulumi Cloud, then follow our quick setup guide to deploy this infrastructure.

Try Pulumi Cloud for FREE

Frequently Asked Questions

Configuration & Naming
What are the naming requirements for resource policies?
The name must be 1-63 characters long and follow RFC1035: start with a lowercase letter, contain only lowercase letters, digits, and dashes, and not end with a dash (regex: `[a-z]([-a-z0-9]*[a-z0-9])?`).
What happens if I change the region or project after creation?
Both region and project are immutable. Changing either will force resource replacement (destroy and recreate).
Policy Types & Selection
What types of resource policies can I create?
You can create five types: snapshotSchedulePolicy (snapshot automation), groupPlacementPolicy (VM placement), instanceSchedulePolicy (start/stop scheduling), diskConsistencyGroupPolicy (replication consistency), and workloadPolicy (workload optimization).
Can I configure multiple policy types in a single resource policy?
Each resource policy should configure a single policy type, as shown in the examples. If a resource needs multiple capabilities (for example, a snapshot schedule and a consistency group), create a separate resource policy for each and attach them to the target resource individually.
Snapshot Policies
What snapshot scheduling options are available?
You can use dailySchedule (specify daysInCycle and startTime) or hourlySchedule (specify hoursInCycle and startTime) within snapshotSchedulePolicy.schedule.
How do I configure snapshot retention and deletion behavior?
Set retentionPolicy with maxRetentionDays for retention duration and onSourceDiskDelete (e.g., KEEP_AUTO_SNAPSHOTS) to control behavior when the source disk is deleted.
Can I specify where snapshots are stored?
Yes, use snapshotProperties.storageLocations to specify the storage location (e.g., us for US multi-region).
What's the purpose of the chainName property in snapshot schedules?
The chainName property in snapshotProperties identifies a snapshot chain for organizational purposes.
Instance Scheduling
How do I schedule automatic instance start and stop times?
Use instanceSchedulePolicy with vmStartSchedule.schedule and vmStopSchedule.schedule using cron expressions (e.g., 0 * * * * for hourly), and specify timeZone.
Placement & Workload Policies
How do I configure VM placement for collocated instances?
Use groupPlacementPolicy with collocation set to COLLOCATED and specify vmCount. Optionally add maxDistance for distance constraints.
What topology options are available for placement policies?
You can specify gpuTopology (e.g., 1x72), tpuTopology (e.g., 4x4), or maxDistance (e.g., 2) depending on your hardware and placement requirements.
What's the difference between HIGH_AVAILABILITY and HIGH_THROUGHPUT workload types?
workloadPolicy.type can be HIGH_AVAILABILITY (for availability-focused workloads) or HIGH_THROUGHPUT (for throughput-focused workloads, which supports acceleratorTopology and maxTopologyDistance options).

Using a different cloud?

Explore compute guides for other cloud providers: