Configure GCP Compute Resource Policies

The gcp:compute/resourcePolicy:ResourcePolicy resource, part of the Pulumi GCP provider, defines policies that schedule actions or control placement for compute resources. This guide focuses on three capabilities: snapshot scheduling and retention, instance start/stop automation, and VM placement for performance and availability.

Resource policies are created independently but must be attached to instances or disks via their respective resource configurations. The examples are intentionally small. Combine them with your own compute instances and disk resources.

Schedule daily snapshots at a fixed time

Backup strategies often begin with automated snapshots that run daily at off-peak hours, eliminating the need for external schedulers.

import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";

// Regional resource policy: snapshot attached disks once per day.
const foo = new gcp.compute.ResourcePolicy("foo", {
    name: "gce-policy",
    region: "us-central1",
    snapshotSchedulePolicy: {
        schedule: {
            dailySchedule: {
                daysInCycle: 1, // run every day
                startTime: "04:00", // 24-hour format
            },
        },
    },
});
import pulumi
import pulumi_gcp as gcp

# Regional resource policy: snapshot attached disks once per day.
foo = gcp.compute.ResourcePolicy("foo",
    name="gce-policy",
    region="us-central1",
    snapshot_schedule_policy={
        "schedule": {
            "daily_schedule": {
                "days_in_cycle": 1,  # run every day
                "start_time": "04:00",  # 24-hour format
            },
        },
    })
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/compute"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Regional resource policy: snapshot attached disks once per day.
		_, err := compute.NewResourcePolicy(ctx, "foo", &compute.ResourcePolicyArgs{
			Name:   pulumi.String("gce-policy"),
			Region: pulumi.String("us-central1"),
			SnapshotSchedulePolicy: &compute.ResourcePolicySnapshotSchedulePolicyArgs{
				Schedule: &compute.ResourcePolicySnapshotSchedulePolicyScheduleArgs{
					DailySchedule: &compute.ResourcePolicySnapshotSchedulePolicyScheduleDailyScheduleArgs{
						DaysInCycle: pulumi.Int(1),          // run every day
						StartTime:   pulumi.String("04:00"), // 24-hour format
					},
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;

return await Deployment.RunAsync(() => 
{
    // Regional resource policy: snapshot attached disks once per day.
    var foo = new Gcp.Compute.ResourcePolicy("foo", new()
    {
        Name = "gce-policy",
        Region = "us-central1",
        SnapshotSchedulePolicy = new Gcp.Compute.Inputs.ResourcePolicySnapshotSchedulePolicyArgs
        {
            Schedule = new Gcp.Compute.Inputs.ResourcePolicySnapshotSchedulePolicyScheduleArgs
            {
                DailySchedule = new Gcp.Compute.Inputs.ResourcePolicySnapshotSchedulePolicyScheduleDailyScheduleArgs
                {
                    DaysInCycle = 1, // run every day
                    StartTime = "04:00", // 24-hour format
                },
            },
        },
    });

});
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.compute.ResourcePolicy;
import com.pulumi.gcp.compute.ResourcePolicyArgs;
import com.pulumi.gcp.compute.inputs.ResourcePolicySnapshotSchedulePolicyArgs;
import com.pulumi.gcp.compute.inputs.ResourcePolicySnapshotSchedulePolicyScheduleArgs;
import com.pulumi.gcp.compute.inputs.ResourcePolicySnapshotSchedulePolicyScheduleDailyScheduleArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        // Regional resource policy: snapshot attached disks once per day.
        var foo = new ResourcePolicy("foo", ResourcePolicyArgs.builder()
            .name("gce-policy")
            .region("us-central1")
            .snapshotSchedulePolicy(ResourcePolicySnapshotSchedulePolicyArgs.builder()
                .schedule(ResourcePolicySnapshotSchedulePolicyScheduleArgs.builder()
                    .dailySchedule(ResourcePolicySnapshotSchedulePolicyScheduleDailyScheduleArgs.builder()
                        .daysInCycle(1) // run every day
                        .startTime("04:00") // 24-hour format
                        .build())
                    .build())
                .build())
            .build());

    }
}
# Regional resource policy: snapshot attached disks once per day.
resources:
  foo:
    type: gcp:compute:ResourcePolicy
    properties:
      name: gce-policy
      region: us-central1
      snapshotSchedulePolicy:
        schedule:
          dailySchedule:
            daysInCycle: 1
            # Quoted: YAML 1.1-compatible parsers read an unquoted 04:00
            # as the base-60 integer 240; the API expects the string "04:00".
            startTime: "04:00"

The snapshotSchedulePolicy defines when snapshots occur. The dailySchedule runs every day (daysInCycle: 1) at the specified startTime in 24-hour format. Attach this policy to persistent disks to enable automatic backups.

Configure snapshot retention and storage location

Production snapshots need retention policies to manage storage costs and meet compliance requirements.

import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";

// Hourly snapshot schedule with retention limits and snapshot metadata.
const bar = new gcp.compute.ResourcePolicy("bar", {
    name: "gce-policy",
    region: "us-central1",
    snapshotSchedulePolicy: {
        schedule: {
            hourlySchedule: {
                hoursInCycle: 20, // hours between snapshots
                startTime: "23:00",
            },
        },
        retentionPolicy: {
            maxRetentionDays: 10, // snapshots older than 10 days are removed
            onSourceDiskDelete: "KEEP_AUTO_SNAPSHOTS", // keep snapshots if source disk is deleted
        },
        snapshotProperties: {
            labels: {
                my_label: "value",
            },
            storageLocations: "us", // multi-region storage location
            guestFlush: true, // flush guest filesystem before snapshotting, for consistency
        },
    },
});
import pulumi
import pulumi_gcp as gcp

# Hourly snapshot schedule with retention limits and snapshot metadata.
bar = gcp.compute.ResourcePolicy("bar",
    name="gce-policy",
    region="us-central1",
    snapshot_schedule_policy={
        "schedule": {
            "hourly_schedule": {
                "hours_in_cycle": 20,  # hours between snapshots
                "start_time": "23:00",
            },
        },
        "retention_policy": {
            "max_retention_days": 10,  # snapshots older than 10 days are removed
            "on_source_disk_delete": "KEEP_AUTO_SNAPSHOTS",  # keep snapshots if disk is deleted
        },
        "snapshot_properties": {
            "labels": {
                "my_label": "value",
            },
            "storage_locations": "us",  # multi-region storage location
            "guest_flush": True,  # flush guest filesystem before snapshotting
        },
    })
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/compute"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Hourly snapshot schedule with retention limits and snapshot metadata.
		_, err := compute.NewResourcePolicy(ctx, "bar", &compute.ResourcePolicyArgs{
			Name:   pulumi.String("gce-policy"),
			Region: pulumi.String("us-central1"),
			SnapshotSchedulePolicy: &compute.ResourcePolicySnapshotSchedulePolicyArgs{
				Schedule: &compute.ResourcePolicySnapshotSchedulePolicyScheduleArgs{
					HourlySchedule: &compute.ResourcePolicySnapshotSchedulePolicyScheduleHourlyScheduleArgs{
						HoursInCycle: pulumi.Int(20), // hours between snapshots
						StartTime:    pulumi.String("23:00"),
					},
				},
				RetentionPolicy: &compute.ResourcePolicySnapshotSchedulePolicyRetentionPolicyArgs{
					MaxRetentionDays:   pulumi.Int(10),                        // snapshots older than 10 days are removed
					OnSourceDiskDelete: pulumi.String("KEEP_AUTO_SNAPSHOTS"), // keep snapshots if disk is deleted
				},
				SnapshotProperties: &compute.ResourcePolicySnapshotSchedulePolicySnapshotPropertiesArgs{
					Labels: pulumi.StringMap{
						"my_label": pulumi.String("value"),
					},
					StorageLocations: pulumi.String("us"), // multi-region storage location
					GuestFlush:       pulumi.Bool(true),   // flush guest filesystem before snapshotting
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;

return await Deployment.RunAsync(() => 
{
    // Hourly snapshot schedule with retention limits and snapshot metadata.
    var bar = new Gcp.Compute.ResourcePolicy("bar", new()
    {
        Name = "gce-policy",
        Region = "us-central1",
        SnapshotSchedulePolicy = new Gcp.Compute.Inputs.ResourcePolicySnapshotSchedulePolicyArgs
        {
            Schedule = new Gcp.Compute.Inputs.ResourcePolicySnapshotSchedulePolicyScheduleArgs
            {
                HourlySchedule = new Gcp.Compute.Inputs.ResourcePolicySnapshotSchedulePolicyScheduleHourlyScheduleArgs
                {
                    HoursInCycle = 20, // hours between snapshots
                    StartTime = "23:00",
                },
            },
            RetentionPolicy = new Gcp.Compute.Inputs.ResourcePolicySnapshotSchedulePolicyRetentionPolicyArgs
            {
                MaxRetentionDays = 10, // snapshots older than 10 days are removed
                OnSourceDiskDelete = "KEEP_AUTO_SNAPSHOTS", // keep snapshots if disk is deleted
            },
            SnapshotProperties = new Gcp.Compute.Inputs.ResourcePolicySnapshotSchedulePolicySnapshotPropertiesArgs
            {
                Labels = 
                {
                    { "my_label", "value" },
                },
                StorageLocations = "us", // multi-region storage location
                GuestFlush = true, // flush guest filesystem before snapshotting
            },
        },
    });

});
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.compute.ResourcePolicy;
import com.pulumi.gcp.compute.ResourcePolicyArgs;
import com.pulumi.gcp.compute.inputs.ResourcePolicySnapshotSchedulePolicyArgs;
import com.pulumi.gcp.compute.inputs.ResourcePolicySnapshotSchedulePolicyScheduleArgs;
import com.pulumi.gcp.compute.inputs.ResourcePolicySnapshotSchedulePolicyScheduleHourlyScheduleArgs;
import com.pulumi.gcp.compute.inputs.ResourcePolicySnapshotSchedulePolicyRetentionPolicyArgs;
import com.pulumi.gcp.compute.inputs.ResourcePolicySnapshotSchedulePolicySnapshotPropertiesArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        // Hourly snapshot schedule with retention limits and snapshot metadata.
        var bar = new ResourcePolicy("bar", ResourcePolicyArgs.builder()
            .name("gce-policy")
            .region("us-central1")
            .snapshotSchedulePolicy(ResourcePolicySnapshotSchedulePolicyArgs.builder()
                .schedule(ResourcePolicySnapshotSchedulePolicyScheduleArgs.builder()
                    .hourlySchedule(ResourcePolicySnapshotSchedulePolicyScheduleHourlyScheduleArgs.builder()
                        .hoursInCycle(20) // hours between snapshots
                        .startTime("23:00")
                        .build())
                    .build())
                .retentionPolicy(ResourcePolicySnapshotSchedulePolicyRetentionPolicyArgs.builder()
                    .maxRetentionDays(10) // snapshots older than 10 days are removed
                    .onSourceDiskDelete("KEEP_AUTO_SNAPSHOTS") // keep snapshots if disk is deleted
                    .build())
                .snapshotProperties(ResourcePolicySnapshotSchedulePolicySnapshotPropertiesArgs.builder()
                    .labels(Map.of("my_label", "value"))
                    .storageLocations("us") // multi-region storage location
                    .guestFlush(true) // flush guest filesystem before snapshotting
                    .build())
                .build())
            .build());

    }
}
# Hourly snapshot schedule with retention limits and snapshot metadata.
resources:
  bar:
    type: gcp:compute:ResourcePolicy
    properties:
      name: gce-policy
      region: us-central1
      snapshotSchedulePolicy:
        schedule:
          hourlySchedule:
            hoursInCycle: 20
            # Quoted: YAML 1.1-compatible parsers read an unquoted 23:00
            # as the base-60 integer 1380; the API expects the string "23:00".
            startTime: "23:00"
        retentionPolicy:
          maxRetentionDays: 10
          onSourceDiskDelete: KEEP_AUTO_SNAPSHOTS
        snapshotProperties:
          labels:
            my_label: value
          storageLocations: us
          guestFlush: true

The retentionPolicy controls how long snapshots are kept (maxRetentionDays) and what happens when the source disk is deleted. The snapshotProperties block sets the storage location (storageLocations: “us” for multi-region) and enables guestFlush to ensure filesystem consistency. This configuration extends basic scheduling with lifecycle management.

Start and stop instances on a schedule

Development and test environments often run only during business hours to reduce compute costs.

import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";

// Start attached VMs at :00 and stop them at :15 of every hour (cron format).
const hourly = new gcp.compute.ResourcePolicy("hourly", {
    name: "gce-policy",
    region: "us-central1",
    description: "Start and stop instances",
    instanceSchedulePolicy: {
        vmStartSchedule: {
            schedule: "0 * * * *", // cron: minute 0 of every hour
        },
        vmStopSchedule: {
            schedule: "15 * * * *", // cron: minute 15 of every hour
        },
        timeZone: "US/Central", // IANA time zone used to interpret the cron schedules
    },
});
import pulumi
import pulumi_gcp as gcp

# Start attached VMs at :00 and stop them at :15 of every hour (cron format).
hourly = gcp.compute.ResourcePolicy("hourly",
    name="gce-policy",
    region="us-central1",
    description="Start and stop instances",
    instance_schedule_policy={
        "vm_start_schedule": {
            "schedule": "0 * * * *",  # cron: minute 0 of every hour
        },
        "vm_stop_schedule": {
            "schedule": "15 * * * *",  # cron: minute 15 of every hour
        },
        "time_zone": "US/Central",  # IANA time zone for the cron schedules
    })
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/compute"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Start attached VMs at :00 and stop them at :15 of every hour (cron format).
		_, err := compute.NewResourcePolicy(ctx, "hourly", &compute.ResourcePolicyArgs{
			Name:        pulumi.String("gce-policy"),
			Region:      pulumi.String("us-central1"),
			Description: pulumi.String("Start and stop instances"),
			InstanceSchedulePolicy: &compute.ResourcePolicyInstanceSchedulePolicyArgs{
				VmStartSchedule: &compute.ResourcePolicyInstanceSchedulePolicyVmStartScheduleArgs{
					Schedule: pulumi.String("0 * * * *"), // cron: minute 0 of every hour
				},
				VmStopSchedule: &compute.ResourcePolicyInstanceSchedulePolicyVmStopScheduleArgs{
					Schedule: pulumi.String("15 * * * *"), // cron: minute 15 of every hour
				},
				TimeZone: pulumi.String("US/Central"), // IANA time zone for the cron schedules
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;

return await Deployment.RunAsync(() => 
{
    // Start attached VMs at :00 and stop them at :15 of every hour (cron format).
    var hourly = new Gcp.Compute.ResourcePolicy("hourly", new()
    {
        Name = "gce-policy",
        Region = "us-central1",
        Description = "Start and stop instances",
        InstanceSchedulePolicy = new Gcp.Compute.Inputs.ResourcePolicyInstanceSchedulePolicyArgs
        {
            VmStartSchedule = new Gcp.Compute.Inputs.ResourcePolicyInstanceSchedulePolicyVmStartScheduleArgs
            {
                Schedule = "0 * * * *", // cron: minute 0 of every hour
            },
            VmStopSchedule = new Gcp.Compute.Inputs.ResourcePolicyInstanceSchedulePolicyVmStopScheduleArgs
            {
                Schedule = "15 * * * *", // cron: minute 15 of every hour
            },
            TimeZone = "US/Central", // IANA time zone for the cron schedules
        },
    });

});
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.compute.ResourcePolicy;
import com.pulumi.gcp.compute.ResourcePolicyArgs;
import com.pulumi.gcp.compute.inputs.ResourcePolicyInstanceSchedulePolicyArgs;
import com.pulumi.gcp.compute.inputs.ResourcePolicyInstanceSchedulePolicyVmStartScheduleArgs;
import com.pulumi.gcp.compute.inputs.ResourcePolicyInstanceSchedulePolicyVmStopScheduleArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        // Start attached VMs at :00 and stop them at :15 of every hour (cron format).
        var hourly = new ResourcePolicy("hourly", ResourcePolicyArgs.builder()
            .name("gce-policy")
            .region("us-central1")
            .description("Start and stop instances")
            .instanceSchedulePolicy(ResourcePolicyInstanceSchedulePolicyArgs.builder()
                .vmStartSchedule(ResourcePolicyInstanceSchedulePolicyVmStartScheduleArgs.builder()
                    .schedule("0 * * * *") // cron: minute 0 of every hour
                    .build())
                .vmStopSchedule(ResourcePolicyInstanceSchedulePolicyVmStopScheduleArgs.builder()
                    .schedule("15 * * * *") // cron: minute 15 of every hour
                    .build())
                .timeZone("US/Central") // IANA time zone for the cron schedules
                .build())
            .build());

    }
}
# Start attached VMs at :00 and stop them at :15 of every hour (cron format).
resources:
  hourly:
    type: gcp:compute:ResourcePolicy
    properties:
      name: gce-policy
      region: us-central1
      description: Start and stop instances
      instanceSchedulePolicy:
        vmStartSchedule:
          schedule: 0 * * * * # cron: minute 0 of every hour
        vmStopSchedule:
          schedule: 15 * * * * # cron: minute 15 of every hour
        timeZone: US/Central # IANA time zone for the cron schedules

The instanceSchedulePolicy uses cron expressions to control VM power state. The vmStartSchedule and vmStopSchedule properties define when instances turn on and off. The timeZone property ensures schedules align with your business hours regardless of the region’s default timezone.

Colocate VMs for low-latency communication

High-performance computing and distributed databases benefit from placing VMs physically close together to minimize network latency.

import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";

// Placement policy: collocate two VMs on shared physical infrastructure for low latency.
const baz = new gcp.compute.ResourcePolicy("baz", {
    name: "gce-policy",
    region: "us-central1",
    groupPlacementPolicy: {
        vmCount: 2, // number of VMs to place together
        collocation: "COLLOCATED",
    },
});
import pulumi
import pulumi_gcp as gcp

# Placement policy: collocate two VMs on shared physical infrastructure for low latency.
baz = gcp.compute.ResourcePolicy("baz",
    name="gce-policy",
    region="us-central1",
    group_placement_policy={
        "vm_count": 2,  # number of VMs to place together
        "collocation": "COLLOCATED",
    })
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/compute"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Placement policy: collocate two VMs on shared physical infrastructure for low latency.
		_, err := compute.NewResourcePolicy(ctx, "baz", &compute.ResourcePolicyArgs{
			Name:   pulumi.String("gce-policy"),
			Region: pulumi.String("us-central1"),
			GroupPlacementPolicy: &compute.ResourcePolicyGroupPlacementPolicyArgs{
				VmCount:     pulumi.Int(2), // number of VMs to place together
				Collocation: pulumi.String("COLLOCATED"),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;

return await Deployment.RunAsync(() => 
{
    // Placement policy: collocate two VMs on shared physical infrastructure for low latency.
    var baz = new Gcp.Compute.ResourcePolicy("baz", new()
    {
        Name = "gce-policy",
        Region = "us-central1",
        GroupPlacementPolicy = new Gcp.Compute.Inputs.ResourcePolicyGroupPlacementPolicyArgs
        {
            VmCount = 2, // number of VMs to place together
            Collocation = "COLLOCATED",
        },
    });

});
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.compute.ResourcePolicy;
import com.pulumi.gcp.compute.ResourcePolicyArgs;
import com.pulumi.gcp.compute.inputs.ResourcePolicyGroupPlacementPolicyArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        // Placement policy: collocate two VMs on shared physical infrastructure for low latency.
        var baz = new ResourcePolicy("baz", ResourcePolicyArgs.builder()
            .name("gce-policy")
            .region("us-central1")
            .groupPlacementPolicy(ResourcePolicyGroupPlacementPolicyArgs.builder()
                .vmCount(2) // number of VMs to place together
                .collocation("COLLOCATED")
                .build())
            .build());

    }
}
# Placement policy: collocate two VMs on shared physical infrastructure for low latency.
resources:
  baz:
    type: gcp:compute:ResourcePolicy
    properties:
      name: gce-policy
      region: us-central1
      groupPlacementPolicy:
        vmCount: 2 # number of VMs to place together
        collocation: COLLOCATED

The groupPlacementPolicy controls how GCP distributes instances across hardware. Setting collocation to “COLLOCATED” places the specified vmCount on the same physical infrastructure, reducing inter-VM latency for tightly coupled workloads.

Optimize placement for high availability workloads

Mission-critical applications require workload policies that prioritize availability over raw performance.

import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";

// Workload policy that favors availability when GCP places the VMs.
const bar = new gcp.compute.ResourcePolicy("bar", {
    name: "gce-policy",
    region: "europe-west1",
    workloadPolicy: {
        type: "HIGH_AVAILABILITY",
    },
});
import pulumi
import pulumi_gcp as gcp

# Workload policy that favors availability when GCP places the VMs.
bar = gcp.compute.ResourcePolicy("bar",
    name="gce-policy",
    region="europe-west1",
    workload_policy={
        "type": "HIGH_AVAILABILITY",
    })
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/compute"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Workload policy that favors availability when GCP places the VMs.
		_, err := compute.NewResourcePolicy(ctx, "bar", &compute.ResourcePolicyArgs{
			Name:   pulumi.String("gce-policy"),
			Region: pulumi.String("europe-west1"),
			WorkloadPolicy: &compute.ResourcePolicyWorkloadPolicyArgs{
				Type: pulumi.String("HIGH_AVAILABILITY"),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;

return await Deployment.RunAsync(() => 
{
    // Workload policy that favors availability when GCP places the VMs.
    var bar = new Gcp.Compute.ResourcePolicy("bar", new()
    {
        Name = "gce-policy",
        Region = "europe-west1",
        WorkloadPolicy = new Gcp.Compute.Inputs.ResourcePolicyWorkloadPolicyArgs
        {
            Type = "HIGH_AVAILABILITY",
        },
    });

});
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.compute.ResourcePolicy;
import com.pulumi.gcp.compute.ResourcePolicyArgs;
import com.pulumi.gcp.compute.inputs.ResourcePolicyWorkloadPolicyArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        // Workload policy that favors availability when GCP places the VMs.
        var bar = new ResourcePolicy("bar", ResourcePolicyArgs.builder()
            .name("gce-policy")
            .region("europe-west1")
            .workloadPolicy(ResourcePolicyWorkloadPolicyArgs.builder()
                .type("HIGH_AVAILABILITY")
                .build())
            .build());

    }
}
# Workload policy that favors availability when GCP places the VMs.
resources:
  bar:
    type: gcp:compute:ResourcePolicy
    properties:
      name: gce-policy
      region: europe-west1
      workloadPolicy:
        type: HIGH_AVAILABILITY

The workloadPolicy guides GCP’s placement decisions based on application characteristics. Setting type to “HIGH_AVAILABILITY” distributes VMs across fault domains to maximize resilience against hardware failures.

Enable crash-consistent multi-disk snapshots

Applications with data spread across multiple disks need coordinated snapshots to maintain consistency.

import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";

// Consistency-group policy so multiple disks snapshot/replicate at the same point in time.
const cgroup = new gcp.compute.ResourcePolicy("cgroup", {
    name: "gce-policy",
    region: "europe-west1",
    diskConsistencyGroupPolicy: {
        enabled: true,
    },
});
import pulumi
import pulumi_gcp as gcp

# Consistency-group policy so multiple disks snapshot/replicate at the same point in time.
cgroup = gcp.compute.ResourcePolicy("cgroup",
    name="gce-policy",
    region="europe-west1",
    disk_consistency_group_policy={
        "enabled": True,
    })
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/compute"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Consistency-group policy so multiple disks snapshot/replicate at the same point in time.
		_, err := compute.NewResourcePolicy(ctx, "cgroup", &compute.ResourcePolicyArgs{
			Name:   pulumi.String("gce-policy"),
			Region: pulumi.String("europe-west1"),
			DiskConsistencyGroupPolicy: &compute.ResourcePolicyDiskConsistencyGroupPolicyArgs{
				Enabled: pulumi.Bool(true),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;

return await Deployment.RunAsync(() => 
{
    // Consistency-group policy so multiple disks snapshot/replicate at the same point in time.
    var cgroup = new Gcp.Compute.ResourcePolicy("cgroup", new()
    {
        Name = "gce-policy",
        Region = "europe-west1",
        DiskConsistencyGroupPolicy = new Gcp.Compute.Inputs.ResourcePolicyDiskConsistencyGroupPolicyArgs
        {
            Enabled = true,
        },
    });

});
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.compute.ResourcePolicy;
import com.pulumi.gcp.compute.ResourcePolicyArgs;
import com.pulumi.gcp.compute.inputs.ResourcePolicyDiskConsistencyGroupPolicyArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        // Consistency-group policy so multiple disks snapshot/replicate at the same point in time.
        var cgroup = new ResourcePolicy("cgroup", ResourcePolicyArgs.builder()
            .name("gce-policy")
            .region("europe-west1")
            .diskConsistencyGroupPolicy(ResourcePolicyDiskConsistencyGroupPolicyArgs.builder()
                .enabled(true)
                .build())
            .build());

    }
}
# Consistency-group policy so multiple disks snapshot/replicate at the same point in time.
resources:
  cgroup:
    type: gcp:compute:ResourcePolicy
    properties:
      name: gce-policy
      region: europe-west1
      diskConsistencyGroupPolicy:
        enabled: true

The diskConsistencyGroupPolicy ensures all disks in the group snapshot at the same point in time. Setting enabled to true creates crash-consistent backups across multiple disks, critical for databases and stateful applications whose data spans more than one disk.

Beyond these examples

These snippets focus on specific resource policy features: snapshot scheduling and retention, instance power management, and VM placement and workload optimization. They’re intentionally minimal rather than full backup or deployment solutions.

The examples assume pre-existing infrastructure such as GCP project and region configuration, and compute instances or disks to attach policies to. They focus on defining the policy rather than the attachment mechanism.

To keep things focused, common resource policy patterns are omitted, including:

  • Policy attachment to resources (done via instance or disk resources)
  • Advanced snapshot properties (chainName for incremental backups)
  • Specialized hardware topologies (GPU/TPU configurations)
  • Fine-grained placement controls (maxDistance, maxTopologyDistance)

These omissions are intentional: the goal is to illustrate how each policy feature is wired, not provide drop-in infrastructure modules. See the Compute ResourcePolicy resource reference for all available configuration options.

Let's configure GCP Compute Resource Policies

Get started with Pulumi Cloud, then follow our quick setup guide to deploy this infrastructure.

Try Pulumi Cloud for FREE

Frequently Asked Questions

Policy Types & Selection
What types of resource policies can I create?
You can create five types of policies: snapshotSchedulePolicy for disk snapshots, instanceSchedulePolicy for VM start/stop scheduling, groupPlacementPolicy for VM placement, diskConsistencyGroupPolicy for replication consistency, and workloadPolicy for workload configuration. Each resource policy uses one of these types.
What's the difference between HIGH_AVAILABILITY and HIGH_THROUGHPUT workload policies?
HIGH_AVAILABILITY spreads VMs across fault domains to maximize resilience to hardware failure, while HIGH_THROUGHPUT optimizes for network and accelerator throughput and supports additional topology configurations like acceleratorTopology, acceleratorTopologyMode, and maxTopologyDistance.
Snapshot Scheduling
How do I schedule automatic snapshots for my disks?
Configure snapshotSchedulePolicy with either dailySchedule (using daysInCycle and startTime) or hourlySchedule (using hoursInCycle and startTime). You can add retentionPolicy to control retention days and behavior on source disk deletion, and snapshotProperties for labels, storage locations, and guest flush settings.
What does the chainName property do in snapshot schedules?
The chainName property in snapshotProperties assigns snapshots to a named chain, grouping related snapshots of the same disk so that chained (incremental-style) backups can be tracked and managed together.
Instance & Placement Scheduling
How do I automatically start and stop instances on a schedule?
Use instanceSchedulePolicy with vmStartSchedule and vmStopSchedule, each containing a cron-format schedule. Specify the timeZone for schedule interpretation (e.g., “US/Central”).
How do I configure VM placement for collocated instances?
Use groupPlacementPolicy with collocation set to “COLLOCATED” and specify vmCount. You can optionally add maxDistance for distance constraints, gpuTopology for GPU configurations (e.g., “1x72”), or tpuTopology for TPU configurations (e.g., “4x4”).
Resource Constraints
What are the naming requirements for resource policies?
The name must be 1-63 characters long, start with a lowercase letter, contain only lowercase letters, digits, and dashes, and cannot end with a dash. It must match the pattern [a-z][-a-z0-9]*[a-z0-9]?.
Can I change the region or project after creating a resource policy?
No, both region and project are immutable and cannot be changed after creation.
What's the purpose of a disk consistency group policy?
The diskConsistencyGroupPolicy creates a replication consistency group for asynchronous disk replication. Enable it by setting enabled to true.

Using a different cloud?

Explore compute guides for other cloud providers: