Configure GCP Sole-Tenant Node Groups

The gcp:compute/nodeGroup:NodeGroup resource, part of the Pulumi GCP provider, manages groups of sole-tenant nodes: physical Compute Engine hosts dedicated exclusively to your VMs, providing hardware-level isolation. This guide focuses on three capabilities: fixed and autoscaling node group sizing, maintenance interval configuration, and cross-project sharing.

Node groups depend on a NodeTemplate that defines the node type and the region; the group itself is created in a zone within that region. The examples are intentionally small. Combine them with your own node templates and project configuration.

Create a fixed-size node group

Sole-tenant deployments start by creating a node template, then provisioning a fixed number of physical hosts in a specific zone.

import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";

const soletenant_tmpl = new gcp.compute.NodeTemplate("soletenant-tmpl", {
    name: "soletenant-tmpl",
    region: "us-central1",
    nodeType: "n1-node-96-624",
});
const nodes = new gcp.compute.NodeGroup("nodes", {
    name: "soletenant-group",
    zone: "us-central1-a",
    description: "example google_compute_node_group for the Google Provider",
    initialSize: 1,
    nodeTemplate: soletenant_tmpl.id,
});
import pulumi
import pulumi_gcp as gcp

soletenant_tmpl = gcp.compute.NodeTemplate("soletenant-tmpl",
    name="soletenant-tmpl",
    region="us-central1",
    node_type="n1-node-96-624")
nodes = gcp.compute.NodeGroup("nodes",
    name="soletenant-group",
    zone="us-central1-a",
    description="example google_compute_node_group for the Google Provider",
    initial_size=1,
    node_template=soletenant_tmpl.id)
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/compute"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		soletenant_tmpl, err := compute.NewNodeTemplate(ctx, "soletenant-tmpl", &compute.NodeTemplateArgs{
			Name:     pulumi.String("soletenant-tmpl"),
			Region:   pulumi.String("us-central1"),
			NodeType: pulumi.String("n1-node-96-624"),
		})
		if err != nil {
			return err
		}
		_, err = compute.NewNodeGroup(ctx, "nodes", &compute.NodeGroupArgs{
			Name:         pulumi.String("soletenant-group"),
			Zone:         pulumi.String("us-central1-a"),
			Description:  pulumi.String("example google_compute_node_group for the Google Provider"),
			InitialSize:  pulumi.Int(1),
			NodeTemplate: soletenant_tmpl.ID(),
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;

return await Deployment.RunAsync(() => 
{
    var soletenant_tmpl = new Gcp.Compute.NodeTemplate("soletenant-tmpl", new()
    {
        Name = "soletenant-tmpl",
        Region = "us-central1",
        NodeType = "n1-node-96-624",
    });

    var nodes = new Gcp.Compute.NodeGroup("nodes", new()
    {
        Name = "soletenant-group",
        Zone = "us-central1-a",
        Description = "example google_compute_node_group for the Google Provider",
        InitialSize = 1,
        NodeTemplate = soletenant_tmpl.Id,
    });

});
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.compute.NodeTemplate;
import com.pulumi.gcp.compute.NodeTemplateArgs;
import com.pulumi.gcp.compute.NodeGroup;
import com.pulumi.gcp.compute.NodeGroupArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var soletenant_tmpl = new NodeTemplate("soletenant-tmpl", NodeTemplateArgs.builder()
            .name("soletenant-tmpl")
            .region("us-central1")
            .nodeType("n1-node-96-624")
            .build());

        var nodes = new NodeGroup("nodes", NodeGroupArgs.builder()
            .name("soletenant-group")
            .zone("us-central1-a")
            .description("example google_compute_node_group for the Google Provider")
            .initialSize(1)
            .nodeTemplate(soletenant_tmpl.id())
            .build());

    }
}
resources:
  soletenant-tmpl:
    type: gcp:compute:NodeTemplate
    properties:
      name: soletenant-tmpl
      region: us-central1
      nodeType: n1-node-96-624
  nodes:
    type: gcp:compute:NodeGroup
    properties:
      name: soletenant-group
      zone: us-central1-a
      description: example google_compute_node_group for the Google Provider
      initialSize: 1
      nodeTemplate: ${["soletenant-tmpl"].id}

The nodeTemplate property references the template that defines the node type and hardware configuration. The initialSize property sets the number of physical hosts to provision. Once created, the node group reserves dedicated hardware for your VMs.
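
To see the group in use, you can pin a VM to it through node affinity labels. The TypeScript sketch below is a minimal, hypothetical continuation of the example above: the machine type, image, and network values are placeholders, and the built-in compute.googleapis.com/node-group-name affinity label targets the group by name.

const vm = new gcp.compute.Instance("sole-tenant-vm", {
    name: "sole-tenant-vm",
    zone: "us-central1-a",
    // Placeholder machine type; it must fit on the n1-node-96-624 host.
    machineType: "n1-standard-8",
    bootDisk: {
        initializeParams: {
            image: "debian-cloud/debian-12",
        },
    },
    networkInterfaces: [{
        network: "default",
    }],
    scheduling: {
        // Schedule this VM onto the sole-tenant group created above.
        nodeAffinities: [{
            key: "compute.googleapis.com/node-group-name",
            operator: "IN",
            values: [nodes.name],
        }],
    },
});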

Control maintenance frequency with a RECURRENT interval

Production workloads benefit from a predictable maintenance cadence that batches host updates rather than applying them as they become available.

import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";

const soletenant_tmpl = new gcp.compute.NodeTemplate("soletenant-tmpl", {
    name: "soletenant-tmpl",
    region: "us-central1",
    nodeType: "c2-node-60-240",
});
const nodes = new gcp.compute.NodeGroup("nodes", {
    name: "soletenant-group",
    zone: "us-central1-a",
    description: "example google_compute_node_group for Terraform Google Provider",
    initialSize: 1,
    nodeTemplate: soletenant_tmpl.id,
    maintenanceInterval: "RECURRENT",
});
import pulumi
import pulumi_gcp as gcp

soletenant_tmpl = gcp.compute.NodeTemplate("soletenant-tmpl",
    name="soletenant-tmpl",
    region="us-central1",
    node_type="c2-node-60-240")
nodes = gcp.compute.NodeGroup("nodes",
    name="soletenant-group",
    zone="us-central1-a",
    description="example google_compute_node_group for Terraform Google Provider",
    initial_size=1,
    node_template=soletenant_tmpl.id,
    maintenance_interval="RECURRENT")
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/compute"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		soletenant_tmpl, err := compute.NewNodeTemplate(ctx, "soletenant-tmpl", &compute.NodeTemplateArgs{
			Name:     pulumi.String("soletenant-tmpl"),
			Region:   pulumi.String("us-central1"),
			NodeType: pulumi.String("c2-node-60-240"),
		})
		if err != nil {
			return err
		}
		_, err = compute.NewNodeGroup(ctx, "nodes", &compute.NodeGroupArgs{
			Name:                pulumi.String("soletenant-group"),
			Zone:                pulumi.String("us-central1-a"),
			Description:         pulumi.String("example google_compute_node_group for the Google Provider"),
			InitialSize:         pulumi.Int(1),
			NodeTemplate:        soletenant_tmpl.ID(),
			MaintenanceInterval: pulumi.String("RECURRENT"),
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;

return await Deployment.RunAsync(() => 
{
    var soletenant_tmpl = new Gcp.Compute.NodeTemplate("soletenant-tmpl", new()
    {
        Name = "soletenant-tmpl",
        Region = "us-central1",
        NodeType = "c2-node-60-240",
    });

    var nodes = new Gcp.Compute.NodeGroup("nodes", new()
    {
        Name = "soletenant-group",
        Zone = "us-central1-a",
        Description = "example google_compute_node_group for Terraform Google Provider",
        InitialSize = 1,
        NodeTemplate = soletenant_tmpl.Id,
        MaintenanceInterval = "RECURRENT",
    });

});
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.compute.NodeTemplate;
import com.pulumi.gcp.compute.NodeTemplateArgs;
import com.pulumi.gcp.compute.NodeGroup;
import com.pulumi.gcp.compute.NodeGroupArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var soletenant_tmpl = new NodeTemplate("soletenant-tmpl", NodeTemplateArgs.builder()
            .name("soletenant-tmpl")
            .region("us-central1")
            .nodeType("c2-node-60-240")
            .build());

        var nodes = new NodeGroup("nodes", NodeGroupArgs.builder()
            .name("soletenant-group")
            .zone("us-central1-a")
            .description("example google_compute_node_group for Terraform Google Provider")
            .initialSize(1)
            .nodeTemplate(soletenant_tmpl.id())
            .maintenanceInterval("RECURRENT")
            .build());

    }
}
resources:
  soletenant-tmpl:
    type: gcp:compute:NodeTemplate
    properties:
      name: soletenant-tmpl
      region: us-central1
      nodeType: c2-node-60-240
  nodes:
    type: gcp:compute:NodeGroup
    properties:
      name: soletenant-group
      zone: us-central1-a
      description: example google_compute_node_group for the Google Provider
      initialSize: 1
      nodeTemplate: ${["soletenant-tmpl"].id}
      maintenanceInterval: RECURRENT

The maintenanceInterval property controls when Google applies infrastructure updates. Setting it to RECURRENT schedules updates on a periodic basis (no more than every 28 days), reducing the frequency of live migrations and VM disruptions.
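
If you prefer the default behavior but want it explicit in code, the interval can instead be set to AS_NEEDED. A minimal TypeScript sketch, reusing the template from the example above (the resource names are illustrative):

const nodesAsNeeded = new gcp.compute.NodeGroup("nodes-as-needed", {
    name: "soletenant-group-as-needed",
    zone: "us-central1-a",
    initialSize: 1,
    nodeTemplate: soletenant_tmpl.id,
    // Default behavior: hosts receive updates as they become available.
    maintenanceInterval: "AS_NEEDED",
});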

Scale node groups automatically based on demand

Workloads with variable capacity requirements can use autoscaling to adjust the node count automatically rather than provisioning for peak.

import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";

const soletenant_tmpl = new gcp.compute.NodeTemplate("soletenant-tmpl", {
    name: "soletenant-tmpl",
    region: "us-central1",
    nodeType: "n1-node-96-624",
});
const nodes = new gcp.compute.NodeGroup("nodes", {
    name: "soletenant-group",
    zone: "us-central1-a",
    description: "example google_compute_node_group for Google Provider",
    maintenancePolicy: "RESTART_IN_PLACE",
    maintenanceWindow: {
        startTime: "08:00",
    },
    initialSize: 1,
    nodeTemplate: soletenant_tmpl.id,
    autoscalingPolicy: {
        mode: "ONLY_SCALE_OUT",
        minNodes: 1,
        maxNodes: 10,
    },
});
import pulumi
import pulumi_gcp as gcp

soletenant_tmpl = gcp.compute.NodeTemplate("soletenant-tmpl",
    name="soletenant-tmpl",
    region="us-central1",
    node_type="n1-node-96-624")
nodes = gcp.compute.NodeGroup("nodes",
    name="soletenant-group",
    zone="us-central1-a",
    description="example google_compute_node_group for Google Provider",
    maintenance_policy="RESTART_IN_PLACE",
    maintenance_window={
        "start_time": "08:00",
    },
    initial_size=1,
    node_template=soletenant_tmpl.id,
    autoscaling_policy={
        "mode": "ONLY_SCALE_OUT",
        "min_nodes": 1,
        "max_nodes": 10,
    })
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/compute"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		soletenant_tmpl, err := compute.NewNodeTemplate(ctx, "soletenant-tmpl", &compute.NodeTemplateArgs{
			Name:     pulumi.String("soletenant-tmpl"),
			Region:   pulumi.String("us-central1"),
			NodeType: pulumi.String("n1-node-96-624"),
		})
		if err != nil {
			return err
		}
		_, err = compute.NewNodeGroup(ctx, "nodes", &compute.NodeGroupArgs{
			Name:              pulumi.String("soletenant-group"),
			Zone:              pulumi.String("us-central1-a"),
			Description:       pulumi.String("example google_compute_node_group for the Google Provider"),
			MaintenancePolicy: pulumi.String("RESTART_IN_PLACE"),
			MaintenanceWindow: &compute.NodeGroupMaintenanceWindowArgs{
				StartTime: pulumi.String("08:00"),
			},
			InitialSize:  pulumi.Int(1),
			NodeTemplate: soletenant_tmpl.ID(),
			AutoscalingPolicy: &compute.NodeGroupAutoscalingPolicyArgs{
				Mode:     pulumi.String("ONLY_SCALE_OUT"),
				MinNodes: pulumi.Int(1),
				MaxNodes: pulumi.Int(10),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;

return await Deployment.RunAsync(() => 
{
    var soletenant_tmpl = new Gcp.Compute.NodeTemplate("soletenant-tmpl", new()
    {
        Name = "soletenant-tmpl",
        Region = "us-central1",
        NodeType = "n1-node-96-624",
    });

    var nodes = new Gcp.Compute.NodeGroup("nodes", new()
    {
        Name = "soletenant-group",
        Zone = "us-central1-a",
        Description = "example google_compute_node_group for Google Provider",
        MaintenancePolicy = "RESTART_IN_PLACE",
        MaintenanceWindow = new Gcp.Compute.Inputs.NodeGroupMaintenanceWindowArgs
        {
            StartTime = "08:00",
        },
        InitialSize = 1,
        NodeTemplate = soletenant_tmpl.Id,
        AutoscalingPolicy = new Gcp.Compute.Inputs.NodeGroupAutoscalingPolicyArgs
        {
            Mode = "ONLY_SCALE_OUT",
            MinNodes = 1,
            MaxNodes = 10,
        },
    });

});
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.compute.NodeTemplate;
import com.pulumi.gcp.compute.NodeTemplateArgs;
import com.pulumi.gcp.compute.NodeGroup;
import com.pulumi.gcp.compute.NodeGroupArgs;
import com.pulumi.gcp.compute.inputs.NodeGroupMaintenanceWindowArgs;
import com.pulumi.gcp.compute.inputs.NodeGroupAutoscalingPolicyArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var soletenant_tmpl = new NodeTemplate("soletenant-tmpl", NodeTemplateArgs.builder()
            .name("soletenant-tmpl")
            .region("us-central1")
            .nodeType("n1-node-96-624")
            .build());

        var nodes = new NodeGroup("nodes", NodeGroupArgs.builder()
            .name("soletenant-group")
            .zone("us-central1-a")
            .description("example google_compute_node_group for Google Provider")
            .maintenancePolicy("RESTART_IN_PLACE")
            .maintenanceWindow(NodeGroupMaintenanceWindowArgs.builder()
                .startTime("08:00")
                .build())
            .initialSize(1)
            .nodeTemplate(soletenant_tmpl.id())
            .autoscalingPolicy(NodeGroupAutoscalingPolicyArgs.builder()
                .mode("ONLY_SCALE_OUT")
                .minNodes(1)
                .maxNodes(10)
                .build())
            .build());

    }
}
resources:
  soletenant-tmpl:
    type: gcp:compute:NodeTemplate
    properties:
      name: soletenant-tmpl
      region: us-central1
      nodeType: n1-node-96-624
  nodes:
    type: gcp:compute:NodeGroup
    properties:
      name: soletenant-group
      zone: us-central1-a
      description: example google_compute_node_group for the Google Provider
      maintenancePolicy: RESTART_IN_PLACE
      maintenanceWindow:
        startTime: "08:00"
      initialSize: 1
      nodeTemplate: ${["soletenant-tmpl"].id}
      autoscalingPolicy:
        mode: ONLY_SCALE_OUT
        minNodes: 1
        maxNodes: 10

The autoscalingPolicy property defines scaling bounds and behavior. The mode property controls whether the group can scale in both directions (ON) or only add nodes (ONLY_SCALE_OUT). The maintenancePolicy and maintenanceWindow properties control how instances behave during maintenance events.
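
If the group should also release hosts when demand drops, the mode can be set to ON. A brief TypeScript sketch under the same assumptions as the example above (the resource names are illustrative):

const elasticNodes = new gcp.compute.NodeGroup("elastic-nodes", {
    name: "soletenant-elastic",
    zone: "us-central1-a",
    initialSize: 2,
    nodeTemplate: soletenant_tmpl.id,
    autoscalingPolicy: {
        // ON allows both scale-out and scale-in within the bounds below.
        mode: "ON",
        minNodes: 2,
        maxNodes: 10,
    },
});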

Share node groups across projects

Organizations can share sole-tenant nodes across multiple projects to consolidate infrastructure while maintaining isolation.

import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";

const guestProject = new gcp.organizations.Project("guest_project", {
    projectId: "project-id",
    name: "project-name",
    orgId: "123456789",
    deletionPolicy: "DELETE",
});
const soletenant_tmpl = new gcp.compute.NodeTemplate("soletenant-tmpl", {
    name: "soletenant-tmpl",
    region: "us-central1",
    nodeType: "n1-node-96-624",
});
const nodes = new gcp.compute.NodeGroup("nodes", {
    name: "soletenant-group",
    zone: "us-central1-f",
    description: "example google_compute_node_group for Terraform Google Provider",
    initialSize: 1,
    nodeTemplate: soletenant_tmpl.id,
    shareSettings: {
        shareType: "SPECIFIC_PROJECTS",
        projectMaps: [{
            id: guestProject.projectId,
            projectId: guestProject.projectId,
        }],
    },
});
import pulumi
import pulumi_gcp as gcp

guest_project = gcp.organizations.Project("guest_project",
    project_id="project-id",
    name="project-name",
    org_id="123456789",
    deletion_policy="DELETE")
soletenant_tmpl = gcp.compute.NodeTemplate("soletenant-tmpl",
    name="soletenant-tmpl",
    region="us-central1",
    node_type="n1-node-96-624")
nodes = gcp.compute.NodeGroup("nodes",
    name="soletenant-group",
    zone="us-central1-f",
    description="example google_compute_node_group for Terraform Google Provider",
    initial_size=1,
    node_template=soletenant_tmpl.id,
    share_settings={
        "share_type": "SPECIFIC_PROJECTS",
        "project_maps": [{
            "id": guest_project.project_id,
            "project_id": guest_project.project_id,
        }],
    })
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/compute"
	"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/organizations"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		guestProject, err := organizations.NewProject(ctx, "guest_project", &organizations.ProjectArgs{
			ProjectId:      pulumi.String("project-id"),
			Name:           pulumi.String("project-name"),
			OrgId:          pulumi.String("123456789"),
			DeletionPolicy: pulumi.String("DELETE"),
		})
		if err != nil {
			return err
		}
		soletenant_tmpl, err := compute.NewNodeTemplate(ctx, "soletenant-tmpl", &compute.NodeTemplateArgs{
			Name:     pulumi.String("soletenant-tmpl"),
			Region:   pulumi.String("us-central1"),
			NodeType: pulumi.String("n1-node-96-624"),
		})
		if err != nil {
			return err
		}
		_, err = compute.NewNodeGroup(ctx, "nodes", &compute.NodeGroupArgs{
			Name:         pulumi.String("soletenant-group"),
			Zone:         pulumi.String("us-central1-f"),
			Description:  pulumi.String("example google_compute_node_group for the Google Provider"),
			InitialSize:  pulumi.Int(1),
			NodeTemplate: soletenant_tmpl.ID(),
			ShareSettings: &compute.NodeGroupShareSettingsArgs{
				ShareType: pulumi.String("SPECIFIC_PROJECTS"),
				ProjectMaps: compute.NodeGroupShareSettingsProjectMapArray{
					&compute.NodeGroupShareSettingsProjectMapArgs{
						Id:        guestProject.ProjectId,
						ProjectId: guestProject.ProjectId,
					},
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;

return await Deployment.RunAsync(() => 
{
    var guestProject = new Gcp.Organizations.Project("guest_project", new()
    {
        ProjectId = "project-id",
        Name = "project-name",
        OrgId = "123456789",
        DeletionPolicy = "DELETE",
    });

    var soletenant_tmpl = new Gcp.Compute.NodeTemplate("soletenant-tmpl", new()
    {
        Name = "soletenant-tmpl",
        Region = "us-central1",
        NodeType = "n1-node-96-624",
    });

    var nodes = new Gcp.Compute.NodeGroup("nodes", new()
    {
        Name = "soletenant-group",
        Zone = "us-central1-f",
        Description = "example google_compute_node_group for Terraform Google Provider",
        InitialSize = 1,
        NodeTemplate = soletenant_tmpl.Id,
        ShareSettings = new Gcp.Compute.Inputs.NodeGroupShareSettingsArgs
        {
            ShareType = "SPECIFIC_PROJECTS",
            ProjectMaps = new[]
            {
                new Gcp.Compute.Inputs.NodeGroupShareSettingsProjectMapArgs
                {
                    Id = guestProject.ProjectId,
                    ProjectId = guestProject.ProjectId,
                },
            },
        },
    });

});
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.organizations.Project;
import com.pulumi.gcp.organizations.ProjectArgs;
import com.pulumi.gcp.compute.NodeTemplate;
import com.pulumi.gcp.compute.NodeTemplateArgs;
import com.pulumi.gcp.compute.NodeGroup;
import com.pulumi.gcp.compute.NodeGroupArgs;
import com.pulumi.gcp.compute.inputs.NodeGroupShareSettingsArgs;
import com.pulumi.gcp.compute.inputs.NodeGroupShareSettingsProjectMapArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var guestProject = new Project("guestProject", ProjectArgs.builder()
            .projectId("project-id")
            .name("project-name")
            .orgId("123456789")
            .deletionPolicy("DELETE")
            .build());

        var soletenant_tmpl = new NodeTemplate("soletenant-tmpl", NodeTemplateArgs.builder()
            .name("soletenant-tmpl")
            .region("us-central1")
            .nodeType("n1-node-96-624")
            .build());

        var nodes = new NodeGroup("nodes", NodeGroupArgs.builder()
            .name("soletenant-group")
            .zone("us-central1-f")
            .description("example google_compute_node_group for Terraform Google Provider")
            .initialSize(1)
            .nodeTemplate(soletenant_tmpl.id())
            .shareSettings(NodeGroupShareSettingsArgs.builder()
                .shareType("SPECIFIC_PROJECTS")
                .projectMaps(NodeGroupShareSettingsProjectMapArgs.builder()
                    .id(guestProject.projectId())
                    .projectId(guestProject.projectId())
                    .build())
                .build())
            .build());

    }
}
resources:
  guestProject:
    type: gcp:organizations:Project
    name: guest_project
    properties:
      projectId: project-id
      name: project-name
      orgId: '123456789'
      deletionPolicy: DELETE
  soletenant-tmpl:
    type: gcp:compute:NodeTemplate
    properties:
      name: soletenant-tmpl
      region: us-central1
      nodeType: n1-node-96-624
  nodes:
    type: gcp:compute:NodeGroup
    properties:
      name: soletenant-group
      zone: us-central1-f
      description: example google_compute_node_group for the Google Provider
      initialSize: 1
      nodeTemplate: ${["soletenant-tmpl"].id}
      shareSettings:
        shareType: SPECIFIC_PROJECTS
        projectMaps:
          - id: ${guestProject.projectId}
            projectId: ${guestProject.projectId}

The shareSettings property enables cross-project sharing. Setting shareType to SPECIFIC_PROJECTS restricts access to listed projects. The projectMaps array identifies which projects can schedule VMs on these nodes.
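
On the consuming side, a VM created in the guest project can target the shared group through the same node affinity label. The TypeScript sketch below is a hypothetical continuation: it assumes the guest project has the required permissions and network in place, and the machine type and image are placeholders.

// An explicit provider scoped to the guest project.
const guestProvider = new gcp.Provider("guest", {
    project: guestProject.projectId,
});

const guestVm = new gcp.compute.Instance("guest-vm", {
    name: "guest-vm",
    zone: "us-central1-f",
    machineType: "n1-standard-8",
    bootDisk: {
        initializeParams: {
            image: "debian-cloud/debian-12",
        },
    },
    networkInterfaces: [{
        network: "default",
    }],
    scheduling: {
        // Target the shared sole-tenant group by name.
        nodeAffinities: [{
            key: "compute.googleapis.com/node-group-name",
            operator: "IN",
            values: [nodes.name],
        }],
    },
}, { provider: guestProvider });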

Beyond these examples

These snippets focus on specific node group features: fixed and autoscaling node group sizing, maintenance scheduling and policies, and cross-project sharing. They’re intentionally minimal rather than full sole-tenant deployments.

The examples create minimal supporting resources inline, such as node templates that define node types and a guest project for the sharing scenario. They focus on node group configuration rather than production-ready surrounding infrastructure.

To keep things focused, several configuration options are left out, including:

  • The DEFAULT and MIGRATE_WITHIN_NODE_GROUP maintenance policies
  • The OFF autoscaling mode
  • The LOCAL and ORGANIZATION share types

These omissions are intentional: the goal is to illustrate how each node group feature is wired, not to provide drop-in sole-tenant modules. See the NodeGroup resource reference for all available configuration options.

Frequently Asked Questions

Node Group Sizing & Autoscaling

Why does my node group get recreated when I change the size?
Due to API limitations, the provider can't update node group size in place. Any size changes (through configuration or external modifications) cause the node group to be deleted and recreated.

Do I need to set initialSize or autoscalingPolicy?
You must configure one of these on resource creation. Use initialSize for a fixed number of nodes, or autoscalingPolicy to enable automatic scaling.

How do I enable autoscaling for my node group?
Configure autoscalingPolicy with mode, minNodes, and maxNodes. For example, set mode to ONLY_SCALE_OUT to allow growth without automatic shrinking.

Maintenance Configuration

What's the difference between AS_NEEDED and RECURRENT maintenance intervals?
AS_NEEDED allows hosts to receive updates as they become available. RECURRENT schedules updates periodically (no more than every 28 days), minimizing disruptions like live migrations and terminations.

What maintenance policies are available?
You can set maintenancePolicy to DEFAULT, RESTART_IN_PLACE, or MIGRATE_WITHIN_NODE_GROUP to control how instances are handled during node maintenance. The default value is DEFAULT.

How do I configure a maintenance window?
Use the maintenanceWindow property with a startTime field to specify when maintenance can occur, as shown in the autoscaling example.

Resource Sharing & Multi-Project Setup

How do I share my node group with other projects?
Configure shareSettings with shareType set to SPECIFIC_PROJECTS and provide projectMaps containing the project IDs you want to share with.
