Configure GCP Compute Node Templates

The gcp:compute/nodeTemplate:NodeTemplate resource, part of the Pulumi GCP provider, defines the hardware specifications for sole-tenant nodes: machine type, region, and optional GPUs or local storage. This guide focuses on three capabilities: basic template creation, maintenance restart policies, and GPU and local SSD configuration.

Node templates are referenced by node groups, which provision the actual physical servers. VMs are then scheduled onto those nodes using affinity labels. The examples are intentionally small. Combine them with node groups and VM instance configurations for complete sole-tenant deployments.

Define a basic sole-tenant node template

Sole-tenant nodes provide dedicated physical servers for VMs, meeting compliance or licensing requirements that prohibit multi-tenant infrastructure.

TypeScript

import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";

const template = new gcp.compute.NodeTemplate("template", {
    name: "soletenant-tmpl",
    region: "us-central1",
    nodeType: "n1-node-96-624",
});

Python

import pulumi
import pulumi_gcp as gcp

template = gcp.compute.NodeTemplate("template",
    name="soletenant-tmpl",
    region="us-central1",
    node_type="n1-node-96-624")

Go

package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/compute"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := compute.NewNodeTemplate(ctx, "template", &compute.NodeTemplateArgs{
			Name:     pulumi.String("soletenant-tmpl"),
			Region:   pulumi.String("us-central1"),
			NodeType: pulumi.String("n1-node-96-624"),
		})
		if err != nil {
			return err
		}
		return nil
	})
}

C#

using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;

return await Deployment.RunAsync(() => 
{
    var template = new Gcp.Compute.NodeTemplate("template", new()
    {
        Name = "soletenant-tmpl",
        Region = "us-central1",
        NodeType = "n1-node-96-624",
    });

});

Java

package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.compute.NodeTemplate;
import com.pulumi.gcp.compute.NodeTemplateArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var template = new NodeTemplate("template", NodeTemplateArgs.builder()
            .name("soletenant-tmpl")
            .region("us-central1")
            .nodeType("n1-node-96-624")
            .build());

    }
}

YAML

resources:
  template:
    type: gcp:compute:NodeTemplate
    properties:
      name: soletenant-tmpl
      region: us-central1
      nodeType: n1-node-96-624

The nodeType property specifies the machine family and resource allocation (96 vCPUs, 624 GB memory in this case). The region determines where Google will provision the physical hardware. Node groups reference this template when creating actual nodes.
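
As a sketch of how those pieces connect, continuing the TypeScript example above (the group and VM names, zone, machine type, and boot image below are illustrative, not part of the original example):

// Provisions one physical server from the template.
const group = new gcp.compute.NodeGroup("group", {
    zone: "us-central1-a",
    nodeTemplate: template.id,
    initialSize: 1,
});

// The VM lands on the group's servers via the built-in node-group affinity key.
const vm = new gcp.compute.Instance("vm", {
    zone: "us-central1-a",
    machineType: "n1-standard-4",
    bootDisk: { initializeParams: { image: "debian-cloud/debian-12" } },
    networkInterfaces: [{ network: "default" }],
    scheduling: {
        nodeAffinities: [{
            key: "compute.googleapis.com/node-group-name",
            operator: "IN",
            values: [group.name],
        }],
    },
});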

Control restart behavior with server binding

When Google performs maintenance on underlying hardware, the serverBinding policy controls whether VMs restart on the same physical server or migrate elsewhere.

TypeScript

import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";

const central1a = gcp.compute.getNodeTypes({
    zone: "us-central1-a",
});
const template = new gcp.compute.NodeTemplate("template", {
    name: "soletenant-with-licenses",
    region: "us-central1",
    nodeType: "n1-node-96-624",
    nodeAffinityLabels: {
        foo: "baz",
    },
    serverBinding: {
        type: "RESTART_NODE_ON_MINIMAL_SERVERS",
    },
});

Python

import pulumi
import pulumi_gcp as gcp

central1a = gcp.compute.get_node_types(zone="us-central1-a")
template = gcp.compute.NodeTemplate("template",
    name="soletenant-with-licenses",
    region="us-central1",
    node_type="n1-node-96-624",
    node_affinity_labels={
        "foo": "baz",
    },
    server_binding={
        "type": "RESTART_NODE_ON_MINIMAL_SERVERS",
    })

Go

package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/compute"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := compute.GetNodeTypes(ctx, &compute.GetNodeTypesArgs{
			Zone: pulumi.StringRef("us-central1-a"),
		}, nil)
		if err != nil {
			return err
		}
		_, err = compute.NewNodeTemplate(ctx, "template", &compute.NodeTemplateArgs{
			Name:     pulumi.String("soletenant-with-licenses"),
			Region:   pulumi.String("us-central1"),
			NodeType: pulumi.String("n1-node-96-624"),
			NodeAffinityLabels: pulumi.StringMap{
				"foo": pulumi.String("baz"),
			},
			ServerBinding: &compute.NodeTemplateServerBindingArgs{
				Type: pulumi.String("RESTART_NODE_ON_MINIMAL_SERVERS"),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}

C#

using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;

return await Deployment.RunAsync(() => 
{
    var central1a = Gcp.Compute.GetNodeTypes.Invoke(new()
    {
        Zone = "us-central1-a",
    });

    var template = new Gcp.Compute.NodeTemplate("template", new()
    {
        Name = "soletenant-with-licenses",
        Region = "us-central1",
        NodeType = "n1-node-96-624",
        NodeAffinityLabels = 
        {
            { "foo", "baz" },
        },
        ServerBinding = new Gcp.Compute.Inputs.NodeTemplateServerBindingArgs
        {
            Type = "RESTART_NODE_ON_MINIMAL_SERVERS",
        },
    });

});

Java

package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.compute.ComputeFunctions;
import com.pulumi.gcp.compute.inputs.GetNodeTypesArgs;
import com.pulumi.gcp.compute.NodeTemplate;
import com.pulumi.gcp.compute.NodeTemplateArgs;
import com.pulumi.gcp.compute.inputs.NodeTemplateServerBindingArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        final var central1a = ComputeFunctions.getNodeTypes(GetNodeTypesArgs.builder()
            .zone("us-central1-a")
            .build());

        var template = new NodeTemplate("template", NodeTemplateArgs.builder()
            .name("soletenant-with-licenses")
            .region("us-central1")
            .nodeType("n1-node-96-624")
            .nodeAffinityLabels(Map.of("foo", "baz"))
            .serverBinding(NodeTemplateServerBindingArgs.builder()
                .type("RESTART_NODE_ON_MINIMAL_SERVERS")
                .build())
            .build());

    }
}

YAML

resources:
  template:
    type: gcp:compute:NodeTemplate
    properties:
      name: soletenant-with-licenses
      region: us-central1
      nodeType: n1-node-96-624
      nodeAffinityLabels:
        foo: baz
      serverBinding:
        type: RESTART_NODE_ON_MINIMAL_SERVERS
variables:
  central1a:
    fn::invoke:
      function: gcp:compute:getNodeTypes
      arguments:
        zone: us-central1-a

The serverBinding type RESTART_NODE_ON_MINIMAL_SERVERS restarts nodes on the same physical server where possible after a maintenance event, minimizing the number of distinct servers used, which matters for licenses tied to hardware identifiers; the default, RESTART_NODE_ON_ANY_SERVER, allows restarts on any available server. The nodeAffinityLabels provide key-value pairs that VMs can use in scheduling constraints to target specific nodes. The getNodeTypes lookup (central1a) lists the node types available in a zone; in these examples it is informational and not consumed by the template.
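
As a sketch of how VMs consume those labels (the instance name, zone, machine type, and boot image here are placeholders, not part of the original example):

const pinnedVm = new gcp.compute.Instance("pinned-vm", {
    zone: "us-central1-a",
    machineType: "n1-standard-4",
    bootDisk: { initializeParams: { image: "debian-cloud/debian-12" } },
    networkInterfaces: [{ network: "default" }],
    scheduling: {
        // Matches sole-tenant nodes carrying the template's foo=baz affinity label.
        nodeAffinities: [{ key: "foo", operator: "IN", values: ["baz"] }],
    },
});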

Attach GPUs for compute-intensive workloads

Machine learning and rendering workloads often require GPU acceleration available on the physical node.

TypeScript

import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";

const central1a = gcp.compute.getNodeTypes({
    zone: "us-central1-a",
});
const template = new gcp.compute.NodeTemplate("template", {
    name: "soletenant-with-accelerators",
    region: "us-central1",
    nodeType: "n1-node-96-624",
    accelerators: [{
        acceleratorType: "nvidia-tesla-t4",
        acceleratorCount: 4,
    }],
});

Python

import pulumi
import pulumi_gcp as gcp

central1a = gcp.compute.get_node_types(zone="us-central1-a")
template = gcp.compute.NodeTemplate("template",
    name="soletenant-with-accelerators",
    region="us-central1",
    node_type="n1-node-96-624",
    accelerators=[{
        "accelerator_type": "nvidia-tesla-t4",
        "accelerator_count": 4,
    }])

Go

package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/compute"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := compute.GetNodeTypes(ctx, &compute.GetNodeTypesArgs{
			Zone: pulumi.StringRef("us-central1-a"),
		}, nil)
		if err != nil {
			return err
		}
		_, err = compute.NewNodeTemplate(ctx, "template", &compute.NodeTemplateArgs{
			Name:     pulumi.String("soletenant-with-accelerators"),
			Region:   pulumi.String("us-central1"),
			NodeType: pulumi.String("n1-node-96-624"),
			Accelerators: compute.NodeTemplateAcceleratorArray{
				&compute.NodeTemplateAcceleratorArgs{
					AcceleratorType:  pulumi.String("nvidia-tesla-t4"),
					AcceleratorCount: pulumi.Int(4),
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}

C#

using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;

return await Deployment.RunAsync(() => 
{
    var central1a = Gcp.Compute.GetNodeTypes.Invoke(new()
    {
        Zone = "us-central1-a",
    });

    var template = new Gcp.Compute.NodeTemplate("template", new()
    {
        Name = "soletenant-with-accelerators",
        Region = "us-central1",
        NodeType = "n1-node-96-624",
        Accelerators = new[]
        {
            new Gcp.Compute.Inputs.NodeTemplateAcceleratorArgs
            {
                AcceleratorType = "nvidia-tesla-t4",
                AcceleratorCount = 4,
            },
        },
    });

});

Java

package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.compute.ComputeFunctions;
import com.pulumi.gcp.compute.inputs.GetNodeTypesArgs;
import com.pulumi.gcp.compute.NodeTemplate;
import com.pulumi.gcp.compute.NodeTemplateArgs;
import com.pulumi.gcp.compute.inputs.NodeTemplateAcceleratorArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        final var central1a = ComputeFunctions.getNodeTypes(GetNodeTypesArgs.builder()
            .zone("us-central1-a")
            .build());

        var template = new NodeTemplate("template", NodeTemplateArgs.builder()
            .name("soletenant-with-accelerators")
            .region("us-central1")
            .nodeType("n1-node-96-624")
            .accelerators(NodeTemplateAcceleratorArgs.builder()
                .acceleratorType("nvidia-tesla-t4")
                .acceleratorCount(4)
                .build())
            .build());

    }
}

YAML

resources:
  template:
    type: gcp:compute:NodeTemplate
    properties:
      name: soletenant-with-accelerators
      region: us-central1
      nodeType: n1-node-96-624
      accelerators:
        - acceleratorType: nvidia-tesla-t4
          acceleratorCount: 4
variables:
  central1a:
    fn::invoke:
      function: gcp:compute:getNodeTypes
      arguments:
        zone: us-central1-a

The accelerators array specifies GPU type and count per node. Here, each physical node will have four NVIDIA Tesla T4 GPUs. VMs scheduled on these nodes can access the attached accelerators for compute-intensive tasks.
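
To use those GPUs, a VM must request matching guest accelerators. A minimal sketch, where the instance name, machine type, boot image, and node group name are assumptions rather than part of the original example:

const gpuVm = new gcp.compute.Instance("gpu-vm", {
    zone: "us-central1-a",
    machineType: "n1-standard-8",
    bootDisk: { initializeParams: { image: "debian-cloud/debian-12" } },
    networkInterfaces: [{ network: "default" }],
    // Requests one of the node's attached T4 GPUs for this VM.
    guestAccelerators: [{ type: "nvidia-tesla-t4", count: 1 }],
    scheduling: {
        onHostMaintenance: "TERMINATE", // required for VMs with GPUs
        nodeAffinities: [{
            key: "compute.googleapis.com/node-group-name",
            operator: "IN",
            values: ["gpu-node-group"], // hypothetical node group name
        }],
    },
});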

Configure local SSD storage for high IOPS

Applications requiring high-throughput local storage can use local SSDs attached directly to the physical node.

TypeScript

import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";

const central1a = gcp.compute.getNodeTypes({
    zone: "us-central1-a",
});
const template = new gcp.compute.NodeTemplate("template", {
    name: "soletenant-with-disks",
    region: "us-central1",
    nodeType: "n2-node-80-640",
    disks: [{
        diskCount: 16,
        diskSizeGb: 375,
        diskType: "local-ssd",
    }],
});

Python

import pulumi
import pulumi_gcp as gcp

central1a = gcp.compute.get_node_types(zone="us-central1-a")
template = gcp.compute.NodeTemplate("template",
    name="soletenant-with-disks",
    region="us-central1",
    node_type="n2-node-80-640",
    disks=[{
        "disk_count": 16,
        "disk_size_gb": 375,
        "disk_type": "local-ssd",
    }])

Go

package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/compute"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := compute.GetNodeTypes(ctx, &compute.GetNodeTypesArgs{
			Zone: pulumi.StringRef("us-central1-a"),
		}, nil)
		if err != nil {
			return err
		}
		_, err = compute.NewNodeTemplate(ctx, "template", &compute.NodeTemplateArgs{
			Name:     pulumi.String("soletenant-with-disks"),
			Region:   pulumi.String("us-central1"),
			NodeType: pulumi.String("n2-node-80-640"),
			Disks: compute.NodeTemplateDiskArray{
				&compute.NodeTemplateDiskArgs{
					DiskCount:  pulumi.Int(16),
					DiskSizeGb: pulumi.Int(375),
					DiskType:   pulumi.String("local-ssd"),
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}

C#

using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;

return await Deployment.RunAsync(() => 
{
    var central1a = Gcp.Compute.GetNodeTypes.Invoke(new()
    {
        Zone = "us-central1-a",
    });

    var template = new Gcp.Compute.NodeTemplate("template", new()
    {
        Name = "soletenant-with-disks",
        Region = "us-central1",
        NodeType = "n2-node-80-640",
        Disks = new[]
        {
            new Gcp.Compute.Inputs.NodeTemplateDiskArgs
            {
                DiskCount = 16,
                DiskSizeGb = 375,
                DiskType = "local-ssd",
            },
        },
    });

});

Java

package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.compute.ComputeFunctions;
import com.pulumi.gcp.compute.inputs.GetNodeTypesArgs;
import com.pulumi.gcp.compute.NodeTemplate;
import com.pulumi.gcp.compute.NodeTemplateArgs;
import com.pulumi.gcp.compute.inputs.NodeTemplateDiskArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        final var central1a = ComputeFunctions.getNodeTypes(GetNodeTypesArgs.builder()
            .zone("us-central1-a")
            .build());

        var template = new NodeTemplate("template", NodeTemplateArgs.builder()
            .name("soletenant-with-disks")
            .region("us-central1")
            .nodeType("n2-node-80-640")
            .disks(NodeTemplateDiskArgs.builder()
                .diskCount(16)
                .diskSizeGb(375)
                .diskType("local-ssd")
                .build())
            .build());

    }
}

YAML

resources:
  template:
    type: gcp:compute:NodeTemplate
    properties:
      name: soletenant-with-disks
      region: us-central1
      nodeType: n2-node-80-640
      disks:
        - diskCount: 16
          diskSizeGb: 375
          diskType: local-ssd
variables:
  central1a:
    fn::invoke:
      function: gcp:compute:getNodeTypes
      arguments:
        zone: us-central1-a

The disks array specifies the local SSD configuration. This template provisions 16 local SSDs of 375 GB each per node, 6 TB of local flash in total, providing low-latency storage for database workloads. The diskType of local-ssd indicates these are physically attached drives, not network storage.
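
Guest VMs then consume that capacity as scratch disks. A minimal sketch, with placeholder instance, machine type, boot image, and node group names:

const ssdVm = new gcp.compute.Instance("ssd-vm", {
    zone: "us-central1-a",
    machineType: "n2-standard-8",
    bootDisk: { initializeParams: { image: "debian-cloud/debian-12" } },
    networkInterfaces: [{ network: "default" }],
    // Each scratch disk maps to one 375 GB local SSD on the node.
    scratchDisks: [
        { interface: "NVME" },
        { interface: "NVME" },
    ],
    scheduling: {
        nodeAffinities: [{
            key: "compute.googleapis.com/node-group-name",
            operator: "IN",
            values: ["ssd-node-group"], // hypothetical node group name
        }],
    },
});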

Beyond these examples

These snippets focus on specific node template features: node type and region selection, maintenance restart policies, and GPU and local SSD attachment. They’re intentionally minimal rather than full sole-tenant deployments.

The examples assume pre-existing infrastructure such as a GCP project with Compute Engine API enabled, and a region where sole-tenant nodes are supported. They focus on template configuration rather than provisioning node groups or VMs.

To keep things focused, common node template patterns are omitted, including:

  • CPU overcommit settings (cpuOvercommitType)
  • Flexible node type specifications (nodeTypeFlexibility)
  • Node group creation and VM placement

These omissions are intentional: the goal is to illustrate how each node template feature is wired, not provide drop-in sole-tenant modules. See the NodeTemplate resource reference for all available configuration options.

Frequently Asked Questions

Configuration & Immutability
What properties can't I change after creating a node template?
Nearly all properties are immutable, including name, region, nodeType, serverBinding, accelerators, disks, nodeAffinityLabels, cpuOvercommitType, and nodeTypeFlexibility. Changes to any of these properties require destroying and recreating the node template.
What's the difference between nodeType and nodeTypeFlexibility?
nodeType specifies an exact node type (like n1-node-96-624), while nodeTypeFlexibility allows flexible matching based on resource properties rather than an exact type. The two are mutually exclusive; specify only one.
Is serverBinding required, and what does it do?
No, serverBinding is optional; when omitted it defaults to RESTART_NODE_ON_ANY_SERVER, which lets nodes restart on any available physical server after a maintenance event. Set RESTART_NODE_ON_MINIMAL_SERVERS to restart nodes on as few distinct servers as possible, for example for physical-server-based licensing.
Hardware Configuration
How do I add GPUs to my node template?
Configure the accelerators property with acceleratorType (like nvidia-tesla-t4) and acceleratorCount to attach GPU cards.
How do I configure local SSDs for my node template?
Use the disks property to specify diskCount, diskSizeGb, and diskType (such as local-ssd).
What does cpuOvercommitType do?
cpuOvercommitType controls CPU overcommit behavior. It defaults to NONE but can be set to ENABLED to allow overcommitting CPU resources.
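
For instance, a minimal template enabling overcommit might look like this sketch (the resource and template names are illustrative):

const overcommitTemplate = new gcp.compute.NodeTemplate("overcommit-tmpl", {
    name: "overcommit-tmpl",
    region: "us-central1",
    nodeType: "n1-node-96-624",
    cpuOvercommitType: "ENABLED", // default is NONE
});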
Import & Management
What import formats are supported for node templates?
Node templates support four import formats: full path (projects/{{project}}/regions/{{region}}/nodeTemplates/{{name}}), project/region/name ({{project}}/{{region}}/{{name}}), region/name ({{region}}/{{name}}), or just name ({{name}}).
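
Using the full-path format with the Pulumi CLI looks like this, where the project and resource names are placeholders:

pulumi import gcp:compute/nodeTemplate:NodeTemplate template projects/my-project/regions/us-central1/nodeTemplates/soletenant-tmpl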
