The gcp:compute/nodeTemplate:NodeTemplate resource, part of the Pulumi GCP provider, defines templates for creating sole-tenant nodes: hardware specifications, maintenance policies, and attached resources like GPUs and local SSDs. This guide focuses on three capabilities: node type and region configuration, server binding and restart policies, and GPU and local SSD attachment.
Node templates are referenced by node groups, which create the actual physical servers. The examples are intentionally small. Combine them with node groups and VM instance configurations for complete sole-tenant deployments.
Define a basic sole-tenant node template
Sole-tenant nodes provide dedicated physical servers for VMs, meeting compliance or licensing requirements that prohibit multi-tenant infrastructure.
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
// Minimal sole-tenant node template: name, region, and the hardware node type.
const template = new gcp.compute.NodeTemplate("template", {
name: "soletenant-tmpl",
region: "us-central1",
// 96 vCPUs / 624 GB memory per physical node.
nodeType: "n1-node-96-624",
});
import pulumi
import pulumi_gcp as gcp
# Minimal sole-tenant node template: name, region, and the hardware node type.
template = gcp.compute.NodeTemplate("template",
name="soletenant-tmpl",
region="us-central1",
# 96 vCPUs / 624 GB memory per physical node.
node_type="n1-node-96-624")
package main
import (
"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/compute"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
// Creates a minimal sole-tenant node template: name, region, and node type.
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := compute.NewNodeTemplate(ctx, "template", &compute.NodeTemplateArgs{
Name: pulumi.String("soletenant-tmpl"),
Region: pulumi.String("us-central1"),
// 96 vCPUs / 624 GB memory per physical node.
NodeType: pulumi.String("n1-node-96-624"),
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
// Creates a minimal sole-tenant node template: name, region, and node type.
return await Deployment.RunAsync(() =>
{
var template = new Gcp.Compute.NodeTemplate("template", new()
{
Name = "soletenant-tmpl",
Region = "us-central1",
// 96 vCPUs / 624 GB memory per physical node.
NodeType = "n1-node-96-624",
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.compute.NodeTemplate;
import com.pulumi.gcp.compute.NodeTemplateArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
// Creates a minimal sole-tenant node template: name, region, and node type.
public static void stack(Context ctx) {
var template = new NodeTemplate("template", NodeTemplateArgs.builder()
.name("soletenant-tmpl")
.region("us-central1")
// 96 vCPUs / 624 GB memory per physical node.
.nodeType("n1-node-96-624")
.build());
}
}
# Minimal sole-tenant node template: name, region, and the hardware node type.
resources:
template:
type: gcp:compute:NodeTemplate
properties:
name: soletenant-tmpl
region: us-central1
# 96 vCPUs / 624 GB memory per physical node.
nodeType: n1-node-96-624
The nodeType property specifies the hardware configuration (CPU cores and memory). The region determines where the physical nodes will be located. Node groups reference this template when provisioning dedicated hosts.
Control restart behavior with server binding
When Google performs maintenance on underlying hardware, the serverBinding policy controls whether VMs restart on the same physical server or migrate to different nodes.
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
// List the node types available in the zone (result not consumed below;
// presumably included for reference when picking nodeType — confirm intended use).
const central1a = gcp.compute.getNodeTypes({
zone: "us-central1-a",
});
const template = new gcp.compute.NodeTemplate("template", {
name: "soletenant-with-licenses",
region: "us-central1",
nodeType: "n1-node-96-624",
// Labels that VM node-affinity rules can match against.
nodeAffinityLabels: {
foo: "baz",
},
// Keep VMs on the same physical server across maintenance events.
serverBinding: {
type: "RESTART_NODE_ON_MINIMAL_SERVERS",
},
});
import pulumi
import pulumi_gcp as gcp
# List the node types available in the zone (result not consumed below;
# presumably included for reference when picking node_type — confirm intended use).
central1a = gcp.compute.get_node_types(zone="us-central1-a")
template = gcp.compute.NodeTemplate("template",
name="soletenant-with-licenses",
region="us-central1",
node_type="n1-node-96-624",
# Labels that VM node-affinity rules can match against.
node_affinity_labels={
"foo": "baz",
},
# Keep VMs on the same physical server across maintenance events.
server_binding={
"type": "RESTART_NODE_ON_MINIMAL_SERVERS",
})
package main
import (
"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/compute"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
// Node template with affinity labels and a server-binding restart policy.
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
// List the node types available in the zone (result not consumed below;
// presumably included for reference when picking NodeType — confirm intended use).
_, err := compute.GetNodeTypes(ctx, &compute.GetNodeTypesArgs{
Zone: pulumi.StringRef("us-central1-a"),
}, nil)
if err != nil {
return err
}
_, err = compute.NewNodeTemplate(ctx, "template", &compute.NodeTemplateArgs{
Name: pulumi.String("soletenant-with-licenses"),
Region: pulumi.String("us-central1"),
NodeType: pulumi.String("n1-node-96-624"),
// Labels that VM node-affinity rules can match against.
NodeAffinityLabels: pulumi.StringMap{
"foo": pulumi.String("baz"),
},
// Keep VMs on the same physical server across maintenance events.
ServerBinding: &compute.NodeTemplateServerBindingArgs{
Type: pulumi.String("RESTART_NODE_ON_MINIMAL_SERVERS"),
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
// Node template with affinity labels and a server-binding restart policy.
return await Deployment.RunAsync(() =>
{
// List the node types available in the zone (result not consumed below;
// presumably included for reference when picking NodeType — confirm intended use).
var central1a = Gcp.Compute.GetNodeTypes.Invoke(new()
{
Zone = "us-central1-a",
});
var template = new Gcp.Compute.NodeTemplate("template", new()
{
Name = "soletenant-with-licenses",
Region = "us-central1",
NodeType = "n1-node-96-624",
// Labels that VM node-affinity rules can match against.
NodeAffinityLabels =
{
{ "foo", "baz" },
},
// Keep VMs on the same physical server across maintenance events.
ServerBinding = new Gcp.Compute.Inputs.NodeTemplateServerBindingArgs
{
Type = "RESTART_NODE_ON_MINIMAL_SERVERS",
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.compute.ComputeFunctions;
import com.pulumi.gcp.compute.inputs.GetNodeTypesArgs;
import com.pulumi.gcp.compute.NodeTemplate;
import com.pulumi.gcp.compute.NodeTemplateArgs;
import com.pulumi.gcp.compute.inputs.NodeTemplateServerBindingArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
// Node template with affinity labels and a server-binding restart policy.
public static void stack(Context ctx) {
// List the node types available in the zone (result not consumed below;
// presumably included for reference when picking nodeType — confirm intended use).
final var central1a = ComputeFunctions.getNodeTypes(GetNodeTypesArgs.builder()
.zone("us-central1-a")
.build());
var template = new NodeTemplate("template", NodeTemplateArgs.builder()
.name("soletenant-with-licenses")
.region("us-central1")
.nodeType("n1-node-96-624")
// Labels that VM node-affinity rules can match against.
.nodeAffinityLabels(Map.of("foo", "baz"))
// Keep VMs on the same physical server across maintenance events.
.serverBinding(NodeTemplateServerBindingArgs.builder()
.type("RESTART_NODE_ON_MINIMAL_SERVERS")
.build())
.build());
}
}
# Node template with affinity labels and a server-binding restart policy.
resources:
template:
type: gcp:compute:NodeTemplate
properties:
name: soletenant-with-licenses
region: us-central1
nodeType: n1-node-96-624
# Labels that VM node-affinity rules can match against.
nodeAffinityLabels:
foo: baz
# Keep VMs on the same physical server across maintenance events.
serverBinding:
type: RESTART_NODE_ON_MINIMAL_SERVERS
# Lists node types in the zone (not referenced by the resource above).
variables:
central1a:
fn::invoke:
function: gcp:compute:getNodeTypes
arguments:
zone: us-central1-a
The serverBinding type of RESTART_NODE_ON_MINIMAL_SERVERS keeps VMs on the same physical node after maintenance. The nodeAffinityLabels property adds key-value pairs that VMs can use for scheduling constraints, ensuring they land on nodes with matching labels.
Attach GPUs for compute-intensive workloads
Machine learning training and inference often require GPU acceleration beyond what standard CPU nodes provide.
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
// List the node types available in the zone (result not consumed below;
// presumably included for reference when picking nodeType — confirm intended use).
const central1a = gcp.compute.getNodeTypes({
zone: "us-central1-a",
});
const template = new gcp.compute.NodeTemplate("template", {
name: "soletenant-with-accelerators",
region: "us-central1",
nodeType: "n1-node-96-624",
// Attach 4 NVIDIA Tesla T4 GPUs to each node created from this template.
accelerators: [{
acceleratorType: "nvidia-tesla-t4",
acceleratorCount: 4,
}],
});
import pulumi
import pulumi_gcp as gcp
# List the node types available in the zone (result not consumed below;
# presumably included for reference when picking node_type — confirm intended use).
central1a = gcp.compute.get_node_types(zone="us-central1-a")
template = gcp.compute.NodeTemplate("template",
name="soletenant-with-accelerators",
region="us-central1",
node_type="n1-node-96-624",
# Attach 4 NVIDIA Tesla T4 GPUs to each node created from this template.
accelerators=[{
"accelerator_type": "nvidia-tesla-t4",
"accelerator_count": 4,
}])
package main
import (
"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/compute"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
// Node template that attaches GPUs to each sole-tenant node.
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
// List the node types available in the zone (result not consumed below;
// presumably included for reference when picking NodeType — confirm intended use).
_, err := compute.GetNodeTypes(ctx, &compute.GetNodeTypesArgs{
Zone: pulumi.StringRef("us-central1-a"),
}, nil)
if err != nil {
return err
}
_, err = compute.NewNodeTemplate(ctx, "template", &compute.NodeTemplateArgs{
Name: pulumi.String("soletenant-with-accelerators"),
Region: pulumi.String("us-central1"),
NodeType: pulumi.String("n1-node-96-624"),
// Attach 4 NVIDIA Tesla T4 GPUs to each node created from this template.
Accelerators: compute.NodeTemplateAcceleratorArray{
&compute.NodeTemplateAcceleratorArgs{
AcceleratorType: pulumi.String("nvidia-tesla-t4"),
AcceleratorCount: pulumi.Int(4),
},
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
// Node template that attaches GPUs to each sole-tenant node.
return await Deployment.RunAsync(() =>
{
// List the node types available in the zone (result not consumed below;
// presumably included for reference when picking NodeType — confirm intended use).
var central1a = Gcp.Compute.GetNodeTypes.Invoke(new()
{
Zone = "us-central1-a",
});
var template = new Gcp.Compute.NodeTemplate("template", new()
{
Name = "soletenant-with-accelerators",
Region = "us-central1",
NodeType = "n1-node-96-624",
// Attach 4 NVIDIA Tesla T4 GPUs to each node created from this template.
Accelerators = new[]
{
new Gcp.Compute.Inputs.NodeTemplateAcceleratorArgs
{
AcceleratorType = "nvidia-tesla-t4",
AcceleratorCount = 4,
},
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.compute.ComputeFunctions;
import com.pulumi.gcp.compute.inputs.GetNodeTypesArgs;
import com.pulumi.gcp.compute.NodeTemplate;
import com.pulumi.gcp.compute.NodeTemplateArgs;
import com.pulumi.gcp.compute.inputs.NodeTemplateAcceleratorArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
// Node template that attaches GPUs to each sole-tenant node.
public static void stack(Context ctx) {
// List the node types available in the zone (result not consumed below;
// presumably included for reference when picking nodeType — confirm intended use).
final var central1a = ComputeFunctions.getNodeTypes(GetNodeTypesArgs.builder()
.zone("us-central1-a")
.build());
var template = new NodeTemplate("template", NodeTemplateArgs.builder()
.name("soletenant-with-accelerators")
.region("us-central1")
.nodeType("n1-node-96-624")
// Attach 4 NVIDIA Tesla T4 GPUs to each node created from this template.
.accelerators(NodeTemplateAcceleratorArgs.builder()
.acceleratorType("nvidia-tesla-t4")
.acceleratorCount(4)
.build())
.build());
}
}
# Node template that attaches GPUs to each sole-tenant node.
resources:
template:
type: gcp:compute:NodeTemplate
properties:
name: soletenant-with-accelerators
region: us-central1
nodeType: n1-node-96-624
# Attach 4 NVIDIA Tesla T4 GPUs to each node created from this template.
accelerators:
- acceleratorType: nvidia-tesla-t4
acceleratorCount: 4
# Lists node types in the zone (not referenced by the resource above).
variables:
central1a:
fn::invoke:
function: gcp:compute:getNodeTypes
arguments:
zone: us-central1-a
The accelerators array specifies GPU type and count. Each node created from this template will have 4 NVIDIA Tesla T4 GPUs attached. VMs scheduled on these nodes can access the GPUs for parallel computation.
Configure local SSD storage for high-throughput workloads
Applications like databases and caching layers benefit from local SSDs that provide higher IOPS and lower latency than network-attached persistent disks.
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
// List the node types available in the zone (result not consumed below;
// presumably included for reference when picking nodeType — confirm intended use).
const central1a = gcp.compute.getNodeTypes({
zone: "us-central1-a",
});
const template = new gcp.compute.NodeTemplate("template", {
name: "soletenant-with-disks",
region: "us-central1",
nodeType: "n2-node-80-640",
// Provision 16 local SSDs of 375 GB each on every physical node.
disks: [{
diskCount: 16,
diskSizeGb: 375,
diskType: "local-ssd",
}],
});
import pulumi
import pulumi_gcp as gcp
# List the node types available in the zone (result not consumed below;
# presumably included for reference when picking node_type — confirm intended use).
central1a = gcp.compute.get_node_types(zone="us-central1-a")
template = gcp.compute.NodeTemplate("template",
name="soletenant-with-disks",
region="us-central1",
node_type="n2-node-80-640",
# Provision 16 local SSDs of 375 GB each on every physical node.
disks=[{
"disk_count": 16,
"disk_size_gb": 375,
"disk_type": "local-ssd",
}])
package main
import (
"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/compute"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
// Node template that provisions local SSDs on each sole-tenant node.
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
// List the node types available in the zone (result not consumed below;
// presumably included for reference when picking NodeType — confirm intended use).
_, err := compute.GetNodeTypes(ctx, &compute.GetNodeTypesArgs{
Zone: pulumi.StringRef("us-central1-a"),
}, nil)
if err != nil {
return err
}
_, err = compute.NewNodeTemplate(ctx, "template", &compute.NodeTemplateArgs{
Name: pulumi.String("soletenant-with-disks"),
Region: pulumi.String("us-central1"),
NodeType: pulumi.String("n2-node-80-640"),
// Provision 16 local SSDs of 375 GB each on every physical node.
Disks: compute.NodeTemplateDiskArray{
&compute.NodeTemplateDiskArgs{
DiskCount: pulumi.Int(16),
DiskSizeGb: pulumi.Int(375),
DiskType: pulumi.String("local-ssd"),
},
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
// Node template that provisions local SSDs on each sole-tenant node.
return await Deployment.RunAsync(() =>
{
// List the node types available in the zone (result not consumed below;
// presumably included for reference when picking NodeType — confirm intended use).
var central1a = Gcp.Compute.GetNodeTypes.Invoke(new()
{
Zone = "us-central1-a",
});
var template = new Gcp.Compute.NodeTemplate("template", new()
{
Name = "soletenant-with-disks",
Region = "us-central1",
NodeType = "n2-node-80-640",
// Provision 16 local SSDs of 375 GB each on every physical node.
Disks = new[]
{
new Gcp.Compute.Inputs.NodeTemplateDiskArgs
{
DiskCount = 16,
DiskSizeGb = 375,
DiskType = "local-ssd",
},
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.compute.ComputeFunctions;
import com.pulumi.gcp.compute.inputs.GetNodeTypesArgs;
import com.pulumi.gcp.compute.NodeTemplate;
import com.pulumi.gcp.compute.NodeTemplateArgs;
import com.pulumi.gcp.compute.inputs.NodeTemplateDiskArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
// Node template that provisions local SSDs on each sole-tenant node.
public static void stack(Context ctx) {
// List the node types available in the zone (result not consumed below;
// presumably included for reference when picking nodeType — confirm intended use).
final var central1a = ComputeFunctions.getNodeTypes(GetNodeTypesArgs.builder()
.zone("us-central1-a")
.build());
var template = new NodeTemplate("template", NodeTemplateArgs.builder()
.name("soletenant-with-disks")
.region("us-central1")
.nodeType("n2-node-80-640")
// Provision 16 local SSDs of 375 GB each on every physical node.
.disks(NodeTemplateDiskArgs.builder()
.diskCount(16)
.diskSizeGb(375)
.diskType("local-ssd")
.build())
.build());
}
}
# Node template that provisions local SSDs on each sole-tenant node.
resources:
template:
type: gcp:compute:NodeTemplate
properties:
name: soletenant-with-disks
region: us-central1
nodeType: n2-node-80-640
# Provision 16 local SSDs of 375 GB each on every physical node.
disks:
- diskCount: 16
diskSizeGb: 375
diskType: local-ssd
# Lists node types in the zone (not referenced by the resource above).
variables:
central1a:
fn::invoke:
function: gcp:compute:getNodeTypes
arguments:
zone: us-central1-a
The disks array specifies local SSD configuration. This template provisions 16 local SSDs of 375 GB each on every physical node. VMs can mount these disks for temporary high-performance storage that doesn’t persist beyond the node’s lifetime.
Beyond these examples
These snippets focus on specific node template features: node type and region selection, maintenance restart policies, and GPU and local SSD attachment. They’re intentionally minimal rather than full sole-tenant deployments.
The examples assume pre-existing infrastructure such as a GCP project with Compute Engine API enabled. They focus on template configuration rather than provisioning the node groups and VMs that use these templates.
To keep things focused, common template patterns are omitted, including:
- CPU overcommit settings (cpuOvercommitType)
- Flexible node type specifications (nodeTypeFlexibility)
- Custom descriptions and metadata
These omissions are intentional: the goal is to illustrate how each node template feature is wired, not provide drop-in sole-tenant modules. See the NodeTemplate resource reference for all available configuration options.
Let's configure GCP Compute Node Templates
Get started with Pulumi Cloud, then follow our quick setup guide to deploy this infrastructure.
Try Pulumi Cloud for FREE
Frequently Asked Questions
Configuration & Immutability
name, region, nodeType, accelerators, disks, serverBinding, nodeAffinityLabels, and cpuOvercommitType all require replacement if changed. Use nodeType to select a specific node type like n1-node-96-624, or nodeTypeFlexibility for flexible matching based on hardware properties.
Maintenance & Availability
serverBinding determines where nodes restart after maintenance events. For example, RESTART_NODE_ON_MINIMAL_SERVERS keeps VMs on the same physical server where possible.
Hardware Configuration
Attach GPUs with the accelerators array, setting acceleratorType (e.g., nvidia-tesla-t4) and acceleratorCount. Configure local SSDs with the disks array, specifying diskType (e.g., local-ssd), diskCount, and diskSizeGb. The default cpuOvercommitType is NONE; you can set it to ENABLED if needed. nodeAffinityLabels are used for instance scheduling, allowing you to control which instances run on nodes created from this template.