1. Packages
  2. Google Cloud (GCP) Classic
  3. API Docs
  4. compute
  5. NodeGroup
Google Cloud Classic v7.2.2 published by Pulumi

gcp.compute.NodeGroup

Explore with Pulumi AI

gcp logo
Google Cloud Classic v7.2.2 published by Pulumi

    Represents a NodeGroup resource to manage a group of sole-tenant nodes.

    To get more information about NodeGroup, see the official Compute Engine API documentation for node groups and the sole-tenant nodes how-to guides.

    Warning: Due to limitations of the API, this provider cannot update the number of nodes in a node group and changes to node group size either through provider config or through external changes will cause the provider to delete and recreate the node group.

    Example Usage

    Node Group Basic

    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Gcp = Pulumi.Gcp;
    
    return await Deployment.RunAsync(() => 
    {
        var soletenant_tmpl = new Gcp.Compute.NodeTemplate("soletenant-tmpl", new()
        {
            Region = "us-central1",
            NodeType = "n1-node-96-624",
        });
    
        var nodes = new Gcp.Compute.NodeGroup("nodes", new()
        {
            Zone = "us-central1-a",
            Description = "example google_compute_node_group for the Google Provider",
            InitialSize = 1,
            NodeTemplate = soletenant_tmpl.Id,
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/compute"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := compute.NewNodeTemplate(ctx, "soletenant-tmpl", &compute.NodeTemplateArgs{
    			Region:   pulumi.String("us-central1"),
    			NodeType: pulumi.String("n1-node-96-624"),
    		})
    		if err != nil {
    			return err
    		}
    		_, err = compute.NewNodeGroup(ctx, "nodes", &compute.NodeGroupArgs{
    			Zone:         pulumi.String("us-central1-a"),
    			Description:  pulumi.String("example google_compute_node_group for the Google Provider"),
    			InitialSize:  pulumi.Int(1),
    			NodeTemplate: soletenant_tmpl.ID(),
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.gcp.compute.NodeTemplate;
    import com.pulumi.gcp.compute.NodeTemplateArgs;
    import com.pulumi.gcp.compute.NodeGroup;
    import com.pulumi.gcp.compute.NodeGroupArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var soletenant_tmpl = new NodeTemplate("soletenant-tmpl", NodeTemplateArgs.builder()        
                .region("us-central1")
                .nodeType("n1-node-96-624")
                .build());
    
            var nodes = new NodeGroup("nodes", NodeGroupArgs.builder()        
                .zone("us-central1-a")
                .description("example google_compute_node_group for the Google Provider")
                .initialSize(1)
                .nodeTemplate(soletenant_tmpl.id())
                .build());
    
        }
    }
    
    import pulumi
    import pulumi_gcp as gcp
    
    soletenant_tmpl = gcp.compute.NodeTemplate("soletenant-tmpl",
        region="us-central1",
        node_type="n1-node-96-624")
    nodes = gcp.compute.NodeGroup("nodes",
        zone="us-central1-a",
        description="example google_compute_node_group for the Google Provider",
        initial_size=1,
        node_template=soletenant_tmpl.id)
    
    import * as pulumi from "@pulumi/pulumi";
    import * as gcp from "@pulumi/gcp";
    
    const soletenant_tmpl = new gcp.compute.NodeTemplate("soletenant-tmpl", {
        region: "us-central1",
        nodeType: "n1-node-96-624",
    });
    const nodes = new gcp.compute.NodeGroup("nodes", {
        zone: "us-central1-a",
        description: "example google_compute_node_group for the Google Provider",
        initialSize: 1,
        nodeTemplate: soletenant_tmpl.id,
    });
    
    resources:
      soletenant-tmpl:
        type: gcp:compute:NodeTemplate
        properties:
          region: us-central1
          nodeType: n1-node-96-624
      nodes:
        type: gcp:compute:NodeGroup
        properties:
          zone: us-central1-a
          description: example google_compute_node_group for the Google Provider
          initialSize: 1
          nodeTemplate: ${["soletenant-tmpl"].id}
    

    Node Group Maintenance Interval

    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Gcp = Pulumi.Gcp;
    
    return await Deployment.RunAsync(() => 
    {
        // NOTE(review): `google_beta` is referenced but never declared in this
        // snippet — presumably a pre-configured google-beta provider instance;
        // define one before running this example.
        var soletenant_tmpl = new Gcp.Compute.NodeTemplate("soletenant-tmpl", new()
        {
            Region = "us-central1",
            NodeType = "c2-node-60-240",
        }, new CustomResourceOptions
        {
            Provider = google_beta,
        });
    
        var nodes = new Gcp.Compute.NodeGroup("nodes", new()
        {
            Zone = "us-central1-a",
            Description = "example google_compute_node_group for Terraform Google Provider",
            InitialSize = 1,
            NodeTemplate = soletenant_tmpl.Id,
            MaintenanceInterval = "RECURRENT",
        }, new CustomResourceOptions
        {
            Provider = google_beta,
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/compute"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := compute.NewNodeTemplate(ctx, "soletenant-tmpl", &compute.NodeTemplateArgs{
    			Region:   pulumi.String("us-central1"),
    			NodeType: pulumi.String("c2-node-60-240"),
    		}, pulumi.Provider(google_beta))
    		if err != nil {
    			return err
    		}
    		_, err = compute.NewNodeGroup(ctx, "nodes", &compute.NodeGroupArgs{
    			Zone:                pulumi.String("us-central1-a"),
    			Description:         pulumi.String("example google_compute_node_group for Terraform Google Provider"),
    			InitialSize:         pulumi.Int(1),
    			NodeTemplate:        soletenant_tmpl.ID(),
    			MaintenanceInterval: pulumi.String("RECURRENT"),
    		}, pulumi.Provider(google_beta))
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.gcp.compute.NodeTemplate;
    import com.pulumi.gcp.compute.NodeTemplateArgs;
    import com.pulumi.gcp.compute.NodeGroup;
    import com.pulumi.gcp.compute.NodeGroupArgs;
    import com.pulumi.resources.CustomResourceOptions;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            // NOTE(review): `google_beta` is referenced but never declared in
            // this snippet — presumably a pre-configured google-beta provider
            // instance; declare one before running this example.
            var soletenant_tmpl = new NodeTemplate("soletenant-tmpl", NodeTemplateArgs.builder()        
                .region("us-central1")
                .nodeType("c2-node-60-240")
                .build(), CustomResourceOptions.builder()
                    .provider(google_beta)
                    .build());
    
            var nodes = new NodeGroup("nodes", NodeGroupArgs.builder()        
                .zone("us-central1-a")
                .description("example google_compute_node_group for Terraform Google Provider")
                .initialSize(1)
                .nodeTemplate(soletenant_tmpl.id())
                .maintenanceInterval("RECURRENT")
                .build(), CustomResourceOptions.builder()
                    .provider(google_beta)
                    .build());
    
        }
    }
    
    import pulumi
    import pulumi_gcp as gcp
    
    soletenant_tmpl = gcp.compute.NodeTemplate("soletenant-tmpl",
        region="us-central1",
        node_type="c2-node-60-240",
        opts=pulumi.ResourceOptions(provider=google_beta))
    nodes = gcp.compute.NodeGroup("nodes",
        zone="us-central1-a",
        description="example google_compute_node_group for Terraform Google Provider",
        initial_size=1,
        node_template=soletenant_tmpl.id,
        maintenance_interval="RECURRENT",
        opts=pulumi.ResourceOptions(provider=google_beta))
    
    import * as pulumi from "@pulumi/pulumi";
    import * as gcp from "@pulumi/gcp";
    
    const soletenant_tmpl = new gcp.compute.NodeTemplate("soletenant-tmpl", {
        region: "us-central1",
        nodeType: "c2-node-60-240",
    }, {
        provider: google_beta,
    });
    const nodes = new gcp.compute.NodeGroup("nodes", {
        zone: "us-central1-a",
        description: "example google_compute_node_group for Terraform Google Provider",
        initialSize: 1,
        nodeTemplate: soletenant_tmpl.id,
        maintenanceInterval: "RECURRENT",
    }, {
        provider: google_beta,
    });
    
    resources:
      soletenant-tmpl:
        type: gcp:compute:NodeTemplate
        properties:
          region: us-central1
          nodeType: c2-node-60-240
        options:
          provider: ${["google-beta"]}
      nodes:
        type: gcp:compute:NodeGroup
        properties:
          zone: us-central1-a
          description: example google_compute_node_group for Terraform Google Provider
          initialSize: 1
          nodeTemplate: ${["soletenant-tmpl"].id}
          maintenanceInterval: RECURRENT
        options:
          provider: ${["google-beta"]}
    

    Node Group Autoscaling Policy

    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Gcp = Pulumi.Gcp;
    
    return await Deployment.RunAsync(() => 
    {
        var soletenant_tmpl = new Gcp.Compute.NodeTemplate("soletenant-tmpl", new()
        {
            Region = "us-central1",
            NodeType = "n1-node-96-624",
        });
    
        var nodes = new Gcp.Compute.NodeGroup("nodes", new()
        {
            Zone = "us-central1-a",
            Description = "example google_compute_node_group for Google Provider",
            MaintenancePolicy = "RESTART_IN_PLACE",
            MaintenanceWindow = new Gcp.Compute.Inputs.NodeGroupMaintenanceWindowArgs
            {
                StartTime = "08:00",
            },
            InitialSize = 1,
            NodeTemplate = soletenant_tmpl.Id,
            AutoscalingPolicy = new Gcp.Compute.Inputs.NodeGroupAutoscalingPolicyArgs
            {
                Mode = "ONLY_SCALE_OUT",
                MinNodes = 1,
                MaxNodes = 10,
            },
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/compute"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := compute.NewNodeTemplate(ctx, "soletenant-tmpl", &compute.NodeTemplateArgs{
    			Region:   pulumi.String("us-central1"),
    			NodeType: pulumi.String("n1-node-96-624"),
    		})
    		if err != nil {
    			return err
    		}
    		_, err = compute.NewNodeGroup(ctx, "nodes", &compute.NodeGroupArgs{
    			Zone:              pulumi.String("us-central1-a"),
    			Description:       pulumi.String("example google_compute_node_group for Google Provider"),
    			MaintenancePolicy: pulumi.String("RESTART_IN_PLACE"),
    			MaintenanceWindow: &compute.NodeGroupMaintenanceWindowArgs{
    				StartTime: pulumi.String("08:00"),
    			},
    			InitialSize:  pulumi.Int(1),
    			NodeTemplate: soletenant_tmpl.ID(),
    			AutoscalingPolicy: &compute.NodeGroupAutoscalingPolicyArgs{
    				Mode:     pulumi.String("ONLY_SCALE_OUT"),
    				MinNodes: pulumi.Int(1),
    				MaxNodes: pulumi.Int(10),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.gcp.compute.NodeTemplate;
    import com.pulumi.gcp.compute.NodeTemplateArgs;
    import com.pulumi.gcp.compute.NodeGroup;
    import com.pulumi.gcp.compute.NodeGroupArgs;
    import com.pulumi.gcp.compute.inputs.NodeGroupMaintenanceWindowArgs;
    import com.pulumi.gcp.compute.inputs.NodeGroupAutoscalingPolicyArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var soletenant_tmpl = new NodeTemplate("soletenant-tmpl", NodeTemplateArgs.builder()        
                .region("us-central1")
                .nodeType("n1-node-96-624")
                .build());
    
            var nodes = new NodeGroup("nodes", NodeGroupArgs.builder()        
                .zone("us-central1-a")
                .description("example google_compute_node_group for Google Provider")
                .maintenancePolicy("RESTART_IN_PLACE")
                .maintenanceWindow(NodeGroupMaintenanceWindowArgs.builder()
                    .startTime("08:00")
                    .build())
                .initialSize(1)
                .nodeTemplate(soletenant_tmpl.id())
                .autoscalingPolicy(NodeGroupAutoscalingPolicyArgs.builder()
                    .mode("ONLY_SCALE_OUT")
                    .minNodes(1)
                    .maxNodes(10)
                    .build())
                .build());
    
        }
    }
    
    import pulumi
    import pulumi_gcp as gcp
    
    soletenant_tmpl = gcp.compute.NodeTemplate("soletenant-tmpl",
        region="us-central1",
        node_type="n1-node-96-624")
    nodes = gcp.compute.NodeGroup("nodes",
        zone="us-central1-a",
        description="example google_compute_node_group for Google Provider",
        maintenance_policy="RESTART_IN_PLACE",
        maintenance_window=gcp.compute.NodeGroupMaintenanceWindowArgs(
            start_time="08:00",
        ),
        initial_size=1,
        node_template=soletenant_tmpl.id,
        autoscaling_policy=gcp.compute.NodeGroupAutoscalingPolicyArgs(
            mode="ONLY_SCALE_OUT",
            min_nodes=1,
            max_nodes=10,
        ))
    
    import * as pulumi from "@pulumi/pulumi";
    import * as gcp from "@pulumi/gcp";
    
    const soletenant_tmpl = new gcp.compute.NodeTemplate("soletenant-tmpl", {
        region: "us-central1",
        nodeType: "n1-node-96-624",
    });
    const nodes = new gcp.compute.NodeGroup("nodes", {
        zone: "us-central1-a",
        description: "example google_compute_node_group for Google Provider",
        maintenancePolicy: "RESTART_IN_PLACE",
        maintenanceWindow: {
            startTime: "08:00",
        },
        initialSize: 1,
        nodeTemplate: soletenant_tmpl.id,
        autoscalingPolicy: {
            mode: "ONLY_SCALE_OUT",
            minNodes: 1,
            maxNodes: 10,
        },
    });
    
    resources:
      soletenant-tmpl:
        type: gcp:compute:NodeTemplate
        properties:
          region: us-central1
          nodeType: n1-node-96-624
      nodes:
        type: gcp:compute:NodeGroup
        properties:
          zone: us-central1-a
          description: example google_compute_node_group for Google Provider
          maintenancePolicy: RESTART_IN_PLACE
          maintenanceWindow:
            startTime: 08:00
          initialSize: 1
          nodeTemplate: ${["soletenant-tmpl"].id}
          autoscalingPolicy:
            mode: ONLY_SCALE_OUT
            minNodes: 1
            maxNodes: 10
    

    Node Group Share Settings

    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Gcp = Pulumi.Gcp;
    
    return await Deployment.RunAsync(() => 
    {
        var guestProject = new Gcp.Organizations.Project("guestProject", new()
        {
            ProjectId = "project-id",
            OrgId = "123456789",
        });
    
        var soletenant_tmpl = new Gcp.Compute.NodeTemplate("soletenant-tmpl", new()
        {
            Region = "us-central1",
            NodeType = "n1-node-96-624",
        });
    
        var nodes = new Gcp.Compute.NodeGroup("nodes", new()
        {
            Zone = "us-central1-f",
            Description = "example google_compute_node_group for Terraform Google Provider",
            InitialSize = 1,
            NodeTemplate = soletenant_tmpl.Id,
            ShareSettings = new Gcp.Compute.Inputs.NodeGroupShareSettingsArgs
            {
                ShareType = "SPECIFIC_PROJECTS",
                ProjectMaps = new[]
                {
                    new Gcp.Compute.Inputs.NodeGroupShareSettingsProjectMapArgs
                    {
                        Id = guestProject.ProjectId,
                        ProjectId = guestProject.ProjectId,
                    },
                },
            },
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/compute"
    	"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/organizations"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		guestProject, err := organizations.NewProject(ctx, "guestProject", &organizations.ProjectArgs{
    			ProjectId: pulumi.String("project-id"),
    			OrgId:     pulumi.String("123456789"),
    		})
    		if err != nil {
    			return err
    		}
    		_, err = compute.NewNodeTemplate(ctx, "soletenant-tmpl", &compute.NodeTemplateArgs{
    			Region:   pulumi.String("us-central1"),
    			NodeType: pulumi.String("n1-node-96-624"),
    		})
    		if err != nil {
    			return err
    		}
    		_, err = compute.NewNodeGroup(ctx, "nodes", &compute.NodeGroupArgs{
    			Zone:         pulumi.String("us-central1-f"),
    			Description:  pulumi.String("example google_compute_node_group for Terraform Google Provider"),
    			InitialSize:  pulumi.Int(1),
    			NodeTemplate: soletenant_tmpl.ID(),
    			ShareSettings: &compute.NodeGroupShareSettingsArgs{
    				ShareType: pulumi.String("SPECIFIC_PROJECTS"),
    				ProjectMaps: compute.NodeGroupShareSettingsProjectMapArray{
    					&compute.NodeGroupShareSettingsProjectMapArgs{
    						Id:        guestProject.ProjectId,
    						ProjectId: guestProject.ProjectId,
    					},
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.gcp.organizations.Project;
    import com.pulumi.gcp.organizations.ProjectArgs;
    import com.pulumi.gcp.compute.NodeTemplate;
    import com.pulumi.gcp.compute.NodeTemplateArgs;
    import com.pulumi.gcp.compute.NodeGroup;
    import com.pulumi.gcp.compute.NodeGroupArgs;
    import com.pulumi.gcp.compute.inputs.NodeGroupShareSettingsArgs;
    // Missing in the original snippet: projectMaps entries use this builder.
    import com.pulumi.gcp.compute.inputs.NodeGroupShareSettingsProjectMapArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var guestProject = new Project("guestProject", ProjectArgs.builder()        
                .projectId("project-id")
                .orgId("123456789")
                .build());
    
            var soletenant_tmpl = new NodeTemplate("soletenant-tmpl", NodeTemplateArgs.builder()        
                .region("us-central1")
                .nodeType("n1-node-96-624")
                .build());
    
            var nodes = new NodeGroup("nodes", NodeGroupArgs.builder()        
                .zone("us-central1-f")
                .description("example google_compute_node_group for Terraform Google Provider")
                .initialSize(1)
                .nodeTemplate(soletenant_tmpl.id())
                .shareSettings(NodeGroupShareSettingsArgs.builder()
                    .shareType("SPECIFIC_PROJECTS")
                    .projectMaps(NodeGroupShareSettingsProjectMapArgs.builder()
                        .id(guestProject.projectId())
                        .projectId(guestProject.projectId())
                        .build())
                    .build())
                .build());
    
        }
    }
    
    import pulumi
    import pulumi_gcp as gcp
    
    guest_project = gcp.organizations.Project("guestProject",
        project_id="project-id",
        org_id="123456789")
    soletenant_tmpl = gcp.compute.NodeTemplate("soletenant-tmpl",
        region="us-central1",
        node_type="n1-node-96-624")
    nodes = gcp.compute.NodeGroup("nodes",
        zone="us-central1-f",
        description="example google_compute_node_group for Terraform Google Provider",
        initial_size=1,
        node_template=soletenant_tmpl.id,
        share_settings=gcp.compute.NodeGroupShareSettingsArgs(
            share_type="SPECIFIC_PROJECTS",
            project_maps=[gcp.compute.NodeGroupShareSettingsProjectMapArgs(
                id=guest_project.project_id,
                project_id=guest_project.project_id,
            )],
        ))
    
    import * as pulumi from "@pulumi/pulumi";
    import * as gcp from "@pulumi/gcp";
    
    const guestProject = new gcp.organizations.Project("guestProject", {
        projectId: "project-id",
        orgId: "123456789",
    });
    const soletenant_tmpl = new gcp.compute.NodeTemplate("soletenant-tmpl", {
        region: "us-central1",
        nodeType: "n1-node-96-624",
    });
    const nodes = new gcp.compute.NodeGroup("nodes", {
        zone: "us-central1-f",
        description: "example google_compute_node_group for Terraform Google Provider",
        initialSize: 1,
        nodeTemplate: soletenant_tmpl.id,
        shareSettings: {
            shareType: "SPECIFIC_PROJECTS",
            projectMaps: [{
                id: guestProject.projectId,
                projectId: guestProject.projectId,
            }],
        },
    });
    
    resources:
      guestProject:
        type: gcp:organizations:Project
        properties:
          projectId: project-id
          orgId: '123456789'
      soletenant-tmpl:
        type: gcp:compute:NodeTemplate
        properties:
          region: us-central1
          nodeType: n1-node-96-624
      nodes:
        type: gcp:compute:NodeGroup
        properties:
          zone: us-central1-f
          description: example google_compute_node_group for Terraform Google Provider
          initialSize: 1
          nodeTemplate: ${["soletenant-tmpl"].id}
          shareSettings:
            shareType: SPECIFIC_PROJECTS
            projectMaps:
              - id: ${guestProject.projectId}
                projectId: ${guestProject.projectId}
    

    Create NodeGroup Resource

    new NodeGroup(name: string, args: NodeGroupArgs, opts?: CustomResourceOptions);
    @overload
    def NodeGroup(resource_name: str,
                  opts: Optional[ResourceOptions] = None,
                  autoscaling_policy: Optional[NodeGroupAutoscalingPolicyArgs] = None,
                  description: Optional[str] = None,
                  initial_size: Optional[int] = None,
                  maintenance_interval: Optional[str] = None,
                  maintenance_policy: Optional[str] = None,
                  maintenance_window: Optional[NodeGroupMaintenanceWindowArgs] = None,
                  name: Optional[str] = None,
                  node_template: Optional[str] = None,
                  project: Optional[str] = None,
                  share_settings: Optional[NodeGroupShareSettingsArgs] = None,
                  zone: Optional[str] = None)
    @overload
    def NodeGroup(resource_name: str,
                  args: NodeGroupArgs,
                  opts: Optional[ResourceOptions] = None)
    func NewNodeGroup(ctx *Context, name string, args NodeGroupArgs, opts ...ResourceOption) (*NodeGroup, error)
    public NodeGroup(string name, NodeGroupArgs args, CustomResourceOptions? opts = null)
    public NodeGroup(String name, NodeGroupArgs args)
    public NodeGroup(String name, NodeGroupArgs args, CustomResourceOptions options)
    
    type: gcp:compute:NodeGroup
    properties: # The arguments to resource properties.
    options: # Bag of options to control resource's behavior.
    
    
    name string
    The unique name of the resource.
    args NodeGroupArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    resource_name str
    The unique name of the resource.
    args NodeGroupArgs
    The arguments to resource properties.
    opts ResourceOptions
    Bag of options to control resource's behavior.
    ctx Context
    Context object for the current deployment.
    name string
    The unique name of the resource.
    args NodeGroupArgs
    The arguments to resource properties.
    opts ResourceOption
    Bag of options to control resource's behavior.
    name string
    The unique name of the resource.
    args NodeGroupArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    name String
    The unique name of the resource.
    args NodeGroupArgs
    The arguments to resource properties.
    options CustomResourceOptions
    Bag of options to control resource's behavior.

    NodeGroup Resource Properties

    To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

    Inputs

    The NodeGroup resource accepts the following input properties:

    NodeTemplate string

    The URL of the node template to which this node group belongs.


    AutoscalingPolicy NodeGroupAutoscalingPolicy

    If you use sole-tenant nodes for your workloads, you can use the node group autoscaler to automatically manage the sizes of your node groups. One of initial_size or autoscaling_policy must be configured on resource creation. Structure is documented below.

    Description string

    An optional textual description of the resource.

    InitialSize int

    The initial number of nodes in the node group. One of initial_size or autoscaling_policy must be configured on resource creation.

    MaintenanceInterval string

    Specifies the frequency of planned maintenance events. Set to one of the following: - AS_NEEDED: Hosts are eligible to receive infrastructure and hypervisor updates as they become available. - RECURRENT: Hosts receive planned infrastructure and hypervisor updates on a periodic basis, but not more frequently than every 28 days. This minimizes the number of planned maintenance operations on individual hosts and reduces the frequency of disruptions, both live migrations and terminations, on individual VMs. Possible values: ["AS_NEEDED", "RECURRENT"]

    MaintenancePolicy string

    Specifies how to handle instances when a node in the group undergoes maintenance. Set to one of: DEFAULT, RESTART_IN_PLACE, or MIGRATE_WITHIN_NODE_GROUP. The default value is DEFAULT.

    MaintenanceWindow NodeGroupMaintenanceWindow

    contains properties for the timeframe of maintenance Structure is documented below.

    Name string

    Name of the resource.

    Project string

    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.

    ShareSettings NodeGroupShareSettings

    Share settings for the node group. Structure is documented below.

    Zone string

    Zone where this node group is located

    NodeTemplate string

    The URL of the node template to which this node group belongs.


    AutoscalingPolicy NodeGroupAutoscalingPolicyArgs

    If you use sole-tenant nodes for your workloads, you can use the node group autoscaler to automatically manage the sizes of your node groups. One of initial_size or autoscaling_policy must be configured on resource creation. Structure is documented below.

    Description string

    An optional textual description of the resource.

    InitialSize int

    The initial number of nodes in the node group. One of initial_size or autoscaling_policy must be configured on resource creation.

    MaintenanceInterval string

    Specifies the frequency of planned maintenance events. Set to one of the following: - AS_NEEDED: Hosts are eligible to receive infrastructure and hypervisor updates as they become available. - RECURRENT: Hosts receive planned infrastructure and hypervisor updates on a periodic basis, but not more frequently than every 28 days. This minimizes the number of planned maintenance operations on individual hosts and reduces the frequency of disruptions, both live migrations and terminations, on individual VMs. Possible values: ["AS_NEEDED", "RECURRENT"]

    MaintenancePolicy string

    Specifies how to handle instances when a node in the group undergoes maintenance. Set to one of: DEFAULT, RESTART_IN_PLACE, or MIGRATE_WITHIN_NODE_GROUP. The default value is DEFAULT.

    MaintenanceWindow NodeGroupMaintenanceWindowArgs

    contains properties for the timeframe of maintenance Structure is documented below.

    Name string

    Name of the resource.

    Project string

    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.

    ShareSettings NodeGroupShareSettingsArgs

    Share settings for the node group. Structure is documented below.

    Zone string

    Zone where this node group is located

    nodeTemplate String

    The URL of the node template to which this node group belongs.


    autoscalingPolicy NodeGroupAutoscalingPolicy

    If you use sole-tenant nodes for your workloads, you can use the node group autoscaler to automatically manage the sizes of your node groups. One of initial_size or autoscaling_policy must be configured on resource creation. Structure is documented below.

    description String

    An optional textual description of the resource.

    initialSize Integer

    The initial number of nodes in the node group. One of initial_size or autoscaling_policy must be configured on resource creation.

    maintenanceInterval String

    Specifies the frequency of planned maintenance events. Set to one of the following: - AS_NEEDED: Hosts are eligible to receive infrastructure and hypervisor updates as they become available. - RECURRENT: Hosts receive planned infrastructure and hypervisor updates on a periodic basis, but not more frequently than every 28 days. This minimizes the number of planned maintenance operations on individual hosts and reduces the frequency of disruptions, both live migrations and terminations, on individual VMs. Possible values: ["AS_NEEDED", "RECURRENT"]

    maintenancePolicy String

    Specifies how to handle instances when a node in the group undergoes maintenance. Set to one of: DEFAULT, RESTART_IN_PLACE, or MIGRATE_WITHIN_NODE_GROUP. The default value is DEFAULT.

    maintenanceWindow NodeGroupMaintenanceWindow

    contains properties for the timeframe of maintenance Structure is documented below.

    name String

    Name of the resource.

    project String

    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.

    shareSettings NodeGroupShareSettings

    Share settings for the node group. Structure is documented below.

    zone String

    Zone where this node group is located

    nodeTemplate string

    The URL of the node template to which this node group belongs.


    autoscalingPolicy NodeGroupAutoscalingPolicy

    If you use sole-tenant nodes for your workloads, you can use the node group autoscaler to automatically manage the sizes of your node groups. One of initial_size or autoscaling_policy must be configured on resource creation. Structure is documented below.

    description string

    An optional textual description of the resource.

    initialSize number

    The initial number of nodes in the node group. One of initial_size or autoscaling_policy must be configured on resource creation.

    maintenanceInterval string

    Specifies the frequency of planned maintenance events. Set to one of the following: - AS_NEEDED: Hosts are eligible to receive infrastructure and hypervisor updates as they become available. - RECURRENT: Hosts receive planned infrastructure and hypervisor updates on a periodic basis, but not more frequently than every 28 days. This minimizes the number of planned maintenance operations on individual hosts and reduces the frequency of disruptions, both live migrations and terminations, on individual VMs. Possible values: ["AS_NEEDED", "RECURRENT"]

    maintenancePolicy string

    Specifies how to handle instances when a node in the group undergoes maintenance. Set to one of: DEFAULT, RESTART_IN_PLACE, or MIGRATE_WITHIN_NODE_GROUP. The default value is DEFAULT.

    maintenanceWindow NodeGroupMaintenanceWindow

    Contains properties for the timeframe of maintenance. Structure is documented below.

    name string

    Name of the resource.

    project string

    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.

    shareSettings NodeGroupShareSettings

    Share settings for the node group. Structure is documented below.

    zone string

    Zone where this node group is located

    node_template str

    The URL of the node template to which this node group belongs.


    autoscaling_policy NodeGroupAutoscalingPolicyArgs

    If you use sole-tenant nodes for your workloads, you can use the node group autoscaler to automatically manage the sizes of your node groups. One of initial_size or autoscaling_policy must be configured on resource creation. Structure is documented below.

    description str

    An optional textual description of the resource.

    initial_size int

    The initial number of nodes in the node group. One of initial_size or autoscaling_policy must be configured on resource creation.

    maintenance_interval str

    Specifies the frequency of planned maintenance events. Set to one of the following: - AS_NEEDED: Hosts are eligible to receive infrastructure and hypervisor updates as they become available. - RECURRENT: Hosts receive planned infrastructure and hypervisor updates on a periodic basis, but not more frequently than every 28 days. This minimizes the number of planned maintenance operations on individual hosts and reduces the frequency of disruptions, both live migrations and terminations, on individual VMs. Possible values: ["AS_NEEDED", "RECURRENT"]

    maintenance_policy str

    Specifies how to handle instances when a node in the group undergoes maintenance. Set to one of: DEFAULT, RESTART_IN_PLACE, or MIGRATE_WITHIN_NODE_GROUP. The default value is DEFAULT.

    maintenance_window NodeGroupMaintenanceWindowArgs

    Contains properties for the timeframe of maintenance. Structure is documented below.

    name str

    Name of the resource.

    project str

    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.

    share_settings NodeGroupShareSettingsArgs

    Share settings for the node group. Structure is documented below.

    zone str

    Zone where this node group is located

    nodeTemplate String

    The URL of the node template to which this node group belongs.


    autoscalingPolicy Property Map

    If you use sole-tenant nodes for your workloads, you can use the node group autoscaler to automatically manage the sizes of your node groups. One of initial_size or autoscaling_policy must be configured on resource creation. Structure is documented below.

    description String

    An optional textual description of the resource.

    initialSize Number

    The initial number of nodes in the node group. One of initial_size or autoscaling_policy must be configured on resource creation.

    maintenanceInterval String

    Specifies the frequency of planned maintenance events. Set to one of the following: - AS_NEEDED: Hosts are eligible to receive infrastructure and hypervisor updates as they become available. - RECURRENT: Hosts receive planned infrastructure and hypervisor updates on a periodic basis, but not more frequently than every 28 days. This minimizes the number of planned maintenance operations on individual hosts and reduces the frequency of disruptions, both live migrations and terminations, on individual VMs. Possible values: ["AS_NEEDED", "RECURRENT"]

    maintenancePolicy String

    Specifies how to handle instances when a node in the group undergoes maintenance. Set to one of: DEFAULT, RESTART_IN_PLACE, or MIGRATE_WITHIN_NODE_GROUP. The default value is DEFAULT.

    maintenanceWindow Property Map

    Contains properties for the timeframe of maintenance. Structure is documented below.

    name String

    Name of the resource.

    project String

    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.

    shareSettings Property Map

    Share settings for the node group. Structure is documented below.

    zone String

    Zone where this node group is located

    Outputs

    All input properties are implicitly available as output properties. Additionally, the NodeGroup resource produces the following output properties:

    CreationTimestamp string

    Creation timestamp in RFC3339 text format.

    Id string

    The provider-assigned unique ID for this managed resource.

    SelfLink string

    The URI of the created resource.

    Size int

    The total number of nodes in the node group.

    CreationTimestamp string

    Creation timestamp in RFC3339 text format.

    Id string

    The provider-assigned unique ID for this managed resource.

    SelfLink string

    The URI of the created resource.

    Size int

    The total number of nodes in the node group.

    creationTimestamp String

    Creation timestamp in RFC3339 text format.

    id String

    The provider-assigned unique ID for this managed resource.

    selfLink String

    The URI of the created resource.

    size Integer

    The total number of nodes in the node group.

    creationTimestamp string

    Creation timestamp in RFC3339 text format.

    id string

    The provider-assigned unique ID for this managed resource.

    selfLink string

    The URI of the created resource.

    size number

    The total number of nodes in the node group.

    creation_timestamp str

    Creation timestamp in RFC3339 text format.

    id str

    The provider-assigned unique ID for this managed resource.

    self_link str

    The URI of the created resource.

    size int

    The total number of nodes in the node group.

    creationTimestamp String

    Creation timestamp in RFC3339 text format.

    id String

    The provider-assigned unique ID for this managed resource.

    selfLink String

    The URI of the created resource.

    size Number

    The total number of nodes in the node group.

    Look up Existing NodeGroup Resource

    Get an existing NodeGroup resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

    public static get(name: string, id: Input<ID>, state?: NodeGroupState, opts?: CustomResourceOptions): NodeGroup
    @staticmethod
    def get(resource_name: str,
            id: str,
            opts: Optional[ResourceOptions] = None,
            autoscaling_policy: Optional[NodeGroupAutoscalingPolicyArgs] = None,
            creation_timestamp: Optional[str] = None,
            description: Optional[str] = None,
            initial_size: Optional[int] = None,
            maintenance_interval: Optional[str] = None,
            maintenance_policy: Optional[str] = None,
            maintenance_window: Optional[NodeGroupMaintenanceWindowArgs] = None,
            name: Optional[str] = None,
            node_template: Optional[str] = None,
            project: Optional[str] = None,
            self_link: Optional[str] = None,
            share_settings: Optional[NodeGroupShareSettingsArgs] = None,
            size: Optional[int] = None,
            zone: Optional[str] = None) -> NodeGroup
    func GetNodeGroup(ctx *Context, name string, id IDInput, state *NodeGroupState, opts ...ResourceOption) (*NodeGroup, error)
    public static NodeGroup Get(string name, Input<string> id, NodeGroupState? state, CustomResourceOptions? opts = null)
    public static NodeGroup get(String name, Output<String> id, NodeGroupState state, CustomResourceOptions options)
    Resource lookup is not supported in YAML
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    resource_name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    The following state arguments are supported:
    AutoscalingPolicy NodeGroupAutoscalingPolicy

    If you use sole-tenant nodes for your workloads, you can use the node group autoscaler to automatically manage the sizes of your node groups. One of initial_size or autoscaling_policy must be configured on resource creation. Structure is documented below.

    CreationTimestamp string

    Creation timestamp in RFC3339 text format.

    Description string

    An optional textual description of the resource.

    InitialSize int

    The initial number of nodes in the node group. One of initial_size or autoscaling_policy must be configured on resource creation.

    MaintenanceInterval string

    Specifies the frequency of planned maintenance events. Set to one of the following: - AS_NEEDED: Hosts are eligible to receive infrastructure and hypervisor updates as they become available. - RECURRENT: Hosts receive planned infrastructure and hypervisor updates on a periodic basis, but not more frequently than every 28 days. This minimizes the number of planned maintenance operations on individual hosts and reduces the frequency of disruptions, both live migrations and terminations, on individual VMs. Possible values: ["AS_NEEDED", "RECURRENT"]

    MaintenancePolicy string

    Specifies how to handle instances when a node in the group undergoes maintenance. Set to one of: DEFAULT, RESTART_IN_PLACE, or MIGRATE_WITHIN_NODE_GROUP. The default value is DEFAULT.

    MaintenanceWindow NodeGroupMaintenanceWindow

    Contains properties for the timeframe of maintenance. Structure is documented below.

    Name string

    Name of the resource.

    NodeTemplate string

    The URL of the node template to which this node group belongs.


    Project string

    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.

    SelfLink string

    The URI of the created resource.

    ShareSettings NodeGroupShareSettings

    Share settings for the node group. Structure is documented below.

    Size int

    The total number of nodes in the node group.

    Zone string

    Zone where this node group is located

    AutoscalingPolicy NodeGroupAutoscalingPolicyArgs

    If you use sole-tenant nodes for your workloads, you can use the node group autoscaler to automatically manage the sizes of your node groups. One of initial_size or autoscaling_policy must be configured on resource creation. Structure is documented below.

    CreationTimestamp string

    Creation timestamp in RFC3339 text format.

    Description string

    An optional textual description of the resource.

    InitialSize int

    The initial number of nodes in the node group. One of initial_size or autoscaling_policy must be configured on resource creation.

    MaintenanceInterval string

    Specifies the frequency of planned maintenance events. Set to one of the following: - AS_NEEDED: Hosts are eligible to receive infrastructure and hypervisor updates as they become available. - RECURRENT: Hosts receive planned infrastructure and hypervisor updates on a periodic basis, but not more frequently than every 28 days. This minimizes the number of planned maintenance operations on individual hosts and reduces the frequency of disruptions, both live migrations and terminations, on individual VMs. Possible values: ["AS_NEEDED", "RECURRENT"]

    MaintenancePolicy string

    Specifies how to handle instances when a node in the group undergoes maintenance. Set to one of: DEFAULT, RESTART_IN_PLACE, or MIGRATE_WITHIN_NODE_GROUP. The default value is DEFAULT.

    MaintenanceWindow NodeGroupMaintenanceWindowArgs

    Contains properties for the timeframe of maintenance. Structure is documented below.

    Name string

    Name of the resource.

    NodeTemplate string

    The URL of the node template to which this node group belongs.


    Project string

    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.

    SelfLink string

    The URI of the created resource.

    ShareSettings NodeGroupShareSettingsArgs

    Share settings for the node group. Structure is documented below.

    Size int

    The total number of nodes in the node group.

    Zone string

    Zone where this node group is located

    autoscalingPolicy NodeGroupAutoscalingPolicy

    If you use sole-tenant nodes for your workloads, you can use the node group autoscaler to automatically manage the sizes of your node groups. One of initial_size or autoscaling_policy must be configured on resource creation. Structure is documented below.

    creationTimestamp String

    Creation timestamp in RFC3339 text format.

    description String

    An optional textual description of the resource.

    initialSize Integer

    The initial number of nodes in the node group. One of initial_size or autoscaling_policy must be configured on resource creation.

    maintenanceInterval String

    Specifies the frequency of planned maintenance events. Set to one of the following: - AS_NEEDED: Hosts are eligible to receive infrastructure and hypervisor updates as they become available. - RECURRENT: Hosts receive planned infrastructure and hypervisor updates on a periodic basis, but not more frequently than every 28 days. This minimizes the number of planned maintenance operations on individual hosts and reduces the frequency of disruptions, both live migrations and terminations, on individual VMs. Possible values: ["AS_NEEDED", "RECURRENT"]

    maintenancePolicy String

    Specifies how to handle instances when a node in the group undergoes maintenance. Set to one of: DEFAULT, RESTART_IN_PLACE, or MIGRATE_WITHIN_NODE_GROUP. The default value is DEFAULT.

    maintenanceWindow NodeGroupMaintenanceWindow

    Contains properties for the timeframe of maintenance. Structure is documented below.

    name String

    Name of the resource.

    nodeTemplate String

    The URL of the node template to which this node group belongs.


    project String

    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.

    selfLink String

    The URI of the created resource.

    shareSettings NodeGroupShareSettings

    Share settings for the node group. Structure is documented below.

    size Integer

    The total number of nodes in the node group.

    zone String

    Zone where this node group is located

    autoscalingPolicy NodeGroupAutoscalingPolicy

    If you use sole-tenant nodes for your workloads, you can use the node group autoscaler to automatically manage the sizes of your node groups. One of initial_size or autoscaling_policy must be configured on resource creation. Structure is documented below.

    creationTimestamp string

    Creation timestamp in RFC3339 text format.

    description string

    An optional textual description of the resource.

    initialSize number

    The initial number of nodes in the node group. One of initial_size or autoscaling_policy must be configured on resource creation.

    maintenanceInterval string

    Specifies the frequency of planned maintenance events. Set to one of the following: - AS_NEEDED: Hosts are eligible to receive infrastructure and hypervisor updates as they become available. - RECURRENT: Hosts receive planned infrastructure and hypervisor updates on a periodic basis, but not more frequently than every 28 days. This minimizes the number of planned maintenance operations on individual hosts and reduces the frequency of disruptions, both live migrations and terminations, on individual VMs. Possible values: ["AS_NEEDED", "RECURRENT"]

    maintenancePolicy string

    Specifies how to handle instances when a node in the group undergoes maintenance. Set to one of: DEFAULT, RESTART_IN_PLACE, or MIGRATE_WITHIN_NODE_GROUP. The default value is DEFAULT.

    maintenanceWindow NodeGroupMaintenanceWindow

    Contains properties for the timeframe of maintenance. Structure is documented below.

    name string

    Name of the resource.

    nodeTemplate string

    The URL of the node template to which this node group belongs.


    project string

    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.

    selfLink string

    The URI of the created resource.

    shareSettings NodeGroupShareSettings

    Share settings for the node group. Structure is documented below.

    size number

    The total number of nodes in the node group.

    zone string

    Zone where this node group is located

    autoscaling_policy NodeGroupAutoscalingPolicyArgs

    If you use sole-tenant nodes for your workloads, you can use the node group autoscaler to automatically manage the sizes of your node groups. One of initial_size or autoscaling_policy must be configured on resource creation. Structure is documented below.

    creation_timestamp str

    Creation timestamp in RFC3339 text format.

    description str

    An optional textual description of the resource.

    initial_size int

    The initial number of nodes in the node group. One of initial_size or autoscaling_policy must be configured on resource creation.

    maintenance_interval str

    Specifies the frequency of planned maintenance events. Set to one of the following: - AS_NEEDED: Hosts are eligible to receive infrastructure and hypervisor updates as they become available. - RECURRENT: Hosts receive planned infrastructure and hypervisor updates on a periodic basis, but not more frequently than every 28 days. This minimizes the number of planned maintenance operations on individual hosts and reduces the frequency of disruptions, both live migrations and terminations, on individual VMs. Possible values: ["AS_NEEDED", "RECURRENT"]

    maintenance_policy str

    Specifies how to handle instances when a node in the group undergoes maintenance. Set to one of: DEFAULT, RESTART_IN_PLACE, or MIGRATE_WITHIN_NODE_GROUP. The default value is DEFAULT.

    maintenance_window NodeGroupMaintenanceWindowArgs

    Contains properties for the timeframe of maintenance. Structure is documented below.

    name str

    Name of the resource.

    node_template str

    The URL of the node template to which this node group belongs.


    project str

    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.

    self_link str

    The URI of the created resource.

    share_settings NodeGroupShareSettingsArgs

    Share settings for the node group. Structure is documented below.

    size int

    The total number of nodes in the node group.

    zone str

    Zone where this node group is located

    autoscalingPolicy Property Map

    If you use sole-tenant nodes for your workloads, you can use the node group autoscaler to automatically manage the sizes of your node groups. One of initial_size or autoscaling_policy must be configured on resource creation. Structure is documented below.

    creationTimestamp String

    Creation timestamp in RFC3339 text format.

    description String

    An optional textual description of the resource.

    initialSize Number

    The initial number of nodes in the node group. One of initial_size or autoscaling_policy must be configured on resource creation.

    maintenanceInterval String

    Specifies the frequency of planned maintenance events. Set to one of the following: - AS_NEEDED: Hosts are eligible to receive infrastructure and hypervisor updates as they become available. - RECURRENT: Hosts receive planned infrastructure and hypervisor updates on a periodic basis, but not more frequently than every 28 days. This minimizes the number of planned maintenance operations on individual hosts and reduces the frequency of disruptions, both live migrations and terminations, on individual VMs. Possible values: ["AS_NEEDED", "RECURRENT"]

    maintenancePolicy String

    Specifies how to handle instances when a node in the group undergoes maintenance. Set to one of: DEFAULT, RESTART_IN_PLACE, or MIGRATE_WITHIN_NODE_GROUP. The default value is DEFAULT.

    maintenanceWindow Property Map

    Contains properties for the timeframe of maintenance. Structure is documented below.

    name String

    Name of the resource.

    nodeTemplate String

    The URL of the node template to which this node group belongs.


    project String

    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.

    selfLink String

    The URI of the created resource.

    shareSettings Property Map

    Share settings for the node group. Structure is documented below.

    size Number

    The total number of nodes in the node group.

    zone String

    Zone where this node group is located

    Supporting Types

    NodeGroupAutoscalingPolicy, NodeGroupAutoscalingPolicyArgs

    MaxNodes int

    Maximum size of the node group. Set to a value less than or equal to 100 and greater than or equal to min-nodes.

    MinNodes int

    Minimum size of the node group. Must be less than or equal to max-nodes. The default value is 0.

    Mode string

    The autoscaling mode. Set to one of the following:

    • OFF: Disables the autoscaler.
    • ON: Enables scaling in and scaling out.
    • ONLY_SCALE_OUT: Enables only scaling out. You must use this mode if your node groups are configured to restart their hosted VMs on minimal servers. Possible values are: OFF, ON, ONLY_SCALE_OUT.
    MaxNodes int

    Maximum size of the node group. Set to a value less than or equal to 100 and greater than or equal to min-nodes.

    MinNodes int

    Minimum size of the node group. Must be less than or equal to max-nodes. The default value is 0.

    Mode string

    The autoscaling mode. Set to one of the following:

    • OFF: Disables the autoscaler.
    • ON: Enables scaling in and scaling out.
    • ONLY_SCALE_OUT: Enables only scaling out. You must use this mode if your node groups are configured to restart their hosted VMs on minimal servers. Possible values are: OFF, ON, ONLY_SCALE_OUT.
    maxNodes Integer

    Maximum size of the node group. Set to a value less than or equal to 100 and greater than or equal to min-nodes.

    minNodes Integer

    Minimum size of the node group. Must be less than or equal to max-nodes. The default value is 0.

    mode String

    The autoscaling mode. Set to one of the following:

    • OFF: Disables the autoscaler.
    • ON: Enables scaling in and scaling out.
    • ONLY_SCALE_OUT: Enables only scaling out. You must use this mode if your node groups are configured to restart their hosted VMs on minimal servers. Possible values are: OFF, ON, ONLY_SCALE_OUT.
    maxNodes number

    Maximum size of the node group. Set to a value less than or equal to 100 and greater than or equal to min-nodes.

    minNodes number

    Minimum size of the node group. Must be less than or equal to max-nodes. The default value is 0.

    mode string

    The autoscaling mode. Set to one of the following:

    • OFF: Disables the autoscaler.
    • ON: Enables scaling in and scaling out.
    • ONLY_SCALE_OUT: Enables only scaling out. You must use this mode if your node groups are configured to restart their hosted VMs on minimal servers. Possible values are: OFF, ON, ONLY_SCALE_OUT.
    max_nodes int

    Maximum size of the node group. Set to a value less than or equal to 100 and greater than or equal to min-nodes.

    min_nodes int

    Minimum size of the node group. Must be less than or equal to max-nodes. The default value is 0.

    mode str

    The autoscaling mode. Set to one of the following:

    • OFF: Disables the autoscaler.
    • ON: Enables scaling in and scaling out.
    • ONLY_SCALE_OUT: Enables only scaling out. You must use this mode if your node groups are configured to restart their hosted VMs on minimal servers. Possible values are: OFF, ON, ONLY_SCALE_OUT.
    maxNodes Number

    Maximum size of the node group. Set to a value less than or equal to 100 and greater than or equal to min-nodes.

    minNodes Number

    Minimum size of the node group. Must be less than or equal to max-nodes. The default value is 0.

    mode String

    The autoscaling mode. Set to one of the following:

    • OFF: Disables the autoscaler.
    • ON: Enables scaling in and scaling out.
    • ONLY_SCALE_OUT: Enables only scaling out. You must use this mode if your node groups are configured to restart their hosted VMs on minimal servers. Possible values are: OFF, ON, ONLY_SCALE_OUT.

    NodeGroupMaintenanceWindow, NodeGroupMaintenanceWindowArgs

    StartTime string

    Start time of the window. This must be in UTC format that resolves to one of 00:00, 04:00, 08:00, 12:00, 16:00, or 20:00. For example, both 13:00-5 and 08:00 are valid.

    StartTime string

    Start time of the window. This must be in UTC format that resolves to one of 00:00, 04:00, 08:00, 12:00, 16:00, or 20:00. For example, both 13:00-5 and 08:00 are valid.

    startTime String

    Start time of the window. This must be in UTC format that resolves to one of 00:00, 04:00, 08:00, 12:00, 16:00, or 20:00. For example, both 13:00-5 and 08:00 are valid.

    startTime string

    Start time of the window. This must be in UTC format that resolves to one of 00:00, 04:00, 08:00, 12:00, 16:00, or 20:00. For example, both 13:00-5 and 08:00 are valid.

    start_time str

    Start time of the window. This must be in UTC format that resolves to one of 00:00, 04:00, 08:00, 12:00, 16:00, or 20:00. For example, both 13:00-5 and 08:00 are valid.

    startTime String

    Start time of the window. This must be in UTC format that resolves to one of 00:00, 04:00, 08:00, 12:00, 16:00, or 20:00. For example, both 13:00-5 and 08:00 are valid.

    NodeGroupShareSettings, NodeGroupShareSettingsArgs

    ShareType string

    Node group sharing type. Possible values are: ORGANIZATION, SPECIFIC_PROJECTS, LOCAL.

    ProjectMaps List<NodeGroupShareSettingsProjectMap>

    A map of project id and project config. This is only valid when shareType's value is SPECIFIC_PROJECTS. Structure is documented below.

    ShareType string

    Node group sharing type. Possible values are: ORGANIZATION, SPECIFIC_PROJECTS, LOCAL.

    ProjectMaps []NodeGroupShareSettingsProjectMap

    A map of project id and project config. This is only valid when shareType's value is SPECIFIC_PROJECTS. Structure is documented below.

    shareType String

    Node group sharing type. Possible values are: ORGANIZATION, SPECIFIC_PROJECTS, LOCAL.

    projectMaps List<NodeGroupShareSettingsProjectMap>

    A map of project id and project config. This is only valid when shareType's value is SPECIFIC_PROJECTS. Structure is documented below.

    shareType string

    Node group sharing type. Possible values are: ORGANIZATION, SPECIFIC_PROJECTS, LOCAL.

    projectMaps NodeGroupShareSettingsProjectMap[]

    A map of project id and project config. This is only valid when shareType's value is SPECIFIC_PROJECTS. Structure is documented below.

    share_type str

    Node group sharing type. Possible values are: ORGANIZATION, SPECIFIC_PROJECTS, LOCAL.

    project_maps Sequence[NodeGroupShareSettingsProjectMap]

    A map of project id and project config. This is only valid when shareType's value is SPECIFIC_PROJECTS. Structure is documented below.

    shareType String

    Node group sharing type. Possible values are: ORGANIZATION, SPECIFIC_PROJECTS, LOCAL.

    projectMaps List<Property Map>

    A map of project id and project config. This is only valid when shareType's value is SPECIFIC_PROJECTS. Structure is documented below.

    NodeGroupShareSettingsProjectMap, NodeGroupShareSettingsProjectMapArgs

    Id string

    The identifier for this object. Format specified above.

    ProjectId string

    The project id/number should be the same as the key of this project config in the project map.

    Id string

    The identifier for this object. Format specified above.

    ProjectId string

    The project id/number should be the same as the key of this project config in the project map.

    id String

    The identifier for this object. Format specified above.

    projectId String

    The project id/number should be the same as the key of this project config in the project map.

    id string

    The identifier for this object. Format specified above.

    projectId string

    The project id/number should be the same as the key of this project config in the project map.

    id str

    The identifier for this object. Format specified above.

    project_id str

    The project id/number should be the same as the key of this project config in the project map.

    id String

    The identifier for this object. Format specified above.

    projectId String

    The project id/number should be the same as the key of this project config in the project map.

    Import

    NodeGroup can be imported using any of these accepted formats: `projects/{{project}}/zones/{{zone}}/nodeGroups/{{name}}`, `{{project}}/{{zone}}/{{name}}`, `{{zone}}/{{name}}`, or `{{name}}`. In Terraform v1.5.0 and later, use an import block to import NodeGroup using one of the formats above. For example:

    import {
      id = "projects/{{project}}/zones/{{zone}}/nodeGroups/{{name}}"
      to = google_compute_node_group.default
    }

     When using the [`terraform import` command](https://developer.hashicorp.com/terraform/cli/commands/import), NodeGroup can be imported using one of the formats above. For example:
    
     $ pulumi import gcp:compute/nodeGroup:NodeGroup default projects/{{project}}/zones/{{zone}}/nodeGroups/{{name}}
    
     $ pulumi import gcp:compute/nodeGroup:NodeGroup default {{project}}/{{zone}}/{{name}}
    
     $ pulumi import gcp:compute/nodeGroup:NodeGroup default {{zone}}/{{name}}
    
     $ pulumi import gcp:compute/nodeGroup:NodeGroup default {{name}}
    

    Package Details

    Repository
    Google Cloud (GCP) Classic pulumi/pulumi-gcp
    License
    Apache-2.0
    Notes

    This Pulumi package is based on the google-beta Terraform Provider.

    gcp logo
    Google Cloud Classic v7.2.2 published on Monday, Jan 1, 0001 by Pulumi