
gcp.container.NodePool

Google Cloud Classic v7.16.0 published on Wednesday, Mar 27, 2024 by Pulumi

    Manages a node pool in a Google Kubernetes Engine (GKE) cluster separately from the cluster control plane. For more information see the official documentation and the API reference.

    Example Usage

    import * as pulumi from "@pulumi/pulumi";
    import * as gcp from "@pulumi/gcp";
    
    const _default = new gcp.serviceaccount.Account("default", {
        accountId: "service-account-id",
        displayName: "Service Account",
    });
    const primary = new gcp.container.Cluster("primary", {
        name: "my-gke-cluster",
        location: "us-central1",
        removeDefaultNodePool: true,
        initialNodeCount: 1,
    });
    const primaryPreemptibleNodes = new gcp.container.NodePool("primary_preemptible_nodes", {
        name: "my-node-pool",
        cluster: primary.id,
        nodeCount: 1,
        nodeConfig: {
            preemptible: true,
            machineType: "e2-medium",
            serviceAccount: _default.email,
            oauthScopes: ["https://www.googleapis.com/auth/cloud-platform"],
        },
    });
    
    import pulumi
    import pulumi_gcp as gcp
    
    default = gcp.serviceaccount.Account("default",
        account_id="service-account-id",
        display_name="Service Account")
    primary = gcp.container.Cluster("primary",
        name="my-gke-cluster",
        location="us-central1",
        remove_default_node_pool=True,
        initial_node_count=1)
    primary_preemptible_nodes = gcp.container.NodePool("primary_preemptible_nodes",
        name="my-node-pool",
        cluster=primary.id,
        node_count=1,
        node_config=gcp.container.NodePoolNodeConfigArgs(
            preemptible=True,
            machine_type="e2-medium",
            service_account=default.email,
            oauth_scopes=["https://www.googleapis.com/auth/cloud-platform"],
        ))
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/container"
    	"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/serviceaccount"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_default, err := serviceaccount.NewAccount(ctx, "default", &serviceaccount.AccountArgs{
    			AccountId:   pulumi.String("service-account-id"),
    			DisplayName: pulumi.String("Service Account"),
    		})
    		if err != nil {
    			return err
    		}
    		primary, err := container.NewCluster(ctx, "primary", &container.ClusterArgs{
    			Name:                  pulumi.String("my-gke-cluster"),
    			Location:              pulumi.String("us-central1"),
    			RemoveDefaultNodePool: pulumi.Bool(true),
    			InitialNodeCount:      pulumi.Int(1),
    		})
    		if err != nil {
    			return err
    		}
    		_, err = container.NewNodePool(ctx, "primary_preemptible_nodes", &container.NodePoolArgs{
    			Name:      pulumi.String("my-node-pool"),
    			Cluster:   primary.ID(),
    			NodeCount: pulumi.Int(1),
    			NodeConfig: &container.NodePoolNodeConfigArgs{
    				Preemptible:    pulumi.Bool(true),
    				MachineType:    pulumi.String("e2-medium"),
    				ServiceAccount: _default.Email,
    				OauthScopes: pulumi.StringArray{
    					pulumi.String("https://www.googleapis.com/auth/cloud-platform"),
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Gcp = Pulumi.Gcp;
    
    return await Deployment.RunAsync(() => 
    {
        var @default = new Gcp.ServiceAccount.Account("default", new()
        {
            AccountId = "service-account-id",
            DisplayName = "Service Account",
        });
    
        var primary = new Gcp.Container.Cluster("primary", new()
        {
            Name = "my-gke-cluster",
            Location = "us-central1",
            RemoveDefaultNodePool = true,
            InitialNodeCount = 1,
        });
    
        var primaryPreemptibleNodes = new Gcp.Container.NodePool("primary_preemptible_nodes", new()
        {
            Name = "my-node-pool",
            Cluster = primary.Id,
            NodeCount = 1,
            NodeConfig = new Gcp.Container.Inputs.NodePoolNodeConfigArgs
            {
                Preemptible = true,
                MachineType = "e2-medium",
                ServiceAccount = @default.Email,
                OauthScopes = new[]
                {
                    "https://www.googleapis.com/auth/cloud-platform",
                },
            },
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.gcp.serviceaccount.Account;
    import com.pulumi.gcp.serviceaccount.AccountArgs;
    import com.pulumi.gcp.container.Cluster;
    import com.pulumi.gcp.container.ClusterArgs;
    import com.pulumi.gcp.container.NodePool;
    import com.pulumi.gcp.container.NodePoolArgs;
    import com.pulumi.gcp.container.inputs.NodePoolNodeConfigArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var default_ = new Account("default", AccountArgs.builder()        
                .accountId("service-account-id")
                .displayName("Service Account")
                .build());
    
            var primary = new Cluster("primary", ClusterArgs.builder()        
                .name("my-gke-cluster")
                .location("us-central1")
                .removeDefaultNodePool(true)
                .initialNodeCount(1)
                .build());
    
            var primaryPreemptibleNodes = new NodePool("primaryPreemptibleNodes", NodePoolArgs.builder()        
                .name("my-node-pool")
                .cluster(primary.id())
                .nodeCount(1)
                .nodeConfig(NodePoolNodeConfigArgs.builder()
                    .preemptible(true)
                    .machineType("e2-medium")
                    .serviceAccount(default_.email())
                    .oauthScopes("https://www.googleapis.com/auth/cloud-platform")
                    .build())
                .build());
    
        }
    }
    
    resources:
      default:
        type: gcp:serviceaccount:Account
        properties:
          accountId: service-account-id
          displayName: Service Account
      primary:
        type: gcp:container:Cluster
        properties:
          name: my-gke-cluster
          location: us-central1
          removeDefaultNodePool: true
          initialNodeCount: 1
      primaryPreemptibleNodes:
        type: gcp:container:NodePool
        name: primary_preemptible_nodes
        properties:
          name: my-node-pool
          cluster: ${primary.id}
          nodeCount: 1
          nodeConfig:
            preemptible: true
            machineType: e2-medium
            serviceAccount: ${default.email}
            oauthScopes:
              - https://www.googleapis.com/auth/cloud-platform
    

    2 Node Pools, 1 Separately Managed + The Default Node Pool

    import * as pulumi from "@pulumi/pulumi";
    import * as gcp from "@pulumi/gcp";
    
    const _default = new gcp.serviceaccount.Account("default", {
        accountId: "service-account-id",
        displayName: "Service Account",
    });
    const primary = new gcp.container.Cluster("primary", {
        name: "marcellus-wallace",
        location: "us-central1-a",
        initialNodeCount: 3,
        nodeLocations: ["us-central1-c"],
        nodeConfig: {
            serviceAccount: _default.email,
            oauthScopes: ["https://www.googleapis.com/auth/cloud-platform"],
            guestAccelerators: [{
                type: "nvidia-tesla-k80",
                count: 1,
            }],
        },
    });
    const np = new gcp.container.NodePool("np", {
        name: "my-node-pool",
        cluster: primary.id,
        nodeConfig: {
            machineType: "e2-medium",
            serviceAccount: _default.email,
            oauthScopes: ["https://www.googleapis.com/auth/cloud-platform"],
        },
    });
    
    import pulumi
    import pulumi_gcp as gcp
    
    default = gcp.serviceaccount.Account("default",
        account_id="service-account-id",
        display_name="Service Account")
    primary = gcp.container.Cluster("primary",
        name="marcellus-wallace",
        location="us-central1-a",
        initial_node_count=3,
        node_locations=["us-central1-c"],
        node_config=gcp.container.ClusterNodeConfigArgs(
            service_account=default.email,
            oauth_scopes=["https://www.googleapis.com/auth/cloud-platform"],
            guest_accelerators=[gcp.container.ClusterNodeConfigGuestAcceleratorArgs(
                type="nvidia-tesla-k80",
                count=1,
            )],
        ))
    np = gcp.container.NodePool("np",
        name="my-node-pool",
        cluster=primary.id,
        node_config=gcp.container.NodePoolNodeConfigArgs(
            machine_type="e2-medium",
            service_account=default.email,
            oauth_scopes=["https://www.googleapis.com/auth/cloud-platform"],
        ))
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/container"
    	"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/serviceaccount"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_default, err := serviceaccount.NewAccount(ctx, "default", &serviceaccount.AccountArgs{
    			AccountId:   pulumi.String("service-account-id"),
    			DisplayName: pulumi.String("Service Account"),
    		})
    		if err != nil {
    			return err
    		}
    		primary, err := container.NewCluster(ctx, "primary", &container.ClusterArgs{
    			Name:             pulumi.String("marcellus-wallace"),
    			Location:         pulumi.String("us-central1-a"),
    			InitialNodeCount: pulumi.Int(3),
    			NodeLocations: pulumi.StringArray{
    				pulumi.String("us-central1-c"),
    			},
    			NodeConfig: &container.ClusterNodeConfigArgs{
    				ServiceAccount: _default.Email,
    				OauthScopes: pulumi.StringArray{
    					pulumi.String("https://www.googleapis.com/auth/cloud-platform"),
    				},
    				GuestAccelerators: container.ClusterNodeConfigGuestAcceleratorArray{
    					&container.ClusterNodeConfigGuestAcceleratorArgs{
    						Type:  pulumi.String("nvidia-tesla-k80"),
    						Count: pulumi.Int(1),
    					},
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		_, err = container.NewNodePool(ctx, "np", &container.NodePoolArgs{
    			Name:    pulumi.String("my-node-pool"),
    			Cluster: primary.ID(),
    			NodeConfig: &container.NodePoolNodeConfigArgs{
    				MachineType:    pulumi.String("e2-medium"),
    				ServiceAccount: _default.Email,
    				OauthScopes: pulumi.StringArray{
    					pulumi.String("https://www.googleapis.com/auth/cloud-platform"),
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Gcp = Pulumi.Gcp;
    
    return await Deployment.RunAsync(() => 
    {
        var @default = new Gcp.ServiceAccount.Account("default", new()
        {
            AccountId = "service-account-id",
            DisplayName = "Service Account",
        });
    
        var primary = new Gcp.Container.Cluster("primary", new()
        {
            Name = "marcellus-wallace",
            Location = "us-central1-a",
            InitialNodeCount = 3,
            NodeLocations = new[]
            {
                "us-central1-c",
            },
            NodeConfig = new Gcp.Container.Inputs.ClusterNodeConfigArgs
            {
                ServiceAccount = @default.Email,
                OauthScopes = new[]
                {
                    "https://www.googleapis.com/auth/cloud-platform",
                },
                GuestAccelerators = new[]
                {
                    new Gcp.Container.Inputs.ClusterNodeConfigGuestAcceleratorArgs
                    {
                        Type = "nvidia-tesla-k80",
                        Count = 1,
                    },
                },
            },
        });
    
        var np = new Gcp.Container.NodePool("np", new()
        {
            Name = "my-node-pool",
            Cluster = primary.Id,
            NodeConfig = new Gcp.Container.Inputs.NodePoolNodeConfigArgs
            {
                MachineType = "e2-medium",
                ServiceAccount = @default.Email,
                OauthScopes = new[]
                {
                    "https://www.googleapis.com/auth/cloud-platform",
                },
            },
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.gcp.serviceaccount.Account;
    import com.pulumi.gcp.serviceaccount.AccountArgs;
    import com.pulumi.gcp.container.Cluster;
    import com.pulumi.gcp.container.ClusterArgs;
    import com.pulumi.gcp.container.inputs.ClusterNodeConfigArgs;
    import com.pulumi.gcp.container.inputs.ClusterNodeConfigGuestAcceleratorArgs;
    import com.pulumi.gcp.container.NodePool;
    import com.pulumi.gcp.container.NodePoolArgs;
    import com.pulumi.gcp.container.inputs.NodePoolNodeConfigArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var default_ = new Account("default", AccountArgs.builder()        
                .accountId("service-account-id")
                .displayName("Service Account")
                .build());
    
            var primary = new Cluster("primary", ClusterArgs.builder()        
                .name("marcellus-wallace")
                .location("us-central1-a")
                .initialNodeCount(3)
                .nodeLocations("us-central1-c")
                .nodeConfig(ClusterNodeConfigArgs.builder()
                    .serviceAccount(default_.email())
                    .oauthScopes("https://www.googleapis.com/auth/cloud-platform")
                    .guestAccelerators(ClusterNodeConfigGuestAcceleratorArgs.builder()
                        .type("nvidia-tesla-k80")
                        .count(1)
                        .build())
                    .build())
                .build());
    
            var np = new NodePool("np", NodePoolArgs.builder()        
                .name("my-node-pool")
                .cluster(primary.id())
                .nodeConfig(NodePoolNodeConfigArgs.builder()
                    .machineType("e2-medium")
                    .serviceAccount(default_.email())
                    .oauthScopes("https://www.googleapis.com/auth/cloud-platform")
                    .build())
                .build());
    
        }
    }
    
    resources:
      default:
        type: gcp:serviceaccount:Account
        properties:
          accountId: service-account-id
          displayName: Service Account
      np:
        type: gcp:container:NodePool
        properties:
          name: my-node-pool
          cluster: ${primary.id}
          nodeConfig:
            machineType: e2-medium
            serviceAccount: ${default.email}
            oauthScopes:
              - https://www.googleapis.com/auth/cloud-platform
      primary:
        type: gcp:container:Cluster
        properties:
          name: marcellus-wallace
          location: us-central1-a
          initialNodeCount: 3
          nodeLocations:
            - us-central1-c
          nodeConfig:
            serviceAccount: ${default.email}
            oauthScopes:
              - https://www.googleapis.com/auth/cloud-platform
            guestAccelerators:
              - type: nvidia-tesla-k80
                count: 1
    

    Create NodePool Resource

    new NodePool(name: string, args: NodePoolArgs, opts?: CustomResourceOptions);
    @overload
    def NodePool(resource_name: str,
                 opts: Optional[ResourceOptions] = None,
                 autoscaling: Optional[NodePoolAutoscalingArgs] = None,
                 cluster: Optional[str] = None,
                 initial_node_count: Optional[int] = None,
                 location: Optional[str] = None,
                 management: Optional[NodePoolManagementArgs] = None,
                 max_pods_per_node: Optional[int] = None,
                 name: Optional[str] = None,
                 name_prefix: Optional[str] = None,
                 network_config: Optional[NodePoolNetworkConfigArgs] = None,
                 node_config: Optional[NodePoolNodeConfigArgs] = None,
                 node_count: Optional[int] = None,
                 node_locations: Optional[Sequence[str]] = None,
                 placement_policy: Optional[NodePoolPlacementPolicyArgs] = None,
                 project: Optional[str] = None,
                 queued_provisioning: Optional[NodePoolQueuedProvisioningArgs] = None,
                 upgrade_settings: Optional[NodePoolUpgradeSettingsArgs] = None,
                 version: Optional[str] = None)
    @overload
    def NodePool(resource_name: str,
                 args: NodePoolArgs,
                 opts: Optional[ResourceOptions] = None)
    func NewNodePool(ctx *Context, name string, args NodePoolArgs, opts ...ResourceOption) (*NodePool, error)
    public NodePool(string name, NodePoolArgs args, CustomResourceOptions? opts = null)
    public NodePool(String name, NodePoolArgs args)
    public NodePool(String name, NodePoolArgs args, CustomResourceOptions options)
    
    type: gcp:container:NodePool
    properties: # The arguments to resource properties.
    options: # Bag of options to control resource's behavior.
    
    
    name string
    The unique name of the resource.
    args NodePoolArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    resource_name str
    The unique name of the resource.
    args NodePoolArgs
    The arguments to resource properties.
    opts ResourceOptions
    Bag of options to control resource's behavior.
    ctx Context
    Context object for the current deployment.
    name string
    The unique name of the resource.
    args NodePoolArgs
    The arguments to resource properties.
    opts ResourceOption
    Bag of options to control resource's behavior.
    name string
    The unique name of the resource.
    args NodePoolArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    name String
    The unique name of the resource.
    args NodePoolArgs
    The arguments to resource properties.
    options CustomResourceOptions
    Bag of options to control resource's behavior.

    NodePool Resource Properties

    To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

    Inputs

    The NodePool resource accepts the following input properties:

    Cluster string
    The cluster to create the node pool for. The cluster must be present in the provided location. May be specified in the format projects/{{project}}/locations/{{location}}/clusters/{{cluster}} or as just the name of the cluster.


    Autoscaling NodePoolAutoscaling
    Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Either total limits or per-zone limits are required. Structure is documented below.
    InitialNodeCount int
    The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use a lifecycle block to ignore subsequent changes to this field.
    Location string
    The location (region or zone) of the cluster.


    Management NodePoolManagement
    Node management configuration, wherein auto-repair and auto-upgrade are configured. Structure is documented below.
    MaxPodsPerNode int
    The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.
    Name string
    The name of the node pool. If left blank, the provider will auto-generate a unique name.
    NamePrefix string
    Creates a unique name for the node pool beginning with the specified prefix. Conflicts with name.
    NetworkConfig NodePoolNetworkConfig
    The network configuration of the pool, such as configuration for adding Pod IP address ranges to the node pool or enabling private nodes. Structure is documented below.
    NodeConfig NodePoolNodeConfig
    Parameters used in creating the node pool. See gcp.container.Cluster for schema.
    NodeCount int
    The number of nodes per instance group. This field can be used to update the number of nodes per instance group but should not be used alongside autoscaling.
    NodeLocations List<string>

    The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level node_locations will be used.

    Note: node_locations will not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster.

    PlacementPolicy NodePoolPlacementPolicy
    Specifies a custom placement policy for the nodes.
    Project string
    The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.
    QueuedProvisioning NodePoolQueuedProvisioning
    Specifies node pool-level settings of queued provisioning. Structure is documented below.
    UpgradeSettings NodePoolUpgradeSettings
    Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.
    Version string
    The Kubernetes version for the nodes in this pool. Note that if this field and auto_upgrade are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See the gcp.container.getEngineVersions data source's version_prefix field to approximate fuzzy versions in a provider-compatible way.
    Cluster string
    The cluster to create the node pool for. The cluster must be present in the provided location. May be specified in the format projects/{{project}}/locations/{{location}}/clusters/{{cluster}} or as just the name of the cluster.


    Autoscaling NodePoolAutoscalingArgs
    Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Either total limits or per-zone limits are required. Structure is documented below.
    InitialNodeCount int
    The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use a lifecycle block to ignore subsequent changes to this field.
    Location string
    The location (region or zone) of the cluster.


    Management NodePoolManagementArgs
    Node management configuration, wherein auto-repair and auto-upgrade are configured. Structure is documented below.
    MaxPodsPerNode int
    The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.
    Name string
    The name of the node pool. If left blank, the provider will auto-generate a unique name.
    NamePrefix string
    Creates a unique name for the node pool beginning with the specified prefix. Conflicts with name.
    NetworkConfig NodePoolNetworkConfigArgs
    The network configuration of the pool, such as configuration for adding Pod IP address ranges to the node pool or enabling private nodes. Structure is documented below.
    NodeConfig NodePoolNodeConfigArgs
    Parameters used in creating the node pool. See gcp.container.Cluster for schema.
    NodeCount int
    The number of nodes per instance group. This field can be used to update the number of nodes per instance group but should not be used alongside autoscaling.
    NodeLocations []string

    The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level node_locations will be used.

    Note: node_locations will not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster.

    PlacementPolicy NodePoolPlacementPolicyArgs
    Specifies a custom placement policy for the nodes.
    Project string
    The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.
    QueuedProvisioning NodePoolQueuedProvisioningArgs
    Specifies node pool-level settings of queued provisioning. Structure is documented below.
    UpgradeSettings NodePoolUpgradeSettingsArgs
    Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.
    Version string
    The Kubernetes version for the nodes in this pool. Note that if this field and auto_upgrade are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See the gcp.container.getEngineVersions data source's version_prefix field to approximate fuzzy versions in a provider-compatible way.
    cluster String
    The cluster to create the node pool for. The cluster must be present in the provided location. May be specified in the format projects/{{project}}/locations/{{location}}/clusters/{{cluster}} or as just the name of the cluster.


    autoscaling NodePoolAutoscaling
    Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Either total limits or per-zone limits are required. Structure is documented below.
    initialNodeCount Integer
    The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use a lifecycle block to ignore subsequent changes to this field.
    location String
    The location (region or zone) of the cluster.


    management NodePoolManagement
    Node management configuration, wherein auto-repair and auto-upgrade are configured. Structure is documented below.
    maxPodsPerNode Integer
    The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.
    name String
    The name of the node pool. If left blank, the provider will auto-generate a unique name.
    namePrefix String
    Creates a unique name for the node pool beginning with the specified prefix. Conflicts with name.
    networkConfig NodePoolNetworkConfig
    The network configuration of the pool, such as configuration for adding Pod IP address ranges to the node pool or enabling private nodes. Structure is documented below.
    nodeConfig NodePoolNodeConfig
    Parameters used in creating the node pool. See gcp.container.Cluster for schema.
    nodeCount Integer
    The number of nodes per instance group. This field can be used to update the number of nodes per instance group but should not be used alongside autoscaling.
    nodeLocations List<String>

    The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level node_locations will be used.

    Note: node_locations will not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster.

    placementPolicy NodePoolPlacementPolicy
    Specifies a custom placement policy for the nodes.
    project String
    The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.
    queuedProvisioning NodePoolQueuedProvisioning
    Specifies node pool-level settings of queued provisioning. Structure is documented below.
    upgradeSettings NodePoolUpgradeSettings
    Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.
    version String
    The Kubernetes version for the nodes in this pool. Note that if this field and auto_upgrade are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See the gcp.container.getEngineVersions data source's version_prefix field to approximate fuzzy versions in a provider-compatible way.
    cluster string
    The cluster to create the node pool for. The cluster must be present in the provided location. May be specified in the format projects/{{project}}/locations/{{location}}/clusters/{{cluster}} or as just the name of the cluster.


    autoscaling NodePoolAutoscaling
    Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Either total limits or per-zone limits are required. Structure is documented below.
    initialNodeCount number
    The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use a lifecycle block to ignore subsequent changes to this field.
    location string
    The location (region or zone) of the cluster.


    management NodePoolManagement
    Node management configuration, wherein auto-repair and auto-upgrade are configured. Structure is documented below.
    maxPodsPerNode number
    The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.
    name string
    The name of the node pool. If left blank, the provider will auto-generate a unique name.
    namePrefix string
    Creates a unique name for the node pool beginning with the specified prefix. Conflicts with name.
    networkConfig NodePoolNetworkConfig
    The network configuration of the pool, such as configuration for adding Pod IP address ranges to the node pool or enabling private nodes. Structure is documented below.
    nodeConfig NodePoolNodeConfig
    Parameters used in creating the node pool. See gcp.container.Cluster for schema.
    nodeCount number
    The number of nodes per instance group. This field can be used to update the number of nodes per instance group but should not be used alongside autoscaling.
    nodeLocations string[]

    The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level node_locations will be used.

    Note: node_locations will not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster.

    placementPolicy NodePoolPlacementPolicy
    Specifies a custom placement policy for the nodes.
    project string
    The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.
    queuedProvisioning NodePoolQueuedProvisioning
    Specifies node pool-level settings of queued provisioning. Structure is documented below.
    upgradeSettings NodePoolUpgradeSettings
    Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.
    version string
    The Kubernetes version for the nodes in this pool. Note that if this field and auto_upgrade are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See the gcp.container.getEngineVersions data source's version_prefix field to approximate fuzzy versions in a provider-compatible way.
    cluster str
    The cluster to create the node pool for. The cluster must be present in the provided location. May be specified in the format projects/{{project}}/locations/{{location}}/clusters/{{cluster}} or as just the name of the cluster.


    autoscaling NodePoolAutoscalingArgs
    Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Either total limits or per-zone limits are required. Structure is documented below.
    initial_node_count int
    The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use a lifecycle block to ignore subsequent changes to this field.
    location str
    The location (region or zone) of the cluster.


    management NodePoolManagementArgs
    Node management configuration, wherein auto-repair and auto-upgrade are configured. Structure is documented below.
    max_pods_per_node int
    The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.
    name str
    The name of the node pool. If left blank, the provider will auto-generate a unique name.
    name_prefix str
    Creates a unique name for the node pool beginning with the specified prefix. Conflicts with name.
    network_config NodePoolNetworkConfigArgs
    The network configuration of the pool, such as configuration for adding Pod IP address ranges to the node pool or enabling private nodes. Structure is documented below.
    node_config NodePoolNodeConfigArgs
    Parameters used in creating the node pool. See gcp.container.Cluster for schema.
    node_count int
    The number of nodes per instance group. This field can be used to update the number of nodes per instance group but should not be used alongside autoscaling.
    node_locations Sequence[str]

    The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level node_locations will be used.

    Note: node_locations will not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster.

    placement_policy NodePoolPlacementPolicyArgs
    Specifies a custom placement policy for the nodes.
    project str
    The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.
    queued_provisioning NodePoolQueuedProvisioningArgs
    Specifies node pool-level settings of queued provisioning. Structure is documented below.
    upgrade_settings NodePoolUpgradeSettingsArgs
    Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.
    version str
    The Kubernetes version for the nodes in this pool. Note that if this field and auto_upgrade are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See the gcp.container.getEngineVersions data source's version_prefix field to approximate fuzzy versions in a provider-compatible way.
    cluster String
    The cluster to create the node pool for. The cluster must be present in the provided location. May be specified in the format projects/{{project}}/locations/{{location}}/clusters/{{cluster}} or as just the name of the cluster.


    autoscaling Property Map
    Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Either total limits or per-zone limits are required. Structure is documented below.
    initialNodeCount Number
    The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use a lifecycle block to ignore subsequent changes to this field.
    location String
    The location (region or zone) of the cluster.


    management Property Map
    Node management configuration, wherein auto-repair and auto-upgrade are configured. Structure is documented below.
    maxPodsPerNode Number
    The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.
    name String
    The name of the node pool. If left blank, the provider will auto-generate a unique name.
    namePrefix String
    Creates a unique name for the node pool beginning with the specified prefix. Conflicts with name.
    networkConfig Property Map
    The network configuration of the pool, such as configuration for adding Pod IP address ranges to the node pool or enabling private nodes. Structure is documented below.
    nodeConfig Property Map
    Parameters used in creating the node pool. See gcp.container.Cluster for schema.
    nodeCount Number
    The number of nodes per instance group. This field can be used to update the number of nodes per instance group but should not be used alongside autoscaling.
    nodeLocations List<String>

    The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level node_locations will be used.

    Note: node_locations will not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster.

    placementPolicy Property Map
    Specifies a custom placement policy for the nodes.
    project String
    The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.
    queuedProvisioning Property Map
    Specifies node pool-level settings of queued provisioning. Structure is documented below.
    upgradeSettings Property Map
    Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.
    version String
    The Kubernetes version for the nodes in this pool. Note that if this field and auto_upgrade are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See the gcp.container.getEngineVersions data source's version_prefix field to approximate fuzzy versions in a provider-compatible way.
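
    As a minimal TypeScript sketch tying several of these inputs together, the following pool pins an explicit node version resolved from a fuzzy prefix (via the gcp.container.getEngineVersions data source mentioned above) and uses Pulumi's ignoreChanges resource option as the equivalent of the lifecycle block suggested in the initialNodeCount warning. The cluster, location, and version prefix here are hypothetical placeholders.

    import * as gcp from "@pulumi/gcp";

    const cluster = new gcp.container.Cluster("example", {
        location: "us-central1",
        removeDefaultNodePool: true,
        initialNodeCount: 1,
    });

    // Resolve a concrete version from a fuzzy prefix so the provider
    // does not see spurious diffs (the prefix is a placeholder).
    const versions = gcp.container.getEngineVersionsOutput({
        location: "us-central1",
        versionPrefix: "1.27.",
    });

    const autoscaled = new gcp.container.NodePool("autoscaled", {
        cluster: cluster.id,
        version: versions.latestNodeVersion,
        initialNodeCount: 1,
        autoscaling: {
            // Per-zone limits; totalMinNodeCount/totalMaxNodeCount are the alternative.
            minNodeCount: 1,
            maxNodeCount: 5,
        },
        management: {
            autoRepair: true,
            // An explicit version and auto-upgrade would fight each other.
            autoUpgrade: false,
        },
    }, {
        // Ignore drift in initialNodeCount caused by autoscaling or manual resizes,
        // as the warning above recommends.
        ignoreChanges: ["initialNodeCount"],
    });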

    Outputs

    All input properties are implicitly available as output properties. Additionally, the NodePool resource produces the following output properties:

    Id string
    The provider-assigned unique ID for this managed resource.
    InstanceGroupUrls List<string>
    The resource URLs of the managed instance groups associated with this node pool.
    ManagedInstanceGroupUrls List<string>
    List of instance group URLs which have been assigned to this node pool.
    Operation string
    Id string
    The provider-assigned unique ID for this managed resource.
    InstanceGroupUrls []string
    The resource URLs of the managed instance groups associated with this node pool.
    ManagedInstanceGroupUrls []string
    List of instance group URLs which have been assigned to this node pool.
    Operation string
    id String
    The provider-assigned unique ID for this managed resource.
    instanceGroupUrls List<String>
    The resource URLs of the managed instance groups associated with this node pool.
    managedInstanceGroupUrls List<String>
    List of instance group URLs which have been assigned to this node pool.
    operation String
    id string
    The provider-assigned unique ID for this managed resource.
    instanceGroupUrls string[]
    The resource URLs of the managed instance groups associated with this node pool.
    managedInstanceGroupUrls string[]
    List of instance group URLs which have been assigned to this node pool.
    operation string
    id str
    The provider-assigned unique ID for this managed resource.
    instance_group_urls Sequence[str]
    The resource URLs of the managed instance groups associated with this node pool.
    managed_instance_group_urls Sequence[str]
    List of instance group URLs which have been assigned to this node pool.
    operation str
    id String
    The provider-assigned unique ID for this managed resource.
    instanceGroupUrls List<String>
    The resource URLs of the managed instance groups associated with this node pool.
    managedInstanceGroupUrls List<String>
    List of instance group URLs which have been assigned to this node pool.
    operation String
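
    These outputs can be exported like any other Pulumi outputs; for example, reusing the primaryPreemptibleNodes pool from the first example above:

    export const instanceGroupUrls = primaryPreemptibleNodes.instanceGroupUrls;
    export const managedInstanceGroupUrls = primaryPreemptibleNodes.managedInstanceGroupUrls;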

    Look up Existing NodePool Resource

    Get an existing NodePool resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

    public static get(name: string, id: Input<ID>, state?: NodePoolState, opts?: CustomResourceOptions): NodePool
    @staticmethod
    def get(resource_name: str,
            id: str,
            opts: Optional[ResourceOptions] = None,
            autoscaling: Optional[NodePoolAutoscalingArgs] = None,
            cluster: Optional[str] = None,
            initial_node_count: Optional[int] = None,
            instance_group_urls: Optional[Sequence[str]] = None,
            location: Optional[str] = None,
            managed_instance_group_urls: Optional[Sequence[str]] = None,
            management: Optional[NodePoolManagementArgs] = None,
            max_pods_per_node: Optional[int] = None,
            name: Optional[str] = None,
            name_prefix: Optional[str] = None,
            network_config: Optional[NodePoolNetworkConfigArgs] = None,
            node_config: Optional[NodePoolNodeConfigArgs] = None,
            node_count: Optional[int] = None,
            node_locations: Optional[Sequence[str]] = None,
            operation: Optional[str] = None,
            placement_policy: Optional[NodePoolPlacementPolicyArgs] = None,
            project: Optional[str] = None,
            queued_provisioning: Optional[NodePoolQueuedProvisioningArgs] = None,
            upgrade_settings: Optional[NodePoolUpgradeSettingsArgs] = None,
            version: Optional[str] = None) -> NodePool
    func GetNodePool(ctx *Context, name string, id IDInput, state *NodePoolState, opts ...ResourceOption) (*NodePool, error)
    public static NodePool Get(string name, Input<string> id, NodePoolState? state, CustomResourceOptions? opts = null)
    public static NodePool get(String name, Output<String> id, NodePoolState state, CustomResourceOptions options)
    Resource lookup is not supported in YAML
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    resource_name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
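
    For example, in TypeScript an existing pool can be looked up by its provider ID (the project/location/cluster/pool ID below is a hypothetical placeholder):

    import * as gcp from "@pulumi/gcp";

    const existing = gcp.container.NodePool.get("existing",
        "my-project/us-central1/my-gke-cluster/my-node-pool");
    export const existingNodeVersion = existing.version;
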
    The following state arguments are supported:
    Autoscaling NodePoolAutoscaling
    Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Either total limits or per-zone limits are required. Structure is documented below.
    Cluster string
    The cluster to create the node pool for. The cluster must be present in the provided location. May be specified in the format projects/{{project}}/locations/{{location}}/clusters/{{cluster}} or as just the name of the cluster.


    InitialNodeCount int
    The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use a lifecycle block to ignore subsequent changes to this field.
    InstanceGroupUrls List<string>
    The resource URLs of the managed instance groups associated with this node pool.
    Location string
    The location (region or zone) of the cluster.


    ManagedInstanceGroupUrls List<string>
    List of instance group URLs which have been assigned to this node pool.
    Management NodePoolManagement
    Node management configuration, wherein auto-repair and auto-upgrade are configured. Structure is documented below.
    MaxPodsPerNode int
    The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.
    Name string
    The name of the node pool. If left blank, the provider will auto-generate a unique name.
    NamePrefix string
    Creates a unique name for the node pool beginning with the specified prefix. Conflicts with name.
    NetworkConfig NodePoolNetworkConfig
    The network configuration of the pool, such as configuration for adding Pod IP address ranges to the node pool or enabling private nodes. Structure is documented below.
    NodeConfig NodePoolNodeConfig
    Parameters used in creating the node pool. See gcp.container.Cluster for schema.
    NodeCount int
    The number of nodes per instance group. This field can be used to update the number of nodes per instance group but should not be used alongside autoscaling.
    NodeLocations List<string>

    The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level node_locations will be used.

    Note: node_locations will not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster.

    Operation string
    PlacementPolicy NodePoolPlacementPolicy
    Specifies a custom placement policy for the nodes.
    Project string
    The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.
    QueuedProvisioning NodePoolQueuedProvisioning
    Specifies node pool-level settings of queued provisioning. Structure is documented below.
    UpgradeSettings NodePoolUpgradeSettings
    Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.
    Version string
    The Kubernetes version for the nodes in this pool. Note that if this field and auto_upgrade are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See the gcp.container.getEngineVersions data source's version_prefix field to approximate fuzzy versions in a provider-compatible way.
    Autoscaling NodePoolAutoscalingArgs
    Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Either total limits or per-zone limits are required. Structure is documented below.
    Cluster string
    The cluster to create the node pool for. The cluster must be present in the provided location. May be specified in the format projects/{{project}}/locations/{{location}}/clusters/{{cluster}} or as just the name of the cluster.


    InitialNodeCount int
    The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use a lifecycle block to ignore subsequent changes to this field.
    InstanceGroupUrls []string
    The resource URLs of the managed instance groups associated with this node pool.
    Location string
    The location (region or zone) of the cluster.


    ManagedInstanceGroupUrls []string
    List of instance group URLs which have been assigned to this node pool.
    Management NodePoolManagementArgs
    Node management configuration, wherein auto-repair and auto-upgrade are configured. Structure is documented below.
    MaxPodsPerNode int
    The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.
    Name string
    The name of the node pool. If left blank, the provider will auto-generate a unique name.
    NamePrefix string
    Creates a unique name for the node pool beginning with the specified prefix. Conflicts with name.
    NetworkConfig NodePoolNetworkConfigArgs
    The network configuration of the pool, such as configuration for adding Pod IP address ranges to the node pool or enabling private nodes. Structure is documented below.
    NodeConfig NodePoolNodeConfigArgs
    Parameters used in creating the node pool. See gcp.container.Cluster for schema.
    NodeCount int
    The number of nodes per instance group. This field can be used to update the number of nodes per instance group but should not be used alongside autoscaling.
    NodeLocations []string

    The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level node_locations will be used.

    Note: node_locations will not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster.

    Operation string
    PlacementPolicy NodePoolPlacementPolicyArgs
    Specifies a custom placement policy for the nodes.
    Project string
    The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.
    QueuedProvisioning NodePoolQueuedProvisioningArgs
    Specifies node pool-level settings of queued provisioning. Structure is documented below.
    UpgradeSettings NodePoolUpgradeSettingsArgs
    Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.
    Version string
    The Kubernetes version for the nodes in this pool. Note that if this field and auto_upgrade are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See the gcp.container.getEngineVersions data source's version_prefix field to approximate fuzzy versions in a provider-compatible way.
    autoscaling NodePoolAutoscaling
    Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Structure is documented below.
    cluster String
    The cluster to create the node pool for. The cluster must be present in the location provided. It may be specified in the format projects/{{project}}/locations/{{location}}/clusters/{{cluster}} or as just the name of the cluster.


    initialNodeCount Integer
    The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use the ignoreChanges resource option to ignore subsequent changes to this field (see the example after this list).
    instanceGroupUrls List<String>
    The resource URLs of the managed instance groups associated with this node pool.
    location String
    The location (region or zone) of the cluster.


    managedInstanceGroupUrls List<String>
    List of instance group URLs which have been assigned to this node pool.
    management NodePoolManagement
    Node management configuration, wherein auto-repair and auto-upgrade are configured. Structure is documented below.
    maxPodsPerNode Integer
    The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.
    name String
    The name of the node pool. If left blank, the provider will auto-generate a unique name.
    namePrefix String
    Creates a unique name for the node pool beginning with the specified prefix. Conflicts with name.
    networkConfig NodePoolNetworkConfig
    The network configuration of the pool, such as configuration for adding Pod IP address ranges to the node pool, or enabling private nodes. Structure is documented below.
    nodeConfig NodePoolNodeConfig
    Parameters used in creating the node pool. See gcp.container.Cluster for schema.
    nodeCount Integer
    The number of nodes per instance group. This field can be used to update the number of nodes per instance group but should not be used alongside autoscaling.
    nodeLocations List<String>

    The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level node_locations will be used.

    Note: node_locations will not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster.

    operation String
    placementPolicy NodePoolPlacementPolicy
    Specifies a custom placement policy for the nodes.
    project String
    The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.
    queuedProvisioning NodePoolQueuedProvisioning

    Specifies node pool-level settings of queued provisioning. Structure is documented below.

    upgradeSettings NodePoolUpgradeSettings
    Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.
    version String
    The Kubernetes version for the nodes in this pool. Note that if this field and auto_upgrade are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See the gcp.container.getEngineVersions data source's version_prefix field to approximate fuzzy versions in a provider-compatible way.
    autoscaling NodePoolAutoscaling
    Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Structure is documented below.
    cluster string
    The cluster to create the node pool for. The cluster must be present in the location provided. It may be specified in the format projects/{{project}}/locations/{{location}}/clusters/{{cluster}} or as just the name of the cluster.


    initialNodeCount number
    The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use the ignoreChanges resource option to ignore subsequent changes to this field (see the example after this list).
    instanceGroupUrls string[]
    The resource URLs of the managed instance groups associated with this node pool.
    location string
    The location (region or zone) of the cluster.


    managedInstanceGroupUrls string[]
    List of instance group URLs which have been assigned to this node pool.
    management NodePoolManagement
    Node management configuration, wherein auto-repair and auto-upgrade are configured. Structure is documented below.
    maxPodsPerNode number
    The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.
    name string
    The name of the node pool. If left blank, the provider will auto-generate a unique name.
    namePrefix string
    Creates a unique name for the node pool beginning with the specified prefix. Conflicts with name.
    networkConfig NodePoolNetworkConfig
    The network configuration of the pool, such as configuration for adding Pod IP address ranges to the node pool, or enabling private nodes. Structure is documented below.
    nodeConfig NodePoolNodeConfig
    Parameters used in creating the node pool. See gcp.container.Cluster for schema.
    nodeCount number
    The number of nodes per instance group. This field can be used to update the number of nodes per instance group but should not be used alongside autoscaling.
    nodeLocations string[]

    The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level node_locations will be used.

    Note: node_locations will not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster.

    operation string
    placementPolicy NodePoolPlacementPolicy
    Specifies a custom placement policy for the nodes.
    project string
    The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.
    queuedProvisioning NodePoolQueuedProvisioning

    Specifies node pool-level settings of queued provisioning. Structure is documented below.

    upgradeSettings NodePoolUpgradeSettings
    Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.
    version string
    The Kubernetes version for the nodes in this pool. Note that if this field and auto_upgrade are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See the gcp.container.getEngineVersions data source's version_prefix field to approximate fuzzy versions in a provider-compatible way.
    autoscaling NodePoolAutoscalingArgs
    Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Structure is documented below.
    cluster str
    The cluster to create the node pool for. The cluster must be present in the location provided. It may be specified in the format projects/{{project}}/locations/{{location}}/clusters/{{cluster}} or as just the name of the cluster.


    initial_node_count int
    The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use the ignoreChanges resource option to ignore subsequent changes to this field (see the example after this list).
    instance_group_urls Sequence[str]
    The resource URLs of the managed instance groups associated with this node pool.
    location str
    The location (region or zone) of the cluster.


    managed_instance_group_urls Sequence[str]
    List of instance group URLs which have been assigned to this node pool.
    management NodePoolManagementArgs
    Node management configuration, wherein auto-repair and auto-upgrade are configured. Structure is documented below.
    max_pods_per_node int
    The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.
    name str
    The name of the node pool. If left blank, the provider will auto-generate a unique name.
    name_prefix str
    Creates a unique name for the node pool beginning with the specified prefix. Conflicts with name.
    network_config NodePoolNetworkConfigArgs
    The network configuration of the pool, such as configuration for adding Pod IP address ranges to the node pool, or enabling private nodes. Structure is documented below.
    node_config NodePoolNodeConfigArgs
    Parameters used in creating the node pool. See gcp.container.Cluster for schema.
    node_count int
    The number of nodes per instance group. This field can be used to update the number of nodes per instance group but should not be used alongside autoscaling.
    node_locations Sequence[str]

    The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level node_locations will be used.

    Note: node_locations will not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster.

    operation str
    placement_policy NodePoolPlacementPolicyArgs
    Specifies a custom placement policy for the nodes.
    project str
    The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.
    queued_provisioning NodePoolQueuedProvisioningArgs

    Specifies node pool-level settings of queued provisioning. Structure is documented below.

    upgrade_settings NodePoolUpgradeSettingsArgs
    Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.
    version str
    The Kubernetes version for the nodes in this pool. Note that if this field and auto_upgrade are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See the gcp.container.getEngineVersions data source's version_prefix field to approximate fuzzy versions in a provider-compatible way.
    autoscaling Property Map
    Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Structure is documented below.
    cluster String
    The cluster to create the node pool for. The cluster must be present in the location provided. It may be specified in the format projects/{{project}}/locations/{{location}}/clusters/{{cluster}} or as just the name of the cluster.


    initialNodeCount Number
    The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use the ignoreChanges resource option to ignore subsequent changes to this field (see the example after this list).
    instanceGroupUrls List<String>
    The resource URLs of the managed instance groups associated with this node pool.
    location String
    The location (region or zone) of the cluster.


    managedInstanceGroupUrls List<String>
    List of instance group URLs which have been assigned to this node pool.
    management Property Map
    Node management configuration, wherein auto-repair and auto-upgrade are configured. Structure is documented below.
    maxPodsPerNode Number
    The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.
    name String
    The name of the node pool. If left blank, the provider will auto-generate a unique name.
    namePrefix String
    Creates a unique name for the node pool beginning with the specified prefix. Conflicts with name.
    networkConfig Property Map
    The network configuration of the pool, such as configuration for adding Pod IP address ranges to the node pool, or enabling private nodes. Structure is documented below.
    nodeConfig Property Map
    Parameters used in creating the node pool. See gcp.container.Cluster for schema.
    nodeCount Number
    The number of nodes per instance group. This field can be used to update the number of nodes per instance group but should not be used alongside autoscaling.
    nodeLocations List<String>

    The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level node_locations will be used.

    Note: node_locations will not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster.

    operation String
    placementPolicy Property Map
    Specifies a custom placement policy for the nodes.
    project String
    The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.
    queuedProvisioning Property Map

    Specifies node pool-level settings of queued provisioning. Structure is documented below.

    upgradeSettings Property Map
    Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.
    version String
    The Kubernetes version for the nodes in this pool. Note that if this field and auto_upgrade are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See the gcp.container.getEngineVersions data source's version_prefix field to approximate fuzzy versions in a provider-compatible way.
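
    As noted for initialNodeCount above, a manual resize can be reconciled by ignoring drift on that field instead of letting the provider destroy and recreate the pool. A minimal TypeScript sketch, assuming a pre-existing cluster named my-gke-cluster (the pool names here are illustrative):

    import * as gcp from "@pulumi/gcp";

    const pool = new gcp.container.NodePool("example-pool", {
        cluster: "my-gke-cluster",
        initialNodeCount: 3,
    }, {
        // Ignore drift on initialNodeCount so a manual resize does not
        // trigger destroy/recreate on the next provider run.
        ignoreChanges: ["initialNodeCount"],
    });

    Likewise, for the version field, the gcp.container.getEngineVersions data source can resolve a fuzzy prefix to an explicit version, avoiding the spurious diffs described above. Continuing the same program, and assuming the 1.27. prefix is available in us-central1:

    const versions = gcp.container.getEngineVersions({
        location: "us-central1",
        versionPrefix: "1.27.",
    });
    const versionedPool = new gcp.container.NodePool("versioned-pool", {
        cluster: "my-gke-cluster",
        // Pin the resolved, explicit version rather than the fuzzy prefix.
        version: versions.then(v => v.latestNodeVersion),
    });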

    Supporting Types

    NodePoolAutoscaling, NodePoolAutoscalingArgs

    Either total limits or per-zone limits are required; the two cannot be combined.

    LocationPolicy string
    Location policy specifies the algorithm used when scaling up the node pool. Location policy is supported only in 1.24.1+ clusters.

    • "BALANCED" - a best-effort policy that aims to balance the sizes of available zones.
    • "ANY" - instructs the cluster autoscaler to prioritize utilization of unused reservations, and reduce preemption risk for Spot VMs.
    MaxNodeCount int
    Maximum number of nodes per zone in the NodePool. Must be >= min_node_count. Cannot be used with total limits.
    MinNodeCount int
    Minimum number of nodes per zone in the NodePool. Must be >=0 and <= max_node_count. Cannot be used with total limits.
    TotalMaxNodeCount int
    Total maximum number of nodes in the NodePool. Must be >= total_min_node_count. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.
    TotalMinNodeCount int
    Total minimum number of nodes in the NodePool. Must be >=0 and <= total_max_node_count. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.
    LocationPolicy string
    Location policy specifies the algorithm used when scaling up the node pool. Location policy is supported only in 1.24.1+ clusters.

    • "BALANCED" - a best-effort policy that aims to balance the sizes of available zones.
    • "ANY" - instructs the cluster autoscaler to prioritize utilization of unused reservations, and reduce preemption risk for Spot VMs.
    MaxNodeCount int
    Maximum number of nodes per zone in the NodePool. Must be >= min_node_count. Cannot be used with total limits.
    MinNodeCount int
    Minimum number of nodes per zone in the NodePool. Must be >=0 and <= max_node_count. Cannot be used with total limits.
    TotalMaxNodeCount int
    Total maximum number of nodes in the NodePool. Must be >= total_min_node_count. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.
    TotalMinNodeCount int
    Total minimum number of nodes in the NodePool. Must be >=0 and <= total_max_node_count. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.
    locationPolicy String
    Location policy specifies the algorithm used when scaling up the node pool. Location policy is supported only in 1.24.1+ clusters.

    • "BALANCED" - a best-effort policy that aims to balance the sizes of available zones.
    • "ANY" - instructs the cluster autoscaler to prioritize utilization of unused reservations, and reduce preemption risk for Spot VMs.
    maxNodeCount Integer
    Maximum number of nodes per zone in the NodePool. Must be >= min_node_count. Cannot be used with total limits.
    minNodeCount Integer
    Minimum number of nodes per zone in the NodePool. Must be >=0 and <= max_node_count. Cannot be used with total limits.
    totalMaxNodeCount Integer
    Total maximum number of nodes in the NodePool. Must be >= total_min_node_count. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.
    totalMinNodeCount Integer
    Total minimum number of nodes in the NodePool. Must be >=0 and <= total_max_node_count. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.
    locationPolicy string
    Location policy specifies the algorithm used when scaling up the node pool. Location policy is supported only in 1.24.1+ clusters.

    • "BALANCED" - a best-effort policy that aims to balance the sizes of available zones.
    • "ANY" - instructs the cluster autoscaler to prioritize utilization of unused reservations, and reduce preemption risk for Spot VMs.
    maxNodeCount number
    Maximum number of nodes per zone in the NodePool. Must be >= min_node_count. Cannot be used with total limits.
    minNodeCount number
    Minimum number of nodes per zone in the NodePool. Must be >=0 and <= max_node_count. Cannot be used with total limits.
    totalMaxNodeCount number
    Total maximum number of nodes in the NodePool. Must be >= total_min_node_count. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.
    totalMinNodeCount number
    Total minimum number of nodes in the NodePool. Must be >=0 and <= total_max_node_count. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.
    location_policy str
    Location policy specifies the algorithm used when scaling up the node pool. Location policy is supported only in 1.24.1+ clusters.

    • "BALANCED" - a best-effort policy that aims to balance the sizes of available zones.
    • "ANY" - instructs the cluster autoscaler to prioritize utilization of unused reservations, and reduce preemption risk for Spot VMs.
    max_node_count int
    Maximum number of nodes per zone in the NodePool. Must be >= min_node_count. Cannot be used with total limits.
    min_node_count int
    Minimum number of nodes per zone in the NodePool. Must be >=0 and <= max_node_count. Cannot be used with total limits.
    total_max_node_count int
    Total maximum number of nodes in the NodePool. Must be >= total_min_node_count. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.
    total_min_node_count int
    Total minimum number of nodes in the NodePool. Must be >=0 and <= total_max_node_count. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.
    locationPolicy String
    Location policy specifies the algorithm used when scaling up the node pool. Location policy is supported only in 1.24.1+ clusters.

    • "BALANCED" - a best-effort policy that aims to balance the sizes of available zones.
    • "ANY" - instructs the cluster autoscaler to prioritize utilization of unused reservations, and reduce preemption risk for Spot VMs.
    maxNodeCount Number
    Maximum number of nodes per zone in the NodePool. Must be >= min_node_count. Cannot be used with total limits.
    minNodeCount Number
    Minimum number of nodes per zone in the NodePool. Must be >=0 and <= max_node_count. Cannot be used with total limits.
    totalMaxNodeCount Number
    Total maximum number of nodes in the NodePool. Must be >= total_min_node_count. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.
    totalMinNodeCount Number
    Total minimum number of nodes in the NodePool. Must be >=0 and <= total_max_node_count. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.
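
    For example, the following TypeScript sketch uses per-zone limits with the BALANCED policy (the pool and cluster names are illustrative); to use total limits instead, swap in totalMinNodeCount/totalMaxNodeCount, since the two styles cannot be combined:

    import * as gcp from "@pulumi/gcp";

    const autoscaledPool = new gcp.container.NodePool("autoscaled-pool", {
        cluster: "my-gke-cluster",
        autoscaling: {
            // Per-zone limits; cannot be mixed with total limits.
            minNodeCount: 1,
            maxNodeCount: 5,
            locationPolicy: "BALANCED",
        },
    });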

    NodePoolManagement, NodePoolManagementArgs

    AutoRepair bool
    Whether the nodes will be automatically repaired. Enabled by default.
    AutoUpgrade bool
    Whether the nodes will be automatically upgraded. Enabled by default.
    AutoRepair bool
    Whether the nodes will be automatically repaired. Enabled by default.
    AutoUpgrade bool
    Whether the nodes will be automatically upgraded. Enabled by default.
    autoRepair Boolean
    Whether the nodes will be automatically repaired. Enabled by default.
    autoUpgrade Boolean
    Whether the nodes will be automatically upgraded. Enabled by default.
    autoRepair boolean
    Whether the nodes will be automatically repaired. Enabled by default.
    autoUpgrade boolean
    Whether the nodes will be automatically upgraded. Enabled by default.
    auto_repair bool
    Whether the nodes will be automatically repaired. Enabled by default.
    auto_upgrade bool
    Whether the nodes will be automatically upgraded. Enabled by default.
    autoRepair Boolean
    Whether the nodes will be automatically repaired. Enabled by default.
    autoUpgrade Boolean
    Whether the nodes will be automatically upgraded. Enabled by default.
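
    A short TypeScript sketch that sets both management options explicitly (both default to enabled, so this mainly documents intent; names are illustrative):

    import * as gcp from "@pulumi/gcp";

    const managedPool = new gcp.container.NodePool("managed-pool", {
        cluster: "my-gke-cluster",
        nodeCount: 2,
        management: {
            autoRepair: true,   // recreate unhealthy nodes automatically
            autoUpgrade: true,  // keep the node version in sync with the control plane
        },
    });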

    NodePoolNetworkConfig, NodePoolNetworkConfigArgs

    AdditionalNodeNetworkConfigs List<NodePoolNetworkConfigAdditionalNodeNetworkConfig>
    The list of additional node networks for this node pool. Each node network corresponds to an additional interface. Structure is documented below.
    AdditionalPodNetworkConfigs List<NodePoolNetworkConfigAdditionalPodNetworkConfig>
    The list of additional pod networks for this node pool. Each pod network corresponds to an additional alias IP range for the node. Structure is documented below.
    CreatePodRange bool
    Whether to create a new range for pod IPs in this node pool. Defaults are provided for pod_range and pod_ipv4_cidr_block if they are not specified.
    EnablePrivateNodes bool
    Whether nodes have internal IP addresses only.
    NetworkPerformanceConfig NodePoolNetworkConfigNetworkPerformanceConfig
    Network bandwidth tier configuration.
    PodCidrOverprovisionConfig NodePoolNetworkConfigPodCidrOverprovisionConfig
    Configuration for node-pool-level Pod CIDR overprovisioning. If not set, the cluster-level setting will be inherited.
    PodIpv4CidrBlock string
    The IP address range for pod IPs in this node pool. Only applicable if createPodRange is true. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) to pick a specific range to use.
    PodRange string
    The ID of the secondary range for pod IPs. If create_pod_range is true, this ID is used for the new range. If create_pod_range is false, uses an existing secondary range with this ID.
    AdditionalNodeNetworkConfigs []NodePoolNetworkConfigAdditionalNodeNetworkConfig
    The list of additional node networks for this node pool. Each node network corresponds to an additional interface. Structure is documented below.
    AdditionalPodNetworkConfigs []NodePoolNetworkConfigAdditionalPodNetworkConfig
    The list of additional pod networks for this node pool. Each pod network corresponds to an additional alias IP range for the node. Structure is documented below.
    CreatePodRange bool
    Whether to create a new range for pod IPs in this node pool. Defaults are provided for pod_range and pod_ipv4_cidr_block if they are not specified.
    EnablePrivateNodes bool
    Whether nodes have internal IP addresses only.
    NetworkPerformanceConfig NodePoolNetworkConfigNetworkPerformanceConfig
    Network bandwidth tier configuration.
    PodCidrOverprovisionConfig NodePoolNetworkConfigPodCidrOverprovisionConfig
    Configuration for node-pool-level Pod CIDR overprovisioning. If not set, the cluster-level setting will be inherited.
    PodIpv4CidrBlock string
    The IP address range for pod IPs in this node pool. Only applicable if createPodRange is true. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) to pick a specific range to use.
    PodRange string
    The ID of the secondary range for pod IPs. If create_pod_range is true, this ID is used for the new range. If create_pod_range is false, uses an existing secondary range with this ID.
    additionalNodeNetworkConfigs List<NodePoolNetworkConfigAdditionalNodeNetworkConfig>
    The list of additional node networks for this node pool. Each node network corresponds to an additional interface. Structure is documented below.
    additionalPodNetworkConfigs List<NodePoolNetworkConfigAdditionalPodNetworkConfig>
    The list of additional pod networks for this node pool. Each pod network corresponds to an additional alias IP range for the node. Structure is documented below.
    createPodRange Boolean
    Whether to create a new range for pod IPs in this node pool. Defaults are provided for pod_range and pod_ipv4_cidr_block if they are not specified.
    enablePrivateNodes Boolean
    Whether nodes have internal IP addresses only.
    networkPerformanceConfig NodePoolNetworkConfigNetworkPerformanceConfig
    Network bandwidth tier configuration.
    podCidrOverprovisionConfig NodePoolNetworkConfigPodCidrOverprovisionConfig
    Configuration for node-pool-level Pod CIDR overprovisioning. If not set, the cluster-level setting will be inherited.
    podIpv4CidrBlock String
    The IP address range for pod IPs in this node pool. Only applicable if createPodRange is true. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) to pick a specific range to use.
    podRange String
    The ID of the secondary range for pod IPs. If create_pod_range is true, this ID is used for the new range. If create_pod_range is false, uses an existing secondary range with this ID.
    additionalNodeNetworkConfigs NodePoolNetworkConfigAdditionalNodeNetworkConfig[]
    The list of additional node networks for this node pool. Each node network corresponds to an additional interface. Structure is documented below.
    additionalPodNetworkConfigs NodePoolNetworkConfigAdditionalPodNetworkConfig[]
    The list of additional pod networks for this node pool. Each pod network corresponds to an additional alias IP range for the node. Structure is documented below.
    createPodRange boolean
    Whether to create a new range for pod IPs in this node pool. Defaults are provided for pod_range and pod_ipv4_cidr_block if they are not specified.
    enablePrivateNodes boolean
    Whether nodes have internal IP addresses only.
    networkPerformanceConfig NodePoolNetworkConfigNetworkPerformanceConfig
    Network bandwidth tier configuration.
    podCidrOverprovisionConfig NodePoolNetworkConfigPodCidrOverprovisionConfig
    Configuration for node-pool-level Pod CIDR overprovisioning. If not set, the cluster-level setting will be inherited.
    podIpv4CidrBlock string
    The IP address range for pod IPs in this node pool. Only applicable if createPodRange is true. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) to pick a specific range to use.
    podRange string
    The ID of the secondary range for pod IPs. If create_pod_range is true, this ID is used for the new range. If create_pod_range is false, uses an existing secondary range with this ID.
    additional_node_network_configs Sequence[NodePoolNetworkConfigAdditionalNodeNetworkConfig]
    The list of additional node networks for this node pool. Each node network corresponds to an additional interface. Structure is documented below.
    additional_pod_network_configs Sequence[NodePoolNetworkConfigAdditionalPodNetworkConfig]
    The list of additional pod networks for this node pool. Each pod network corresponds to an additional alias IP range for the node. Structure is documented below.
    create_pod_range bool
    Whether to create a new range for pod IPs in this node pool. Defaults are provided for pod_range and pod_ipv4_cidr_block if they are not specified.
    enable_private_nodes bool
    Whether nodes have internal IP addresses only.
    network_performance_config NodePoolNetworkConfigNetworkPerformanceConfig
    Network bandwidth tier configuration.
    pod_cidr_overprovision_config NodePoolNetworkConfigPodCidrOverprovisionConfig
    Configuration for node-pool-level Pod CIDR overprovisioning. If not set, the cluster-level setting will be inherited.
    pod_ipv4_cidr_block str
    The IP address range for pod IPs in this node pool. Only applicable if createPodRange is true. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) to pick a specific range to use.
    pod_range str
    The ID of the secondary range for pod IPs. If create_pod_range is true, this ID is used for the new range. If create_pod_range is false, uses an existing secondary range with this ID.
    additionalNodeNetworkConfigs List<Property Map>
    The list of additional node networks for this node pool. Each node network corresponds to an additional interface. Structure is documented below.
    additionalPodNetworkConfigs List<Property Map>
    The list of additional pod networks for this node pool. Each pod network corresponds to an additional alias IP range for the node. Structure is documented below.
    createPodRange Boolean
    Whether to create a new range for pod IPs in this node pool. Defaults are provided for pod_range and pod_ipv4_cidr_block if they are not specified.
    enablePrivateNodes Boolean
    Whether nodes have internal IP addresses only.
    networkPerformanceConfig Property Map
    Network bandwidth tier configuration.
    podCidrOverprovisionConfig Property Map
    Configuration for node-pool-level Pod CIDR overprovisioning. If not set, the cluster-level setting will be inherited.
    podIpv4CidrBlock String
    The IP address range for pod IPs in this node pool. Only applicable if createPodRange is true. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) to pick a specific range to use.
    podRange String
    The ID of the secondary range for pod IPs. If create_pod_range is true, this ID is used for the new range. If create_pod_range is false, uses an existing secondary range with this ID.
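
    A TypeScript sketch that creates a dedicated pod range for the pool and keeps its nodes private (the range name and CIDR are illustrative):

    import * as gcp from "@pulumi/gcp";

    const networkedPool = new gcp.container.NodePool("networked-pool", {
        cluster: "my-gke-cluster",
        networkConfig: {
            createPodRange: true,
            podRange: "pods-range",            // ID for the newly created secondary range
            podIpv4CidrBlock: "10.96.0.0/14",  // or "/14" to let GKE pick the block
            enablePrivateNodes: true,
        },
    });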

    NodePoolNetworkConfigAdditionalNodeNetworkConfig, NodePoolNetworkConfigAdditionalNodeNetworkConfigArgs

    Network string
    Name of the VPC where the additional interface belongs.
    Subnetwork string
    Name of the subnetwork where the additional interface belongs.
    Network string
    Name of the VPC where the additional interface belongs.
    Subnetwork string
    Name of the subnetwork where the additional interface belongs.
    network String
    Name of the VPC where the additional interface belongs.
    subnetwork String
    Name of the subnetwork where the additional interface belongs.
    network string
    Name of the VPC where the additional interface belongs.
    subnetwork string
    Name of the subnetwork where the additional interface belongs.
    network str
    Name of the VPC where the additional interface belongs.
    subnetwork str
    Name of the subnetwork where the additional interface belongs.
    network String
    Name of the VPC where the additional interface belongs.
    subnetwork String
    Name of the subnetwork where the additional interface belongs.

    NodePoolNetworkConfigAdditionalPodNetworkConfig, NodePoolNetworkConfigAdditionalPodNetworkConfigArgs

    MaxPodsPerNode int
    The maximum number of pods per node which use this pod network.
    SecondaryPodRange string
    The name of the secondary range on the subnet which provides IP addresses for this pod range.
    Subnetwork string
    Name of the subnetwork where the additional pod network belongs.
    MaxPodsPerNode int
    The maximum number of pods per node which use this pod network.
    SecondaryPodRange string
    The name of the secondary range on the subnet which provides IP addresses for this pod range.
    Subnetwork string
    Name of the subnetwork where the additional pod network belongs.
    maxPodsPerNode Integer
    The maximum number of pods per node which use this pod network.
    secondaryPodRange String
    The name of the secondary range on the subnet which provides IP addresses for this pod range.
    subnetwork String
    Name of the subnetwork where the additional pod network belongs.
    maxPodsPerNode number
    The maximum number of pods per node which use this pod network.
    secondaryPodRange string
    The name of the secondary range on the subnet which provides IP addresses for this pod range.
    subnetwork string
    Name of the subnetwork where the additional pod network belongs.
    max_pods_per_node int
    The maximum number of pods per node which use this pod network.
    secondary_pod_range str
    The name of the secondary range on the subnet which provides IP addresses for this pod range.
    subnetwork str
    Name of the subnetwork where the additional pod network belongs.
    maxPodsPerNode Number
    The maximum number of pods per node which use this pod network.
    secondaryPodRange String
    The name of the secondary range on the subnet which provides IP addresses for this pod range.
    subnetwork String
    Name of the subnetwork where the additional pod network belongs.
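
    Both additional network lists nest under networkConfig; the following TypeScript sketch assumes the VPC, subnetwork, and secondary range (extra-vpc, extra-subnet, extra-pods) already exist:

    import * as gcp from "@pulumi/gcp";

    const multiNetPool = new gcp.container.NodePool("multi-net-pool", {
        cluster: "my-gke-cluster",
        networkConfig: {
            additionalNodeNetworkConfigs: [{
                network: "extra-vpc",        // adds a second interface to each node
                subnetwork: "extra-subnet",
            }],
            additionalPodNetworkConfigs: [{
                subnetwork: "extra-subnet",
                secondaryPodRange: "extra-pods", // existing secondary range on the subnet
                maxPodsPerNode: 32,
            }],
        },
    });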

    NodePoolNetworkConfigNetworkPerformanceConfig, NodePoolNetworkConfigNetworkPerformanceConfigArgs

    TotalEgressBandwidthTier string
    Specifies the total network bandwidth tier for the NodePool.
    TotalEgressBandwidthTier string
    Specifies the total network bandwidth tier for the NodePool.
    totalEgressBandwidthTier String
    Specifies the total network bandwidth tier for the NodePool.
    totalEgressBandwidthTier string
    Specifies the total network bandwidth tier for the NodePool.
    total_egress_bandwidth_tier str
    Specifies the total network bandwidth tier for the NodePool.
    totalEgressBandwidthTier String
    Specifies the total network bandwidth tier for the NodePool.
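
    A TypeScript sketch selecting the higher egress tier; TIER_1 is the value GKE accepts for high-bandwidth configurations, but verify it is supported by your machine type:

    import * as gcp from "@pulumi/gcp";

    const fastPool = new gcp.container.NodePool("fast-pool", {
        cluster: "my-gke-cluster",
        networkConfig: {
            networkPerformanceConfig: {
                totalEgressBandwidthTier: "TIER_1",
            },
        },
    });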

    NodePoolNetworkConfigPodCidrOverprovisionConfig, NodePoolNetworkConfigPodCidrOverprovisionConfigArgs

    disabled Boolean
    Whether Pod CIDR overprovisioning is disabled for this node pool.
    disabled boolean
    Whether Pod CIDR overprovisioning is disabled for this node pool.
    disabled Boolean
    Whether Pod CIDR overprovisioning is disabled for this node pool.
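
    A TypeScript sketch disabling overprovisioning for this pool, overriding any cluster-level setting (names are illustrative):

    import * as gcp from "@pulumi/gcp";

    const leanPool = new gcp.container.NodePool("lean-pool", {
        cluster: "my-gke-cluster",
        networkConfig: {
            podCidrOverprovisionConfig: {
                disabled: true,
            },
        },
    });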

    NodePoolNodeConfig, NodePoolNodeConfigArgs

    AdvancedMachineFeatures NodePoolNodeConfigAdvancedMachineFeatures
    Specifies options for controlling advanced machine features.
    BootDiskKmsKey string
    The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool.
    ConfidentialNodes NodePoolNodeConfigConfidentialNodes
    Configuration for Confidential Nodes feature. Structure is documented below.
    DiskSizeGb int
    Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB.
    DiskType string
    Type of the disk attached to each node, such as pd-standard, pd-balanced, or pd-ssd.
    EffectiveTaints List<NodePoolNodeConfigEffectiveTaint>
    List of Kubernetes taints applied to each node.
    EnableConfidentialStorage bool
    If enabled, boot disks are configured with confidential mode.
    EphemeralStorageConfig NodePoolNodeConfigEphemeralStorageConfig
    Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk.
    EphemeralStorageLocalSsdConfig NodePoolNodeConfigEphemeralStorageLocalSsdConfig
    Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk.
    FastSocket NodePoolNodeConfigFastSocket
    Enable or disable NCCL Fast Socket in the node pool.
    GcfsConfig NodePoolNodeConfigGcfsConfig
    GCFS configuration for this node.
    GuestAccelerators List<NodePoolNodeConfigGuestAccelerator>
    List of the type and count of accelerator cards attached to the instance.
    Gvnic NodePoolNodeConfigGvnic
    Enable or disable gvnic in the node pool.
    HostMaintenancePolicy NodePoolNodeConfigHostMaintenancePolicy
    The maintenance policy for the hosts on which the GKE VMs run.
    ImageType string
    The image type to use for this node. Note that for a given image type, the latest version of it will be used.
    KubeletConfig NodePoolNodeConfigKubeletConfig
    Node kubelet configs.
    Labels Dictionary<string, string>
    The map of Kubernetes labels (key/value pairs) to be applied to each node. These will be added in addition to any default label(s) that Kubernetes may apply to the node.
    LinuxNodeConfig NodePoolNodeConfigLinuxNodeConfig
    Parameters that can be configured on Linux nodes.
    LocalNvmeSsdBlockConfig NodePoolNodeConfigLocalNvmeSsdBlockConfig
    Parameters for raw-block local NVMe SSDs.
    LocalSsdCount int
    The number of local SSD disks to be attached to the node.
    LoggingVariant string
    Type of logging agent that is used as the default value for node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT.
    MachineType string
    The name of a Google Compute Engine machine type.
    Metadata Dictionary<string, string>
    The metadata key/value pairs assigned to instances in the cluster.
    MinCpuPlatform string
    Minimum CPU platform to be used by this instance. The instance may be scheduled on the specified or newer CPU platform.
    NodeGroup string
    Setting this field will assign instances of this pool to run on the specified node group. This is useful for running workloads on sole tenant nodes.
    OauthScopes List<string>
    The set of Google API scopes to be made available on all of the node VMs.
    Preemptible bool
    Whether the nodes are created as preemptible VM instances.
    ReservationAffinity NodePoolNodeConfigReservationAffinity
    The reservation affinity configuration for the node pool.
    ResourceLabels Dictionary<string, string>
    The GCE resource labels (a map of key/value pairs) to be applied to the node pool.
    ResourceManagerTags Dictionary<string, object>
    A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/{tag_value_id} (e.g. tagValues/456). The field is ignored (both PUT & PATCH) when empty.
    SandboxConfig NodePoolNodeConfigSandboxConfig
    Sandbox configuration for this node.
    ServiceAccount string
    The Google Cloud Platform Service Account to be used by the node VMs.
    ShieldedInstanceConfig NodePoolNodeConfigShieldedInstanceConfig
    Shielded Instance options.
    SoleTenantConfig NodePoolNodeConfigSoleTenantConfig
    Node affinity options for sole tenant node pools.
    Spot bool
    Whether the nodes are created as spot VM instances.
    Tags List<string>
    The list of instance tags applied to all nodes.
    Taints List<NodePoolNodeConfigTaint>
    List of Kubernetes taints to be applied to each node.
    WorkloadMetadataConfig NodePoolNodeConfigWorkloadMetadataConfig
    The workload metadata configuration for this node.
    AdvancedMachineFeatures NodePoolNodeConfigAdvancedMachineFeatures
    Specifies options for controlling advanced machine features.
    BootDiskKmsKey string
    The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool.
    ConfidentialNodes NodePoolNodeConfigConfidentialNodes
    Configuration for Confidential Nodes feature. Structure is documented below.
    DiskSizeGb int
    Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB.
    DiskType string
    Type of the disk attached to each node, such as pd-standard, pd-balanced, or pd-ssd.
    EffectiveTaints []NodePoolNodeConfigEffectiveTaint
    List of Kubernetes taints applied to each node.
    EnableConfidentialStorage bool
    If enabled, boot disks are configured with confidential mode.
    EphemeralStorageConfig NodePoolNodeConfigEphemeralStorageConfig
    Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk.
    EphemeralStorageLocalSsdConfig NodePoolNodeConfigEphemeralStorageLocalSsdConfig
    Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk.
    FastSocket NodePoolNodeConfigFastSocket
    Enable or disable NCCL Fast Socket in the node pool.
    GcfsConfig NodePoolNodeConfigGcfsConfig
    GCFS configuration for this node.
    GuestAccelerators []NodePoolNodeConfigGuestAccelerator
    List of the type and count of accelerator cards attached to the instance.
    Gvnic NodePoolNodeConfigGvnic
    Enable or disable gvnic in the node pool.
    HostMaintenancePolicy NodePoolNodeConfigHostMaintenancePolicy
    The maintenance policy for the hosts on which the GKE VMs run.
    ImageType string
    The image type to use for this node. Note that for a given image type, the latest version of it will be used.
    KubeletConfig NodePoolNodeConfigKubeletConfig
    Node kubelet configs.
    Labels map[string]string
    The map of Kubernetes labels (key/value pairs) to be applied to each node. These will be added in addition to any default label(s) that Kubernetes may apply to the node.
    LinuxNodeConfig NodePoolNodeConfigLinuxNodeConfig
    Parameters that can be configured on Linux nodes.
    LocalNvmeSsdBlockConfig NodePoolNodeConfigLocalNvmeSsdBlockConfig
    Parameters for raw-block local NVMe SSDs.
    LocalSsdCount int
    The number of local SSD disks to be attached to the node.
    LoggingVariant string
    Type of logging agent that is used as the default value for node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT.
    MachineType string
    The name of a Google Compute Engine machine type.
    Metadata map[string]string
    The metadata key/value pairs assigned to instances in the cluster.
    MinCpuPlatform string
    Minimum CPU platform to be used by this instance. The instance may be scheduled on the specified or newer CPU platform.
    NodeGroup string
    Setting this field will assign instances of this pool to run on the specified node group. This is useful for running workloads on sole tenant nodes.
    OauthScopes []string
    The set of Google API scopes to be made available on all of the node VMs.
    Preemptible bool
    Whether the nodes are created as preemptible VM instances.
    ReservationAffinity NodePoolNodeConfigReservationAffinity
    The reservation affinity configuration for the node pool.
    ResourceLabels map[string]string
    The GCE resource labels (a map of key/value pairs) to be applied to the node pool.
    ResourceManagerTags map[string]interface{}
    A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/{tag_value_id} (e.g. tagValues/456). The field is ignored (both PUT & PATCH) when empty.
    SandboxConfig NodePoolNodeConfigSandboxConfig
    Sandbox configuration for this node.
    ServiceAccount string
    The Google Cloud Platform Service Account to be used by the node VMs.
    ShieldedInstanceConfig NodePoolNodeConfigShieldedInstanceConfig
    Shielded Instance options.
    SoleTenantConfig NodePoolNodeConfigSoleTenantConfig
    Node affinity options for sole tenant node pools.
    Spot bool
    Whether the nodes are created as spot VM instances.
    Tags []string
    The list of instance tags applied to all nodes.
    Taints []NodePoolNodeConfigTaint
    List of Kubernetes taints to be applied to each node.
    WorkloadMetadataConfig NodePoolNodeConfigWorkloadMetadataConfig
    The workload metadata configuration for this node.
    advancedMachineFeatures NodePoolNodeConfigAdvancedMachineFeatures
    Specifies options for controlling advanced machine features.
    bootDiskKmsKey String
    The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool.
    confidentialNodes NodePoolNodeConfigConfidentialNodes
    Configuration for Confidential Nodes feature. Structure is documented below.
    diskSizeGb Integer
    Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB.
    diskType String
    Type of the disk attached to each node, such as pd-standard, pd-balanced, or pd-ssd.
    effectiveTaints List<NodePoolNodeConfigEffectiveTaint>
    List of Kubernetes taints applied to each node.
    enableConfidentialStorage Boolean
    If enabled, boot disks are configured with confidential mode.
    ephemeralStorageConfig NodePoolNodeConfigEphemeralStorageConfig
    Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk.
    ephemeralStorageLocalSsdConfig NodePoolNodeConfigEphemeralStorageLocalSsdConfig
    Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk.
    fastSocket NodePoolNodeConfigFastSocket
    Enable or disable NCCL Fast Socket in the node pool.
    gcfsConfig NodePoolNodeConfigGcfsConfig
    GCFS configuration for this node.
    guestAccelerators List<NodePoolNodeConfigGuestAccelerator>
    List of the type and count of accelerator cards attached to the instance.
    gvnic NodePoolNodeConfigGvnic
    Enable or disable gvnic in the node pool.
    hostMaintenancePolicy NodePoolNodeConfigHostMaintenancePolicy
    The maintenance policy for the hosts on which the GKE VMs run.
    imageType String
    The image type to use for this node. Note that for a given image type, the latest version of it will be used.
    kubeletConfig NodePoolNodeConfigKubeletConfig
    Node kubelet configs.
    labels Map<String,String>
    The map of Kubernetes labels (key/value pairs) to be applied to each node. These will be added in addition to any default label(s) that Kubernetes may apply to the node.
    linuxNodeConfig NodePoolNodeConfigLinuxNodeConfig
    Parameters that can be configured on Linux nodes.
    localNvmeSsdBlockConfig NodePoolNodeConfigLocalNvmeSsdBlockConfig
    Parameters for raw-block local NVMe SSDs.
    localSsdCount Integer
    The number of local SSD disks to be attached to the node.
    loggingVariant String
    Type of logging agent that is used as the default value for node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT.
    machineType String
    The name of a Google Compute Engine machine type.
    metadata Map<String,String>
    The metadata key/value pairs assigned to instances in the cluster.
    minCpuPlatform String
    Minimum CPU platform to be used by this instance. The instance may be scheduled on the specified or newer CPU platform.
    nodeGroup String
    Setting this field will assign instances of this pool to run on the specified node group. This is useful for running workloads on sole tenant nodes.
    oauthScopes List<String>
    The set of Google API scopes to be made available on all of the node VMs.
    preemptible Boolean
    Whether the nodes are created as preemptible VM instances.
    reservationAffinity NodePoolNodeConfigReservationAffinity
    The reservation affinity configuration for the node pool.
    resourceLabels Map<String,String>
    The GCE resource labels (a map of key/value pairs) to be applied to the node pool.
    resourceManagerTags Map<String,Object>
    A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/{tag_value_id} (e.g. tagValues/456). The field is ignored (both PUT & PATCH) when empty.
    sandboxConfig NodePoolNodeConfigSandboxConfig
    Sandbox configuration for this node.
    serviceAccount String
    The Google Cloud Platform Service Account to be used by the node VMs.
    shieldedInstanceConfig NodePoolNodeConfigShieldedInstanceConfig
    Shielded Instance options.
    soleTenantConfig NodePoolNodeConfigSoleTenantConfig
    Node affinity options for sole tenant node pools.
    spot Boolean
    Whether the nodes are created as spot VM instances.
    tags List<String>
    The list of instance tags applied to all nodes.
    taints List<NodePoolNodeConfigTaint>
    List of Kubernetes taints to be applied to each node.
    workloadMetadataConfig NodePoolNodeConfigWorkloadMetadataConfig
    The workload metadata configuration for this node.
    advancedMachineFeatures NodePoolNodeConfigAdvancedMachineFeatures
    Specifies options for controlling advanced machine features.
    bootDiskKmsKey string
    The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool.
    confidentialNodes NodePoolNodeConfigConfidentialNodes
    Configuration for Confidential Nodes feature. Structure is documented below.
    diskSizeGb number
    Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB.
    diskType string
    Type of the disk attached to each node, such as pd-standard, pd-balanced, or pd-ssd.
    effectiveTaints NodePoolNodeConfigEffectiveTaint[]
    List of Kubernetes taints applied to each node.
    enableConfidentialStorage boolean
    If enabled, boot disks are configured with confidential mode.
    ephemeralStorageConfig NodePoolNodeConfigEphemeralStorageConfig
    Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk.
    ephemeralStorageLocalSsdConfig NodePoolNodeConfigEphemeralStorageLocalSsdConfig
    Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk.
    fastSocket NodePoolNodeConfigFastSocket
    Enable or disable NCCL Fast Socket in the node pool.
    gcfsConfig NodePoolNodeConfigGcfsConfig
    GCFS configuration for this node.
    guestAccelerators NodePoolNodeConfigGuestAccelerator[]
    List of the type and count of accelerator cards attached to the instance.
    gvnic NodePoolNodeConfigGvnic
    Enable or disable gvnic in the node pool.
    hostMaintenancePolicy NodePoolNodeConfigHostMaintenancePolicy
    The maintenance policy for the hosts on which the GKE VMs run.
    imageType string
    The image type to use for this node. Note that for a given image type, the latest version of it will be used.
    kubeletConfig NodePoolNodeConfigKubeletConfig
    Node kubelet configs.
    labels {[key: string]: string}
    The map of Kubernetes labels (key/value pairs) to be applied to each node. These will be added in addition to any default label(s) that Kubernetes may apply to the node.
    linuxNodeConfig NodePoolNodeConfigLinuxNodeConfig
    Parameters that can be configured on Linux nodes.
    localNvmeSsdBlockConfig NodePoolNodeConfigLocalNvmeSsdBlockConfig
    Parameters for raw-block local NVMe SSDs.
    localSsdCount number
    The number of local SSD disks to be attached to the node.
    loggingVariant string
    Type of logging agent that is used as the default value for node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT.
    machineType string
    The name of a Google Compute Engine machine type.
    metadata {[key: string]: string}
    The metadata key/value pairs assigned to instances in the cluster.
    minCpuPlatform string
    Minimum CPU platform to be used by this instance. The instance may be scheduled on the specified or newer CPU platform.
    nodeGroup string
    Setting this field will assign instances of this pool to run on the specified node group. This is useful for running workloads on sole tenant nodes.
    oauthScopes string[]
    The set of Google API scopes to be made available on all of the node VMs.
    preemptible boolean
    Whether the nodes are created as preemptible VM instances.
    reservationAffinity NodePoolNodeConfigReservationAffinity
    The reservation affinity configuration for the node pool.
    resourceLabels {[key: string]: string}
    The GCE resource labels (a map of key/value pairs) to be applied to the node pool.
    resourceManagerTags {[key: string]: any}
    A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/{tag_value_id} (e.g. tagValues/456). The field is ignored (both PUT & PATCH) when empty.
    sandboxConfig NodePoolNodeConfigSandboxConfig
    Sandbox configuration for this node.
    serviceAccount string
    The Google Cloud Platform Service Account to be used by the node VMs.
    shieldedInstanceConfig NodePoolNodeConfigShieldedInstanceConfig
    Shielded Instance options.
    soleTenantConfig NodePoolNodeConfigSoleTenantConfig
    Node affinity options for sole tenant node pools.
    spot boolean
    Whether the nodes are created as spot VM instances.
    tags string[]
    The list of instance tags applied to all nodes.
    taints NodePoolNodeConfigTaint[]
    List of Kubernetes taints to be applied to each node.
    workloadMetadataConfig NodePoolNodeConfigWorkloadMetadataConfig
    The workload metadata configuration for this node.
    advanced_machine_features NodePoolNodeConfigAdvancedMachineFeatures
    Specifies options for controlling advanced machine features.
    boot_disk_kms_key str
    The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool.
    confidential_nodes NodePoolNodeConfigConfidentialNodes
    Configuration for Confidential Nodes feature. Structure is documented below.
    disk_size_gb int
    Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB.
    disk_type str
    Type of the disk attached to each node, such as pd-standard, pd-balanced, or pd-ssd.
    effective_taints Sequence[NodePoolNodeConfigEffectiveTaint]
    List of Kubernetes taints applied to each node.
    enable_confidential_storage bool
    If enabled, boot disks are configured with confidential mode.
    ephemeral_storage_config NodePoolNodeConfigEphemeralStorageConfig
    Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk.
    ephemeral_storage_local_ssd_config NodePoolNodeConfigEphemeralStorageLocalSsdConfig
    Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk.
    fast_socket NodePoolNodeConfigFastSocket
    Enable or disable NCCL Fast Socket in the node pool.
    gcfs_config NodePoolNodeConfigGcfsConfig
    GCFS configuration for this node.
    guest_accelerators Sequence[NodePoolNodeConfigGuestAccelerator]
    List of the type and count of accelerator cards attached to the instance.
    gvnic NodePoolNodeConfigGvnic
    Enable or disable gvnic in the node pool.
    host_maintenance_policy NodePoolNodeConfigHostMaintenancePolicy
    The maintenance policy for the hosts on which the GKE VMs run.
    image_type str
    The image type to use for this node. Note that for a given image type, the latest version of it will be used.
    kubelet_config NodePoolNodeConfigKubeletConfig
    Node kubelet configs.
    labels Mapping[str, str]
    The map of Kubernetes labels (key/value pairs) to be applied to each node. These will be added in addition to any default label(s) that Kubernetes may apply to the node.
    linux_node_config NodePoolNodeConfigLinuxNodeConfig
    Parameters that can be configured on Linux nodes.
    local_nvme_ssd_block_config NodePoolNodeConfigLocalNvmeSsdBlockConfig
    Parameters for raw-block local NVMe SSDs.
    local_ssd_count int
    The number of local SSD disks to be attached to the node.
    logging_variant str
    Type of logging agent that is used as the default value for node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT.
    machine_type str
    The name of a Google Compute Engine machine type.
    metadata Mapping[str, str]
    The metadata key/value pairs assigned to instances in the cluster.
    min_cpu_platform str
    Minimum CPU platform to be used by this instance. The instance may be scheduled on the specified or newer CPU platform.
    node_group str
    Setting this field will assign instances of this pool to run on the specified node group. This is useful for running workloads on sole tenant nodes.
    oauth_scopes Sequence[str]
    The set of Google API scopes to be made available on all of the node VMs.
    preemptible bool
    Whether the nodes are created as preemptible VM instances.
    reservation_affinity NodePoolNodeConfigReservationAffinity
    The reservation affinity configuration for the node pool.
    resource_labels Mapping[str, str]
    The GCE resource labels (a map of key/value pairs) to be applied to the node pool.
    resource_manager_tags Mapping[str, Any]
    A map of resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values must be in the format tagValues/{tag_value_id}. The field is ignored (both PUT & PATCH) when empty.
    sandbox_config NodePoolNodeConfigSandboxConfig
    Sandbox configuration for this node.
    service_account str
    The Google Cloud Platform Service Account to be used by the node VMs.
    shielded_instance_config NodePoolNodeConfigShieldedInstanceConfig
    Shielded Instance options.
    sole_tenant_config NodePoolNodeConfigSoleTenantConfig
    Node affinity options for sole tenant node pools.
    spot bool
    Whether the nodes are created as spot VM instances.
    tags Sequence[str]
    The list of instance tags applied to all nodes.
    taints Sequence[NodePoolNodeConfigTaint]
    List of Kubernetes taints to be applied to each node.
    workload_metadata_config NodePoolNodeConfigWorkloadMetadataConfig
    The workload metadata configuration for this node.
    advancedMachineFeatures Property Map
    Specifies options for controlling advanced machine features.
    bootDiskKmsKey String
    The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool.
    confidentialNodes Property Map
    Configuration for Confidential Nodes feature. Structure is documented below.
    diskSizeGb Number
    Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB.
    diskType String
    Type of the disk attached to each node, such as pd-standard, pd-balanced, or pd-ssd.
    effectiveTaints List<Property Map>
    List of Kubernetes taints applied to each node.
    enableConfidentialStorage Boolean
    If enabled, boot disks are configured with confidential mode.
    ephemeralStorageConfig Property Map
    Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk.
    ephemeralStorageLocalSsdConfig Property Map
    Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk.
    fastSocket Property Map
    Enable or disable NCCL Fast Socket in the node pool.
    gcfsConfig Property Map
    GCFS configuration for this node.
    guestAccelerators List<Property Map>
    List of the type and count of accelerator cards attached to the instance.
    gvnic Property Map
    Enable or disable gvnic in the node pool.
    hostMaintenancePolicy Property Map
    The maintenance policy for the hosts on which the GKE VMs run.
    imageType String
    The image type to use for this node. Note that for a given image type, the latest version of it will be used.
    kubeletConfig Property Map
    Node kubelet configs.
    labels Map<String>
    The map of Kubernetes labels (key/value pairs) to be applied to each node. These will be added in addition to any default label(s) that Kubernetes may apply to the node.
    linuxNodeConfig Property Map
    Parameters that can be configured on Linux nodes.
    localNvmeSsdBlockConfig Property Map
    Parameters for raw-block local NVMe SSDs.
    localSsdCount Number
    The number of local SSD disks to be attached to the node.
    loggingVariant String
    Type of logging agent that is used as the default value for node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT.
    machineType String
    The name of a Google Compute Engine machine type.
    metadata Map<String>
    The metadata key/value pairs assigned to instances in the cluster.
    minCpuPlatform String
    Minimum CPU platform to be used by this instance. The instance may be scheduled on the specified or newer CPU platform.
    nodeGroup String
    Setting this field will assign instances of this pool to run on the specified node group. This is useful for running workloads on sole tenant nodes.
    oauthScopes List<String>
    The set of Google API scopes to be made available on all of the node VMs.
    preemptible Boolean
    Whether the nodes are created as preemptible VM instances.
    reservationAffinity Property Map
    The reservation affinity configuration for the node pool.
    resourceLabels Map<String>
    The GCE resource labels (a map of key/value pairs) to be applied to the node pool.
    resourceManagerTags Map<Any>
    A map of resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values must be in the format tagValues/{tag_value_id}. The field is ignored (both PUT & PATCH) when empty.
    sandboxConfig Property Map
    Sandbox configuration for this node.
    serviceAccount String
    The Google Cloud Platform Service Account to be used by the node VMs.
    shieldedInstanceConfig Property Map
    Shielded Instance options.
    soleTenantConfig Property Map
    Node affinity options for sole tenant node pools.
    spot Boolean
    Whether the nodes are created as spot VM instances.
    tags List<String>
    The list of instance tags applied to all nodes.
    taints List<Property Map>
    List of Kubernetes taints to be applied to each node.
    workloadMetadataConfig Property Map
    The workload metadata configuration for this node.
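
    Many of these fields are set together under nodeConfig. A minimal TypeScript sketch combining a few of them (cluster name, pool name, and all values below are placeholders, not prescriptive defaults):

    import * as gcp from "@pulumi/gcp";

    // Hypothetical pool combining several nodeConfig fields from the list above.
    const labeledPool = new gcp.container.NodePool("labeled-pool", {
        cluster: "my-gke-cluster",   // placeholder cluster name
        location: "us-central1",
        nodeCount: 1,
        nodeConfig: {
            machineType: "e2-standard-4",
            diskSizeGb: 100,                  // must be at least 10 GB
            diskType: "pd-balanced",
            labels: { team: "platform" },     // Kubernetes labels applied to each node
            spot: true,                       // use spot VMs instead of preemptible VMs
            oauthScopes: ["https://www.googleapis.com/auth/cloud-platform"],
        },
    });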

    NodePoolNodeConfigAdvancedMachineFeatures, NodePoolNodeConfigAdvancedMachineFeaturesArgs

    ThreadsPerCore int
    The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed.
    ThreadsPerCore int
    The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed.
    threadsPerCore Integer
    The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed.
    threadsPerCore number
    The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed.
    threads_per_core int
    The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed.
    threadsPerCore Number
    The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed.
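
    For example, simultaneous multithreading can be turned off for a pool by setting threadsPerCore to 1. A sketch in TypeScript (names and machine type are placeholders; SMT control requires a machine family that supports it):

    import * as gcp from "@pulumi/gcp";

    // Hypothetical pool with SMT disabled on each node.
    const noSmtPool = new gcp.container.NodePool("no-smt-pool", {
        cluster: "my-gke-cluster",   // placeholder cluster name
        location: "us-central1",
        nodeCount: 1,
        nodeConfig: {
            machineType: "c2-standard-8",
            advancedMachineFeatures: {
                threadsPerCore: 1,   // 1 disables simultaneous multithreading
            },
        },
    });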

    NodePoolNodeConfigConfidentialNodes, NodePoolNodeConfigConfidentialNodesArgs

    Enabled bool
    Enable Confidential GKE Nodes for this cluster, to enforce encryption of data in-use.
    Enabled bool
    Enable Confidential GKE Nodes for this cluster, to enforce encryption of data in-use.
    enabled Boolean
    Enable Confidential GKE Nodes for this cluster, to enforce encryption of data in-use.
    enabled boolean
    Enable Confidential GKE Nodes for this cluster, to enforce encryption of data in-use.
    enabled bool
    Enable Confidential GKE Nodes for this cluster, to enforce encryption of data in-use.
    enabled Boolean
    Enable Confidential GKE Nodes for this cluster, to enforce encryption of data in-use.

    NodePoolNodeConfigEffectiveTaint, NodePoolNodeConfigEffectiveTaintArgs

    Effect string
    Effect for taint.
    Key string
    Key for taint.
    Value string
    Value for taint.
    Effect string
    Effect for taint.
    Key string
    Key for taint.
    Value string
    Value for taint.
    effect String
    Effect for taint.
    key String
    Key for taint.
    value String
    Value for taint.
    effect string
    Effect for taint.
    key string
    Key for taint.
    value string
    Value for taint.
    effect str
    Effect for taint.
    key str
    Key for taint.
    value str
    Value for taint.
    effect String
    Effect for taint.
    key String
    Key for taint.
    value String
    Value for taint.

    NodePoolNodeConfigEphemeralStorageConfig, NodePoolNodeConfigEphemeralStorageConfigArgs

    LocalSsdCount int
    Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD must be 375 or 3000 GB in size, and all local SSDs must share the same size.
    LocalSsdCount int
    Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD must be 375 or 3000 GB in size, and all local SSDs must share the same size.
    localSsdCount Integer
    Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD must be 375 or 3000 GB in size, and all local SSDs must share the same size.
    localSsdCount number
    Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD must be 375 or 3000 GB in size, and all local SSDs must share the same size.
    local_ssd_count int
    Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD must be 375 or 3000 GB in size, and all local SSDs must share the same size.
    localSsdCount Number
    Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD must be 375 or 3000 GB in size, and all local SSDs must share the same size.

    NodePoolNodeConfigEphemeralStorageLocalSsdConfig, NodePoolNodeConfigEphemeralStorageLocalSsdConfigArgs

    LocalSsdCount int
    Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD must be 375 or 3000 GB in size, and all local SSDs must share the same size.
    LocalSsdCount int
    Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD must be 375 or 3000 GB in size, and all local SSDs must share the same size.
    localSsdCount Integer
    Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD must be 375 or 3000 GB in size, and all local SSDs must share the same size.
    localSsdCount number
    Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD must be 375 or 3000 GB in size, and all local SSDs must share the same size.
    local_ssd_count int
    Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD must be 375 or 3000 GB in size, and all local SSDs must share the same size.
    localSsdCount Number
    Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD must be 375 or 3000 GB in size, and all local SSDs must share the same size.

    NodePoolNodeConfigFastSocket, NodePoolNodeConfigFastSocketArgs

    Enabled bool
    Whether or not NCCL Fast Socket is enabled.
    Enabled bool
    Whether or not NCCL Fast Socket is enabled.
    enabled Boolean
    Whether or not NCCL Fast Socket is enabled.
    enabled boolean
    Whether or not NCCL Fast Socket is enabled.
    enabled bool
    Whether or not NCCL Fast Socket is enabled.
    enabled Boolean
    Whether or not NCCL Fast Socket is enabled.
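
    NCCL Fast Socket is normally enabled together with gVNIC. A sketch, assuming a pool whose machine type supports gVNIC (all names are placeholders):

    import * as gcp from "@pulumi/gcp";

    // Hypothetical pool enabling gVNIC and NCCL Fast Socket together.
    const fastSocketPool = new gcp.container.NodePool("fast-socket-pool", {
        cluster: "my-gke-cluster",   // placeholder cluster name
        location: "us-central1",
        nodeCount: 1,
        nodeConfig: {
            machineType: "n1-standard-8",
            gvnic: { enabled: true },        // Fast Socket expects gVNIC to be on
            fastSocket: { enabled: true },   // enable NCCL Fast Socket
        },
    });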

    NodePoolNodeConfigGcfsConfig, NodePoolNodeConfigGcfsConfigArgs

    Enabled bool
    Whether or not GCFS is enabled.
    Enabled bool
    Whether or not GCFS is enabled.
    enabled Boolean
    Whether or not GCFS is enabled.
    enabled boolean
    Whether or not GCFS is enabled.
    enabled bool
    Whether or not GCFS is enabled.
    enabled Boolean
    Whether or not GCFS is enabled.

    NodePoolNodeConfigGuestAccelerator, NodePoolNodeConfigGuestAcceleratorArgs

    Count int
    The number of the accelerator cards exposed to an instance.
    Type string
    The accelerator type resource to expose to this instance, e.g. nvidia-tesla-k80.
    GpuDriverInstallationConfig NodePoolNodeConfigGuestAcceleratorGpuDriverInstallationConfig
    Configuration for auto installation of GPU driver.
    GpuPartitionSize string
    Size of partitions to create on the GPU. Valid values are described in the NVIDIA MIG user guide (https://docs.nvidia.com/datacenter/tesla/mig-user-guide/#partitioning)
    GpuSharingConfig NodePoolNodeConfigGuestAcceleratorGpuSharingConfig
    Configuration for GPU sharing.
    Count int
    The number of the accelerator cards exposed to an instance.
    Type string
    The accelerator type resource to expose to this instance, e.g. nvidia-tesla-k80.
    GpuDriverInstallationConfig NodePoolNodeConfigGuestAcceleratorGpuDriverInstallationConfig
    Configuration for auto installation of GPU driver.
    GpuPartitionSize string
    Size of partitions to create on the GPU. Valid values are described in the NVIDIA MIG user guide (https://docs.nvidia.com/datacenter/tesla/mig-user-guide/#partitioning)
    GpuSharingConfig NodePoolNodeConfigGuestAcceleratorGpuSharingConfig
    Configuration for GPU sharing.
    count Integer
    The number of the accelerator cards exposed to an instance.
    type String
    The accelerator type resource to expose to this instance, e.g. nvidia-tesla-k80.
    gpuDriverInstallationConfig NodePoolNodeConfigGuestAcceleratorGpuDriverInstallationConfig
    Configuration for auto installation of GPU driver.
    gpuPartitionSize String
    Size of partitions to create on the GPU. Valid values are described in the NVIDIA MIG user guide (https://docs.nvidia.com/datacenter/tesla/mig-user-guide/#partitioning)
    gpuSharingConfig NodePoolNodeConfigGuestAcceleratorGpuSharingConfig
    Configuration for GPU sharing.
    count number
    The number of the accelerator cards exposed to an instance.
    type string
    The accelerator type resource to expose to this instance, e.g. nvidia-tesla-k80.
    gpuDriverInstallationConfig NodePoolNodeConfigGuestAcceleratorGpuDriverInstallationConfig
    Configuration for auto installation of GPU driver.
    gpuPartitionSize string
    Size of partitions to create on the GPU. Valid values are described in the NVIDIA MIG user guide (https://docs.nvidia.com/datacenter/tesla/mig-user-guide/#partitioning)
    gpuSharingConfig NodePoolNodeConfigGuestAcceleratorGpuSharingConfig
    Configuration for GPU sharing.
    count int
    The number of the accelerator cards exposed to an instance.
    type str
    The accelerator type resource to expose to this instance, e.g. nvidia-tesla-k80.
    gpu_driver_installation_config NodePoolNodeConfigGuestAcceleratorGpuDriverInstallationConfig
    Configuration for auto installation of GPU driver.
    gpu_partition_size str
    Size of partitions to create on the GPU. Valid values are described in the NVIDIA MIG user guide (https://docs.nvidia.com/datacenter/tesla/mig-user-guide/#partitioning)
    gpu_sharing_config NodePoolNodeConfigGuestAcceleratorGpuSharingConfig
    Configuration for GPU sharing.
    count Number
    The number of the accelerator cards exposed to an instance.
    type String
    The accelerator type resource to expose to this instance, e.g. nvidia-tesla-k80.
    gpuDriverInstallationConfig Property Map
    Configuration for auto installation of GPU driver.
    gpuPartitionSize String
    Size of partitions to create on the GPU. Valid values are described in the NVIDIA MIG user guide (https://docs.nvidia.com/datacenter/tesla/mig-user-guide/#partitioning)
    gpuSharingConfig Property Map
    Configuration for GPU sharing.

    NodePoolNodeConfigGuestAcceleratorGpuDriverInstallationConfig, NodePoolNodeConfigGuestAcceleratorGpuDriverInstallationConfigArgs

    GpuDriverVersion string
    Mode for how the GPU driver is installed.
    GpuDriverVersion string
    Mode for how the GPU driver is installed.
    gpuDriverVersion String
    Mode for how the GPU driver is installed.
    gpuDriverVersion string
    Mode for how the GPU driver is installed.
    gpu_driver_version str
    Mode for how the GPU driver is installed.
    gpuDriverVersion String
    Mode for how the GPU driver is installed.

    NodePoolNodeConfigGuestAcceleratorGpuSharingConfig, NodePoolNodeConfigGuestAcceleratorGpuSharingConfigArgs

    GpuSharingStrategy string
    The type of GPU sharing strategy to enable on the GPU node. Possible values are described in the API package (https://pkg.go.dev/google.golang.org/api/container/v1#GPUSharingConfig)
    MaxSharedClientsPerGpu int
    The maximum number of containers that can share a GPU.
    GpuSharingStrategy string
    The type of GPU sharing strategy to enable on the GPU node. Possible values are described in the API package (https://pkg.go.dev/google.golang.org/api/container/v1#GPUSharingConfig)
    MaxSharedClientsPerGpu int
    The maximum number of containers that can share a GPU.
    gpuSharingStrategy String
    The type of GPU sharing strategy to enable on the GPU node. Possible values are described in the API package (https://pkg.go.dev/google.golang.org/api/container/v1#GPUSharingConfig)
    maxSharedClientsPerGpu Integer
    The maximum number of containers that can share a GPU.
    gpuSharingStrategy string
    The type of GPU sharing strategy to enable on the GPU node. Possible values are described in the API package (https://pkg.go.dev/google.golang.org/api/container/v1#GPUSharingConfig)
    maxSharedClientsPerGpu number
    The maximum number of containers that can share a GPU.
    gpu_sharing_strategy str
    The type of GPU sharing strategy to enable on the GPU node. Possible values are described in the API package (https://pkg.go.dev/google.golang.org/api/container/v1#GPUSharingConfig)
    max_shared_clients_per_gpu int
    The maximum number of containers that can share a GPU.
    gpuSharingStrategy String
    The type of GPU sharing strategy to enable on the GPU node. Possible values are described in the API package (https://pkg.go.dev/google.golang.org/api/container/v1#GPUSharingConfig)
    maxSharedClientsPerGpu Number
    The maximum number of containers that can share a GPU.
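
    Putting the accelerator fields together, a hedged TypeScript sketch of a GPU pool (accelerator type, driver version, and sharing values are illustrative placeholders):

    import * as gcp from "@pulumi/gcp";

    // Hypothetical GPU pool with automatic driver installation and GPU time-sharing.
    const gpuPool = new gcp.container.NodePool("gpu-pool", {
        cluster: "my-gke-cluster",   // placeholder cluster name
        location: "us-central1",
        nodeCount: 1,
        nodeConfig: {
            machineType: "n1-standard-8",
            guestAccelerators: [{
                type: "nvidia-tesla-t4",
                count: 1,
                gpuDriverInstallationConfig: {
                    gpuDriverVersion: "DEFAULT",   // let GKE install its default driver
                },
                gpuSharingConfig: {
                    gpuSharingStrategy: "TIME_SHARING",
                    maxSharedClientsPerGpu: 2,     // at most two containers share each GPU
                },
            }],
        },
    });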

    NodePoolNodeConfigGvnic, NodePoolNodeConfigGvnicArgs

    Enabled bool
    Whether or not gVNIC is enabled.
    Enabled bool
    Whether or not gVNIC is enabled.
    enabled Boolean
    Whether or not gVNIC is enabled.
    enabled boolean
    Whether or not gVNIC is enabled.
    enabled bool
    Whether or not gVNIC is enabled.
    enabled Boolean
    Whether or not gVNIC is enabled.

    NodePoolNodeConfigHostMaintenancePolicy, NodePoolNodeConfigHostMaintenancePolicyArgs

    NodePoolNodeConfigKubeletConfig, NodePoolNodeConfigKubeletConfigArgs

    CpuManagerPolicy string
    Control the CPU management policy on the node.
    CpuCfsQuota bool
    Enable CPU CFS quota enforcement for containers that specify CPU limits.
    CpuCfsQuotaPeriod string
    Set the CPU CFS quota period value 'cpu.cfs_period_us'.
    PodPidsLimit int
    Controls the maximum number of processes allowed to run in a pod.
    CpuManagerPolicy string
    Control the CPU management policy on the node.
    CpuCfsQuota bool
    Enable CPU CFS quota enforcement for containers that specify CPU limits.
    CpuCfsQuotaPeriod string
    Set the CPU CFS quota period value 'cpu.cfs_period_us'.
    PodPidsLimit int
    Controls the maximum number of processes allowed to run in a pod.
    cpuManagerPolicy String
    Control the CPU management policy on the node.
    cpuCfsQuota Boolean
    Enable CPU CFS quota enforcement for containers that specify CPU limits.
    cpuCfsQuotaPeriod String
    Set the CPU CFS quota period value 'cpu.cfs_period_us'.
    podPidsLimit Integer
    Controls the maximum number of processes allowed to run in a pod.
    cpuManagerPolicy string
    Control the CPU management policy on the node.
    cpuCfsQuota boolean
    Enable CPU CFS quota enforcement for containers that specify CPU limits.
    cpuCfsQuotaPeriod string
    Set the CPU CFS quota period value 'cpu.cfs_period_us'.
    podPidsLimit number
    Controls the maximum number of processes allowed to run in a pod.
    cpu_manager_policy str
    Control the CPU management policy on the node.
    cpu_cfs_quota bool
    Enable CPU CFS quota enforcement for containers that specify CPU limits.
    cpu_cfs_quota_period str
    Set the CPU CFS quota period value 'cpu.cfs_period_us'.
    pod_pids_limit int
    Controls the maximum number of processes allowed to run in a pod.
    cpuManagerPolicy String
    Control the CPU management policy on the node.
    cpuCfsQuota Boolean
    Enable CPU CFS quota enforcement for containers that specify CPU limits.
    cpuCfsQuotaPeriod String
    Set the CPU CFS quota period value 'cpu.cfs_period_us'.
    podPidsLimit Number
    Controls the maximum number of processes allowed to run in a pod.
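
    A sketch of kubelet tuning on a pool (the values shown are illustrative, not recommendations):

    import * as gcp from "@pulumi/gcp";

    // Hypothetical pool with a static CPU manager policy and a per-pod PID cap.
    const kubeletTunedPool = new gcp.container.NodePool("kubelet-tuned-pool", {
        cluster: "my-gke-cluster",   // placeholder cluster name
        location: "us-central1",
        nodeCount: 1,
        nodeConfig: {
            kubeletConfig: {
                cpuManagerPolicy: "static",   // reserve exclusive CPUs for Guaranteed pods
                cpuCfsQuota: true,            // enforce CPU limits via CFS quota
                cpuCfsQuotaPeriod: "100ms",
                podPidsLimit: 4096,           // cap the number of processes per pod
            },
        },
    });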

    NodePoolNodeConfigLinuxNodeConfig, NodePoolNodeConfigLinuxNodeConfigArgs

    CgroupMode string
    cgroupMode specifies the cgroup mode to be used on the node.
    Sysctls Dictionary<string, string>
    The Linux kernel parameters to be applied to the nodes and all pods running on the nodes.
    CgroupMode string
    cgroupMode specifies the cgroup mode to be used on the node.
    Sysctls map[string]string
    The Linux kernel parameters to be applied to the nodes and all pods running on the nodes.
    cgroupMode String
    cgroupMode specifies the cgroup mode to be used on the node.
    sysctls Map<String,String>
    The Linux kernel parameters to be applied to the nodes and all pods running on the nodes.
    cgroupMode string
    cgroupMode specifies the cgroup mode to be used on the node.
    sysctls {[key: string]: string}
    The Linux kernel parameters to be applied to the nodes and all pods running on the nodes.
    cgroup_mode str
    cgroupMode specifies the cgroup mode to be used on the node.
    sysctls Mapping[str, str]
    The Linux kernel parameters to be applied to the nodes and all pods running on the nodes.
    cgroupMode String
    cgroupMode specifies the cgroup mode to be used on the node.
    sysctls Map<String>
    The Linux kernel parameters to be applied to the nodes and all pods running on the nodes.
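
    A sketch of Linux node tuning (the sysctl and its value are placeholders; GKE only accepts an allow-listed set of sysctls):

    import * as gcp from "@pulumi/gcp";

    // Hypothetical pool running cgroup v2 with a raised connection backlog.
    const linuxTunedPool = new gcp.container.NodePool("linux-tuned-pool", {
        cluster: "my-gke-cluster",   // placeholder cluster name
        location: "us-central1",
        nodeCount: 1,
        nodeConfig: {
            linuxNodeConfig: {
                cgroupMode: "CGROUP_MODE_V2",       // opt the nodes into cgroup v2
                sysctls: {
                    "net.core.somaxconn": "4096",   // kernel parameters are passed as strings
                },
            },
        },
    });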

    NodePoolNodeConfigLocalNvmeSsdBlockConfig, NodePoolNodeConfigLocalNvmeSsdBlockConfigArgs

    LocalSsdCount int
    Number of raw-block local NVMe SSD disks to be attached to the node. Each local SSD is 375 GB in size.
    LocalSsdCount int
    Number of raw-block local NVMe SSD disks to be attached to the node. Each local SSD is 375 GB in size.
    localSsdCount Integer
    Number of raw-block local NVMe SSD disks to be attached to the node. Each local SSD is 375 GB in size.
    localSsdCount number
    Number of raw-block local NVMe SSD disks to be attached to the node. Each local SSD is 375 GB in size.
    local_ssd_count int
    Number of raw-block local NVMe SSD disks to be attached to the node. Each local SSD is 375 GB in size.
    localSsdCount Number
    Number of raw-block local NVMe SSD disks to be attached to the node. Each local SSD is 375 GB in size.

    NodePoolNodeConfigReservationAffinity, NodePoolNodeConfigReservationAffinityArgs

    ConsumeReservationType string
    Corresponds to the type of reservation consumption.
    Key string
    The label key of a reservation resource.
    Values List<string>
    The label values of the reservation resource.
    ConsumeReservationType string
    Corresponds to the type of reservation consumption.
    Key string
    The label key of a reservation resource.
    Values []string
    The label values of the reservation resource.
    consumeReservationType String
    Corresponds to the type of reservation consumption.
    key String
    The label key of a reservation resource.
    values List<String>
    The label values of the reservation resource.
    consumeReservationType string
    Corresponds to the type of reservation consumption.
    key string
    The label key of a reservation resource.
    values string[]
    The label values of the reservation resource.
    consume_reservation_type str
    Corresponds to the type of reservation consumption.
    key str
    The label key of a reservation resource.
    values Sequence[str]
    The label values of the reservation resource.
    consumeReservationType String
    Corresponds to the type of reservation consumption.
    key String
    The label key of a reservation resource.
    values List<String>
    The label values of the reservation resource.
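
    To consume a specific Compute Engine reservation, the label key is compute.googleapis.com/reservation-name. A sketch (the reservation name is a placeholder):

    import * as gcp from "@pulumi/gcp";

    // Hypothetical pool that only consumes capacity from one named reservation.
    const reservedPool = new gcp.container.NodePool("reserved-pool", {
        cluster: "my-gke-cluster",   // placeholder cluster name
        location: "us-central1",
        nodeCount: 1,
        nodeConfig: {
            reservationAffinity: {
                consumeReservationType: "SPECIFIC_RESERVATION",
                key: "compute.googleapis.com/reservation-name",
                values: ["my-reservation"],   // placeholder reservation name
            },
        },
    });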

    NodePoolNodeConfigSandboxConfig, NodePoolNodeConfigSandboxConfigArgs

    SandboxType string
    Type of the sandbox to use for the node (e.g. 'gvisor')
    SandboxType string
    Type of the sandbox to use for the node (e.g. 'gvisor')
    sandboxType String
    Type of the sandbox to use for the node (e.g. 'gvisor')
    sandboxType string
    Type of the sandbox to use for the node (e.g. 'gvisor')
    sandbox_type str
    Type of the sandbox to use for the node (e.g. 'gvisor')
    sandboxType String
    Type of the sandbox to use for the node (e.g. 'gvisor')

    NodePoolNodeConfigShieldedInstanceConfig, NodePoolNodeConfigShieldedInstanceConfigArgs

    EnableIntegrityMonitoring bool
    Defines whether the instance has integrity monitoring enabled.
    EnableSecureBoot bool
    Defines whether the instance has Secure Boot enabled.
    EnableIntegrityMonitoring bool
    Defines whether the instance has integrity monitoring enabled.
    EnableSecureBoot bool
    Defines whether the instance has Secure Boot enabled.
    enableIntegrityMonitoring Boolean
    Defines whether the instance has integrity monitoring enabled.
    enableSecureBoot Boolean
    Defines whether the instance has Secure Boot enabled.
    enableIntegrityMonitoring boolean
    Defines whether the instance has integrity monitoring enabled.
    enableSecureBoot boolean
    Defines whether the instance has Secure Boot enabled.
    enable_integrity_monitoring bool
    Defines whether the instance has integrity monitoring enabled.
    enable_secure_boot bool
    Defines whether the instance has Secure Boot enabled.
    enableIntegrityMonitoring Boolean
    Defines whether the instance has integrity monitoring enabled.
    enableSecureBoot Boolean
    Defines whether the instance has Secure Boot enabled.

    NodePoolNodeConfigSoleTenantConfig, NodePoolNodeConfigSoleTenantConfigArgs

    NodePoolNodeConfigSoleTenantConfigNodeAffinity, NodePoolNodeConfigSoleTenantConfigNodeAffinityArgs

    Key string
    The key of the node affinity label.
    Operator string
    The affinity operator. Possible values are IN and NOT_IN.
    Values List<string>
    The values of the node affinity label.
    Key string
    The key of the node affinity label.
    Operator string
    The affinity operator. Possible values are IN and NOT_IN.
    Values []string
    The values of the node affinity label.
    key String
    The key of the node affinity label.
    operator String
    The affinity operator. Possible values are IN and NOT_IN.
    values List<String>
    The values of the node affinity label.
    key string
    The key of the node affinity label.
    operator string
    The affinity operator. Possible values are IN and NOT_IN.
    values string[]
    The values of the node affinity label.
    key str
    The key of the node affinity label.
    operator str
    The affinity operator. Possible values are IN and NOT_IN.
    values Sequence[str]
    The values of the node affinity label.
    key String
    The key of the node affinity label.
    operator String
    The affinity operator. Possible values are IN and NOT_IN.
    values List<String>
    The values of the node affinity label.

    NodePoolNodeConfigTaint, NodePoolNodeConfigTaintArgs

    Effect string
    Effect for taint.
    Key string
    Key for taint.
    Value string
    Value for taint.
    Effect string
    Effect for taint.
    Key string
    Key for taint.
    Value string
    Value for taint.
    effect String
    Effect for taint.
    key String
    Key for taint.
    value String
    Value for taint.
    effect string
    Effect for taint.
    key string
    Key for taint.
    value string
    Value for taint.
    effect str
    Effect for taint.
    key str
    Key for taint.
    value str
    Value for taint.
    effect String
    Effect for taint.
    key String
    Key for taint.
    value String
    Value for taint.
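
    A sketch of a tainted pool that only schedules pods carrying a matching toleration (key and value are placeholders):

    import * as gcp from "@pulumi/gcp";

    // Hypothetical pool reserved for workloads that tolerate dedicated=gpu-workloads.
    const taintedPool = new gcp.container.NodePool("tainted-pool", {
        cluster: "my-gke-cluster",   // placeholder cluster name
        location: "us-central1",
        nodeCount: 1,
        nodeConfig: {
            taints: [{
                key: "dedicated",         // placeholder taint key
                value: "gpu-workloads",   // placeholder taint value
                effect: "NO_SCHEDULE",    // pods without a matching toleration stay off
            }],
        },
    });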

    NodePoolNodeConfigWorkloadMetadataConfig, NodePoolNodeConfigWorkloadMetadataConfigArgs

    Mode string
    Mode is the configuration for how to expose metadata to workloads running on the node.
    Mode string
    Mode is the configuration for how to expose metadata to workloads running on the node.
    mode String
    Mode is the configuration for how to expose metadata to workloads running on the node.
    mode string
    Mode is the configuration for how to expose metadata to workloads running on the node.
    mode str
    Mode is the configuration for how to expose metadata to workloads running on the node.
    mode String
    Mode is the configuration for how to expose metadata to workloads running on the node.
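
    For example, exposing the GKE metadata server to workloads (the mode used by Workload Identity) looks like this sketch:

    import * as gcp from "@pulumi/gcp";

    // Hypothetical pool exposing the GKE metadata server to its workloads.
    const wiPool = new gcp.container.NodePool("wi-pool", {
        cluster: "my-gke-cluster",   // placeholder cluster name
        location: "us-central1",
        nodeCount: 1,
        nodeConfig: {
            workloadMetadataConfig: {
                mode: "GKE_METADATA",   // serve GKE metadata; used by Workload Identity
            },
        },
    });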

    NodePoolPlacementPolicy, NodePoolPlacementPolicyArgs

    Type string
    The type of the policy. Supports a single value: COMPACT. Specifying COMPACT placement policy type places node pool's nodes in a closer physical proximity in order to reduce network latency between nodes.
    PolicyName string
    If set, refers to the name of a custom resource policy supplied by the user. The resource policy must be in the same project and region as the node pool. If not found, an InvalidArgument error is returned.
    TpuTopology string
    The TPU placement topology for pod slice node pool.
    Type string
    The type of the policy. Supports a single value: COMPACT. Specifying COMPACT placement policy type places node pool's nodes in a closer physical proximity in order to reduce network latency between nodes.
    PolicyName string
    If set, refers to the name of a custom resource policy supplied by the user. The resource policy must be in the same project and region as the node pool. If not found, an InvalidArgument error is returned.
    TpuTopology string
    The TPU placement topology for pod slice node pool.
    type String
    The type of the policy. Supports a single value: COMPACT. Specifying COMPACT placement policy type places node pool's nodes in a closer physical proximity in order to reduce network latency between nodes.
    policyName String
    If set, refers to the name of a custom resource policy supplied by the user. The resource policy must be in the same project and region as the node pool. If not found, an InvalidArgument error is returned.
    tpuTopology String
    The TPU placement topology for pod slice node pool.
    type string
    The type of the policy. Supports a single value: COMPACT. Specifying COMPACT placement policy type places node pool's nodes in a closer physical proximity in order to reduce network latency between nodes.
    policyName string
    If set, refers to the name of a custom resource policy supplied by the user. The resource policy must be in the same project and region as the node pool. If not found, an InvalidArgument error is returned.
    tpuTopology string
    The TPU placement topology for pod slice node pool.
    type str
    The type of the policy. Supports a single value: COMPACT. Specifying COMPACT placement policy type places node pool's nodes in a closer physical proximity in order to reduce network latency between nodes.
    policy_name str
    If set, refers to the name of a custom resource policy supplied by the user. The resource policy must be in the same project and region as the node pool. If not found, an InvalidArgument error is returned.
    tpu_topology str
    The TPU placement topology for pod slice node pool.
    type String
    The type of the policy. Supports a single value: COMPACT. Specifying COMPACT placement policy type places node pool's nodes in a closer physical proximity in order to reduce network latency between nodes.
    policyName String
    If set, refers to the name of a custom resource policy supplied by the user. The resource policy must be in the same project and region as the node pool. If not found, an InvalidArgument error is returned.
    tpuTopology String
    The TPU placement topology for pod slice node pool.
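
    A sketch of a compact placement pool (names are placeholders; COMPACT placement constrains which zones and machine types are usable):

    import * as gcp from "@pulumi/gcp";

    // Hypothetical pool whose nodes are placed in close physical proximity.
    const compactPool = new gcp.container.NodePool("compact-pool", {
        cluster: "my-gke-cluster",   // placeholder cluster name
        location: "us-central1",
        nodeCount: 2,
        placementPolicy: {
            type: "COMPACT",   // co-locate nodes to reduce inter-node latency
        },
    });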

    NodePoolQueuedProvisioning, NodePoolQueuedProvisioningArgs

    Enabled bool
    Makes nodes obtainable exclusively through the ProvisioningRequest API.
    Enabled bool
    Makes nodes obtainable exclusively through the ProvisioningRequest API.
    enabled Boolean
    Makes nodes obtainable exclusively through the ProvisioningRequest API.
    enabled boolean
    Makes nodes obtainable exclusively through the ProvisioningRequest API.
    enabled bool
    Makes nodes obtainable exclusively through the ProvisioningRequest API.
    enabled Boolean
    Makes nodes obtainable exclusively through the ProvisioningRequest API.

    NodePoolUpgradeSettings, NodePoolUpgradeSettingsArgs

    BlueGreenSettings NodePoolUpgradeSettingsBlueGreenSettings
    The settings to adjust blue-green upgrades. Structure is documented below.
    MaxSurge int
    The number of additional nodes that can be added to the node pool during an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater.
    MaxUnavailable int

    The number of nodes that can be simultaneously unavailable during an upgrade. Increasing max_unavailable raises the number of nodes that can be upgraded in parallel. Can be set to 0 or greater.

    max_surge and max_unavailable must not be negative and at least one of them must be greater than zero.

    Strategy string
    The upgrade strategy to be used for upgrading the nodes.
    BlueGreenSettings NodePoolUpgradeSettingsBlueGreenSettings
    The settings to adjust blue-green upgrades. Structure is documented below.
    MaxSurge int
    The number of additional nodes that can be added to the node pool during an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater.
    MaxUnavailable int

    The number of nodes that can be simultaneously unavailable during an upgrade. Increasing max_unavailable raises the number of nodes that can be upgraded in parallel. Can be set to 0 or greater.

    max_surge and max_unavailable must not be negative and at least one of them must be greater than zero.

    Strategy string
    The upgrade strategy to be used for upgrading the nodes.
    blueGreenSettings NodePoolUpgradeSettingsBlueGreenSettings
    The settings to adjust blue-green upgrades. Structure is documented below.
    maxSurge Integer
    The number of additional nodes that can be added to the node pool during an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater.
    maxUnavailable Integer

    The number of nodes that can be simultaneously unavailable during an upgrade. Increasing max_unavailable raises the number of nodes that can be upgraded in parallel. Can be set to 0 or greater.

    max_surge and max_unavailable must not be negative and at least one of them must be greater than zero.

    strategy String
    The upgrade strategy to be used for upgrading the nodes.
    blueGreenSettings NodePoolUpgradeSettingsBlueGreenSettings
    The settings to adjust blue-green upgrades. Structure is documented below.
    maxSurge number
    The number of additional nodes that can be added to the node pool during an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater.
    maxUnavailable number

    The number of nodes that can be simultaneously unavailable during an upgrade. Increasing max_unavailable raises the number of nodes that can be upgraded in parallel. Can be set to 0 or greater.

    max_surge and max_unavailable must not be negative and at least one of them must be greater than zero.

    strategy string
    The upgrade strategy to be used for upgrading the nodes.
    blue_green_settings NodePoolUpgradeSettingsBlueGreenSettings
    The settings to adjust blue-green upgrades. Structure is documented below.
    max_surge int
    The number of additional nodes that can be added to the node pool during an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater.
    max_unavailable int

    The number of nodes that can be simultaneously unavailable during an upgrade. Increasing max_unavailable raises the number of nodes that can be upgraded in parallel. Can be set to 0 or greater.

    max_surge and max_unavailable must not be negative and at least one of them must be greater than zero.

    strategy str
    The upgrade strategy to be used for upgrading the nodes.
    blueGreenSettings Property Map
    The settings to adjust blue-green upgrades. Structure is documented below.
    maxSurge Number
    The number of additional nodes that can be added to the node pool during an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater.
    maxUnavailable Number

    The number of nodes that can be simultaneously unavailable during an upgrade. Increasing max_unavailable raises the number of nodes that can be upgraded in parallel. Can be set to 0 or greater.

    max_surge and max_unavailable must not be negative and at least one of them must be greater than zero.

    strategy String
    The upgrade strategy to be used for upgrading the nodes.

    NodePoolUpgradeSettingsBlueGreenSettings, NodePoolUpgradeSettingsBlueGreenSettingsArgs

    StandardRolloutPolicy NodePoolUpgradeSettingsBlueGreenSettingsStandardRolloutPolicy
    Specifies the standard policy settings for blue-green upgrades.
    NodePoolSoakDuration string
    Time needed after draining the entire blue pool. After this period, the blue pool will be cleaned up.
    StandardRolloutPolicy NodePoolUpgradeSettingsBlueGreenSettingsStandardRolloutPolicy
    Specifies the standard policy settings for blue-green upgrades.
    NodePoolSoakDuration string
    Time needed after draining the entire blue pool. After this period, the blue pool will be cleaned up.
    standardRolloutPolicy NodePoolUpgradeSettingsBlueGreenSettingsStandardRolloutPolicy
    Specifies the standard policy settings for blue-green upgrades.
    nodePoolSoakDuration String
    Time needed after draining the entire blue pool. After this period, the blue pool will be cleaned up.
    standardRolloutPolicy NodePoolUpgradeSettingsBlueGreenSettingsStandardRolloutPolicy
    Specifies the standard policy settings for blue-green upgrades.
    nodePoolSoakDuration string
    Time needed after draining the entire blue pool. After this period, the blue pool will be cleaned up.
    standard_rollout_policy NodePoolUpgradeSettingsBlueGreenSettingsStandardRolloutPolicy
    Specifies the standard policy settings for blue-green upgrades.
    node_pool_soak_duration str
    Time needed after draining the entire blue pool. After this period, the blue pool will be cleaned up.
    standardRolloutPolicy Property Map
    Specifies the standard policy settings for blue-green upgrades.
    nodePoolSoakDuration String
    Time needed after draining the entire blue pool. After this period, the blue pool will be cleaned up.

    NodePoolUpgradeSettingsBlueGreenSettingsStandardRolloutPolicy, NodePoolUpgradeSettingsBlueGreenSettingsStandardRolloutPolicyArgs

    BatchNodeCount int
    Number of blue nodes to drain in a batch.
    BatchPercentage double
    Percentage of the blue pool nodes to drain in a batch.
    BatchSoakDuration string
    Soak time after each batch gets drained.
    BatchNodeCount int
    Number of blue nodes to drain in a batch.
    BatchPercentage float64
    Percentage of the blue pool nodes to drain in a batch.
    BatchSoakDuration string
    Soak time after each batch gets drained.
    batchNodeCount Integer
    Number of blue nodes to drain in a batch.
    batchPercentage Double
    Percentage of the blue pool nodes to drain in a batch.
    batchSoakDuration String
    Soak time after each batch gets drained.
    batchNodeCount number
    Number of blue nodes to drain in a batch.
    batchPercentage number
    Percentage of the blue pool nodes to drain in a batch.
    batchSoakDuration string
    Soak time after each batch gets drained.
    batch_node_count int
    Number of blue nodes to drain in a batch.
    batch_percentage float
    Percentage of the blue pool nodes to drain in a batch.
    batch_soak_duration str
    Soak time after each batch gets drained.
    batchNodeCount Number
    Number of blue nodes to drain in a batch.
    batchPercentage Number
    Percentage of the blue pool nodes to drain in a batch.
    batchSoakDuration String
    Soak time after each batch gets drained.
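
    Combining the settings above, a hedged sketch of a blue-green upgrade policy (all durations and batch sizes are placeholders):

    import * as gcp from "@pulumi/gcp";

    // Hypothetical pool upgraded via blue-green rollouts rather than surge upgrades.
    const blueGreenPool = new gcp.container.NodePool("blue-green-pool", {
        cluster: "my-gke-cluster",   // placeholder cluster name
        location: "us-central1",
        nodeCount: 3,
        upgradeSettings: {
            strategy: "BLUE_GREEN",
            blueGreenSettings: {
                nodePoolSoakDuration: "7200s",   // soak two hours before deleting the blue pool
                standardRolloutPolicy: {
                    batchNodeCount: 1,           // drain one blue node per batch
                    batchSoakDuration: "600s",   // soak ten minutes after each batch
                },
            },
        },
    });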

    Import

    Node pools can be imported using the project, location, cluster, and name. If the project is omitted, the project value in the provider configuration will be used. Examples:

    • {{project_id}}/{{location}}/{{cluster_id}}/{{pool_id}}

    • {{location}}/{{cluster_id}}/{{pool_id}}

    When using the pulumi import command, node pools can be imported using one of the formats above. For example:

    $ pulumi import gcp:container/nodePool:NodePool default {{project_id}}/{{location}}/{{cluster_id}}/{{pool_id}}
    
    $ pulumi import gcp:container/nodePool:NodePool default {{location}}/{{cluster_id}}/{{pool_id}}
    

    Package Details

    Repository
    Google Cloud (GCP) Classic pulumi/pulumi-gcp
    License
    Apache-2.0
    Notes
    This Pulumi package is based on the google-beta Terraform Provider.