gcp.container.NodePool

Google Cloud Classic v6.57.0 published on Tuesday, May 30, 2023 by Pulumi
    Manages a node pool in a Google Kubernetes Engine (GKE) cluster separately from the cluster control plane. For more information see the official documentation and the API reference.

    Example Usage

    C#

    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Gcp = Pulumi.Gcp;
    
    return await Deployment.RunAsync(() => 
    {
        var @default = new Gcp.ServiceAccount.Account("default", new()
        {
            AccountId = "service-account-id",
            DisplayName = "Service Account",
        });
    
        var primary = new Gcp.Container.Cluster("primary", new()
        {
            Location = "us-central1",
            RemoveDefaultNodePool = true,
            InitialNodeCount = 1,
        });
    
        var primaryPreemptibleNodes = new Gcp.Container.NodePool("primaryPreemptibleNodes", new()
        {
            Cluster = primary.Id,
            NodeCount = 1,
            NodeConfig = new Gcp.Container.Inputs.NodePoolNodeConfigArgs
            {
                Preemptible = true,
                MachineType = "e2-medium",
                ServiceAccount = @default.Email,
                OauthScopes = new[]
                {
                    "https://www.googleapis.com/auth/cloud-platform",
                },
            },
        });
    
    });
    
    Go

    package main
    
    import (
    	"github.com/pulumi/pulumi-gcp/sdk/v6/go/gcp/container"
    	"github.com/pulumi/pulumi-gcp/sdk/v6/go/gcp/serviceAccount"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_default, err := serviceAccount.NewAccount(ctx, "default", &serviceAccount.AccountArgs{
    			AccountId:   pulumi.String("service-account-id"),
    			DisplayName: pulumi.String("Service Account"),
    		})
    		if err != nil {
    			return err
    		}
    		primary, err := container.NewCluster(ctx, "primary", &container.ClusterArgs{
    			Location:              pulumi.String("us-central1"),
    			RemoveDefaultNodePool: pulumi.Bool(true),
    			InitialNodeCount:      pulumi.Int(1),
    		})
    		if err != nil {
    			return err
    		}
    		_, err = container.NewNodePool(ctx, "primaryPreemptibleNodes", &container.NodePoolArgs{
    			Cluster:   primary.ID(),
    			NodeCount: pulumi.Int(1),
    			NodeConfig: &container.NodePoolNodeConfigArgs{
    				Preemptible:    pulumi.Bool(true),
    				MachineType:    pulumi.String("e2-medium"),
    				ServiceAccount: _default.Email,
    				OauthScopes: pulumi.StringArray{
    					pulumi.String("https://www.googleapis.com/auth/cloud-platform"),
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    Java

    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.gcp.serviceAccount.Account;
    import com.pulumi.gcp.serviceAccount.AccountArgs;
    import com.pulumi.gcp.container.Cluster;
    import com.pulumi.gcp.container.ClusterArgs;
    import com.pulumi.gcp.container.NodePool;
    import com.pulumi.gcp.container.NodePoolArgs;
    import com.pulumi.gcp.container.inputs.NodePoolNodeConfigArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var default_ = new Account("default", AccountArgs.builder()        
                .accountId("service-account-id")
                .displayName("Service Account")
                .build());
    
            var primary = new Cluster("primary", ClusterArgs.builder()        
                .location("us-central1")
                .removeDefaultNodePool(true)
                .initialNodeCount(1)
                .build());
    
            var primaryPreemptibleNodes = new NodePool("primaryPreemptibleNodes", NodePoolArgs.builder()        
                .cluster(primary.id())
                .nodeCount(1)
                .nodeConfig(NodePoolNodeConfigArgs.builder()
                    .preemptible(true)
                    .machineType("e2-medium")
                    .serviceAccount(default_.email())
                    .oauthScopes("https://www.googleapis.com/auth/cloud-platform")
                    .build())
                .build());
    
        }
    }
    
    Python

    import pulumi
    import pulumi_gcp as gcp
    
    default = gcp.service_account.Account("default",
        account_id="service-account-id",
        display_name="Service Account")
    primary = gcp.container.Cluster("primary",
        location="us-central1",
        remove_default_node_pool=True,
        initial_node_count=1)
    primary_preemptible_nodes = gcp.container.NodePool("primaryPreemptibleNodes",
        cluster=primary.id,
        node_count=1,
        node_config=gcp.container.NodePoolNodeConfigArgs(
            preemptible=True,
            machine_type="e2-medium",
            service_account=default.email,
            oauth_scopes=["https://www.googleapis.com/auth/cloud-platform"],
        ))
    
    TypeScript

    import * as pulumi from "@pulumi/pulumi";
    import * as gcp from "@pulumi/gcp";
    
    const _default = new gcp.serviceaccount.Account("default", {
        accountId: "service-account-id",
        displayName: "Service Account",
    });
    const primary = new gcp.container.Cluster("primary", {
        location: "us-central1",
        removeDefaultNodePool: true,
        initialNodeCount: 1,
    });
    const primaryPreemptibleNodes = new gcp.container.NodePool("primaryPreemptibleNodes", {
        cluster: primary.id,
        nodeCount: 1,
        nodeConfig: {
            preemptible: true,
            machineType: "e2-medium",
            serviceAccount: _default.email,
            oauthScopes: ["https://www.googleapis.com/auth/cloud-platform"],
        },
    });
    
    YAML

    resources:
      default:
        type: gcp:serviceAccount:Account
        properties:
          accountId: service-account-id
          displayName: Service Account
      primary:
        type: gcp:container:Cluster
        properties:
          location: us-central1
          # We can't create a cluster with no node pool defined, but we want to only use
          # separately managed node pools. So we create the smallest possible default
          # node pool and immediately delete it.
          removeDefaultNodePool: true
          initialNodeCount: 1
      primaryPreemptibleNodes:
        type: gcp:container:NodePool
        properties:
          cluster: ${primary.id}
          nodeCount: 1
          nodeConfig:
            preemptible: true
            machineType: e2-medium
            serviceAccount: ${default.email}
            oauthScopes:
              - https://www.googleapis.com/auth/cloud-platform
    

    2 Node Pools, 1 Separately Managed + The Default Node Pool


    Java

    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.gcp.serviceAccount.Account;
    import com.pulumi.gcp.serviceAccount.AccountArgs;
    import com.pulumi.gcp.container.Cluster;
    import com.pulumi.gcp.container.ClusterArgs;
    import com.pulumi.gcp.container.inputs.ClusterNodeConfigArgs;
    import com.pulumi.gcp.container.inputs.ClusterNodeConfigGuestAcceleratorArgs;
    import com.pulumi.gcp.container.NodePool;
    import com.pulumi.gcp.container.NodePoolArgs;
    import com.pulumi.gcp.container.inputs.NodePoolNodeConfigArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var default_ = new Account("default", AccountArgs.builder()        
                .accountId("service-account-id")
                .displayName("Service Account")
                .build());
    
            var primary = new Cluster("primary", ClusterArgs.builder()        
                .location("us-central1-a")
                .initialNodeCount(3)
                .nodeLocations("us-central1-c")
                .nodeConfig(ClusterNodeConfigArgs.builder()
                    .serviceAccount(default_.email())
                    .oauthScopes("https://www.googleapis.com/auth/cloud-platform")
                    .guestAccelerators(ClusterNodeConfigGuestAcceleratorArgs.builder()
                        .type("nvidia-tesla-k80")
                        .count(1)
                        .build())
                    .build())
                .build());
    
            var np = new NodePool("np", NodePoolArgs.builder()        
                .cluster(primary.id())
                .nodeConfig(NodePoolNodeConfigArgs.builder()
                    .machineType("e2-medium")
                    .serviceAccount(default_.email())
                    .oauthScopes("https://www.googleapis.com/auth/cloud-platform")
                    .build())
                // Custom timeouts (create: 30m, update: 20m; see the YAML example
                // below) are resource options in Pulumi rather than resource
                // arguments, so they are not configured on the args builder here.
                .build());
    
        }
    }
    

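    As a TypeScript sketch of the same program (an illustrative translation of the YAML below; the customTimeouts resource option stands in for the timeouts block):

    import * as gcp from "@pulumi/gcp";

    const defaultAccount = new gcp.serviceaccount.Account("default", {
        accountId: "service-account-id",
        displayName: "Service Account",
    });

    // Cluster with a default node pool of 3 nodes in us-central1-c, each with a GPU attached.
    const primary = new gcp.container.Cluster("primary", {
        location: "us-central1-a",
        initialNodeCount: 3,
        nodeLocations: ["us-central1-c"],
        nodeConfig: {
            serviceAccount: defaultAccount.email,
            oauthScopes: ["https://www.googleapis.com/auth/cloud-platform"],
            guestAccelerators: [{
                type: "nvidia-tesla-k80",
                count: 1,
            }],
        },
    });

    // A second, separately managed node pool alongside the default one.
    const np = new gcp.container.NodePool("np", {
        cluster: primary.id,
        nodeConfig: {
            machineType: "e2-medium",
            serviceAccount: defaultAccount.email,
            oauthScopes: ["https://www.googleapis.com/auth/cloud-platform"],
        },
    }, {
        // Resource-option analog of the timeouts block in the YAML below.
        customTimeouts: {
            create: "30m",
            update: "20m",
        },
    });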

    YAML

    resources:
      default:
        type: gcp:serviceAccount:Account
        properties:
          accountId: service-account-id
          displayName: Service Account
      np:
        type: gcp:container:NodePool
        properties:
          cluster: ${primary.id}
          nodeConfig:
            machineType: e2-medium
            serviceAccount: ${default.email}
            oauthScopes:
              - https://www.googleapis.com/auth/cloud-platform
        # In Pulumi YAML, custom timeouts are resource options rather than properties.
        options:
          customTimeouts:
            create: 30m
            update: 20m
      primary:
        type: gcp:container:Cluster
        properties:
          location: us-central1-a
          initialNodeCount: 3
          nodeLocations:
            - us-central1-c
          nodeConfig:
            serviceAccount: ${default.email}
            oauthScopes:
              - https://www.googleapis.com/auth/cloud-platform
            guestAccelerators:
              - type: nvidia-tesla-k80
                count: 1
    

    Create NodePool Resource

    new NodePool(name: string, args: NodePoolArgs, opts?: CustomResourceOptions);
    @overload
    def NodePool(resource_name: str,
                 opts: Optional[ResourceOptions] = None,
                 autoscaling: Optional[NodePoolAutoscalingArgs] = None,
                 cluster: Optional[str] = None,
                 initial_node_count: Optional[int] = None,
                 location: Optional[str] = None,
                 management: Optional[NodePoolManagementArgs] = None,
                 max_pods_per_node: Optional[int] = None,
                 name: Optional[str] = None,
                 name_prefix: Optional[str] = None,
                 network_config: Optional[NodePoolNetworkConfigArgs] = None,
                 node_config: Optional[NodePoolNodeConfigArgs] = None,
                 node_count: Optional[int] = None,
                 node_locations: Optional[Sequence[str]] = None,
                 placement_policy: Optional[NodePoolPlacementPolicyArgs] = None,
                 project: Optional[str] = None,
                 upgrade_settings: Optional[NodePoolUpgradeSettingsArgs] = None,
                 version: Optional[str] = None)
    @overload
    def NodePool(resource_name: str,
                 args: NodePoolArgs,
                 opts: Optional[ResourceOptions] = None)
    func NewNodePool(ctx *Context, name string, args NodePoolArgs, opts ...ResourceOption) (*NodePool, error)
    public NodePool(string name, NodePoolArgs args, CustomResourceOptions? opts = null)
    public NodePool(String name, NodePoolArgs args)
    public NodePool(String name, NodePoolArgs args, CustomResourceOptions options)
    
    type: gcp:container:NodePool
    properties: # The arguments to resource properties.
    options: # Bag of options to control resource's behavior.
    
    
    name string
    The unique name of the resource.
    args NodePoolArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    resource_name str
    The unique name of the resource.
    args NodePoolArgs
    The arguments to resource properties.
    opts ResourceOptions
    Bag of options to control resource's behavior.
    ctx Context
    Context object for the current deployment.
    name string
    The unique name of the resource.
    args NodePoolArgs
    The arguments to resource properties.
    opts ResourceOption
    Bag of options to control resource's behavior.
    name string
    The unique name of the resource.
    args NodePoolArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    name String
    The unique name of the resource.
    args NodePoolArgs
    The arguments to resource properties.
    options CustomResourceOptions
    Bag of options to control resource's behavior.

    NodePool Resource Properties

    To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

    Inputs

    The NodePool resource accepts the following input properties:

    Cluster string

    The cluster to create the node pool for. The cluster must be present in the location provided. May be specified in the format projects/{{project}}/locations/{{location}}/clusters/{{cluster}} or as just the name of the cluster.


    Autoscaling NodePoolAutoscalingArgs

    Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Either total or per-zone node limits are required. Structure is documented below.

    InitialNodeCount int

    The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use the ignoreChanges resource option to ignore subsequent changes to this field.

    Location string

    The location (region or zone) of the cluster.


    Management NodePoolManagementArgs

    Node management configuration, wherein auto-repair and auto-upgrade are configured. Structure is documented below.

    MaxPodsPerNode int

    The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.

    Name string

    The name of the node pool. If left blank, the provider will auto-generate a unique name.

    NamePrefix string

    Creates a unique name for the node pool beginning with the specified prefix. Conflicts with name.

    NetworkConfig NodePoolNetworkConfigArgs

    The network configuration of the pool, such as configuration for adding Pod IP address ranges to the node pool, or enabling private nodes. Structure is documented below.

    NodeConfig NodePoolNodeConfigArgs

    Parameters used in creating the node pool. See gcp.container.Cluster for schema.

    NodeCount int

    The number of nodes per instance group. This field can be used to update the number of nodes per instance group but should not be used alongside autoscaling.

    NodeLocations List<string>

    The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level node_locations will be used.

    Note: node_locations will not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster.

    PlacementPolicy NodePoolPlacementPolicyArgs

    Specifies a custom placement policy for the nodes.

    Project string

    The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.

    UpgradeSettings NodePoolUpgradeSettingsArgs

    Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.

    Version string

    The Kubernetes version for the nodes in this pool. Note that if this field and auto_upgrade are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See the gcp.container.getEngineVersions data source's version_prefix field to approximate fuzzy versions in a provider-compatible way.

    Cluster string

    The cluster to create the node pool for. The cluster must be present in the location provided. May be specified in the format projects/{{project}}/locations/{{location}}/clusters/{{cluster}} or as just the name of the cluster.


    Autoscaling NodePoolAutoscalingArgs

    Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Either total or per-zone node limits are required. Structure is documented below.

    InitialNodeCount int

    The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use the ignoreChanges resource option to ignore subsequent changes to this field.

    Location string

    The location (region or zone) of the cluster.


    Management NodePoolManagementArgs

    Node management configuration, wherein auto-repair and auto-upgrade are configured. Structure is documented below.

    MaxPodsPerNode int

    The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.

    Name string

    The name of the node pool. If left blank, the provider will auto-generate a unique name.

    NamePrefix string

    Creates a unique name for the node pool beginning with the specified prefix. Conflicts with name.

    NetworkConfig NodePoolNetworkConfigArgs

    The network configuration of the pool, such as configuration for adding Pod IP address ranges to the node pool, or enabling private nodes. Structure is documented below.

    NodeConfig NodePoolNodeConfigArgs

    Parameters used in creating the node pool. See gcp.container.Cluster for schema.

    NodeCount int

    The number of nodes per instance group. This field can be used to update the number of nodes per instance group but should not be used alongside autoscaling.

    NodeLocations []string

    The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level node_locations will be used.

    Note: node_locations will not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster.

    PlacementPolicy NodePoolPlacementPolicyArgs

    Specifies a custom placement policy for the nodes.

    Project string

    The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.

    UpgradeSettings NodePoolUpgradeSettingsArgs

    Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.

    Version string

    The Kubernetes version for the nodes in this pool. Note that if this field and auto_upgrade are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See the gcp.container.getEngineVersions data source's version_prefix field to approximate fuzzy versions in a provider-compatible way.

    cluster String

    The cluster to create the node pool for. The cluster must be present in the location provided. May be specified in the format projects/{{project}}/locations/{{location}}/clusters/{{cluster}} or as just the name of the cluster.


    autoscaling NodePoolAutoscalingArgs

    Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Either total or per-zone node limits are required. Structure is documented below.

    initialNodeCount Integer

    The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use the ignoreChanges resource option to ignore subsequent changes to this field.

    location String

    The location (region or zone) of the cluster.


    management NodePoolManagementArgs

    Node management configuration, wherein auto-repair and auto-upgrade are configured. Structure is documented below.

    maxPodsPerNode Integer

    The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.

    name String

    The name of the node pool. If left blank, the provider will auto-generate a unique name.

    namePrefix String

    Creates a unique name for the node pool beginning with the specified prefix. Conflicts with name.

    networkConfig NodePoolNetworkConfigArgs

    The network configuration of the pool, such as configuration for adding Pod IP address ranges to the node pool, or enabling private nodes. Structure is documented below.

    nodeConfig NodePoolNodeConfigArgs

    Parameters used in creating the node pool. See gcp.container.Cluster for schema.

    nodeCount Integer

    The number of nodes per instance group. This field can be used to update the number of nodes per instance group but should not be used alongside autoscaling.

    nodeLocations List<String>

    The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level node_locations will be used.

    Note: node_locations will not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster.

    placementPolicy NodePoolPlacementPolicyArgs

    Specifies a custom placement policy for the nodes.

    project String

    The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.

    upgradeSettings NodePoolUpgradeSettingsArgs

    Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.

    version String

    The Kubernetes version for the nodes in this pool. Note that if this field and auto_upgrade are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See the gcp.container.getEngineVersions data source's version_prefix field to approximate fuzzy versions in a provider-compatible way.

    cluster string

    The cluster to create the node pool for. The cluster must be present in the location provided. May be specified in the format projects/{{project}}/locations/{{location}}/clusters/{{cluster}} or as just the name of the cluster.


    autoscaling NodePoolAutoscalingArgs

    Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Either total or per-zone node limits are required. Structure is documented below.

    initialNodeCount number

    The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use the ignoreChanges resource option to ignore subsequent changes to this field.

    location string

    The location (region or zone) of the cluster.


    management NodePoolManagementArgs

    Node management configuration, wherein auto-repair and auto-upgrade are configured. Structure is documented below.

    maxPodsPerNode number

    The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.

    name string

    The name of the node pool. If left blank, the provider will auto-generate a unique name.

    namePrefix string

    Creates a unique name for the node pool beginning with the specified prefix. Conflicts with name.

    networkConfig NodePoolNetworkConfigArgs

    The network configuration of the pool, such as configuration for adding Pod IP address ranges to the node pool, or enabling private nodes. Structure is documented below.

    nodeConfig NodePoolNodeConfigArgs

    Parameters used in creating the node pool. See gcp.container.Cluster for schema.

    nodeCount number

    The number of nodes per instance group. This field can be used to update the number of nodes per instance group but should not be used alongside autoscaling.

    nodeLocations string[]

    The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level node_locations will be used.

    Note: node_locations will not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster.

    placementPolicy NodePoolPlacementPolicyArgs

    Specifies a custom placement policy for the nodes.

    project string

    The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.

    upgradeSettings NodePoolUpgradeSettingsArgs

    Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.

    version string

    The Kubernetes version for the nodes in this pool. Note that if this field and auto_upgrade are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See the gcp.container.getEngineVersions data source's version_prefix field to approximate fuzzy versions in a provider-compatible way.

    cluster str

    The cluster to create the node pool for. The cluster must be present in the location provided. May be specified in the format projects/{{project}}/locations/{{location}}/clusters/{{cluster}} or as just the name of the cluster.


    autoscaling NodePoolAutoscalingArgs

    Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Either total or per-zone node limits are required. Structure is documented below.

    initial_node_count int

    The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use the ignore_changes resource option to ignore subsequent changes to this field.

    location str

    The location (region or zone) of the cluster.


    management NodePoolManagementArgs

    Node management configuration, wherein auto-repair and auto-upgrade are configured. Structure is documented below.

    max_pods_per_node int

    The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.

    name str

    The name of the node pool. If left blank, the provider will auto-generate a unique name.

    name_prefix str

    Creates a unique name for the node pool beginning with the specified prefix. Conflicts with name.

    network_config NodePoolNetworkConfigArgs

    The network configuration of the pool, such as configuration for adding Pod IP address ranges to the node pool, or enabling private nodes. Structure is documented below.

    node_config NodePoolNodeConfigArgs

    Parameters used in creating the node pool. See gcp.container.Cluster for schema.

    node_count int

    The number of nodes per instance group. This field can be used to update the number of nodes per instance group but should not be used alongside autoscaling.

    node_locations Sequence[str]

    The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level node_locations will be used.

    Note: node_locations will not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster.

    placement_policy NodePoolPlacementPolicyArgs

    Specifies a custom placement policy for the nodes.

    project str

    The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.

    upgrade_settings NodePoolUpgradeSettingsArgs

    Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.

    version str

    The Kubernetes version for the nodes in this pool. Note that if this field and auto_upgrade are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See the gcp.container.getEngineVersions data source's version_prefix field to approximate fuzzy versions in a provider-compatible way.

    cluster String

    The cluster to create the node pool for. The cluster must be present in the location provided. May be specified in the format projects/{{project}}/locations/{{location}}/clusters/{{cluster}} or as just the name of the cluster.


    autoscaling Property Map

    Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Either total or per-zone node limits are required. Structure is documented below.

    initialNodeCount Number

    The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use the ignoreChanges resource option to ignore subsequent changes to this field.

    location String

    The location (region or zone) of the cluster.


    management Property Map

    Node management configuration, wherein auto-repair and auto-upgrade are configured. Structure is documented below.

    maxPodsPerNode Number

    The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.

    name String

    The name of the node pool. If left blank, the provider will auto-generate a unique name.

    namePrefix String

    Creates a unique name for the node pool beginning with the specified prefix. Conflicts with name.

    networkConfig Property Map

    The network configuration of the pool, such as configuration for adding Pod IP address ranges to the node pool, or enabling private nodes. Structure is documented below.

    nodeConfig Property Map

    Parameters used in creating the node pool. See gcp.container.Cluster for schema.

    nodeCount Number

    The number of nodes per instance group. This field can be used to update the number of nodes per instance group but should not be used alongside autoscaling.

    nodeLocations List<String>

    The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level node_locations will be used.

    Note: node_locations will not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster.

    placementPolicy Property Map

    Specifies a custom placement policy for the nodes.

    project String

    The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.

    upgradeSettings Property Map

    Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.

    version String

    The Kubernetes version for the nodes in this pool. Note that if this field and auto_upgrade are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See the gcp.container.getEngineVersions data source's version_prefix field to approximate fuzzy versions in a provider-compatible way.
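
    To make the interplay between several of these inputs concrete, here is a minimal TypeScript sketch, with hypothetical cluster and version names, that pins an explicit version via the gcp.container.getEngineVersions data source, uses autoscaling instead of nodeCount, sets upgrade settings, and ignores subsequent initialNodeCount drift via the ignoreChanges resource option:

    import * as gcp from "@pulumi/gcp";

    // Resolve a fuzzy version prefix to an explicit node version, avoiding the
    // spurious diffs described above for the version field. The prefix is a
    // hypothetical example.
    const versions = gcp.container.getEngineVersionsOutput({
        location: "us-central1",
        versionPrefix: "1.27.",
    });

    const pool = new gcp.container.NodePool("autoscaled-pool", {
        cluster: "my-cluster",          // hypothetical cluster name in us-central1
        location: "us-central1",
        version: versions.latestNodeVersion,
        initialNodeCount: 1,
        // Use autoscaling instead of nodeCount; setting both is discouraged.
        autoscaling: {
            minNodeCount: 1,            // per-zone limits; totalMinNodeCount /
            maxNodeCount: 5,            // totalMaxNodeCount are the "total" form
        },
        upgradeSettings: {
            maxSurge: 1,                // at most one extra node during an upgrade
            maxUnavailable: 0,          // keep existing capacity available
        },
    }, {
        // Don't diff initialNodeCount after manual resizes or autoscaler activity.
        ignoreChanges: ["initialNodeCount"],
    });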

    Outputs

    All input properties are implicitly available as output properties. Additionally, the NodePool resource produces the following output properties:

    Id string

    The provider-assigned unique ID for this managed resource.

    InstanceGroupUrls List<string>

    The resource URLs of the managed instance groups associated with this node pool.

    ManagedInstanceGroupUrls List<string>

    List of instance group URLs which have been assigned to this node pool.

    Operation string
    Id string

    The provider-assigned unique ID for this managed resource.

    InstanceGroupUrls []string

    The resource URLs of the managed instance groups associated with this node pool.

    ManagedInstanceGroupUrls []string

    List of instance group URLs which have been assigned to this node pool.

    Operation string
    id String

    The provider-assigned unique ID for this managed resource.

    instanceGroupUrls List<String>

    The resource URLs of the managed instance groups associated with this node pool.

    managedInstanceGroupUrls List<String>

    List of instance group URLs which have been assigned to this node pool.

    operation String
    id string

    The provider-assigned unique ID for this managed resource.

    instanceGroupUrls string[]

    The resource URLs of the managed instance groups associated with this node pool.

    managedInstanceGroupUrls string[]

    List of instance group URLs which have been assigned to this node pool.

    operation string
    id str

    The provider-assigned unique ID for this managed resource.

    instance_group_urls Sequence[str]

    The resource URLs of the managed instance groups associated with this node pool.

    managed_instance_group_urls Sequence[str]

    List of instance group URLs which have been assigned to this node pool.

    operation str
    id String

    The provider-assigned unique ID for this managed resource.

    instanceGroupUrls List<String>

    The resource URLs of the managed instance groups associated with this node pool.

    managedInstanceGroupUrls List<String>

    List of instance group URLs which have been assigned to this node pool.

    operation String
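
    As a sketch of consuming these outputs in TypeScript (assuming the primaryPreemptibleNodes pool from the Example Usage section), the ID and URLs can be exported as stack outputs:

    // Stack outputs: the provider-assigned ID and the instance group URLs.
    export const nodePoolId = primaryPreemptibleNodes.id;
    export const instanceGroupUrls = primaryPreemptibleNodes.instanceGroupUrls;
    export const managedInstanceGroupUrls = primaryPreemptibleNodes.managedInstanceGroupUrls;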

    Look up Existing NodePool Resource

    Get an existing NodePool resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

    public static get(name: string, id: Input<ID>, state?: NodePoolState, opts?: CustomResourceOptions): NodePool
    @staticmethod
    def get(resource_name: str,
            id: str,
            opts: Optional[ResourceOptions] = None,
            autoscaling: Optional[NodePoolAutoscalingArgs] = None,
            cluster: Optional[str] = None,
            initial_node_count: Optional[int] = None,
            instance_group_urls: Optional[Sequence[str]] = None,
            location: Optional[str] = None,
            managed_instance_group_urls: Optional[Sequence[str]] = None,
            management: Optional[NodePoolManagementArgs] = None,
            max_pods_per_node: Optional[int] = None,
            name: Optional[str] = None,
            name_prefix: Optional[str] = None,
            network_config: Optional[NodePoolNetworkConfigArgs] = None,
            node_config: Optional[NodePoolNodeConfigArgs] = None,
            node_count: Optional[int] = None,
            node_locations: Optional[Sequence[str]] = None,
            operation: Optional[str] = None,
            placement_policy: Optional[NodePoolPlacementPolicyArgs] = None,
            project: Optional[str] = None,
            upgrade_settings: Optional[NodePoolUpgradeSettingsArgs] = None,
            version: Optional[str] = None) -> NodePool
    func GetNodePool(ctx *Context, name string, id IDInput, state *NodePoolState, opts ...ResourceOption) (*NodePool, error)
    public static NodePool Get(string name, Input<string> id, NodePoolState? state, CustomResourceOptions? opts = null)
    public static NodePool get(String name, Output<String> id, NodePoolState state, CustomResourceOptions options)
    Resource lookup is not supported in YAML
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    resource_name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
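
    For example, in TypeScript an existing pool can be looked up by its fully qualified ID; the project, location, cluster, and pool names below are hypothetical placeholders:

    import * as gcp from "@pulumi/gcp";

    // The ID takes the form {project}/{location}/{cluster}/{poolName}.
    const existing = gcp.container.NodePool.get(
        "existing-pool",
        "my-project/us-central1/my-cluster/my-node-pool",
    );

    // State properties are then available as outputs, e.g.:
    export const existingVersion = existing.version;
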
    The following state arguments are supported:
    Autoscaling NodePoolAutoscalingArgs

    Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Either total or per-zone node limits are required. Structure is documented below.

    Cluster string

    The cluster to create the node pool for. The cluster must be present in the location provided. May be specified in the format projects/{{project}}/locations/{{location}}/clusters/{{cluster}} or as just the name of the cluster.


    InitialNodeCount int

    The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use the ignoreChanges resource option to ignore subsequent changes to this field.

    InstanceGroupUrls List<string>

    The resource URLs of the managed instance groups associated with this node pool.

    Location string

    The location (region or zone) of the cluster.


    ManagedInstanceGroupUrls List<string>

    List of instance group URLs which have been assigned to this node pool.

    Management NodePoolManagementArgs

    Node management configuration, wherein auto-repair and auto-upgrade are configured. Structure is documented below.

    MaxPodsPerNode int

    The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.

    Name string

    The name of the node pool. If left blank, the provider will auto-generate a unique name.

    NamePrefix string

    Creates a unique name for the node pool beginning with the specified prefix. Conflicts with name.

    NetworkConfig NodePoolNetworkConfigArgs

    The network configuration of the pool, such as configuration for adding Pod IP address ranges to the node pool, or enabling private nodes. Structure is documented below.

    NodeConfig NodePoolNodeConfigArgs

    Parameters used in creating the node pool. See gcp.container.Cluster for schema.

    NodeCount int

    The number of nodes per instance group. This field can be used to update the number of nodes per instance group but should not be used alongside autoscaling.

    NodeLocations List<string>

    The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level node_locations will be used.

    Note: node_locations will not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster.

    Operation string
    PlacementPolicy NodePoolPlacementPolicyArgs

    Specifies a custom placement policy for the nodes.

    Project string

    The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.

    UpgradeSettings NodePoolUpgradeSettingsArgs

    Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.

    Version string

    The Kubernetes version for the nodes in this pool. Note that if this field and auto_upgrade are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See the gcp.container.getEngineVersions data source's version_prefix field to approximate fuzzy versions in a provider-compatible way.

    Autoscaling NodePoolAutoscalingArgs

    Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Either total or per-zone node limits are required. Structure is documented below.

    Cluster string

    The cluster to create the node pool for. The cluster must be present in the location provided. May be specified in the format projects/{{project}}/locations/{{location}}/clusters/{{cluster}} or as just the name of the cluster.


    InitialNodeCount int

    The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use the ignoreChanges resource option to ignore subsequent changes to this field.

    InstanceGroupUrls []string

    The resource URLs of the managed instance groups associated with this node pool.

    Location string

    The location (region or zone) of the cluster.


    ManagedInstanceGroupUrls []string

    List of instance group URLs which have been assigned to this node pool.

    Management NodePoolManagementArgs

    Node management configuration, wherein auto-repair and auto-upgrade are configured. Structure is documented below.

    MaxPodsPerNode int

    The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.

    Name string

    The name of the node pool. If left blank, the provider will auto-generate a unique name.

    NamePrefix string

    Creates a unique name for the node pool beginning with the specified prefix. Conflicts with name.

    NetworkConfig NodePoolNetworkConfigArgs

    The network configuration of the pool, such as configuration for adding Pod IP address ranges to the node pool, or enabling private nodes. Structure is documented below.

    NodeConfig NodePoolNodeConfigArgs

    Parameters used in creating the node pool. See gcp.container.Cluster for schema.

    NodeCount int

    The number of nodes per instance group. This field can be used to update the number of nodes per instance group but should not be used alongside autoscaling.

    NodeLocations []string

    The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level node_locations will be used.

    Note: node_locations will not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster.

    Operation string
    PlacementPolicy NodePoolPlacementPolicyArgs

    Specifies a custom placement policy for the nodes.

    Project string

    The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.

    UpgradeSettings NodePoolUpgradeSettingsArgs

    Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.

    Version string

    The Kubernetes version for the nodes in this pool. Note that if this field and auto_upgrade are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See the gcp.container.getEngineVersions data source's version_prefix field to approximate fuzzy versions in a provider-compatible way.

    autoscaling NodePoolAutoscalingArgs

    Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Either total or per-zone node limits are required. Structure is documented below.

    cluster String

    The cluster to create the node pool for. The cluster must be present in the location provided. May be specified in the format projects/{{project}}/locations/{{location}}/clusters/{{cluster}} or as just the name of the cluster.


    initialNodeCount Integer

    The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use the ignoreChanges resource option to ignore subsequent changes to this field.

    instanceGroupUrls List<String>

    The resource URLs of the managed instance groups associated with this node pool.

    location String

    The location (region or zone) of the cluster.


    managedInstanceGroupUrls List<String>

    List of instance group URLs which have been assigned to this node pool.

    management NodePoolManagementArgs

    Node management configuration, wherein auto-repair and auto-upgrade are configured. Structure is documented below.

    maxPodsPerNode Integer

    The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.

    name String

    The name of the node pool. If left blank, the provider will auto-generate a unique name.

    namePrefix String

    Creates a unique name for the node pool beginning with the specified prefix. Conflicts with name.

    networkConfig NodePoolNetworkConfigArgs

    The network configuration of the pool, such as configuration for adding Pod IP address ranges to the node pool, or enabling private nodes. Structure is documented below.

    nodeConfig NodePoolNodeConfigArgs

    Parameters used in creating the node pool. See gcp.container.Cluster for schema.

    nodeCount Integer

    The number of nodes per instance group. This field can be used to update the number of nodes per instance group but should not be used alongside autoscaling.

    nodeLocations List<String>

    The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level node_locations will be used.

    Note: node_locations will not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster.

    operation String
    placementPolicy NodePoolPlacementPolicyArgs

    Specifies a custom placement policy for the nodes.

    project String

    The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.

    upgradeSettings NodePoolUpgradeSettingsArgs

    Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.

    version String

    The Kubernetes version for the nodes in this pool. Note that if this field and auto_upgrade are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See the gcp.container.getEngineVersions data source's version_prefix field to approximate fuzzy versions in a provider-compatible way.

    autoscaling NodePoolAutoscalingArgs

    Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Either total or per-zone node limits are required. Structure is documented below.

    cluster string

    The cluster to create the node pool for. The cluster must be present in the location provided. May be specified in the format projects/{{project}}/locations/{{location}}/clusters/{{cluster}} or as just the name of the cluster.


    initialNodeCount number

    The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use the ignoreChanges resource option to ignore subsequent changes to this field.

    instanceGroupUrls string[]

    The resource URLs of the managed instance groups associated with this node pool.

    location string

    The location (region or zone) of the cluster.


    managedInstanceGroupUrls string[]

    List of instance group URLs which have been assigned to this node pool.

    management NodePoolManagementArgs

    Node management configuration, wherein auto-repair and auto-upgrade are configured. Structure is documented below.

    maxPodsPerNode number

    The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.

    name string

    The name of the node pool. If left blank, the provider will auto-generate a unique name.

    namePrefix string

    Creates a unique name for the node pool beginning with the specified prefix. Conflicts with name.

    networkConfig NodePoolNetworkConfigArgs

    The network configuration of the pool, such as configuration for adding Pod IP address ranges to the node pool, or enabling private nodes. Structure is documented below.

    nodeConfig NodePoolNodeConfigArgs

    Parameters used in creating the node pool. See gcp.container.Cluster for schema.

    nodeCount number

    The number of nodes per instance group. This field can be used to update the number of nodes per instance group but should not be used alongside autoscaling.

    nodeLocations string[]

    The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level node_locations will be used.

    Note: node_locations will not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster.

    operation string
    placementPolicy NodePoolPlacementPolicyArgs

    Specifies a custom placement policy for the nodes.

    Note: the autoscaling block requires either total limits or per-zone limits to be set; its structure is documented below.

    project string

    The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.

    upgradeSettings NodePoolUpgradeSettingsArgs

    Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.

    version string

    The Kubernetes version for the nodes in this pool. Note that if this field and auto_upgrade are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See the gcp.container.getEngineVersions data source's version_prefix field to approximate fuzzy versions in a provider-compatible way.
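
    To make the version and initialNodeCount notes above concrete, here is a minimal TypeScript sketch. It assumes an existing cluster named my-cluster in us-central1 and a hypothetical 1.27. version prefix; it pins an explicit node version via the gcp.container.getEngineVersionsOutput data source and uses Pulumi's ignoreChanges resource option so out-of-band resizes do not force recreation:

    import * as gcp from "@pulumi/gcp";

    // Resolve an explicit node version from a prefix so the provider never
    // sees spurious diffs from a fuzzy version string.
    const versions = gcp.container.getEngineVersionsOutput({
        location: "us-central1",        // assumed cluster location
        versionPrefix: "1.27.",         // hypothetical version prefix
    });

    const pool = new gcp.container.NodePool("example-pool", {
        cluster: "my-cluster",          // assumed existing cluster
        initialNodeCount: 3,
        version: versions.latestNodeVersion,
    }, {
        // Manual resizes update initialNodeCount server-side; ignoring the
        // field prevents destroy/recreate on the next provider run.
        ignoreChanges: ["initialNodeCount"],
    });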

    autoscaling NodePoolAutoscalingArgs

    Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Structure is documented below.

    cluster str

    The cluster to create the node pool for. The cluster must be present in the provided location. May be specified in the format projects/{{project}}/locations/{{location}}/clusters/{{cluster}} or as just the name of the cluster.


    initial_node_count int

    The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use the ignoreChanges resource option to ignore subsequent changes to this field.

    instance_group_urls Sequence[str]

    The resource URLs of the managed instance groups associated with this node pool.

    location str

    The location (region or zone) of the cluster.


    managed_instance_group_urls Sequence[str]

    List of instance group URLs which have been assigned to this node pool.

    management NodePoolManagementArgs

    Node management configuration, wherein auto-repair and auto-upgrade is configured. Structure is documented below.

    max_pods_per_node int

    The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.

    name str

    The name of the node pool. If left blank, the provider will auto-generate a unique name.

    name_prefix str

    Creates a unique name for the node pool beginning with the specified prefix. Conflicts with name.

    network_config NodePoolNetworkConfigArgs

    The network configuration of the pool, such as configuration for adding Pod IP address ranges to the node pool or enabling private nodes. Structure is documented below.

    node_config NodePoolNodeConfigArgs

    Parameters used in creating the node pool. See gcp.container.Cluster for schema.

    node_count int

    The number of nodes per instance group. This field can be used to update the number of nodes per instance group but should not be used alongside autoscaling.

    node_locations Sequence[str]

    The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level node_locations will be used.

    Note: node_locations will not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster.

    operation str
    placement_policy NodePoolPlacementPolicyArgs

    Specifies a custom placement policy for the nodes.

    Note: the autoscaling block requires either total limits or per-zone limits to be set; its structure is documented below.

    project str

    The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.

    upgrade_settings NodePoolUpgradeSettingsArgs

    Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.

    version str

    The Kubernetes version for the nodes in this pool. Note that if this field and auto_upgrade are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See the gcp.container.getEngineVersions data source's version_prefix field to approximate fuzzy versions in a provider-compatible way.

    autoscaling Property Map

    Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Structure is documented below.

    cluster String

    The cluster to create the node pool for. The cluster must be present in the provided location. May be specified in the format projects/{{project}}/locations/{{location}}/clusters/{{cluster}} or as just the name of the cluster.


    initialNodeCount Number

    The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use the ignoreChanges resource option to ignore subsequent changes to this field.

    instanceGroupUrls List<String>

    The resource URLs of the managed instance groups associated with this node pool.

    location String

    The location (region or zone) of the cluster.


    managedInstanceGroupUrls List<String>

    List of instance group URLs which have been assigned to this node pool.

    management Property Map

    Node management configuration, wherein auto-repair and auto-upgrade is configured. Structure is documented below.

    maxPodsPerNode Number

    The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.

    name String

    The name of the node pool. If left blank, the provider will auto-generate a unique name.

    namePrefix String

    Creates a unique name for the node pool beginning with the specified prefix. Conflicts with name.

    networkConfig Property Map

    The network configuration of the pool, such as configuration for adding Pod IP address ranges to the node pool or enabling private nodes. Structure is documented below.

    nodeConfig Property Map

    Parameters used in creating the node pool. See gcp.container.Cluster for schema.

    nodeCount Number

    The number of nodes per instance group. This field can be used to update the number of nodes per instance group but should not be used alongside autoscaling.

    nodeLocations List<String>

    The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level node_locations will be used.

    Note: node_locations will not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster.

    operation String
    placementPolicy Property Map

    Specifies a custom placement policy for the nodes.

    Note: the autoscaling block requires either total limits or per-zone limits to be set; its structure is documented below.

    project String

    The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.

    upgradeSettings Property Map

    Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.

    version String

    The Kubernetes version for the nodes in this pool. Note that if this field and auto_upgrade are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See the gcp.container.getEngineVersions data source's version_prefix field to approximate fuzzy versions in a provider-compatible way.

    Supporting Types

    NodePoolAutoscaling

    LocationPolicy string

    Location policy specifies the algorithm used when scaling up the node pool. Location policy is supported only in 1.24.1+ clusters.

    • "BALANCED" - a best-effort policy that aims to balance the sizes of available zones.
    • "ANY" - instructs the cluster autoscaler to prioritize utilization of unused reservations and reduce preemption risk for Spot VMs.
    MaxNodeCount int

    Maximum number of nodes per zone in the NodePool. Must be >= min_node_count. Cannot be used with total limits.

    MinNodeCount int

    Minimum number of nodes per zone in the NodePool. Must be >=0 and <= max_node_count. Cannot be used with total limits.

    TotalMaxNodeCount int

    Total maximum number of nodes in the NodePool. Must be >= total_min_node_count. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.

    TotalMinNodeCount int

    Total minimum number of nodes in the NodePool. Must be >=0 and <= total_max_node_count. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.

    LocationPolicy string

    Location policy specifies the algorithm used when scaling up the node pool. Location policy is supported only in 1.24.1+ clusters.

    • "BALANCED" - a best-effort policy that aims to balance the sizes of available zones.
    • "ANY" - instructs the cluster autoscaler to prioritize utilization of unused reservations and reduce preemption risk for Spot VMs.
    MaxNodeCount int

    Maximum number of nodes per zone in the NodePool. Must be >= min_node_count. Cannot be used with total limits.

    MinNodeCount int

    Minimum number of nodes per zone in the NodePool. Must be >=0 and <= max_node_count. Cannot be used with total limits.

    TotalMaxNodeCount int

    Total maximum number of nodes in the NodePool. Must be >= total_min_node_count. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.

    TotalMinNodeCount int

    Total minimum number of nodes in the NodePool. Must be >=0 and <= total_max_node_count. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.

    locationPolicy String

    Location policy specifies the algorithm used when scaling up the node pool. Location policy is supported only in 1.24.1+ clusters.

    • "BALANCED" - a best-effort policy that aims to balance the sizes of available zones.
    • "ANY" - instructs the cluster autoscaler to prioritize utilization of unused reservations and reduce preemption risk for Spot VMs.
    maxNodeCount Integer

    Maximum number of nodes per zone in the NodePool. Must be >= min_node_count. Cannot be used with total limits.

    minNodeCount Integer

    Minimum number of nodes per zone in the NodePool. Must be >=0 and <= max_node_count. Cannot be used with total limits.

    totalMaxNodeCount Integer

    Total maximum number of nodes in the NodePool. Must be >= total_min_node_count. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.

    totalMinNodeCount Integer

    Total minimum number of nodes in the NodePool. Must be >=0 and <= total_max_node_count. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.

    locationPolicy string

    Location policy specifies the algorithm used when scaling up the node pool. Location policy is supported only in 1.24.1+ clusters.

    • "BALANCED" - a best-effort policy that aims to balance the sizes of available zones.
    • "ANY" - instructs the cluster autoscaler to prioritize utilization of unused reservations and reduce preemption risk for Spot VMs.
    maxNodeCount number

    Maximum number of nodes per zone in the NodePool. Must be >= min_node_count. Cannot be used with total limits.

    minNodeCount number

    Minimum number of nodes per zone in the NodePool. Must be >=0 and <= max_node_count. Cannot be used with total limits.

    totalMaxNodeCount number

    Total maximum number of nodes in the NodePool. Must be >= total_min_node_count. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.

    totalMinNodeCount number

    Total minimum number of nodes in the NodePool. Must be >=0 and <= total_max_node_count. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.

    location_policy str

    Location policy specifies the algorithm used when scaling up the node pool. Location policy is supported only in 1.24.1+ clusters.

    • "BALANCED" - a best-effort policy that aims to balance the sizes of available zones.
    • "ANY" - instructs the cluster autoscaler to prioritize utilization of unused reservations and reduce preemption risk for Spot VMs.
    max_node_count int

    Maximum number of nodes per zone in the NodePool. Must be >= min_node_count. Cannot be used with total limits.

    min_node_count int

    Minimum number of nodes per zone in the NodePool. Must be >=0 and <= max_node_count. Cannot be used with total limits.

    total_max_node_count int

    Total maximum number of nodes in the NodePool. Must be >= total_min_node_count. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.

    total_min_node_count int

    Total minimum number of nodes in the NodePool. Must be >=0 and <= total_max_node_count. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.

    locationPolicy String

    Location policy specifies the algorithm used when scaling up the node pool. Location policy is supported only in 1.24.1+ clusters.

    • "BALANCED" - a best-effort policy that aims to balance the sizes of available zones.
    • "ANY" - instructs the cluster autoscaler to prioritize utilization of unused reservations and reduce preemption risk for Spot VMs.
    maxNodeCount Number

    Maximum number of nodes per zone in the NodePool. Must be >= min_node_count. Cannot be used with total limits.

    minNodeCount Number

    Minimum number of nodes per zone in the NodePool. Must be >=0 and <= max_node_count. Cannot be used with total limits.

    totalMaxNodeCount Number

    Total maximum number of nodes in the NodePool. Must be >= total_min_node_count. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.

    totalMinNodeCount Number

    Total minimum number of nodes in the NodePool. Must be >=0 and <= total_max_node_count. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.
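
    As a sketch of the two mutually exclusive autoscaling shapes in TypeScript (assuming an existing cluster named my-cluster; the limits are illustrative):

    import * as gcp from "@pulumi/gcp";

    // Per-zone limits: the autoscaler keeps each zone between 1 and 5 nodes.
    const zonalPool = new gcp.container.NodePool("zonal-pool", {
        cluster: "my-cluster",
        autoscaling: {
            minNodeCount: 1,
            maxNodeCount: 5,
        },
    });

    // Total limits across all zones (1.24.1+ clusters); cannot be combined
    // with the per-zone limits above.
    const totalPool = new gcp.container.NodePool("total-pool", {
        cluster: "my-cluster",
        autoscaling: {
            totalMinNodeCount: 1,
            totalMaxNodeCount: 10,
            locationPolicy: "BALANCED",
        },
    });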

    NodePoolManagement

    AutoRepair bool

    Whether the nodes will be automatically repaired.

    AutoUpgrade bool

    Whether the nodes will be automatically upgraded.

    AutoRepair bool

    Whether the nodes will be automatically repaired.

    AutoUpgrade bool

    Whether the nodes will be automatically upgraded.

    autoRepair Boolean

    Whether the nodes will be automatically repaired.

    autoUpgrade Boolean

    Whether the nodes will be automatically upgraded.

    autoRepair boolean

    Whether the nodes will be automatically repaired.

    autoUpgrade boolean

    Whether the nodes will be automatically upgraded.

    auto_repair bool

    Whether the nodes will be automatically repaired.

    auto_upgrade bool

    Whether the nodes will be automatically upgraded.

    autoRepair Boolean

    Whether the nodes will be automatically repaired.

    autoUpgrade Boolean

    Whether the nodes will be automatically upgraded.
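
    A minimal sketch enabling both management flags (the cluster name is an assumption):

    import * as gcp from "@pulumi/gcp";

    const managedPool = new gcp.container.NodePool("managed-pool", {
        cluster: "my-cluster",      // assumed existing cluster
        nodeCount: 2,
        management: {
            autoRepair: true,       // GKE recreates unhealthy nodes
            autoUpgrade: true,      // GKE upgrades nodes automatically
        },
    });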

    NodePoolNetworkConfig

    CreatePodRange bool

    Whether to create a new range for pod IPs in this node pool. Defaults are provided for pod_range and pod_ipv4_cidr_block if they are not specified.

    EnablePrivateNodes bool

    Whether nodes have internal IP addresses only.

    PodCidrOverprovisionConfig NodePoolNetworkConfigPodCidrOverprovisionConfig
    PodIpv4CidrBlock string

    The IP address range for pod IPs in this node pool. Only applicable if createPodRange is true. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) to pick a specific range to use.

    PodRange string

    The ID of the secondary range for pod IPs. If create_pod_range is true, this ID is used for the new range. If create_pod_range is false, uses an existing secondary range with this ID.

    CreatePodRange bool

    Whether to create a new range for pod IPs in this node pool. Defaults are provided for pod_range and pod_ipv4_cidr_block if they are not specified.

    EnablePrivateNodes bool

    Whether nodes have internal IP addresses only.

    PodCidrOverprovisionConfig NodePoolNetworkConfigPodCidrOverprovisionConfig
    PodIpv4CidrBlock string

    The IP address range for pod IPs in this node pool. Only applicable if createPodRange is true. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) to pick a specific range to use.

    PodRange string

    The ID of the secondary range for pod IPs. If create_pod_range is true, this ID is used for the new range. If create_pod_range is false, uses an existing secondary range with this ID.

    createPodRange Boolean

    Whether to create a new range for pod IPs in this node pool. Defaults are provided for pod_range and pod_ipv4_cidr_block if they are not specified.

    enablePrivateNodes Boolean

    Whether nodes have internal IP addresses only.

    podCidrOverprovisionConfig NodePoolNetworkConfigPodCidrOverprovisionConfig
    podIpv4CidrBlock String

    The IP address range for pod IPs in this node pool. Only applicable if createPodRange is true. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) to pick a specific range to use.

    podRange String

    The ID of the secondary range for pod IPs. If create_pod_range is true, this ID is used for the new range. If create_pod_range is false, uses an existing secondary range with this ID.

    createPodRange boolean

    Whether to create a new range for pod IPs in this node pool. Defaults are provided for pod_range and pod_ipv4_cidr_block if they are not specified.

    enablePrivateNodes boolean

    Whether nodes have internal IP addresses only.

    podCidrOverprovisionConfig NodePoolNetworkConfigPodCidrOverprovisionConfig
    podIpv4CidrBlock string

    The IP address range for pod IPs in this node pool. Only applicable if createPodRange is true. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) to pick a specific range to use.

    podRange string

    The ID of the secondary range for pod IPs. If create_pod_range is true, this ID is used for the new range. If create_pod_range is false, uses an existing secondary range with this ID.

    create_pod_range bool

    Whether to create a new range for pod IPs in this node pool. Defaults are provided for pod_range and pod_ipv4_cidr_block if they are not specified.

    enable_private_nodes bool

    Whether nodes have internal IP addresses only.

    pod_cidr_overprovision_config NodePoolNetworkConfigPodCidrOverprovisionConfig
    pod_ipv4_cidr_block str

    The IP address range for pod IPs in this node pool. Only applicable if createPodRange is true. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) to pick a specific range to use.

    pod_range str

    The ID of the secondary range for pod IPs. If create_pod_range is true, this ID is used for the new range. If create_pod_range is false, uses an existing secondary range with this ID.

    createPodRange Boolean

    Whether to create a new range for pod IPs in this node pool. Defaults are provided for pod_range and pod_ipv4_cidr_block if they are not specified.

    enablePrivateNodes Boolean

    Whether nodes have internal IP addresses only.

    podCidrOverprovisionConfig Property Map
    podIpv4CidrBlock String

    The IP address range for pod IPs in this node pool. Only applicable if createPodRange is true. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) to pick a specific range to use.

    podRange String

    The ID of the secondary range for pod IPs. If create_pod_range is true, this ID is used for the new range. If create_pod_range is false, uses an existing secondary range with this ID.
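
    A sketch of a pool that creates its own secondary range for Pod IPs and keeps nodes private; the cluster is assumed to be VPC-native, and the range name and CIDR are illustrative:

    import * as gcp from "@pulumi/gcp";

    const privatePool = new gcp.container.NodePool("private-pool", {
        cluster: "my-cluster",              // assumed existing VPC-native cluster
        nodeCount: 1,
        networkConfig: {
            createPodRange: true,
            podRange: "pods-range",         // hypothetical ID for the new range
            podIpv4CidrBlock: "10.96.0.0/14",
            enablePrivateNodes: true,       // nodes get internal IPs only
        },
    });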

    NodePoolNetworkConfigPodCidrOverprovisionConfig

    disabled Boolean
    disabled boolean
    disabled Boolean

    NodePoolNodeConfig

    AdvancedMachineFeatures NodePoolNodeConfigAdvancedMachineFeatures
    BootDiskKmsKey string
    DiskSizeGb int
    DiskType string
    EphemeralStorageConfig NodePoolNodeConfigEphemeralStorageConfig
    EphemeralStorageLocalSsdConfig NodePoolNodeConfigEphemeralStorageLocalSsdConfig
    GcfsConfig NodePoolNodeConfigGcfsConfig
    GuestAccelerators List<NodePoolNodeConfigGuestAccelerator>
    Gvnic NodePoolNodeConfigGvnic
    ImageType string
    KubeletConfig NodePoolNodeConfigKubeletConfig
    Labels Dictionary<string, string>
    LinuxNodeConfig NodePoolNodeConfigLinuxNodeConfig
    LocalNvmeSsdBlockConfig NodePoolNodeConfigLocalNvmeSsdBlockConfig
    LocalSsdCount int
    LoggingVariant string
    MachineType string
    Metadata Dictionary<string, string>
    MinCpuPlatform string
    NodeGroup string
    OauthScopes List<string>
    Preemptible bool
    ReservationAffinity NodePoolNodeConfigReservationAffinity
    ResourceLabels Dictionary<string, string>
    SandboxConfig NodePoolNodeConfigSandboxConfig
    ServiceAccount string
    ShieldedInstanceConfig NodePoolNodeConfigShieldedInstanceConfig
    Spot bool
    Tags List<string>
    Taints List<NodePoolNodeConfigTaint>
    WorkloadMetadataConfig NodePoolNodeConfigWorkloadMetadataConfig
    AdvancedMachineFeatures NodePoolNodeConfigAdvancedMachineFeatures
    BootDiskKmsKey string
    DiskSizeGb int
    DiskType string
    EphemeralStorageConfig NodePoolNodeConfigEphemeralStorageConfig
    EphemeralStorageLocalSsdConfig NodePoolNodeConfigEphemeralStorageLocalSsdConfig
    GcfsConfig NodePoolNodeConfigGcfsConfig
    GuestAccelerators []NodePoolNodeConfigGuestAccelerator
    Gvnic NodePoolNodeConfigGvnic
    ImageType string
    KubeletConfig NodePoolNodeConfigKubeletConfig
    Labels map[string]string
    LinuxNodeConfig NodePoolNodeConfigLinuxNodeConfig
    LocalNvmeSsdBlockConfig NodePoolNodeConfigLocalNvmeSsdBlockConfig
    LocalSsdCount int
    LoggingVariant string
    MachineType string
    Metadata map[string]string
    MinCpuPlatform string
    NodeGroup string
    OauthScopes []string
    Preemptible bool
    ReservationAffinity NodePoolNodeConfigReservationAffinity
    ResourceLabels map[string]string
    SandboxConfig NodePoolNodeConfigSandboxConfig
    ServiceAccount string
    ShieldedInstanceConfig NodePoolNodeConfigShieldedInstanceConfig
    Spot bool
    Tags []string
    Taints []NodePoolNodeConfigTaint
    WorkloadMetadataConfig NodePoolNodeConfigWorkloadMetadataConfig
    advancedMachineFeatures NodePoolNodeConfigAdvancedMachineFeatures
    bootDiskKmsKey String
    diskSizeGb Integer
    diskType String
    ephemeralStorageConfig NodePoolNodeConfigEphemeralStorageConfig
    ephemeralStorageLocalSsdConfig NodePoolNodeConfigEphemeralStorageLocalSsdConfig
    gcfsConfig NodePoolNodeConfigGcfsConfig
    guestAccelerators List<NodePoolNodeConfigGuestAccelerator>
    gvnic NodePoolNodeConfigGvnic
    imageType String
    kubeletConfig NodePoolNodeConfigKubeletConfig
    labels Map<String,String>
    linuxNodeConfig NodePoolNodeConfigLinuxNodeConfig
    localNvmeSsdBlockConfig NodePoolNodeConfigLocalNvmeSsdBlockConfig
    localSsdCount Integer
    loggingVariant String
    machineType String
    metadata Map<String,String>
    minCpuPlatform String
    nodeGroup String
    oauthScopes List<String>
    preemptible Boolean
    reservationAffinity NodePoolNodeConfigReservationAffinity
    resourceLabels Map<String,String>
    sandboxConfig NodePoolNodeConfigSandboxConfig
    serviceAccount String
    shieldedInstanceConfig NodePoolNodeConfigShieldedInstanceConfig
    spot Boolean
    tags List<String>
    taints List<NodePoolNodeConfigTaint>
    workloadMetadataConfig NodePoolNodeConfigWorkloadMetadataConfig
    advancedMachineFeatures NodePoolNodeConfigAdvancedMachineFeatures
    bootDiskKmsKey string
    diskSizeGb number
    diskType string
    ephemeralStorageConfig NodePoolNodeConfigEphemeralStorageConfig
    ephemeralStorageLocalSsdConfig NodePoolNodeConfigEphemeralStorageLocalSsdConfig
    gcfsConfig NodePoolNodeConfigGcfsConfig
    guestAccelerators NodePoolNodeConfigGuestAccelerator[]
    gvnic NodePoolNodeConfigGvnic
    imageType string
    kubeletConfig NodePoolNodeConfigKubeletConfig
    labels {[key: string]: string}
    linuxNodeConfig NodePoolNodeConfigLinuxNodeConfig
    localNvmeSsdBlockConfig NodePoolNodeConfigLocalNvmeSsdBlockConfig
    localSsdCount number
    loggingVariant string
    machineType string
    metadata {[key: string]: string}
    minCpuPlatform string
    nodeGroup string
    oauthScopes string[]
    preemptible boolean
    reservationAffinity NodePoolNodeConfigReservationAffinity
    resourceLabels {[key: string]: string}
    sandboxConfig NodePoolNodeConfigSandboxConfig
    serviceAccount string
    shieldedInstanceConfig NodePoolNodeConfigShieldedInstanceConfig
    spot boolean
    tags string[]
    taints NodePoolNodeConfigTaint[]
    workloadMetadataConfig NodePoolNodeConfigWorkloadMetadataConfig
    advanced_machine_features NodePoolNodeConfigAdvancedMachineFeatures
    boot_disk_kms_key str
    disk_size_gb int
    disk_type str
    ephemeral_storage_config NodePoolNodeConfigEphemeralStorageConfig
    ephemeral_storage_local_ssd_config NodePoolNodeConfigEphemeralStorageLocalSsdConfig
    gcfs_config NodePoolNodeConfigGcfsConfig
    guest_accelerators Sequence[NodePoolNodeConfigGuestAccelerator]
    gvnic NodePoolNodeConfigGvnic
    image_type str
    kubelet_config NodePoolNodeConfigKubeletConfig
    labels Mapping[str, str]
    linux_node_config NodePoolNodeConfigLinuxNodeConfig
    local_nvme_ssd_block_config NodePoolNodeConfigLocalNvmeSsdBlockConfig
    local_ssd_count int
    logging_variant str
    machine_type str
    metadata Mapping[str, str]
    min_cpu_platform str
    node_group str
    oauth_scopes Sequence[str]
    preemptible bool
    reservation_affinity NodePoolNodeConfigReservationAffinity
    resource_labels Mapping[str, str]
    sandbox_config NodePoolNodeConfigSandboxConfig
    service_account str
    shielded_instance_config NodePoolNodeConfigShieldedInstanceConfig
    spot bool
    tags Sequence[str]
    taints Sequence[NodePoolNodeConfigTaint]
    workload_metadata_config NodePoolNodeConfigWorkloadMetadataConfig

    NodePoolNodeConfigAdvancedMachineFeatures

    NodePoolNodeConfigEphemeralStorageConfig

    NodePoolNodeConfigEphemeralStorageLocalSsdConfig

    NodePoolNodeConfigGcfsConfig

    Enabled bool
    Enabled bool
    enabled Boolean
    enabled boolean
    enabled bool
    enabled Boolean

    NodePoolNodeConfigGuestAccelerator

    Count int
    Type string

    The type of GPU accelerator to attach to the nodes, specified as an accelerator type resource name (for example, nvidia-tesla-t4).

    GpuPartitionSize string
    GpuSharingConfig NodePoolNodeConfigGuestAcceleratorGpuSharingConfig
    Count int
    Type string

    The type of GPU accelerator to attach to the nodes, specified as an accelerator type resource name (for example, nvidia-tesla-t4).

    GpuPartitionSize string
    GpuSharingConfig NodePoolNodeConfigGuestAcceleratorGpuSharingConfig
    count Integer
    type String

    The type of GPU accelerator to attach to the nodes, specified as an accelerator type resource name (for example, nvidia-tesla-t4).

    gpuPartitionSize String
    gpuSharingConfig NodePoolNodeConfigGuestAcceleratorGpuSharingConfig
    count number
    type string

    The type of GPU accelerator to attach to the nodes, specified as an accelerator type resource name (for example, nvidia-tesla-t4).

    gpuPartitionSize string
    gpuSharingConfig NodePoolNodeConfigGuestAcceleratorGpuSharingConfig
    count int
    type str

    The type of GPU accelerator to attach to the nodes, specified as an accelerator type resource name (for example, nvidia-tesla-t4).

    gpu_partition_size str
    gpu_sharing_config NodePoolNodeConfigGuestAcceleratorGpuSharingConfig
    count Number
    type String

    The type of GPU accelerator to attach to the nodes, specified as an accelerator type resource name (for example, nvidia-tesla-t4).

    gpuPartitionSize String
    gpuSharingConfig Property Map
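
    A sketch attaching a GPU to each node; the accelerator and machine types are assumptions, so check availability in your region:

    import * as gcp from "@pulumi/gcp";

    const gpuPool = new gcp.container.NodePool("gpu-pool", {
        cluster: "my-cluster",              // assumed existing cluster
        nodeCount: 1,
        nodeConfig: {
            machineType: "n1-standard-4",   // assumed GPU-compatible machine type
            guestAccelerators: [{
                type: "nvidia-tesla-t4",    // accelerator type resource name
                count: 1,
            }],
        },
    });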

    NodePoolNodeConfigGuestAcceleratorGpuSharingConfig

    NodePoolNodeConfigGvnic

    Enabled bool
    Enabled bool
    enabled Boolean
    enabled boolean
    enabled bool
    enabled Boolean

    NodePoolNodeConfigKubeletConfig

    NodePoolNodeConfigLinuxNodeConfig

    Sysctls Dictionary<string, string>
    Sysctls map[string]string
    sysctls Map<String,String>
    sysctls {[key: string]: string}
    sysctls Mapping[str, str]
    sysctls Map<String>
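
    The sysctls map takes kernel parameter names to string values. A short sketch, assuming my-cluster exists and that net.core.somaxconn is on GKE's allowlist of tunable sysctls:

    import * as gcp from "@pulumi/gcp";

    const tunedPool = new gcp.container.NodePool("tuned-pool", {
        cluster: "my-cluster",                     // assumed existing cluster
        nodeCount: 1,
        nodeConfig: {
            linuxNodeConfig: {
                // Keys are kernel parameter names; values are strings.
                sysctls: {
                    "net.core.somaxconn": "1024",  // hypothetical tuning value
                },
            },
        },
    });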

    NodePoolNodeConfigLocalNvmeSsdBlockConfig

    NodePoolNodeConfigReservationAffinity

    ConsumeReservationType string
    Key string
    Values List<string>
    ConsumeReservationType string
    Key string
    Values []string
    consumeReservationType String
    key String
    values List<String>
    consumeReservationType string
    key string
    values string[]
    consume_reservation_type str
    key str
    values Sequence[str]
    consumeReservationType String
    key String
    values List<String>

    NodePoolNodeConfigSandboxConfig

    NodePoolNodeConfigShieldedInstanceConfig

    NodePoolNodeConfigTaint

    Effect string
    Key string
    Value string
    Effect string
    Key string
    Value string
    effect String
    key String
    value String
    effect string
    key string
    value string
    effect str
    key str
    value str
    effect String
    key String
    value String
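
    A sketch of a dedicated pool; only pods with a matching toleration schedule onto it (the taint key and value are hypothetical):

    import * as gcp from "@pulumi/gcp";

    const taintedPool = new gcp.container.NodePool("tainted-pool", {
        cluster: "my-cluster",          // assumed existing cluster
        nodeCount: 1,
        nodeConfig: {
            taints: [{
                key: "dedicated",       // hypothetical taint key
                value: "batch",
                effect: "NO_SCHEDULE",
            }],
        },
    });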

    NodePoolNodeConfigWorkloadMetadataConfig

    Mode string
    Mode string
    mode String
    mode string
    mode str
    mode String

    NodePoolPlacementPolicy

    Type string

    The type of the placement policy. Supports a single value: COMPACT. Specifying the COMPACT placement policy type places the node pool's nodes in closer physical proximity in order to reduce network latency between them.

    Type string

    The type of the placement policy. Supports a single value: COMPACT. Specifying the COMPACT placement policy type places the node pool's nodes in closer physical proximity in order to reduce network latency between them.

    type String

    The type of the placement policy. Supports a single value: COMPACT. Specifying the COMPACT placement policy type places the node pool's nodes in closer physical proximity in order to reduce network latency between them.

    type string

    The type of the placement policy. Supports a single value: COMPACT. Specifying the COMPACT placement policy type places the node pool's nodes in closer physical proximity in order to reduce network latency between them.

    type str

    The type of the placement policy. Supports a single value: COMPACT. Specifying the COMPACT placement policy type places the node pool's nodes in closer physical proximity in order to reduce network latency between them.

    type String

    The type of the placement policy. Supports a single value: COMPACT. Specifying the COMPACT placement policy type places the node pool's nodes in closer physical proximity in order to reduce network latency between them.
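
    A sketch requesting compact placement (the cluster name is an assumption; COMPACT typically requires a supported machine family):

    import * as gcp from "@pulumi/gcp";

    const compactPool = new gcp.container.NodePool("compact-pool", {
        cluster: "my-cluster",      // assumed existing cluster
        nodeCount: 3,
        placementPolicy: {
            type: "COMPACT",        // co-locate nodes to cut inter-node latency
        },
    });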

    NodePoolUpgradeSettings

    BlueGreenSettings NodePoolUpgradeSettingsBlueGreenSettings

    The settings to adjust blue-green upgrades. Structure is documented below.

    MaxSurge int

    The number of additional nodes that can be added to the node pool during an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater.

    MaxUnavailable int

    The number of nodes that can be simultaneously unavailable during an upgrade. Increasing max_unavailable raises the number of nodes that can be upgraded in parallel. Can be set to 0 or greater.

    max_surge and max_unavailable must not be negative and at least one of them must be greater than zero.

    Strategy string

    The upgrade strategy to be used for upgrading the nodes.

    BlueGreenSettings NodePoolUpgradeSettingsBlueGreenSettings

    The settings to adjust blue-green upgrades. Structure is documented below.

    MaxSurge int

    The number of additional nodes that can be added to the node pool during an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater.

    MaxUnavailable int

    The number of nodes that can be simultaneously unavailable during an upgrade. Increasing max_unavailable raises the number of nodes that can be upgraded in parallel. Can be set to 0 or greater.

    max_surge and max_unavailable must not be negative and at least one of them must be greater than zero.

    Strategy string

    The upgrade strategy to be used for upgrading the nodes.

    blueGreenSettings NodePoolUpgradeSettingsBlueGreenSettings

    The settings to adjust blue-green upgrades. Structure is documented below.

    maxSurge Integer

    The number of additional nodes that can be added to the node pool during an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater.

    maxUnavailable Integer

    The number of nodes that can be simultaneously unavailable during an upgrade. Increasing max_unavailable raises the number of nodes that can be upgraded in parallel. Can be set to 0 or greater.

    max_surge and max_unavailable must not be negative and at least one of them must be greater than zero.

    strategy String

    The upgrade strategy to be used for upgrading the nodes.

    blueGreenSettings NodePoolUpgradeSettingsBlueGreenSettings

    The settings to adjust blue-green upgrades. Structure is documented below.

    maxSurge number

    The number of additional nodes that can be added to the node pool during an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater.

    maxUnavailable number

    The number of nodes that can be simultaneously unavailable during an upgrade. Increasing max_unavailable raises the number of nodes that can be upgraded in parallel. Can be set to 0 or greater.

    max_surge and max_unavailable must not be negative and at least one of them must be greater than zero.

    strategy string

    The upgrade strategy to be used for upgrading the nodes.

    blue_green_settings NodePoolUpgradeSettingsBlueGreenSettings

    The settings to adjust blue-green upgrades. Structure is documented below.

    max_surge int

    The number of additional nodes that can be added to the node pool during an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater.

    max_unavailable int

    The number of nodes that can be simultaneously unavailable during an upgrade. Increasing max_unavailable raises the number of nodes that can be upgraded in parallel. Can be set to 0 or greater.

    max_surge and max_unavailable must not be negative and at least one of them must be greater than zero.

    strategy str

    The upgrade strategy to be used for upgrading the nodes.

    blueGreenSettings Property Map

    The settings to adjust blue-green upgrades. Structure is documented below.

    maxSurge Number

    The number of additional nodes that can be added to the node pool during an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater.

    maxUnavailable Number

    The number of nodes that can be simultaneously unavailable during an upgrade. Increasing max_unavailable raises the number of nodes that can be upgraded in parallel. Can be set to 0 or greater.

    max_surge and max_unavailable must not be negative and at least one of them must be greater than zero.

    strategy String

    The upgrade strategy to be used for upgrading the nodes.
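
    A sketch of a conservative surge upgrade: one spare node is created and no node goes offline before its replacement is ready (the cluster name is an assumption):

    import * as gcp from "@pulumi/gcp";

    const surgePool = new gcp.container.NodePool("surge-pool", {
        cluster: "my-cluster",      // assumed existing cluster
        nodeCount: 4,
        upgradeSettings: {
            maxSurge: 1,            // at most one extra node during the upgrade
            maxUnavailable: 0,      // at least one of the two must be > 0
        },
    });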

    NodePoolUpgradeSettingsBlueGreenSettings

    StandardRolloutPolicy NodePoolUpgradeSettingsBlueGreenSettingsStandardRolloutPolicy

    Specifies the standard policy settings for blue-green upgrades.

    NodePoolSoakDuration string

    Time needed after draining the entire blue pool. After this period, the blue pool will be cleaned up.

    StandardRolloutPolicy NodePoolUpgradeSettingsBlueGreenSettingsStandardRolloutPolicy

    Specifies the standard policy settings for blue-green upgrades.

    NodePoolSoakDuration string

    Time needed after draining the entire blue pool. After this period, the blue pool will be cleaned up.

    standardRolloutPolicy NodePoolUpgradeSettingsBlueGreenSettingsStandardRolloutPolicy

    Specifies the standard policy settings for blue-green upgrades.

    nodePoolSoakDuration String

    Time needed after draining the entire blue pool. After this period, the blue pool will be cleaned up.

    standardRolloutPolicy NodePoolUpgradeSettingsBlueGreenSettingsStandardRolloutPolicy

    Specifies the standard policy settings for blue-green upgrades.

    nodePoolSoakDuration string

    Time needed after draining the entire blue pool. After this period, the blue pool will be cleaned up.

    standard_rollout_policy NodePoolUpgradeSettingsBlueGreenSettingsStandardRolloutPolicy

    Specifies the standard policy settings for blue-green upgrades.

    node_pool_soak_duration str

    Time needed after draining the entire blue pool. After this period, the blue pool will be cleaned up.

    standardRolloutPolicy Property Map

    Specifies the standard policy settings for blue-green upgrades.

    nodePoolSoakDuration String

    Time needed after draining the entire blue pool. After this period, the blue pool will be cleaned up.

    NodePoolUpgradeSettingsBlueGreenSettingsStandardRolloutPolicy

    BatchNodeCount int

    Number of blue nodes to drain in a batch.

    BatchPercentage double

    Percentage of the blue pool nodes to drain in a batch.

    BatchSoakDuration string

    Soak time after each batch gets drained.

    BatchNodeCount int

    Number of blue nodes to drain in a batch.

    BatchPercentage float64

    Percentage of the blue pool nodes to drain in a batch.

    BatchSoakDuration string

    Soak time after each batch gets drained.

    batchNodeCount Integer

    Number of blue nodes to drain in a batch.

    batchPercentage Double

    Percentage of the blue pool nodes to drain in a batch.

    batchSoakDuration String

    Soak time after each batch gets drained.

    batchNodeCount number

    Number of blue nodes to drain in a batch.

    batchPercentage number

    Percentage of the blue pool nodes to drain in a batch.

    batchSoakDuration string

    Soak time after each batch gets drained.

    batch_node_count int

    Number of blue nodes to drain in a batch.

    batch_percentage float

    Percentage of the blue pool nodes to drain in a batch.

    batch_soak_duration str

    Soak time after each batch gets drained.

    batchNodeCount Number

    Number of blue nodes to drain in a batch.

    batchPercentage Number

    Percentage of the blue pool nodes to drain in a batch.

    batchSoakDuration String

    Soak time after each batch gets drained.
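
    Putting the blue-green pieces together, a sketch that drains one blue node per batch; the cluster name, batch size, and soak durations are illustrative assumptions:

    import * as gcp from "@pulumi/gcp";

    const blueGreenPool = new gcp.container.NodePool("bg-pool", {
        cluster: "my-cluster",                  // assumed existing cluster
        nodeCount: 4,
        upgradeSettings: {
            strategy: "BLUE_GREEN",
            blueGreenSettings: {
                standardRolloutPolicy: {
                    batchNodeCount: 1,          // drain one blue node per batch
                    batchSoakDuration: "300s",  // assumed 5-minute soak per batch
                },
                nodePoolSoakDuration: "1800s",  // assumed 30-minute final soak
            },
        },
    });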

    Import

    Node pools can be imported using the project, location, cluster, and name. If the project is omitted, the project value in the provider configuration will be used.

    Examples:

     $ pulumi import gcp:container/nodePool:NodePool mainpool my-gcp-project/us-east1-a/my-cluster/main-pool
    
     $ pulumi import gcp:container/nodePool:NodePool mainpool us-east1/my-cluster/main-pool
    

    Package Details

    Repository
    Google Cloud (GCP) Classic pulumi/pulumi-gcp
    License
    Apache-2.0
    Notes

    This Pulumi package is based on the google-beta Terraform Provider.
