gcp.container.NodePool
Manages a node pool in a Google Kubernetes Engine (GKE) cluster separately from the cluster control plane. For more information see the official documentation and the API reference.
Example Usage
Using A Separately Managed Node Pool (Recommended)
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() =>
{
var @default = new Gcp.ServiceAccount.Account("default", new()
{
AccountId = "service-account-id",
DisplayName = "Service Account",
});
var primary = new Gcp.Container.Cluster("primary", new()
{
Location = "us-central1",
RemoveDefaultNodePool = true,
InitialNodeCount = 1,
});
var primaryPreemptibleNodes = new Gcp.Container.NodePool("primaryPreemptibleNodes", new()
{
Cluster = primary.Id,
NodeCount = 1,
NodeConfig = new Gcp.Container.Inputs.NodePoolNodeConfigArgs
{
Preemptible = true,
MachineType = "e2-medium",
ServiceAccount = @default.Email,
OauthScopes = new[]
{
"https://www.googleapis.com/auth/cloud-platform",
},
},
});
});
package main
import (
"github.com/pulumi/pulumi-gcp/sdk/v6/go/gcp/container"
"github.com/pulumi/pulumi-gcp/sdk/v6/go/gcp/serviceAccount"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_default, err := serviceAccount.NewAccount(ctx, "default", &serviceAccount.AccountArgs{
AccountId: pulumi.String("service-account-id"),
DisplayName: pulumi.String("Service Account"),
})
if err != nil {
return err
}
primary, err := container.NewCluster(ctx, "primary", &container.ClusterArgs{
Location: pulumi.String("us-central1"),
RemoveDefaultNodePool: pulumi.Bool(true),
InitialNodeCount: pulumi.Int(1),
})
if err != nil {
return err
}
_, err = container.NewNodePool(ctx, "primaryPreemptibleNodes", &container.NodePoolArgs{
Cluster: primary.ID(),
NodeCount: pulumi.Int(1),
NodeConfig: &container.NodePoolNodeConfigArgs{
Preemptible: pulumi.Bool(true),
MachineType: pulumi.String("e2-medium"),
ServiceAccount: _default.Email,
OauthScopes: pulumi.StringArray{
pulumi.String("https://www.googleapis.com/auth/cloud-platform"),
},
},
})
if err != nil {
return err
}
return nil
})
}
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.serviceAccount.Account;
import com.pulumi.gcp.serviceAccount.AccountArgs;
import com.pulumi.gcp.container.Cluster;
import com.pulumi.gcp.container.ClusterArgs;
import com.pulumi.gcp.container.NodePool;
import com.pulumi.gcp.container.NodePoolArgs;
import com.pulumi.gcp.container.inputs.NodePoolNodeConfigArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var default_ = new Account("default", AccountArgs.builder()
.accountId("service-account-id")
.displayName("Service Account")
.build());
var primary = new Cluster("primary", ClusterArgs.builder()
.location("us-central1")
.removeDefaultNodePool(true)
.initialNodeCount(1)
.build());
var primaryPreemptibleNodes = new NodePool("primaryPreemptibleNodes", NodePoolArgs.builder()
.cluster(primary.id())
.nodeCount(1)
.nodeConfig(NodePoolNodeConfigArgs.builder()
.preemptible(true)
.machineType("e2-medium")
.serviceAccount(default_.email())
.oauthScopes("https://www.googleapis.com/auth/cloud-platform")
.build())
.build());
}
}
import pulumi
import pulumi_gcp as gcp
default = gcp.service_account.Account("default",
account_id="service-account-id",
display_name="Service Account")
primary = gcp.container.Cluster("primary",
location="us-central1",
remove_default_node_pool=True,
initial_node_count=1)
primary_preemptible_nodes = gcp.container.NodePool("primaryPreemptibleNodes",
cluster=primary.id,
node_count=1,
node_config=gcp.container.NodePoolNodeConfigArgs(
preemptible=True,
machine_type="e2-medium",
service_account=default.email,
oauth_scopes=["https://www.googleapis.com/auth/cloud-platform"],
))
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const _default = new gcp.serviceaccount.Account("default", {
accountId: "service-account-id",
displayName: "Service Account",
});
const primary = new gcp.container.Cluster("primary", {
location: "us-central1",
removeDefaultNodePool: true,
initialNodeCount: 1,
});
const primaryPreemptibleNodes = new gcp.container.NodePool("primaryPreemptibleNodes", {
cluster: primary.id,
nodeCount: 1,
nodeConfig: {
preemptible: true,
machineType: "e2-medium",
serviceAccount: _default.email,
oauthScopes: ["https://www.googleapis.com/auth/cloud-platform"],
},
});
resources:
default:
type: gcp:serviceAccount:Account
properties:
accountId: service-account-id
displayName: Service Account
primary:
type: gcp:container:Cluster
properties:
location: us-central1
# We can't create a cluster with no node pool defined, but we want to only use
# separately managed node pools. So we create the smallest possible default
# node pool and immediately delete it.
removeDefaultNodePool: true
initialNodeCount: 1
primaryPreemptibleNodes:
type: gcp:container:NodePool
properties:
cluster: ${primary.id}
nodeCount: 1
nodeConfig:
preemptible: true
machineType: e2-medium
serviceAccount: ${default.email}
oauthScopes:
- https://www.googleapis.com/auth/cloud-platform
2 Node Pools, 1 Separately Managed + The Default Node Pool
Coming soon!
Coming soon!
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.serviceAccount.Account;
import com.pulumi.gcp.serviceAccount.AccountArgs;
import com.pulumi.gcp.container.Cluster;
import com.pulumi.gcp.container.ClusterArgs;
import com.pulumi.gcp.container.inputs.ClusterNodeConfigArgs;
import com.pulumi.gcp.container.inputs.ClusterNodeConfigGuestAcceleratorArgs;
import com.pulumi.gcp.container.NodePool;
import com.pulumi.gcp.container.NodePoolArgs;
import com.pulumi.gcp.container.inputs.NodePoolNodeConfigArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var default_ = new Account("default", AccountArgs.builder()
.accountId("service-account-id")
.displayName("Service Account")
.build());
var primary = new Cluster("primary", ClusterArgs.builder()
.location("us-central1-a")
.initialNodeCount(3)
.nodeLocations("us-central1-c")
.nodeConfig(ClusterNodeConfigArgs.builder()
.serviceAccount(default_.email())
.oauthScopes("https://www.googleapis.com/auth/cloud-platform")
.guestAccelerators(ClusterNodeConfigGuestAcceleratorArgs.builder()
.type("nvidia-tesla-k80")
.count(1)
.build())
.build())
.build());
var np = new NodePool("np", NodePoolArgs.builder()
.cluster(primary.id())
.nodeConfig(NodePoolNodeConfigArgs.builder()
.machineType("e2-medium")
.serviceAccount(default_.email())
.oauthScopes("https://www.googleapis.com/auth/cloud-platform")
.build())
// Custom timeouts (create: 30m, update: 20m) are configured through resource options
// in Pulumi, not through NodePoolArgs; see the YAML example below.
.build());
}
}
Coming soon!
Coming soon!
resources:
default:
type: gcp:serviceAccount:Account
properties:
accountId: service-account-id
displayName: Service Account
np:
type: gcp:container:NodePool
properties:
cluster: ${primary.id}
nodeConfig:
machineType: e2-medium
serviceAccount: ${default.email}
oauthScopes:
- https://www.googleapis.com/auth/cloud-platform
options:
  customTimeouts:
    create: 30m
    update: 20m
primary:
type: gcp:container:Cluster
properties:
location: us-central1-a
initialNodeCount: 3
nodeLocations:
- us-central1-c
nodeConfig:
serviceAccount: ${default.email}
oauthScopes:
- https://www.googleapis.com/auth/cloud-platform
guestAccelerators:
- type: nvidia-tesla-k80
count: 1
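The C#, Go, Python, and TypeScript tabs for this example are not generated above, so the following is a minimal TypeScript sketch derived from the YAML program. Resource names and values mirror the YAML; the timeouts block is expressed as the customTimeouts resource option, which is an adaptation rather than generated output.
import * as gcp from "@pulumi/gcp";
// Service account shared by both node pools.
const defaultAccount = new gcp.serviceaccount.Account("default", {
    accountId: "service-account-id",
    displayName: "Service Account",
});
// Cluster that keeps its default node pool (3 nodes in us-central1-c).
const primary = new gcp.container.Cluster("primary", {
    location: "us-central1-a",
    initialNodeCount: 3,
    nodeLocations: ["us-central1-c"],
    nodeConfig: {
        serviceAccount: defaultAccount.email,
        oauthScopes: ["https://www.googleapis.com/auth/cloud-platform"],
        guestAccelerators: [{
            type: "nvidia-tesla-k80",
            count: 1,
        }],
    },
});
// Separately managed node pool attached to the same cluster.
const np = new gcp.container.NodePool("np", {
    cluster: primary.id,
    nodeConfig: {
        machineType: "e2-medium",
        serviceAccount: defaultAccount.email,
        oauthScopes: ["https://www.googleapis.com/auth/cloud-platform"],
    },
}, {
    customTimeouts: { create: "30m", update: "20m" },
});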
Create NodePool Resource
new NodePool(name: string, args: NodePoolArgs, opts?: CustomResourceOptions);
@overload
def NodePool(resource_name: str,
opts: Optional[ResourceOptions] = None,
autoscaling: Optional[NodePoolAutoscalingArgs] = None,
cluster: Optional[str] = None,
initial_node_count: Optional[int] = None,
location: Optional[str] = None,
management: Optional[NodePoolManagementArgs] = None,
max_pods_per_node: Optional[int] = None,
name: Optional[str] = None,
name_prefix: Optional[str] = None,
network_config: Optional[NodePoolNetworkConfigArgs] = None,
node_config: Optional[NodePoolNodeConfigArgs] = None,
node_count: Optional[int] = None,
node_locations: Optional[Sequence[str]] = None,
placement_policy: Optional[NodePoolPlacementPolicyArgs] = None,
project: Optional[str] = None,
upgrade_settings: Optional[NodePoolUpgradeSettingsArgs] = None,
version: Optional[str] = None)
@overload
def NodePool(resource_name: str,
args: NodePoolArgs,
opts: Optional[ResourceOptions] = None)
func NewNodePool(ctx *Context, name string, args NodePoolArgs, opts ...ResourceOption) (*NodePool, error)
public NodePool(string name, NodePoolArgs args, CustomResourceOptions? opts = null)
public NodePool(String name, NodePoolArgs args)
public NodePool(String name, NodePoolArgs args, CustomResourceOptions options)
type: gcp:container:NodePool
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args NodePoolArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args NodePoolArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args NodePoolArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args NodePoolArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args NodePoolArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
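As a brief usage sketch (the resource names here are illustrative, not taken from the examples above), the TypeScript form passes the resource name, the args bag, and an optional options bag:
import * as gcp from "@pulumi/gcp";
// Illustrative cluster for the node pool to attach to.
const cluster = new gcp.container.Cluster("example-cluster", {
    location: "us-central1",
    initialNodeCount: 1,
});
const pool = new gcp.container.NodePool("example-pool", {
    // args: resource properties
    cluster: cluster.id,
    nodeCount: 1,
}, {
    // opts: options controlling resource behavior
    dependsOn: [cluster],
});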
NodePool Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
The NodePool resource accepts the following input properties:
- Cluster string
The cluster to create the node pool for. Cluster must be present in
location
provided for clusters. May be specified in the formatprojects/{{project}}/locations/{{location}}/clusters/{{cluster}}
or as just the name of the cluster.- Autoscaling
Node
Pool Autoscaling Args Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Structure is documented below.
- Initial
Node intCount The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use the ignoreChanges resource option to ignore subsequent changes to this field (a TypeScript sketch follows the input property lists below).
- Location string
The location (region or zone) of the cluster.
- Management
Node
Pool Management Args Node management configuration, wherein auto-repair and auto-upgrade is configured. Structure is documented below.
- Max
Pods intPer Node The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.
- Name string
The name of the node pool. If left blank, the provider will auto-generate a unique name.
- Name
Prefix string Creates a unique name for the node pool beginning with the specified prefix. Conflicts with
name
.- Network
Config NodePool Network Config Args The network configuration of the pool, such as configuration for adding Pod IP address ranges to the node pool, or enabling private nodes. Structure is documented below.
- Node
Config NodePool Node Config Args Parameters used in creating the node pool. See gcp.container.Cluster for schema.
- Node
Count int The number of nodes per instance group. This field can be used to update the number of nodes per instance group but should not be used alongside
autoscaling
.- Node
Locations List<string> The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level
node_locations
will be used.Note:
node_locations
will not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster.- Placement
Policy NodePool Placement Policy Args Specifies a custom placement policy for the nodes.
The
autoscaling
block supports (either total or per zone limits are required):- Project string
The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.
- Upgrade
Settings NodePool Upgrade Settings Args Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.
- Version string
The Kubernetes version for the nodes in this pool. Note that if this field and
auto_upgrade
are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See thegcp.container.getEngineVersions
data source'sversion_prefix
field to approximate fuzzy versions in a provider-compatible way.
- Cluster string
The cluster to create the node pool for. Cluster must be present in
location
provided for clusters. May be specified in the formatprojects/{{project}}/locations/{{location}}/clusters/{{cluster}}
or as just the name of the cluster.- Autoscaling
Node
Pool Autoscaling Args Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Structure is documented below.
- Initial
Node intCount The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use the ignoreChanges resource option to ignore subsequent changes to this field.
- Location string
The location (region or zone) of the cluster.
- Management
Node
Pool Management Args Node management configuration, wherein auto-repair and auto-upgrade is configured. Structure is documented below.
- Max
Pods intPer Node The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.
- Name string
The name of the node pool. If left blank, the provider will auto-generate a unique name.
- Name
Prefix string Creates a unique name for the node pool beginning with the specified prefix. Conflicts with
name
.- Network
Config NodePool Network Config Args The network configuration of the pool, such as configuration for adding Pod IP address ranges to the node pool, or enabling private nodes. Structure is documented below.
- Node
Config NodePool Node Config Args Parameters used in creating the node pool. See gcp.container.Cluster for schema.
- Node
Count int The number of nodes per instance group. This field can be used to update the number of nodes per instance group but should not be used alongside
autoscaling
.- Node
Locations []string The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level
node_locations
will be used.Note:
node_locations
will not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster.- Placement
Policy NodePool Placement Policy Args Specifies a custom placement policy for the nodes.
The
autoscaling
block supports (either total or per zone limits are required):- Project string
The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.
- Upgrade
Settings NodePool Upgrade Settings Args Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.
- Version string
The Kubernetes version for the nodes in this pool. Note that if this field and
auto_upgrade
are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See thegcp.container.getEngineVersions
data source'sversion_prefix
field to approximate fuzzy versions in a provider-compatible way.
- cluster String
The cluster to create the node pool for. Cluster must be present in
location
provided for clusters. May be specified in the formatprojects/{{project}}/locations/{{location}}/clusters/{{cluster}}
or as just the name of the cluster.- autoscaling
Node
Pool Autoscaling Args Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Structure is documented below.
- initial
Node IntegerCount The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use the ignoreChanges resource option to ignore subsequent changes to this field.
- location String
The location (region or zone) of the cluster.
- management
Node
Pool Management Args Node management configuration, wherein auto-repair and auto-upgrade is configured. Structure is documented below.
- max
Pods IntegerPer Node The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.
- name String
The name of the node pool. If left blank, the provider will auto-generate a unique name.
- name
Prefix String Creates a unique name for the node pool beginning with the specified prefix. Conflicts with
name
.- network
Config NodePool Network Config Args The network configuration of the pool, such as configuration for adding Pod IP address ranges to the node pool, or enabling private nodes. Structure is documented below.
- node
Config NodePool Node Config Args Parameters used in creating the node pool. See gcp.container.Cluster for schema.
- node
Count Integer The number of nodes per instance group. This field can be used to update the number of nodes per instance group but should not be used alongside
autoscaling
.- node
Locations List<String> The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level
node_locations
will be used.Note:
node_locations
will not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster.- placement
Policy NodePool Placement Policy Args Specifies a custom placement policy for the nodes.
The
autoscaling
block supports (either total or per zone limits are required):- project String
The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.
- upgrade
Settings NodePool Upgrade Settings Args Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.
- version String
The Kubernetes version for the nodes in this pool. Note that if this field and
auto_upgrade
are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See thegcp.container.getEngineVersions
data source'sversion_prefix
field to approximate fuzzy versions in a provider-compatible way.
- cluster string
The cluster to create the node pool for. Cluster must be present in
location
provided for clusters. May be specified in the formatprojects/{{project}}/locations/{{location}}/clusters/{{cluster}}
or as just the name of the cluster.- autoscaling
Node
Pool Autoscaling Args Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Structure is documented below.
- initial
Node numberCount The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use the ignoreChanges resource option to ignore subsequent changes to this field.
- location string
The location (region or zone) of the cluster.
- management
Node
Pool Management Args Node management configuration, wherein auto-repair and auto-upgrade is configured. Structure is documented below.
- max
Pods numberPer Node The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.
- name string
The name of the node pool. If left blank, the provider will auto-generate a unique name.
- name
Prefix string Creates a unique name for the node pool beginning with the specified prefix. Conflicts with
name
.- network
Config NodePool Network Config Args The network configuration of the pool, such as configuration for adding Pod IP address ranges to the node pool, or enabling private nodes. Structure is documented below.
- node
Config NodePool Node Config Args Parameters used in creating the node pool. See gcp.container.Cluster for schema.
- node
Count number The number of nodes per instance group. This field can be used to update the number of nodes per instance group but should not be used alongside
autoscaling
.- node
Locations string[] The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level
node_locations
will be used.Note:
node_locations
will not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster.- placement
Policy NodePool Placement Policy Args Specifies a custom placement policy for the nodes.
The
autoscaling
block supports (either total or per zone limits are required):- project string
The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.
- upgrade
Settings NodePool Upgrade Settings Args Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.
- version string
The Kubernetes version for the nodes in this pool. Note that if this field and
auto_upgrade
are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See thegcp.container.getEngineVersions
data source'sversion_prefix
field to approximate fuzzy versions in a provider-compatible way.
- cluster str
The cluster to create the node pool for. Cluster must be present in
location
provided for clusters. May be specified in the formatprojects/{{project}}/locations/{{location}}/clusters/{{cluster}}
or as just the name of the cluster.- autoscaling
Node
Pool Autoscaling Args Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Structure is documented below.
- initial_
node_ intcount The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use the ignoreChanges resource option to ignore subsequent changes to this field.
- location str
The location (region or zone) of the cluster.
- management
Node
Pool Management Args Node management configuration, wherein auto-repair and auto-upgrade is configured. Structure is documented below.
- max_
pods_ intper_ node The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.
- name str
The name of the node pool. If left blank, the provider will auto-generate a unique name.
- name_
prefix str Creates a unique name for the node pool beginning with the specified prefix. Conflicts with
name
.- network_
config NodePool Network Config Args The network configuration of the pool, such as configuration for adding Pod IP address ranges to the node pool, or enabling private nodes. Structure is documented below.
- node_
config NodePool Node Config Args Parameters used in creating the node pool. See gcp.container.Cluster for schema.
- node_
count int The number of nodes per instance group. This field can be used to update the number of nodes per instance group but should not be used alongside
autoscaling
.- node_
locations Sequence[str] The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level
node_locations
will be used.Note:
node_locations
will not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster.- placement_
policy NodePool Placement Policy Args Specifies a custom placement policy for the nodes.
The
autoscaling
block supports (either total or per zone limits are required):- project str
The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.
- upgrade_
settings NodePool Upgrade Settings Args Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.
- version str
The Kubernetes version for the nodes in this pool. Note that if this field and
auto_upgrade
are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See thegcp.container.getEngineVersions
data source'sversion_prefix
field to approximate fuzzy versions in a provider-compatible way.
- cluster String
The cluster to create the node pool for. Cluster must be present in
location
provided for clusters. May be specified in the formatprojects/{{project}}/locations/{{location}}/clusters/{{cluster}}
or as just the name of the cluster.- autoscaling Property Map
Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Structure is documented below.
- initial
Node NumberCount The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use the ignoreChanges resource option to ignore subsequent changes to this field.
- location String
The location (region or zone) of the cluster.
- management Property Map
Node management configuration, wherein auto-repair and auto-upgrade is configured. Structure is documented below.
- max
Pods NumberPer Node The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.
- name String
The name of the node pool. If left blank, the provider will auto-generate a unique name.
- name
Prefix String Creates a unique name for the node pool beginning with the specified prefix. Conflicts with
name
.- network
Config Property Map The network configuration of the pool, such as configuration for adding Pod IP address ranges to the node pool, or enabling private nodes. Structure is documented below.
- node
Config Property Map Parameters used in creating the node pool. See gcp.container.Cluster for schema.
- node
Count Number The number of nodes per instance group. This field can be used to update the number of nodes per instance group but should not be used alongside
autoscaling
.- node
Locations List<String> The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level
node_locations
will be used.Note:
node_locations
will not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster.- placement
Policy Property Map Specifies a custom placement policy for the nodes.
The
autoscaling
block supports (either total or per zone limits are required):- project String
The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.
- upgrade
Settings Property Map Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.
- version String
The Kubernetes version for the nodes in this pool. Note that if this field and
auto_upgrade
are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See thegcp.container.getEngineVersions
data source'sversion_prefix
field to approximate fuzzy versions in a provider-compatible way.
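Tying the autoscaling and initialNodeCount notes above together, the sketch below (TypeScript, with a placeholder cluster reference and illustrative limits) enables per-zone autoscaling and uses the ignoreChanges resource option so that manual resizes reflected in initialNodeCount do not trigger recreation:
import * as gcp from "@pulumi/gcp";
const autoscaled = new gcp.container.NodePool("autoscaled", {
    // Placeholder: full cluster ID, or the bare cluster name plus a location.
    cluster: "projects/my-project/locations/us-central1/clusters/primary",
    initialNodeCount: 1,
    autoscaling: {
        // Per-zone limits; total limits can be used instead of per-zone ones.
        minNodeCount: 1,
        maxNodeCount: 5,
    },
    nodeConfig: {
        machineType: "e2-medium",
    },
}, {
    // Keep manual resizes from forcing destruction and recreation.
    ignoreChanges: ["initialNodeCount"],
});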
Outputs
All input properties are implicitly available as output properties. Additionally, the NodePool resource produces the following output properties:
- Id string
The provider-assigned unique ID for this managed resource.
- Instance
Group List<string>Urls The resource URLs of the managed instance groups associated with this node pool.
- Managed
Instance List<string>Group Urls List of instance group URLs which have been assigned to this node pool.
- Operation string
- Id string
The provider-assigned unique ID for this managed resource.
- Instance
Group []stringUrls The resource URLs of the managed instance groups associated with this node pool.
- Managed
Instance []stringGroup Urls List of instance group URLs which have been assigned to this node pool.
- Operation string
- id String
The provider-assigned unique ID for this managed resource.
- instance
Group List<String>Urls The resource URLs of the managed instance groups associated with this node pool.
- managed
Instance List<String>Group Urls List of instance group URLs which have been assigned to this node pool.
- operation String
- id string
The provider-assigned unique ID for this managed resource.
- instance
Group string[]Urls The resource URLs of the managed instance groups associated with this node pool.
- managed
Instance string[]Group Urls List of instance group URLs which have been assigned to this node pool.
- operation string
- id str
The provider-assigned unique ID for this managed resource.
- instance_
group_ Sequence[str]urls The resource URLs of the managed instance groups associated with this node pool.
- managed_
instance_ Sequence[str]group_ urls List of instance group URLs which have been assigned to this node pool.
- operation str
- id String
The provider-assigned unique ID for this managed resource.
- instance
Group List<String>Urls The resource URLs of the managed instance groups associated with this node pool.
- managed
Instance List<String>Group Urls List of instance group URLs which have been assigned to this node pool.
- operation String
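As a sketch of consuming these outputs (TypeScript, with placeholder inputs), the instance group URLs can be exported as stack outputs once the node pool is declared:
import * as gcp from "@pulumi/gcp";
const np = new gcp.container.NodePool("np", {
    // Placeholder: reference an existing cluster by its full ID.
    cluster: "projects/my-project/locations/us-central1/clusters/primary",
    nodeCount: 1,
});
// Provider-assigned outputs, available once the pool has been created.
export const instanceGroupUrls = np.instanceGroupUrls;
export const managedInstanceGroupUrls = np.managedInstanceGroupUrls;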
Look up Existing NodePool Resource
Get an existing NodePool resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: NodePoolState, opts?: CustomResourceOptions): NodePool
@staticmethod
def get(resource_name: str,
id: str,
opts: Optional[ResourceOptions] = None,
autoscaling: Optional[NodePoolAutoscalingArgs] = None,
cluster: Optional[str] = None,
initial_node_count: Optional[int] = None,
instance_group_urls: Optional[Sequence[str]] = None,
location: Optional[str] = None,
managed_instance_group_urls: Optional[Sequence[str]] = None,
management: Optional[NodePoolManagementArgs] = None,
max_pods_per_node: Optional[int] = None,
name: Optional[str] = None,
name_prefix: Optional[str] = None,
network_config: Optional[NodePoolNetworkConfigArgs] = None,
node_config: Optional[NodePoolNodeConfigArgs] = None,
node_count: Optional[int] = None,
node_locations: Optional[Sequence[str]] = None,
operation: Optional[str] = None,
placement_policy: Optional[NodePoolPlacementPolicyArgs] = None,
project: Optional[str] = None,
upgrade_settings: Optional[NodePoolUpgradeSettingsArgs] = None,
version: Optional[str] = None) -> NodePool
func GetNodePool(ctx *Context, name string, id IDInput, state *NodePoolState, opts ...ResourceOption) (*NodePool, error)
public static NodePool Get(string name, Input<string> id, NodePoolState? state, CustomResourceOptions? opts = null)
public static NodePool get(String name, Output<String> id, NodePoolState state, CustomResourceOptions options)
Resource lookup is not supported in YAML
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- Autoscaling
Node
Pool Autoscaling Args Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Structure is documented below.
- Cluster string
The cluster to create the node pool for. Cluster must be present in
location
provided for clusters. May be specified in the formatprojects/{{project}}/locations/{{location}}/clusters/{{cluster}}
or as just the name of the cluster.- Initial
Node intCount The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use the ignoreChanges resource option to ignore subsequent changes to this field.
- Instance
Group List<string>Urls The resource URLs of the managed instance groups associated with this node pool.
- Location string
The location (region or zone) of the cluster.
- Managed
Instance List<string>Group Urls List of instance group URLs which have been assigned to this node pool.
- Management
Node
Pool Management Args Node management configuration, wherein auto-repair and auto-upgrade is configured. Structure is documented below.
- Max
Pods intPer Node The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.
- Name string
The name of the node pool. If left blank, the provider will auto-generate a unique name.
- Name
Prefix string Creates a unique name for the node pool beginning with the specified prefix. Conflicts with
name
.- Network
Config NodePool Network Config Args The network configuration of the pool, such as configuration for adding Pod IP address ranges to the node pool, or enabling private nodes. Structure is documented below.
- Node
Config NodePool Node Config Args Parameters used in creating the node pool. See gcp.container.Cluster for schema.
- Node
Count int The number of nodes per instance group. This field can be used to update the number of nodes per instance group but should not be used alongside
autoscaling
.- Node
Locations List<string> The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level
node_locations
will be used.Note:
node_locations
will not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster.- Operation string
- Placement
Policy NodePool Placement Policy Args Specifies a custom placement policy for the nodes.
The
autoscaling
block supports (either total or per zone limits are required):- Project string
The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.
- Upgrade
Settings NodePool Upgrade Settings Args Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.
- Version string
The Kubernetes version for the nodes in this pool. Note that if this field and
auto_upgrade
are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See thegcp.container.getEngineVersions
data source'sversion_prefix
field to approximate fuzzy versions in a provider-compatible way.
- Autoscaling
Node
Pool Autoscaling Args Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Structure is documented below.
- Cluster string
The cluster to create the node pool for. Cluster must be present in
location
provided for clusters. May be specified in the formatprojects/{{project}}/locations/{{location}}/clusters/{{cluster}}
or as just the name of the cluster.- Initial
Node intCount The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use the ignoreChanges resource option to ignore subsequent changes to this field.
- Instance
Group []stringUrls The resource URLs of the managed instance groups associated with this node pool.
- Location string
The location (region or zone) of the cluster.
- Managed
Instance []stringGroup Urls List of instance group URLs which have been assigned to this node pool.
- Management
Node
Pool Management Args Node management configuration, wherein auto-repair and auto-upgrade is configured. Structure is documented below.
- Max
Pods intPer Node The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.
- Name string
The name of the node pool. If left blank, the provider will auto-generate a unique name.
- Name
Prefix string Creates a unique name for the node pool beginning with the specified prefix. Conflicts with
name
.- Network
Config NodePool Network Config Args The network configuration of the pool, such as configuration for adding Pod IP address ranges to the node pool, or enabling private nodes. Structure is documented below.
- Node
Config NodePool Node Config Args Parameters used in creating the node pool. See gcp.container.Cluster for schema.
- Node
Count int The number of nodes per instance group. This field can be used to update the number of nodes per instance group but should not be used alongside
autoscaling
.- Node
Locations []string The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level
node_locations
will be used.Note:
node_locations
will not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster.- Operation string
- Placement
Policy NodePool Placement Policy Args Specifies a custom placement policy for the nodes.
The
autoscaling
block supports (either total or per zone limits are required):- Project string
The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.
- Upgrade
Settings NodePool Upgrade Settings Args Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.
- Version string
The Kubernetes version for the nodes in this pool. Note that if this field and
auto_upgrade
are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See thegcp.container.getEngineVersions
data source'sversion_prefix
field to approximate fuzzy versions in a provider-compatible way.
- autoscaling
Node
Pool Autoscaling Args Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Structure is documented below.
- cluster String
The cluster to create the node pool for. Cluster must be present in
location
provided for clusters. May be specified in the formatprojects/{{project}}/locations/{{location}}/clusters/{{cluster}}
or as just the name of the cluster.- initial
Node IntegerCount The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use the ignoreChanges resource option to ignore subsequent changes to this field.
- instance
Group List<String>Urls The resource URLs of the managed instance groups associated with this node pool.
- location String
The location (region or zone) of the cluster.
- managed
Instance List<String>Group Urls List of instance group URLs which have been assigned to this node pool.
- management
Node
Pool Management Args Node management configuration, wherein auto-repair and auto-upgrade is configured. Structure is documented below.
- max
Pods IntegerPer Node The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.
- name String
The name of the node pool. If left blank, the provider will auto-generate a unique name.
- name
Prefix String Creates a unique name for the node pool beginning with the specified prefix. Conflicts with
name
.- network
Config NodePool Network Config Args The network configuration of the pool, such as configuration for adding Pod IP address ranges to the node pool, or enabling private nodes. Structure is documented below.
- node
Config NodePool Node Config Args Parameters used in creating the node pool. See gcp.container.Cluster for schema.
- node
Count Integer The number of nodes per instance group. This field can be used to update the number of nodes per instance group but should not be used alongside
autoscaling
.- node
Locations List<String> The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level
node_locations
will be used.Note:
node_locations
will not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster.- operation String
- placement
Policy NodePool Placement Policy Args Specifies a custom placement policy for the nodes.
The
autoscaling
block supports (either total or per zone limits are required):- project String
The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.
- upgrade
Settings NodePool Upgrade Settings Args Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.
- version String
The Kubernetes version for the nodes in this pool. Note that if this field and
auto_upgrade
are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See thegcp.container.getEngineVersions
data source'sversion_prefix
field to approximate fuzzy versions in a provider-compatible way.
- autoscaling
Node
Pool Autoscaling Args Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Structure is documented below.
- cluster string
The cluster to create the node pool for. Cluster must be present in
location
provided for clusters. May be specified in the formatprojects/{{project}}/locations/{{location}}/clusters/{{cluster}}
or as just the name of the cluster.- initial
Node numberCount The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use the ignoreChanges resource option to ignore subsequent changes to this field.
- instance
Group string[]Urls The resource URLs of the managed instance groups associated with this node pool.
- location string
The location (region or zone) of the cluster.
- managed
Instance string[]Group Urls List of instance group URLs which have been assigned to this node pool.
- management
Node
Pool Management Args Node management configuration, wherein auto-repair and auto-upgrade is configured. Structure is documented below.
- max
Pods numberPer Node The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.
- name string
The name of the node pool. If left blank, the provider will auto-generate a unique name.
- name
Prefix string Creates a unique name for the node pool beginning with the specified prefix. Conflicts with
name
.- network
Config NodePool Network Config Args The network configuration of the pool, such as configuration for adding Pod IP address ranges to the node pool, or enabling private nodes. Structure is documented below.
- node
Config NodePool Node Config Args Parameters used in creating the node pool. See gcp.container.Cluster for schema.
- node
Count number The number of nodes per instance group. This field can be used to update the number of nodes per instance group but should not be used alongside
autoscaling
.- node
Locations string[] The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level
node_locations
will be used.Note:
node_locations
will not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster.- operation string
- placement
Policy NodePool Placement Policy Args Specifies a custom placement policy for the nodes.
The
autoscaling
block supports (either total or per zone limits are required):- project string
The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.
- upgrade
Settings NodePool Upgrade Settings Args Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.
- version string
The Kubernetes version for the nodes in this pool. Note that if this field and
auto_upgrade
are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See thegcp.container.getEngineVersions
data source'sversion_prefix
field to approximate fuzzy versions in a provider-compatible way.
- autoscaling
Node
Pool Autoscaling Args Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Structure is documented below.
- cluster str
The cluster to create the node pool for. Cluster must be present in
location
provided for clusters. May be specified in the formatprojects/{{project}}/locations/{{location}}/clusters/{{cluster}}
or as just the name of the cluster.- initial_
node_ intcount The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use the ignoreChanges resource option to ignore subsequent changes to this field.
- instance_
group_ Sequence[str]urls The resource URLs of the managed instance groups associated with this node pool.
- location str
The location (region or zone) of the cluster.
- managed_
instance_ Sequence[str]group_ urls List of instance group URLs which have been assigned to this node pool.
- management
Node
Pool Management Args Node management configuration, wherein auto-repair and auto-upgrade is configured. Structure is documented below.
- max_
pods_ intper_ node The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.
- name str
The name of the node pool. If left blank, the provider will auto-generate a unique name.
- name_
prefix str Creates a unique name for the node pool beginning with the specified prefix. Conflicts with
name
.- network_
config NodePool Network Config Args The network configuration of the pool, such as configuration for adding Pod IP address ranges to the node pool, or enabling private nodes. Structure is documented below.
- node_
config NodePool Node Config Args Parameters used in creating the node pool. See gcp.container.Cluster for schema.
- node_
count int The number of nodes per instance group. This field can be used to update the number of nodes per instance group but should not be used alongside
autoscaling
.- node_
locations Sequence[str] The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level
node_locations
will be used.Note:
node_locations
will not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster.- operation str
- placement_
policy NodePool Placement Policy Args Specifies a custom placement policy for the nodes.
The
autoscaling
block supports (either total or per zone limits are required):- project str
The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.
- upgrade_
settings NodePool Upgrade Settings Args Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.
- version str
The Kubernetes version for the nodes in this pool. Note that if this field and
auto_upgrade
are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See thegcp.container.getEngineVersions
data source'sversion_prefix
field to approximate fuzzy versions in a provider-compatible way.
- autoscaling Property Map
Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Structure is documented below.
- cluster String
The cluster to create the node pool for. Cluster must be present in location provided for clusters. May be specified in the format projects/{{project}}/locations/{{location}}/clusters/{{cluster}} or as just the name of the cluster.
- initialNodeCount Number
The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use a lifecycle block to ignore subsequent changes to this field (see the ignoreChanges sketch after this list).
- instanceGroupUrls List<String>
The resource URLs of the managed instance groups associated with this node pool.
- location String
The location (region or zone) of the cluster.
- managedInstanceGroupUrls List<String>
List of instance group URLs which have been assigned to this node pool.
- management Property Map
Node management configuration, wherein auto-repair and auto-upgrade are configured. Structure is documented below.
- maxPodsPerNode Number
The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.
- name String
The name of the node pool. If left blank, the provider will auto-generate a unique name.
- namePrefix String
Creates a unique name for the node pool beginning with the specified prefix. Conflicts with name.
- networkConfig Property Map
The network configuration of the pool, such as configuration for adding Pod IP address ranges to the node pool, or enabling private nodes. Structure is documented below.
- nodeConfig Property Map
Parameters used in creating the node pool. See gcp.container.Cluster for schema.
- nodeCount Number
The number of nodes per instance group. This field can be used to update the number of nodes per instance group but should not be used alongside autoscaling.
- nodeLocations List<String>
The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level node_locations will be used. Note: node_locations will not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster.
- operation String
- placementPolicy Property Map
Specifies a custom placement policy for the nodes.
The autoscaling block supports (either total or per zone limits are required):
- project String
The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.
- upgradeSettings Property Map
Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.
- version String
The Kubernetes version for the nodes in this pool. Note that if this field and auto_upgrade are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See the gcp.container.getEngineVersions data source's version_prefix field to approximate fuzzy versions in a provider-compatible way.
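As noted for initialNodeCount, a manual resize can drift from the configured value. In Pulumi the equivalent of a Terraform lifecycle ignore rule is the ignoreChanges resource option; a minimal Python sketch, where the cluster and pool names and sizes are placeholders:
import pulumi
import pulumi_gcp as gcp

# Small example cluster; the separately managed pool below owns the nodes.
primary = gcp.container.Cluster("primary",
    location="us-central1",
    remove_default_node_pool=True,
    initial_node_count=1)

pool = gcp.container.NodePool("drift-tolerant-pool",
    cluster=primary.id,
    initial_node_count=3,
    opts=pulumi.ResourceOptions(
        # Ignore out-of-band resizes so the provider does not destroy and
        # recreate the pool to reconcile initial_node_count.
        ignore_changes=["initialNodeCount"]))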
Supporting Types
NodePoolAutoscaling
- LocationPolicy string
Location policy specifies the algorithm used when scaling-up the node pool. Location policy is supported only in 1.24.1+ clusters.
- "BALANCED" - Is a best effort policy that aims to balance the sizes of available zones.
- "ANY" - Instructs the cluster autoscaler to prioritize utilization of unused reservations, and reduce preemption risk for Spot VMs.
- MaxNodeCount int
Maximum number of nodes per zone in the NodePool. Must be >= min_node_count. Cannot be used with total limits.
- MinNodeCount int
Minimum number of nodes per zone in the NodePool. Must be >= 0 and <= max_node_count. Cannot be used with total limits.
- TotalMaxNodeCount int
Total maximum number of nodes in the NodePool. Must be >= total_min_node_count. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.
- TotalMinNodeCount int
Total minimum number of nodes in the NodePool. Must be >= 0 and <= total_max_node_count. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.
- LocationPolicy string
Location policy specifies the algorithm used when scaling-up the node pool. Location policy is supported only in 1.24.1+ clusters.
- "BALANCED" - Is a best effort policy that aims to balance the sizes of available zones.
- "ANY" - Instructs the cluster autoscaler to prioritize utilization of unused reservations, and reduce preemption risk for Spot VMs.
- MaxNodeCount int
Maximum number of nodes per zone in the NodePool. Must be >= min_node_count. Cannot be used with total limits.
- MinNodeCount int
Minimum number of nodes per zone in the NodePool. Must be >= 0 and <= max_node_count. Cannot be used with total limits.
- TotalMaxNodeCount int
Total maximum number of nodes in the NodePool. Must be >= total_min_node_count. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.
- TotalMinNodeCount int
Total minimum number of nodes in the NodePool. Must be >= 0 and <= total_max_node_count. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.
- locationPolicy String
Location policy specifies the algorithm used when scaling-up the node pool. Location policy is supported only in 1.24.1+ clusters.
- "BALANCED" - Is a best effort policy that aims to balance the sizes of available zones.
- "ANY" - Instructs the cluster autoscaler to prioritize utilization of unused reservations, and reduce preemption risk for Spot VMs.
- maxNodeCount Integer
Maximum number of nodes per zone in the NodePool. Must be >= min_node_count. Cannot be used with total limits.
- minNodeCount Integer
Minimum number of nodes per zone in the NodePool. Must be >= 0 and <= max_node_count. Cannot be used with total limits.
- totalMaxNodeCount Integer
Total maximum number of nodes in the NodePool. Must be >= total_min_node_count. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.
- totalMinNodeCount Integer
Total minimum number of nodes in the NodePool. Must be >= 0 and <= total_max_node_count. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.
- locationPolicy string
Location policy specifies the algorithm used when scaling-up the node pool. Location policy is supported only in 1.24.1+ clusters.
- "BALANCED" - Is a best effort policy that aims to balance the sizes of available zones.
- "ANY" - Instructs the cluster autoscaler to prioritize utilization of unused reservations, and reduce preemption risk for Spot VMs.
- maxNodeCount number
Maximum number of nodes per zone in the NodePool. Must be >= min_node_count. Cannot be used with total limits.
- minNodeCount number
Minimum number of nodes per zone in the NodePool. Must be >= 0 and <= max_node_count. Cannot be used with total limits.
- totalMaxNodeCount number
Total maximum number of nodes in the NodePool. Must be >= total_min_node_count. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.
- totalMinNodeCount number
Total minimum number of nodes in the NodePool. Must be >= 0 and <= total_max_node_count. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.
- location_policy str
Location policy specifies the algorithm used when scaling-up the node pool. Location policy is supported only in 1.24.1+ clusters.
- "BALANCED" - Is a best effort policy that aims to balance the sizes of available zones.
- "ANY" - Instructs the cluster autoscaler to prioritize utilization of unused reservations, and reduce preemption risk for Spot VMs.
- max_node_count int
Maximum number of nodes per zone in the NodePool. Must be >= min_node_count. Cannot be used with total limits.
- min_node_count int
Minimum number of nodes per zone in the NodePool. Must be >= 0 and <= max_node_count. Cannot be used with total limits.
- total_max_node_count int
Total maximum number of nodes in the NodePool. Must be >= total_min_node_count. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.
- total_min_node_count int
Total minimum number of nodes in the NodePool. Must be >= 0 and <= total_max_node_count. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.
- locationPolicy String
Location policy specifies the algorithm used when scaling-up the node pool. Location policy is supported only in 1.24.1+ clusters.
- "BALANCED" - Is a best effort policy that aims to balance the sizes of available zones.
- "ANY" - Instructs the cluster autoscaler to prioritize utilization of unused reservations, and reduce preemption risk for Spot VMs.
- maxNodeCount Number
Maximum number of nodes per zone in the NodePool. Must be >= min_node_count. Cannot be used with total limits.
- minNodeCount Number
Minimum number of nodes per zone in the NodePool. Must be >= 0 and <= max_node_count. Cannot be used with total limits.
- totalMaxNodeCount Number
Total maximum number of nodes in the NodePool. Must be >= total_min_node_count. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.
- totalMinNodeCount Number
Total minimum number of nodes in the NodePool. Must be >= 0 and <= total_max_node_count. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.
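For example, per-zone limits can be expressed with the Python args type as below; the cluster reference and sizes are placeholders, the total_* fields would be used instead for total limits, and node_count should not be set on an autoscaled pool:
import pulumi_gcp as gcp

autoscaled = gcp.container.NodePool("autoscaled",
    # Placeholder cluster reference in the full resource path form.
    cluster="projects/my-project/locations/us-central1/clusters/my-cluster",
    autoscaling=gcp.container.NodePoolAutoscalingArgs(
        min_node_count=1,             # per-zone minimum
        max_node_count=5,             # per-zone maximum, must be >= min_node_count
        location_policy="BALANCED"))  # requires a 1.24.1+ cluster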
NodePoolManagement
- AutoRepair bool
Whether the nodes will be automatically repaired.
- AutoUpgrade bool
Whether the nodes will be automatically upgraded.
- AutoRepair bool
Whether the nodes will be automatically repaired.
- AutoUpgrade bool
Whether the nodes will be automatically upgraded.
- autoRepair Boolean
Whether the nodes will be automatically repaired.
- autoUpgrade Boolean
Whether the nodes will be automatically upgraded.
- autoRepair boolean
Whether the nodes will be automatically repaired.
- autoUpgrade boolean
Whether the nodes will be automatically upgraded.
- auto_repair bool
Whether the nodes will be automatically repaired.
- auto_upgrade bool
Whether the nodes will be automatically upgraded.
- autoRepair Boolean
Whether the nodes will be automatically repaired.
- autoUpgrade Boolean
Whether the nodes will be automatically upgraded.
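A short Python sketch enabling both flags (the cluster reference and node count are placeholders):
import pulumi_gcp as gcp

managed = gcp.container.NodePool("managed",
    cluster="projects/my-project/locations/us-central1/clusters/my-cluster",  # placeholder
    node_count=2,
    management=gcp.container.NodePoolManagementArgs(
        auto_repair=True,    # replace unhealthy nodes automatically
        auto_upgrade=True))  # keep node versions in step with the control plane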
NodePoolNetworkConfig
- CreatePodRange bool
Whether to create a new range for pod IPs in this node pool. Defaults are provided for pod_range and pod_ipv4_cidr_block if they are not specified.
- EnablePrivateNodes bool
Whether nodes have internal IP addresses only.
- PodCidrOverprovisionConfig NodePoolNetworkConfigPodCidrOverprovisionConfig
- PodIpv4CidrBlock string
The IP address range for pod IPs in this node pool. Only applicable if createPodRange is true. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) to pick a specific range to use.
- PodRange string
The ID of the secondary range for pod IPs. If create_pod_range is true, this ID is used for the new range. If create_pod_range is false, uses an existing secondary range with this ID.
- CreatePodRange bool
Whether to create a new range for pod IPs in this node pool. Defaults are provided for pod_range and pod_ipv4_cidr_block if they are not specified.
- EnablePrivateNodes bool
Whether nodes have internal IP addresses only.
- PodCidrOverprovisionConfig NodePoolNetworkConfigPodCidrOverprovisionConfig
- PodIpv4CidrBlock string
The IP address range for pod IPs in this node pool. Only applicable if createPodRange is true. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) to pick a specific range to use.
- PodRange string
The ID of the secondary range for pod IPs. If create_pod_range is true, this ID is used for the new range. If create_pod_range is false, uses an existing secondary range with this ID.
- createPodRange Boolean
Whether to create a new range for pod IPs in this node pool. Defaults are provided for pod_range and pod_ipv4_cidr_block if they are not specified.
- enablePrivateNodes Boolean
Whether nodes have internal IP addresses only.
- podCidrOverprovisionConfig NodePoolNetworkConfigPodCidrOverprovisionConfig
- podIpv4CidrBlock String
The IP address range for pod IPs in this node pool. Only applicable if createPodRange is true. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) to pick a specific range to use.
- podRange String
The ID of the secondary range for pod IPs. If create_pod_range is true, this ID is used for the new range. If create_pod_range is false, uses an existing secondary range with this ID.
- createPodRange boolean
Whether to create a new range for pod IPs in this node pool. Defaults are provided for pod_range and pod_ipv4_cidr_block if they are not specified.
- enablePrivateNodes boolean
Whether nodes have internal IP addresses only.
- podCidrOverprovisionConfig NodePoolNetworkConfigPodCidrOverprovisionConfig
- podIpv4CidrBlock string
The IP address range for pod IPs in this node pool. Only applicable if createPodRange is true. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) to pick a specific range to use.
- podRange string
The ID of the secondary range for pod IPs. If create_pod_range is true, this ID is used for the new range. If create_pod_range is false, uses an existing secondary range with this ID.
- create_pod_range bool
Whether to create a new range for pod IPs in this node pool. Defaults are provided for pod_range and pod_ipv4_cidr_block if they are not specified.
- enable_private_nodes bool
Whether nodes have internal IP addresses only.
- pod_cidr_overprovision_config NodePoolNetworkConfigPodCidrOverprovisionConfig
- pod_ipv4_cidr_block str
The IP address range for pod IPs in this node pool. Only applicable if createPodRange is true. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) to pick a specific range to use.
- pod_range str
The ID of the secondary range for pod IPs. If create_pod_range is true, this ID is used for the new range. If create_pod_range is false, uses an existing secondary range with this ID.
- createPodRange Boolean
Whether to create a new range for pod IPs in this node pool. Defaults are provided for pod_range and pod_ipv4_cidr_block if they are not specified.
- enablePrivateNodes Boolean
Whether nodes have internal IP addresses only.
- podCidrOverprovisionConfig Property Map
- podIpv4CidrBlock String
The IP address range for pod IPs in this node pool. Only applicable if createPodRange is true. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) to pick a specific range to use.
- podRange String
The ID of the secondary range for pod IPs. If create_pod_range is true, this ID is used for the new range. If create_pod_range is false, uses an existing secondary range with this ID.
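A hedged Python sketch of a pool with its own pod range and private nodes; the cluster reference, range name and netmask are placeholders chosen for illustration:
import pulumi_gcp as gcp

isolated = gcp.container.NodePool("isolated",
    cluster="projects/my-project/locations/us-central1/clusters/my-cluster",  # placeholder
    node_count=1,
    network_config=gcp.container.NodePoolNetworkConfigArgs(
        create_pod_range=True,       # provision a new secondary range for pods
        pod_range="pods-isolated",   # ID for the new range (placeholder name)
        pod_ipv4_cidr_block="/21",   # netmask form: let GKE pick the exact range
        enable_private_nodes=True))  # nodes get internal IP addresses only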
NodePoolNetworkConfigPodCidrOverprovisionConfig
- Disabled bool
- Disabled bool
- disabled Boolean
- disabled boolean
- disabled bool
- disabled Boolean
NodePoolNodeConfig
- AdvancedMachineFeatures NodePoolNodeConfigAdvancedMachineFeatures
- BootDiskKmsKey string
- DiskSizeGb int
- DiskType string
- EphemeralStorageConfig NodePoolNodeConfigEphemeralStorageConfig
- EphemeralStorageLocalSsdConfig NodePoolNodeConfigEphemeralStorageLocalSsdConfig
- GcfsConfig NodePoolNodeConfigGcfsConfig
- GuestAccelerators List<NodePoolNodeConfigGuestAccelerator>
- Gvnic NodePoolNodeConfigGvnic
- ImageType string
- KubeletConfig NodePoolNodeConfigKubeletConfig
- Labels Dictionary<string, string>
- LinuxNodeConfig NodePoolNodeConfigLinuxNodeConfig
- LocalNvmeSsdBlockConfig NodePoolNodeConfigLocalNvmeSsdBlockConfig
- LocalSsdCount int
- LoggingVariant string
- MachineType string
- Metadata Dictionary<string, string>
- MinCpuPlatform string
- NodeGroup string
- OauthScopes List<string>
- Preemptible bool
- ReservationAffinity NodePoolNodeConfigReservationAffinity
- ResourceLabels Dictionary<string, string>
- SandboxConfig NodePoolNodeConfigSandboxConfig
- ServiceAccount string
- ShieldedInstanceConfig NodePoolNodeConfigShieldedInstanceConfig
- Spot bool
- Tags List<string>
- Taints List<NodePoolNodeConfigTaint>
- WorkloadMetadataConfig NodePoolNodeConfigWorkloadMetadataConfig
- AdvancedMachineFeatures NodePoolNodeConfigAdvancedMachineFeatures
- BootDiskKmsKey string
- DiskSizeGb int
- DiskType string
- EphemeralStorageConfig NodePoolNodeConfigEphemeralStorageConfig
- EphemeralStorageLocalSsdConfig NodePoolNodeConfigEphemeralStorageLocalSsdConfig
- GcfsConfig NodePoolNodeConfigGcfsConfig
- GuestAccelerators []NodePoolNodeConfigGuestAccelerator
- Gvnic NodePoolNodeConfigGvnic
- ImageType string
- KubeletConfig NodePoolNodeConfigKubeletConfig
- Labels map[string]string
- LinuxNodeConfig NodePoolNodeConfigLinuxNodeConfig
- LocalNvmeSsdBlockConfig NodePoolNodeConfigLocalNvmeSsdBlockConfig
- LocalSsdCount int
- LoggingVariant string
- MachineType string
- Metadata map[string]string
- MinCpuPlatform string
- NodeGroup string
- OauthScopes []string
- Preemptible bool
- ReservationAffinity NodePoolNodeConfigReservationAffinity
- ResourceLabels map[string]string
- SandboxConfig NodePoolNodeConfigSandboxConfig
- ServiceAccount string
- ShieldedInstanceConfig NodePoolNodeConfigShieldedInstanceConfig
- Spot bool
- Tags []string
- Taints []NodePoolNodeConfigTaint
- WorkloadMetadataConfig NodePoolNodeConfigWorkloadMetadataConfig
- advancedMachineFeatures NodePoolNodeConfigAdvancedMachineFeatures
- bootDiskKmsKey String
- diskSizeGb Integer
- diskType String
- ephemeralStorageConfig NodePoolNodeConfigEphemeralStorageConfig
- ephemeralStorageLocalSsdConfig NodePoolNodeConfigEphemeralStorageLocalSsdConfig
- gcfsConfig NodePoolNodeConfigGcfsConfig
- guestAccelerators List<NodePoolNodeConfigGuestAccelerator>
- gvnic NodePoolNodeConfigGvnic
- imageType String
- kubeletConfig NodePoolNodeConfigKubeletConfig
- labels Map<String,String>
- linuxNodeConfig NodePoolNodeConfigLinuxNodeConfig
- localNvmeSsdBlockConfig NodePoolNodeConfigLocalNvmeSsdBlockConfig
- localSsdCount Integer
- loggingVariant String
- machineType String
- metadata Map<String,String>
- minCpuPlatform String
- nodeGroup String
- oauthScopes List<String>
- preemptible Boolean
- reservationAffinity NodePoolNodeConfigReservationAffinity
- resourceLabels Map<String,String>
- sandboxConfig NodePoolNodeConfigSandboxConfig
- serviceAccount String
- shieldedInstanceConfig NodePoolNodeConfigShieldedInstanceConfig
- spot Boolean
- tags List<String>
- taints List<NodePoolNodeConfigTaint>
- workloadMetadataConfig NodePoolNodeConfigWorkloadMetadataConfig
- advancedMachineFeatures NodePoolNodeConfigAdvancedMachineFeatures
- bootDiskKmsKey string
- diskSizeGb number
- diskType string
- ephemeralStorageConfig NodePoolNodeConfigEphemeralStorageConfig
- ephemeralStorageLocalSsdConfig NodePoolNodeConfigEphemeralStorageLocalSsdConfig
- gcfsConfig NodePoolNodeConfigGcfsConfig
- guestAccelerators NodePoolNodeConfigGuestAccelerator[]
- gvnic NodePoolNodeConfigGvnic
- imageType string
- kubeletConfig NodePoolNodeConfigKubeletConfig
- labels {[key: string]: string}
- linuxNodeConfig NodePoolNodeConfigLinuxNodeConfig
- localNvmeSsdBlockConfig NodePoolNodeConfigLocalNvmeSsdBlockConfig
- localSsdCount number
- loggingVariant string
- machineType string
- metadata {[key: string]: string}
- minCpuPlatform string
- nodeGroup string
- oauthScopes string[]
- preemptible boolean
- reservationAffinity NodePoolNodeConfigReservationAffinity
- resourceLabels {[key: string]: string}
- sandboxConfig NodePoolNodeConfigSandboxConfig
- serviceAccount string
- shieldedInstanceConfig NodePoolNodeConfigShieldedInstanceConfig
- spot boolean
- tags string[]
- taints NodePoolNodeConfigTaint[]
- workloadMetadataConfig NodePoolNodeConfigWorkloadMetadataConfig
- advanced_machine_features NodePoolNodeConfigAdvancedMachineFeatures
- boot_disk_kms_key str
- disk_size_gb int
- disk_type str
- ephemeral_storage_config NodePoolNodeConfigEphemeralStorageConfig
- ephemeral_storage_local_ssd_config NodePoolNodeConfigEphemeralStorageLocalSsdConfig
- gcfs_config NodePoolNodeConfigGcfsConfig
- guest_accelerators Sequence[NodePoolNodeConfigGuestAccelerator]
- gvnic NodePoolNodeConfigGvnic
- image_type str
- kubelet_config NodePoolNodeConfigKubeletConfig
- labels Mapping[str, str]
- linux_node_config NodePoolNodeConfigLinuxNodeConfig
- local_nvme_ssd_block_config NodePoolNodeConfigLocalNvmeSsdBlockConfig
- local_ssd_count int
- logging_variant str
- machine_type str
- metadata Mapping[str, str]
- min_cpu_platform str
- node_group str
- oauth_scopes Sequence[str]
- preemptible bool
- reservation_affinity NodePoolNodeConfigReservationAffinity
- resource_labels Mapping[str, str]
- sandbox_config NodePoolNodeConfigSandboxConfig
- service_account str
- shielded_instance_config NodePoolNodeConfigShieldedInstanceConfig
- spot bool
- tags Sequence[str]
- taints Sequence[NodePoolNodeConfigTaint]
- workload_metadata_config NodePoolNodeConfigWorkloadMetadataConfig
- advancedMachineFeatures Property Map
- bootDiskKmsKey String
- diskSizeGb Number
- diskType String
- ephemeralStorageConfig Property Map
- ephemeralStorageLocalSsdConfig Property Map
- gcfsConfig Property Map
- guestAccelerators List<Property Map>
- gvnic Property Map
- imageType String
- kubeletConfig Property Map
- labels Map<String>
- linuxNodeConfig Property Map
- localNvmeSsdBlockConfig Property Map
- localSsdCount Number
- loggingVariant String
- machineType String
- metadata Map<String>
- minCpuPlatform String
- nodeGroup String
- oauthScopes List<String>
- preemptible Boolean
- reservationAffinity Property Map
- resourceLabels Map<String>
- sandboxConfig Property Map
- serviceAccount String
- shieldedInstanceConfig Property Map
- spot Boolean
- tags List<String>
- taints List<Property Map>
- workloadMetadataConfig Property Map
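Most of these fields mirror the node_config block of gcp.container.Cluster. A hedged Python sketch of a common shape, with a machine type, scopes, labels and a taint; the cluster reference and all values are placeholders:
import pulumi_gcp as gcp

workers = gcp.container.NodePool("workers",
    cluster="projects/my-project/locations/us-central1/clusters/my-cluster",  # placeholder
    node_count=3,
    node_config=gcp.container.NodePoolNodeConfigArgs(
        machine_type="e2-standard-4",
        disk_size_gb=50,
        oauth_scopes=["https://www.googleapis.com/auth/cloud-platform"],
        labels={"workload": "batch"},
        taints=[gcp.container.NodePoolNodeConfigTaintArgs(
            key="dedicated",
            value="batch",
            effect="NO_SCHEDULE")]))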
NodePoolNodeConfigAdvancedMachineFeatures
- ThreadsPerCore int
- ThreadsPerCore int
- threadsPerCore Integer
- threadsPerCore number
- threads_per_core int
- threadsPerCore Number
NodePoolNodeConfigEphemeralStorageConfig
- LocalSsdCount int
- LocalSsdCount int
- localSsdCount Integer
- localSsdCount number
- local_ssd_count int
- localSsdCount Number
NodePoolNodeConfigEphemeralStorageLocalSsdConfig
- LocalSsdCount int
- LocalSsdCount int
- localSsdCount Integer
- localSsdCount number
- local_ssd_count int
- localSsdCount Number
NodePoolNodeConfigGcfsConfig
- Enabled bool
- Enabled bool
- enabled Boolean
- enabled boolean
- enabled bool
- enabled Boolean
NodePoolNodeConfigGuestAccelerator
- Count int
- Type string
The accelerator type resource to expose to this instance (e.g. nvidia-tesla-t4).
- GpuPartitionSize string
- GpuSharingConfig NodePoolNodeConfigGuestAcceleratorGpuSharingConfig
- Count int
- Type string
The accelerator type resource to expose to this instance (e.g. nvidia-tesla-t4).
- GpuPartitionSize string
- GpuSharingConfig NodePoolNodeConfigGuestAcceleratorGpuSharingConfig
- count Integer
- type String
The accelerator type resource to expose to this instance (e.g. nvidia-tesla-t4).
- gpuPartitionSize String
- gpuSharingConfig NodePoolNodeConfigGuestAcceleratorGpuSharingConfig
- count number
- type string
The accelerator type resource to expose to this instance (e.g. nvidia-tesla-t4).
- gpuPartitionSize string
- gpuSharingConfig NodePoolNodeConfigGuestAcceleratorGpuSharingConfig
- count int
- type str
The accelerator type resource to expose to this instance (e.g. nvidia-tesla-t4).
- gpu_partition_size str
- gpu_sharing_config NodePoolNodeConfigGuestAcceleratorGpuSharingConfig
- count Number
- type String
The accelerator type resource to expose to this instance (e.g. nvidia-tesla-t4).
- gpuPartitionSize String
- gpuSharingConfig Property Map
NodePoolNodeConfigGuestAcceleratorGpuSharingConfig
- GpuSharingStrategy string
- MaxSharedClientsPerGpu int
- GpuSharingStrategy string
- MaxSharedClientsPerGpu int
- gpuSharingStrategy String
- maxSharedClientsPerGpu Integer
- gpuSharingStrategy string
- maxSharedClientsPerGpu number
- gpu_sharing_strategy str
- max_shared_clients_per_gpu int
- gpuSharingStrategy String
- maxSharedClientsPerGpu Number
NodePoolNodeConfigGvnic
- Enabled bool
- Enabled bool
- enabled Boolean
- enabled boolean
- enabled bool
- enabled Boolean
NodePoolNodeConfigKubeletConfig
- CpuManagerPolicy string
- CpuCfsQuota bool
- CpuCfsQuotaPeriod string
- PodPidsLimit int
- CpuManagerPolicy string
- CpuCfsQuota bool
- CpuCfsQuotaPeriod string
- PodPidsLimit int
- cpuManagerPolicy String
- cpuCfsQuota Boolean
- cpuCfsQuotaPeriod String
- podPidsLimit Integer
- cpuManagerPolicy string
- cpuCfsQuota boolean
- cpuCfsQuotaPeriod string
- podPidsLimit number
- cpu_manager_policy str
- cpu_cfs_quota bool
- cpu_cfs_quota_period str
- pod_pids_limit int
- cpuManagerPolicy String
- cpuCfsQuota Boolean
- cpuCfsQuotaPeriod String
- podPidsLimit Number
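A minimal sketch of tuning the kubelet through node_config in Python; the cluster reference, machine type and limits are illustrative assumptions rather than recommended values:
import pulumi_gcp as gcp

tuned = gcp.container.NodePool("tuned-kubelet",
    cluster="projects/my-project/locations/us-central1/clusters/my-cluster",  # placeholder
    node_count=1,
    node_config=gcp.container.NodePoolNodeConfigArgs(
        machine_type="e2-standard-4",
        kubelet_config=gcp.container.NodePoolNodeConfigKubeletConfigArgs(
            cpu_manager_policy="static",   # pin CPUs for Guaranteed pods
            cpu_cfs_quota=True,
            cpu_cfs_quota_period="100ms",
            pod_pids_limit=4096)))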
NodePoolNodeConfigLinuxNodeConfig
- Sysctls Dictionary<string, string>
- Sysctls map[string]string
- sysctls Map<String,String>
- sysctls {[key: string]: string}
- sysctls Mapping[str, str]
- sysctls Map<String>
NodePoolNodeConfigLocalNvmeSsdBlockConfig
- LocalSsdCount int
- LocalSsdCount int
- localSsdCount Integer
- localSsdCount number
- local_ssd_count int
- localSsdCount Number
NodePoolNodeConfigReservationAffinity
- ConsumeReservationType string
- Key string
- Values List<string>
- ConsumeReservationType string
- Key string
- Values []string
- consumeReservationType String
- key String
- values List<String>
- consumeReservationType string
- key string
- values string[]
- consume_reservation_type str
- key str
- values Sequence[str]
- consumeReservationType String
- key String
- values List<String>
NodePoolNodeConfigSandboxConfig
- SandboxType string
- SandboxType string
- sandboxType String
- sandboxType string
- sandbox_type str
- sandboxType String
NodePoolNodeConfigShieldedInstanceConfig
- enableIntegrityMonitoring Boolean
- enableSecureBoot Boolean
- enableIntegrityMonitoring boolean
- enableSecureBoot boolean
- enableIntegrityMonitoring Boolean
- enableSecureBoot Boolean
NodePoolNodeConfigTaint
NodePoolNodeConfigWorkloadMetadataConfig
- Mode string
- Mode string
- mode String
- mode string
- mode str
- mode String
NodePoolPlacementPolicy
- Type string
The type of the policy. Supports a single value: COMPACT. Specifying the COMPACT placement policy type places the node pool's nodes in closer physical proximity in order to reduce network latency between nodes.
- Type string
The type of the policy. Supports a single value: COMPACT. Specifying the COMPACT placement policy type places the node pool's nodes in closer physical proximity in order to reduce network latency between nodes.
- type String
The type of the policy. Supports a single value: COMPACT. Specifying the COMPACT placement policy type places the node pool's nodes in closer physical proximity in order to reduce network latency between nodes.
- type string
The type of the policy. Supports a single value: COMPACT. Specifying the COMPACT placement policy type places the node pool's nodes in closer physical proximity in order to reduce network latency between nodes.
- type str
The type of the policy. Supports a single value: COMPACT. Specifying the COMPACT placement policy type places the node pool's nodes in closer physical proximity in order to reduce network latency between nodes.
- type String
The type of the policy. Supports a single value: COMPACT. Specifying the COMPACT placement policy type places the node pool's nodes in closer physical proximity in order to reduce network latency between nodes.
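A minimal Python sketch of a compact pool; the cluster reference is a placeholder, and the compute-optimized machine type is an assumption since compact placement is generally only available on such node shapes:
import pulumi_gcp as gcp

compact = gcp.container.NodePool("compact",
    cluster="projects/my-project/locations/us-central1/clusters/my-cluster",  # placeholder
    node_count=2,
    node_config=gcp.container.NodePoolNodeConfigArgs(
        machine_type="c2-standard-4"),  # assumption: compute-optimized nodes for compact placement
    placement_policy=gcp.container.NodePoolPlacementPolicyArgs(
        type="COMPACT"))  # the only supported value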
NodePoolUpgradeSettings
- BlueGreenSettings NodePoolUpgradeSettingsBlueGreenSettings
The settings to adjust blue-green upgrades. Structure is documented below.
- MaxSurge int
The number of additional nodes that can be added to the node pool during an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater.
- MaxUnavailable int
The number of nodes that can be simultaneously unavailable during an upgrade. Increasing max_unavailable raises the number of nodes that can be upgraded in parallel. Can be set to 0 or greater. max_surge and max_unavailable must not be negative and at least one of them must be greater than zero.
- Strategy string
The upgrade strategy to be used for upgrading the nodes.
- BlueGreenSettings NodePoolUpgradeSettingsBlueGreenSettings
The settings to adjust blue-green upgrades. Structure is documented below.
- MaxSurge int
The number of additional nodes that can be added to the node pool during an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater.
- MaxUnavailable int
The number of nodes that can be simultaneously unavailable during an upgrade. Increasing max_unavailable raises the number of nodes that can be upgraded in parallel. Can be set to 0 or greater. max_surge and max_unavailable must not be negative and at least one of them must be greater than zero.
- Strategy string
The upgrade strategy to be used for upgrading the nodes.
- blueGreenSettings NodePoolUpgradeSettingsBlueGreenSettings
The settings to adjust blue-green upgrades. Structure is documented below.
- maxSurge Integer
The number of additional nodes that can be added to the node pool during an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater.
- maxUnavailable Integer
The number of nodes that can be simultaneously unavailable during an upgrade. Increasing max_unavailable raises the number of nodes that can be upgraded in parallel. Can be set to 0 or greater. max_surge and max_unavailable must not be negative and at least one of them must be greater than zero.
- strategy String
The upgrade strategy to be used for upgrading the nodes.
- blueGreenSettings NodePoolUpgradeSettingsBlueGreenSettings
The settings to adjust blue-green upgrades. Structure is documented below.
- maxSurge number
The number of additional nodes that can be added to the node pool during an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater.
- maxUnavailable number
The number of nodes that can be simultaneously unavailable during an upgrade. Increasing max_unavailable raises the number of nodes that can be upgraded in parallel. Can be set to 0 or greater. max_surge and max_unavailable must not be negative and at least one of them must be greater than zero.
- strategy string
The upgrade strategy to be used for upgrading the nodes.
- blue_green_settings NodePoolUpgradeSettingsBlueGreenSettings
The settings to adjust blue-green upgrades. Structure is documented below.
- max_surge int
The number of additional nodes that can be added to the node pool during an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater.
- max_unavailable int
The number of nodes that can be simultaneously unavailable during an upgrade. Increasing max_unavailable raises the number of nodes that can be upgraded in parallel. Can be set to 0 or greater. max_surge and max_unavailable must not be negative and at least one of them must be greater than zero.
- strategy str
The upgrade strategy to be used for upgrading the nodes.
- blueGreenSettings Property Map
The settings to adjust blue-green upgrades. Structure is documented below.
- maxSurge Number
The number of additional nodes that can be added to the node pool during an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater.
- maxUnavailable Number
The number of nodes that can be simultaneously unavailable during an upgrade. Increasing max_unavailable raises the number of nodes that can be upgraded in parallel. Can be set to 0 or greater. max_surge and max_unavailable must not be negative and at least one of them must be greater than zero.
- strategy String
The upgrade strategy to be used for upgrading the nodes.
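A hedged Python sketch of surge-based upgrades; the cluster reference and counts are placeholders:
import pulumi_gcp as gcp

surge = gcp.container.NodePool("surge-upgrades",
    cluster="projects/my-project/locations/us-central1/clusters/my-cluster",  # placeholder
    node_count=3,
    upgrade_settings=gcp.container.NodePoolUpgradeSettingsArgs(
        strategy="SURGE",
        max_surge=1,         # one extra node may be created during the upgrade
        max_unavailable=0))  # no existing node is taken offline before its replacement is ready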
NodePoolUpgradeSettingsBlueGreenSettings
- StandardRolloutPolicy NodePoolUpgradeSettingsBlueGreenSettingsStandardRolloutPolicy
Specifies the standard policy settings for blue-green upgrades.
- NodePoolSoakDuration string
Time needed after draining the entire blue pool. After this period, the blue pool will be cleaned up.
- StandardRolloutPolicy NodePoolUpgradeSettingsBlueGreenSettingsStandardRolloutPolicy
Specifies the standard policy settings for blue-green upgrades.
- NodePoolSoakDuration string
Time needed after draining the entire blue pool. After this period, the blue pool will be cleaned up.
- standardRolloutPolicy NodePoolUpgradeSettingsBlueGreenSettingsStandardRolloutPolicy
Specifies the standard policy settings for blue-green upgrades.
- nodePoolSoakDuration String
Time needed after draining the entire blue pool. After this period, the blue pool will be cleaned up.
- standardRolloutPolicy NodePoolUpgradeSettingsBlueGreenSettingsStandardRolloutPolicy
Specifies the standard policy settings for blue-green upgrades.
- nodePoolSoakDuration string
Time needed after draining the entire blue pool. After this period, the blue pool will be cleaned up.
- standard_rollout_policy NodePoolUpgradeSettingsBlueGreenSettingsStandardRolloutPolicy
Specifies the standard policy settings for blue-green upgrades.
- node_pool_soak_duration str
Time needed after draining the entire blue pool. After this period, the blue pool will be cleaned up.
- standardRolloutPolicy Property Map
Specifies the standard policy settings for blue-green upgrades.
- nodePoolSoakDuration String
Time needed after draining the entire blue pool. After this period, the blue pool will be cleaned up.
NodePoolUpgradeSettingsBlueGreenSettingsStandardRolloutPolicy
- BatchNodeCount int
Number of blue nodes to drain in a batch.
- BatchPercentage double
Percentage of the blue pool nodes to drain in a batch.
- BatchSoakDuration string
Soak time after each batch gets drained.
- BatchNodeCount int
Number of blue nodes to drain in a batch.
- BatchPercentage float64
Percentage of the blue pool nodes to drain in a batch.
- BatchSoakDuration string
Soak time after each batch gets drained.
- batchNodeCount Integer
Number of blue nodes to drain in a batch.
- batchPercentage Double
Percentage of the blue pool nodes to drain in a batch.
- batchSoakDuration String
Soak time after each batch gets drained.
- batchNodeCount number
Number of blue nodes to drain in a batch.
- batchPercentage number
Percentage of the blue pool nodes to drain in a batch.
- batchSoakDuration string
Soak time after each batch gets drained.
- batch_node_count int
Number of blue nodes to drain in a batch.
- batch_percentage float
Percentage of the blue pool nodes to drain in a batch.
- batch_soak_duration str
Soak time after each batch gets drained.
- batchNodeCount Number
Number of blue nodes to drain in a batch.
- batchPercentage Number
Percentage of the blue pool nodes to drain in a batch.
- batchSoakDuration String
Soak time after each batch gets drained.
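Putting the blue-green pieces together, a hedged Python sketch; the cluster reference, durations and batch fraction are illustrative assumptions:
import pulumi_gcp as gcp

blue_green = gcp.container.NodePool("blue-green",
    cluster="projects/my-project/locations/us-central1/clusters/my-cluster",  # placeholder
    node_count=4,
    upgrade_settings=gcp.container.NodePoolUpgradeSettingsArgs(
        strategy="BLUE_GREEN",
        blue_green_settings=gcp.container.NodePoolUpgradeSettingsBlueGreenSettingsArgs(
            node_pool_soak_duration="1800s",  # wait 30 minutes after the blue pool drains
            standard_rollout_policy=gcp.container.NodePoolUpgradeSettingsBlueGreenSettingsStandardRolloutPolicyArgs(
                batch_percentage=0.25,       # drain a quarter of the blue pool per batch
                batch_soak_duration="300s"))))  # soak 5 minutes between batches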
Import
Node pools can be imported using the project, location, cluster and name. If the project is omitted, the project value in the provider configuration will be used.
Examples
$ pulumi import gcp:container/nodePool:NodePool mainpool my-gcp-project/us-east1-a/my-cluster/main-pool
$ pulumi import gcp:container/nodePool:NodePool mainpool us-east1/my-cluster/main-pool
Package Details
- Repository
- Google Cloud (GCP) Classic pulumi/pulumi-gcp
- License
- Apache-2.0
- Notes
This Pulumi package is based on the google-beta Terraform Provider.