published on Thursday, Mar 12, 2026 by Pulumi
Use this data source to get details about a cluster resource.
To get more information about private cloud clusters, see the Google Cloud VMware Engine API documentation.
Example Usage
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
// Look up an existing VMware Engine cluster by name within its parent private cloud.
const myCluster = gcp.vmwareengine.getCluster({
name: "my-cluster",
parent: "project/locations/us-west1-a/privateClouds/my-cloud",
});
import pulumi
import pulumi_gcp as gcp

# Look up an existing VMware Engine cluster by name within its parent private cloud.
my_cluster = gcp.vmwareengine.get_cluster(name="my-cluster",
parent="project/locations/us-west1-a/privateClouds/my-cloud")
package main
import (
"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/vmwareengine"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
// Look up an existing VMware Engine cluster by name within its parent private cloud.
_, err := vmwareengine.LookupCluster(ctx, &vmwareengine.LookupClusterArgs{
Name: "my-cluster",
Parent: "project/locations/us-west1-a/privateClouds/my-cloud",
}, nil)
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() =>
{
// Look up an existing VMware Engine cluster by name within its parent private cloud.
var myCluster = Gcp.VMwareEngine.GetCluster.Invoke(new()
{
Name = "my-cluster",
Parent = "project/locations/us-west1-a/privateClouds/my-cloud",
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.vmwareengine.VmwareengineFunctions;
import com.pulumi.gcp.vmwareengine.inputs.GetClusterArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
// Look up an existing VMware Engine cluster by name within its parent private cloud.
final var myCluster = VmwareengineFunctions.getCluster(GetClusterArgs.builder()
.name("my-cluster")
.parent("project/locations/us-west1-a/privateClouds/my-cloud")
.build());
}
}
# Look up an existing VMware Engine cluster by name within its parent private cloud.
variables:
myCluster:
fn::invoke:
function: gcp:vmwareengine:getCluster
arguments:
name: my-cluster
parent: project/locations/us-west1-a/privateClouds/my-cloud
Using getCluster
Two invocation forms are available. The direct form accepts plain arguments and either blocks until the result value is available, or returns a Promise-wrapped result. The output form accepts Input-wrapped arguments and returns an Output-wrapped result.
function getCluster(args: GetClusterArgs, opts?: InvokeOptions): Promise<GetClusterResult>
function getClusterOutput(args: GetClusterOutputArgs, opts?: InvokeOptions): Output<GetClusterResult>
def get_cluster(name: Optional[str] = None,
parent: Optional[str] = None,
opts: Optional[InvokeOptions] = None) -> GetClusterResult
def get_cluster_output(name: Optional[pulumi.Input[str]] = None,
parent: Optional[pulumi.Input[str]] = None,
opts: Optional[InvokeOptions] = None) -> Output[GetClusterResult]
func LookupCluster(ctx *Context, args *LookupClusterArgs, opts ...InvokeOption) (*LookupClusterResult, error)
func LookupClusterOutput(ctx *Context, args *LookupClusterOutputArgs, opts ...InvokeOption) LookupClusterResultOutput
> Note: This function is named LookupCluster in the Go SDK.
public static class GetCluster
{
public static Task<GetClusterResult> InvokeAsync(GetClusterArgs args, InvokeOptions? opts = null)
public static Output<GetClusterResult> Invoke(GetClusterInvokeArgs args, InvokeOptions? opts = null)
}
public static CompletableFuture<GetClusterResult> getCluster(GetClusterArgs args, InvokeOptions options)
public static Output<GetClusterResult> getCluster(GetClusterArgs args, InvokeOptions options)
fn::invoke:
function: gcp:vmwareengine/getCluster:getCluster
arguments:
# arguments dictionaryThe following arguments are supported:
getCluster Result
The following output properties are available:
- AutoscalingSettings List<GetClusterAutoscalingSetting>
- CreateTime string
- DatastoreMountConfigs List<GetClusterDatastoreMountConfig>
- Id string
- The provider-assigned unique ID for this managed resource.
- Management bool
- Name string
- NodeTypeConfigs List<GetClusterNodeTypeConfig>
- Parent string
- State string
- Uid string
- UpdateTime string
- Autoscaling
Settings []GetCluster Autoscaling Setting - Create
Time string - Datastore
Mount []GetConfigs Cluster Datastore Mount Config - Id string
- The provider-assigned unique ID for this managed resource.
- Management bool
- Name string
- Node
Type []GetConfigs Cluster Node Type Config - Parent string
- State string
- Uid string
- Update
Time string
- autoscaling
Settings List<GetCluster Autoscaling Setting> - create
Time String - datastore
Mount List<GetConfigs Cluster Datastore Mount Config> - id String
- The provider-assigned unique ID for this managed resource.
- management Boolean
- name String
- node
Type List<GetConfigs Cluster Node Type Config> - parent String
- state String
- uid String
- update
Time String
- autoscaling
Settings GetCluster Autoscaling Setting[] - create
Time string - datastore
Mount GetConfigs Cluster Datastore Mount Config[] - id string
- The provider-assigned unique ID for this managed resource.
- management boolean
- name string
- node
Type GetConfigs Cluster Node Type Config[] - parent string
- state string
- uid string
- update
Time string
- autoscaling_settings Sequence[GetClusterAutoscalingSetting]
- create_time str
- datastore_mount_configs Sequence[GetClusterDatastoreMountConfig]
- id str
- The provider-assigned unique ID for this managed resource.
- management bool
- name str
- node_type_configs Sequence[GetClusterNodeTypeConfig]
- parent str
- state str
- uid str
- update_time str
- autoscaling
Settings List<Property Map> - create
Time String - datastore
Mount List<Property Map>Configs - id String
- The provider-assigned unique ID for this managed resource.
- management Boolean
- name String
- node
Type List<Property Map>Configs - parent String
- state String
- uid String
- update
Time String
Supporting Types
GetClusterAutoscalingSetting
- Autoscaling
Policies List<GetCluster Autoscaling Setting Autoscaling Policy> The map with autoscaling policies applied to the cluster. The key is the identifier of the policy. It must meet the following requirements:
- Only contains 1-63 alphanumeric characters and hyphens
- Begins with an alphabetical character
- Ends with a non-hyphen character
- Not formatted as a UUID
- Complies with RFC 1034 (section 3.5)
Currently the map must contain only one element that describes the autoscaling policy for compute nodes.
- Cool
Down stringPeriod - The minimum duration between consecutive autoscale operations. It starts once addition or removal of nodes is fully completed. Minimum cool down period is 30m. Cool down period must be in whole minutes (for example, 30m, 31m, 50m). Mandatory for successful addition of autoscaling settings in cluster.
- Max
Cluster intNode Count - Maximum number of nodes of any type in a cluster. Mandatory for successful addition of autoscaling settings in cluster.
- Min
Cluster intNode Count - Minimum number of nodes of any type in a cluster. Mandatory for successful addition of autoscaling settings in cluster.
- Autoscaling
Policies []GetCluster Autoscaling Setting Autoscaling Policy The map with autoscaling policies applied to the cluster. The key is the identifier of the policy. It must meet the following requirements:
- Only contains 1-63 alphanumeric characters and hyphens
- Begins with an alphabetical character
- Ends with a non-hyphen character
- Not formatted as a UUID
- Complies with RFC 1034 (section 3.5)
Currently the map must contain only one element that describes the autoscaling policy for compute nodes.
- Cool
Down stringPeriod - The minimum duration between consecutive autoscale operations. It starts once addition or removal of nodes is fully completed. Minimum cool down period is 30m. Cool down period must be in whole minutes (for example, 30m, 31m, 50m). Mandatory for successful addition of autoscaling settings in cluster.
- Max
Cluster intNode Count - Maximum number of nodes of any type in a cluster. Mandatory for successful addition of autoscaling settings in cluster.
- Min
Cluster intNode Count - Minimum number of nodes of any type in a cluster. Mandatory for successful addition of autoscaling settings in cluster.
- autoscaling
Policies List<GetCluster Autoscaling Setting Autoscaling Policy> The map with autoscaling policies applied to the cluster. The key is the identifier of the policy. It must meet the following requirements:
- Only contains 1-63 alphanumeric characters and hyphens
- Begins with an alphabetical character
- Ends with a non-hyphen character
- Not formatted as a UUID
- Complies with RFC 1034 (section 3.5)
Currently the map must contain only one element that describes the autoscaling policy for compute nodes.
- cool
Down StringPeriod - The minimum duration between consecutive autoscale operations. It starts once addition or removal of nodes is fully completed. Minimum cool down period is 30m. Cool down period must be in whole minutes (for example, 30m, 31m, 50m). Mandatory for successful addition of autoscaling settings in cluster.
- max
Cluster IntegerNode Count - Maximum number of nodes of any type in a cluster. Mandatory for successful addition of autoscaling settings in cluster.
- min
Cluster IntegerNode Count - Minimum number of nodes of any type in a cluster. Mandatory for successful addition of autoscaling settings in cluster.
- autoscaling
Policies GetCluster Autoscaling Setting Autoscaling Policy[] The map with autoscaling policies applied to the cluster. The key is the identifier of the policy. It must meet the following requirements:
- Only contains 1-63 alphanumeric characters and hyphens
- Begins with an alphabetical character
- Ends with a non-hyphen character
- Not formatted as a UUID
- Complies with RFC 1034 (section 3.5)
Currently the map must contain only one element that describes the autoscaling policy for compute nodes.
- cool
Down stringPeriod - The minimum duration between consecutive autoscale operations. It starts once addition or removal of nodes is fully completed. Minimum cool down period is 30m. Cool down period must be in whole minutes (for example, 30m, 31m, 50m). Mandatory for successful addition of autoscaling settings in cluster.
- max
Cluster numberNode Count - Maximum number of nodes of any type in a cluster. Mandatory for successful addition of autoscaling settings in cluster.
- min
Cluster numberNode Count - Minimum number of nodes of any type in a cluster. Mandatory for successful addition of autoscaling settings in cluster.
- autoscaling_
policies Sequence[GetCluster Autoscaling Setting Autoscaling Policy] The map with autoscaling policies applied to the cluster. The key is the identifier of the policy. It must meet the following requirements:
- Only contains 1-63 alphanumeric characters and hyphens
- Begins with an alphabetical character
- Ends with a non-hyphen character
- Not formatted as a UUID
- Complies with RFC 1034 (section 3.5)
Currently the map must contain only one element that describes the autoscaling policy for compute nodes.
- cool_
down_ strperiod - The minimum duration between consecutive autoscale operations. It starts once addition or removal of nodes is fully completed. Minimum cool down period is 30m. Cool down period must be in whole minutes (for example, 30m, 31m, 50m). Mandatory for successful addition of autoscaling settings in cluster.
- max_
cluster_ intnode_ count - Maximum number of nodes of any type in a cluster. Mandatory for successful addition of autoscaling settings in cluster.
- min_
cluster_ intnode_ count - Minimum number of nodes of any type in a cluster. Mandatory for successful addition of autoscaling settings in cluster.
- autoscaling
Policies List<Property Map> The map with autoscaling policies applied to the cluster. The key is the identifier of the policy. It must meet the following requirements:
- Only contains 1-63 alphanumeric characters and hyphens
- Begins with an alphabetical character
- Ends with a non-hyphen character
- Not formatted as a UUID
- Complies with RFC 1034 (section 3.5)
Currently the map must contain only one element that describes the autoscaling policy for compute nodes.
- cool
Down StringPeriod - The minimum duration between consecutive autoscale operations. It starts once addition or removal of nodes is fully completed. Minimum cool down period is 30m. Cool down period must be in whole minutes (for example, 30m, 31m, 50m). Mandatory for successful addition of autoscaling settings in cluster.
- max
Cluster NumberNode Count - Maximum number of nodes of any type in a cluster. Mandatory for successful addition of autoscaling settings in cluster.
- min
Cluster NumberNode Count - Minimum number of nodes of any type in a cluster. Mandatory for successful addition of autoscaling settings in cluster.
GetClusterAutoscalingSettingAutoscalingPolicy
- Autoscale
Policy stringId - Consumed
Memory List<GetThresholds Cluster Autoscaling Setting Autoscaling Policy Consumed Memory Threshold> - Utilization thresholds pertaining to amount of consumed memory.
- Cpu
Thresholds List<GetCluster Autoscaling Setting Autoscaling Policy Cpu Threshold> - Utilization thresholds pertaining to CPU utilization.
- Node
Type stringId - The canonical identifier of the node type to add or remove.
- Scale
Out intSize - Number of nodes to add to a cluster during a scale-out operation. Must be divisible by 2 for stretched clusters.
- Storage
Thresholds List<GetCluster Autoscaling Setting Autoscaling Policy Storage Threshold> - Utilization thresholds pertaining to amount of consumed storage.
- Autoscale
Policy stringId - Consumed
Memory []GetThresholds Cluster Autoscaling Setting Autoscaling Policy Consumed Memory Threshold - Utilization thresholds pertaining to amount of consumed memory.
- Cpu
Thresholds []GetCluster Autoscaling Setting Autoscaling Policy Cpu Threshold - Utilization thresholds pertaining to CPU utilization.
- Node
Type stringId - The canonical identifier of the node type to add or remove.
- Scale
Out intSize - Number of nodes to add to a cluster during a scale-out operation. Must be divisible by 2 for stretched clusters.
- Storage
Thresholds []GetCluster Autoscaling Setting Autoscaling Policy Storage Threshold - Utilization thresholds pertaining to amount of consumed storage.
- autoscale
Policy StringId - consumed
Memory List<GetThresholds Cluster Autoscaling Setting Autoscaling Policy Consumed Memory Threshold> - Utilization thresholds pertaining to amount of consumed memory.
- cpu
Thresholds List<GetCluster Autoscaling Setting Autoscaling Policy Cpu Threshold> - Utilization thresholds pertaining to CPU utilization.
- node
Type StringId - The canonical identifier of the node type to add or remove.
- scale
Out IntegerSize - Number of nodes to add to a cluster during a scale-out operation. Must be divisible by 2 for stretched clusters.
- storage
Thresholds List<GetCluster Autoscaling Setting Autoscaling Policy Storage Threshold> - Utilization thresholds pertaining to amount of consumed storage.
- autoscale
Policy stringId - consumed
Memory GetThresholds Cluster Autoscaling Setting Autoscaling Policy Consumed Memory Threshold[] - Utilization thresholds pertaining to amount of consumed memory.
- cpu
Thresholds GetCluster Autoscaling Setting Autoscaling Policy Cpu Threshold[] - Utilization thresholds pertaining to CPU utilization.
- node
Type stringId - The canonical identifier of the node type to add or remove.
- scale
Out numberSize - Number of nodes to add to a cluster during a scale-out operation. Must be divisible by 2 for stretched clusters.
- storage
Thresholds GetCluster Autoscaling Setting Autoscaling Policy Storage Threshold[] - Utilization thresholds pertaining to amount of consumed storage.
- autoscale_
policy_ strid - consumed_
memory_ Sequence[Getthresholds Cluster Autoscaling Setting Autoscaling Policy Consumed Memory Threshold] - Utilization thresholds pertaining to amount of consumed memory.
- cpu_
thresholds Sequence[GetCluster Autoscaling Setting Autoscaling Policy Cpu Threshold] - Utilization thresholds pertaining to CPU utilization.
- node_
type_ strid - The canonical identifier of the node type to add or remove.
- scale_
out_ intsize - Number of nodes to add to a cluster during a scale-out operation. Must be divisible by 2 for stretched clusters.
- storage_
thresholds Sequence[GetCluster Autoscaling Setting Autoscaling Policy Storage Threshold] - Utilization thresholds pertaining to amount of consumed storage.
- autoscale
Policy StringId - consumed
Memory List<Property Map>Thresholds - Utilization thresholds pertaining to amount of consumed memory.
- cpu
Thresholds List<Property Map> - Utilization thresholds pertaining to CPU utilization.
- node
Type StringId - The canonical identifier of the node type to add or remove.
- scale
Out NumberSize - Number of nodes to add to a cluster during a scale-out operation. Must be divisible by 2 for stretched clusters.
- storage
Thresholds List<Property Map> - Utilization thresholds pertaining to amount of consumed storage.
GetClusterAutoscalingSettingAutoscalingPolicyConsumedMemoryThreshold
GetClusterAutoscalingSettingAutoscalingPolicyCpuThreshold
GetClusterAutoscalingSettingAutoscalingPolicyStorageThreshold
GetClusterDatastoreMountConfig
- Access
Mode string - Optional. NFS is accessed by hosts in either read or read_write mode Default value used will be READ_WRITE Possible values: READ_ONLY READ_WRITE
- Datastore string
- The resource name of the datastore to unmount. The datastore requested to be mounted should be in same region/zone as the cluster. Resource names are schemeless URIs that follow the conventions in https://cloud.google.com/apis/design/resource_names. For example: 'projects/my-project/locations/us-central1/datastores/my-datastore'
- Datastore
Networks List<GetCluster Datastore Mount Config Datastore Network> - The network configuration for the datastore.
- string
- File share name.
- Ignore
Colocation bool - Optional. If set to true, the colocation requirement will be ignored. If set to false, the colocation requirement will be enforced. Colocation requirement is the requirement that the cluster must be in the same region/zone of datastore.
- Nfs
Version string - Optional. The NFS protocol supported by the NFS volume. Default value used will be NFS_V3 Possible values: NFS_V3
- Servers List<string>
- Server IP addresses of the NFS volume. For NFS 3, you can only provide a single server IP address or DNS names.
- Access
Mode string - Optional. NFS is accessed by hosts in either read or read_write mode Default value used will be READ_WRITE Possible values: READ_ONLY READ_WRITE
- Datastore string
- The resource name of the datastore to unmount. The datastore requested to be mounted should be in same region/zone as the cluster. Resource names are schemeless URIs that follow the conventions in https://cloud.google.com/apis/design/resource_names. For example: 'projects/my-project/locations/us-central1/datastores/my-datastore'
- Datastore
Networks []GetCluster Datastore Mount Config Datastore Network - The network configuration for the datastore.
- string
- File share name.
- Ignore
Colocation bool - Optional. If set to true, the colocation requirement will be ignored. If set to false, the colocation requirement will be enforced. Colocation requirement is the requirement that the cluster must be in the same region/zone of datastore.
- Nfs
Version string - Optional. The NFS protocol supported by the NFS volume. Default value used will be NFS_V3 Possible values: NFS_V3
- Servers []string
- Server IP addresses of the NFS volume. For NFS 3, you can only provide a single server IP address or DNS names.
- access
Mode String - Optional. NFS is accessed by hosts in either read or read_write mode Default value used will be READ_WRITE Possible values: READ_ONLY READ_WRITE
- datastore String
- The resource name of the datastore to unmount. The datastore requested to be mounted should be in same region/zone as the cluster. Resource names are schemeless URIs that follow the conventions in https://cloud.google.com/apis/design/resource_names. For example: 'projects/my-project/locations/us-central1/datastores/my-datastore'
- datastore
Networks List<GetCluster Datastore Mount Config Datastore Network> - The network configuration for the datastore.
- String
- File share name.
- ignore
Colocation Boolean - Optional. If set to true, the colocation requirement will be ignored. If set to false, the colocation requirement will be enforced. Colocation requirement is the requirement that the cluster must be in the same region/zone of datastore.
- nfs
Version String - Optional. The NFS protocol supported by the NFS volume. Default value used will be NFS_V3 Possible values: NFS_V3
- servers List<String>
- Server IP addresses of the NFS volume. For NFS 3, you can only provide a single server IP address or DNS names.
- access
Mode string - Optional. NFS is accessed by hosts in either read or read_write mode Default value used will be READ_WRITE Possible values: READ_ONLY READ_WRITE
- datastore string
- The resource name of the datastore to unmount. The datastore requested to be mounted should be in same region/zone as the cluster. Resource names are schemeless URIs that follow the conventions in https://cloud.google.com/apis/design/resource_names. For example: 'projects/my-project/locations/us-central1/datastores/my-datastore'
- datastore
Networks GetCluster Datastore Mount Config Datastore Network[] - The network configuration for the datastore.
- string
- File share name.
- ignore
Colocation boolean - Optional. If set to true, the colocation requirement will be ignored. If set to false, the colocation requirement will be enforced. Colocation requirement is the requirement that the cluster must be in the same region/zone of datastore.
- nfs
Version string - Optional. The NFS protocol supported by the NFS volume. Default value used will be NFS_V3 Possible values: NFS_V3
- servers string[]
- Server IP addresses of the NFS volume. For NFS 3, you can only provide a single server IP address or DNS names.
- access_
mode str - Optional. NFS is accessed by hosts in either read or read_write mode Default value used will be READ_WRITE Possible values: READ_ONLY READ_WRITE
- datastore str
- The resource name of the datastore to unmount. The datastore requested to be mounted should be in same region/zone as the cluster. Resource names are schemeless URIs that follow the conventions in https://cloud.google.com/apis/design/resource_names. For example: 'projects/my-project/locations/us-central1/datastores/my-datastore'
- datastore_
networks Sequence[GetCluster Datastore Mount Config Datastore Network] - The network configuration for the datastore.
- str
- File share name.
- ignore_
colocation bool - Optional. If set to true, the colocation requirement will be ignored. If set to false, the colocation requirement will be enforced. Colocation requirement is the requirement that the cluster must be in the same region/zone of datastore.
- nfs_
version str - Optional. The NFS protocol supported by the NFS volume. Default value used will be NFS_V3 Possible values: NFS_V3
- servers Sequence[str]
- Server IP addresses of the NFS volume. For NFS 3, you can only provide a single server IP address or DNS names.
- access
Mode String - Optional. NFS is accessed by hosts in either read or read_write mode Default value used will be READ_WRITE Possible values: READ_ONLY READ_WRITE
- datastore String
- The resource name of the datastore to unmount. The datastore requested to be mounted should be in same region/zone as the cluster. Resource names are schemeless URIs that follow the conventions in https://cloud.google.com/apis/design/resource_names. For example: 'projects/my-project/locations/us-central1/datastores/my-datastore'
- datastore
Networks List<Property Map> - The network configuration for the datastore.
- String
- File share name.
- ignore
Colocation Boolean - Optional. If set to true, the colocation requirement will be ignored. If set to false, the colocation requirement will be enforced. Colocation requirement is the requirement that the cluster must be in the same region/zone of datastore.
- nfs
Version String - Optional. The NFS protocol supported by the NFS volume. Default value used will be NFS_V3 Possible values: NFS_V3
- servers List<String>
- Server IP addresses of the NFS volume. For NFS 3, you can only provide a single server IP address or DNS names.
GetClusterDatastoreMountConfigDatastoreNetwork
- Connection
Count int - Optional. The number of connections of the NFS volume. Supported from vsphere 8.0u1. Possible values are 1-4. Default value is 4.
- Mtu int
- Optional. The Maximal Transmission Unit (MTU) of the datastore. MTU value can range from 1330-9000. If not set, system sets default MTU size to 1500.
- Network
Peering string - The resource name of the network peering, used to access the file share by clients on private cloud. Resource names are schemeless URIs that follow the conventions in https://cloud.google.com/apis/design/resource_names. e.g. projects/my-project/locations/us-central1/networkPeerings/my-network-peering
- Subnet string
- The resource name of the subnet Resource names are schemeless URIs that follow the conventions in https://cloud.google.com/apis/design/resource_names. e.g. projects/my-project/locations/us-central1/subnets/my-subnet
- Connection
Count int - Optional. The number of connections of the NFS volume. Supported from vsphere 8.0u1. Possible values are 1-4. Default value is 4.
- Mtu int
- Optional. The Maximal Transmission Unit (MTU) of the datastore. MTU value can range from 1330-9000. If not set, system sets default MTU size to 1500.
- Network
Peering string - The resource name of the network peering, used to access the file share by clients on private cloud. Resource names are schemeless URIs that follow the conventions in https://cloud.google.com/apis/design/resource_names. e.g. projects/my-project/locations/us-central1/networkPeerings/my-network-peering
- Subnet string
- The resource name of the subnet Resource names are schemeless URIs that follow the conventions in https://cloud.google.com/apis/design/resource_names. e.g. projects/my-project/locations/us-central1/subnets/my-subnet
- connection
Count Integer - Optional. The number of connections of the NFS volume. Supported from vsphere 8.0u1. Possible values are 1-4. Default value is 4.
- mtu Integer
- Optional. The Maximal Transmission Unit (MTU) of the datastore. MTU value can range from 1330-9000. If not set, system sets default MTU size to 1500.
- network
Peering String - The resource name of the network peering, used to access the file share by clients on private cloud. Resource names are schemeless URIs that follow the conventions in https://cloud.google.com/apis/design/resource_names. e.g. projects/my-project/locations/us-central1/networkPeerings/my-network-peering
- subnet String
- The resource name of the subnet Resource names are schemeless URIs that follow the conventions in https://cloud.google.com/apis/design/resource_names. e.g. projects/my-project/locations/us-central1/subnets/my-subnet
- connection
Count number - Optional. The number of connections of the NFS volume. Supported from vsphere 8.0u1. Possible values are 1-4. Default value is 4.
- mtu number
- Optional. The Maximal Transmission Unit (MTU) of the datastore. MTU value can range from 1330-9000. If not set, system sets default MTU size to 1500.
- network
Peering string - The resource name of the network peering, used to access the file share by clients on private cloud. Resource names are schemeless URIs that follow the conventions in https://cloud.google.com/apis/design/resource_names. e.g. projects/my-project/locations/us-central1/networkPeerings/my-network-peering
- subnet string
- The resource name of the subnet Resource names are schemeless URIs that follow the conventions in https://cloud.google.com/apis/design/resource_names. e.g. projects/my-project/locations/us-central1/subnets/my-subnet
- connection_
count int - Optional. The number of connections of the NFS volume. Supported from vsphere 8.0u1. Possible values are 1-4. Default value is 4.
- mtu int
- Optional. The Maximal Transmission Unit (MTU) of the datastore. MTU value can range from 1330-9000. If not set, system sets default MTU size to 1500.
- network_
peering str - The resource name of the network peering, used to access the file share by clients on private cloud. Resource names are schemeless URIs that follow the conventions in https://cloud.google.com/apis/design/resource_names. e.g. projects/my-project/locations/us-central1/networkPeerings/my-network-peering
- subnet str
- The resource name of the subnet Resource names are schemeless URIs that follow the conventions in https://cloud.google.com/apis/design/resource_names. e.g. projects/my-project/locations/us-central1/subnets/my-subnet
- connection
Count Number - Optional. The number of connections of the NFS volume. Supported from vsphere 8.0u1. Possible values are 1-4. Default value is 4.
- mtu Number
- Optional. The Maximal Transmission Unit (MTU) of the datastore. MTU value can range from 1330-9000. If not set, system sets default MTU size to 1500.
- network
Peering String - The resource name of the network peering, used to access the file share by clients on private cloud. Resource names are schemeless URIs that follow the conventions in https://cloud.google.com/apis/design/resource_names. e.g. projects/my-project/locations/us-central1/networkPeerings/my-network-peering
- subnet String
- The resource name of the subnet Resource names are schemeless URIs that follow the conventions in https://cloud.google.com/apis/design/resource_names. e.g. projects/my-project/locations/us-central1/subnets/my-subnet
GetClusterNodeTypeConfig
- Custom
Core intCount - Customized number of cores available to each node of the type. This number must always be one of 'nodeType.availableCustomCoreCounts'. If zero is provided max value from 'nodeType.availableCustomCoreCounts' will be used. Once the customer is created then corecount cannot be changed.
- Node
Count int - The number of nodes of this type in the cluster.
- Node
Type stringId
- Custom
Core intCount - Customized number of cores available to each node of the type. This number must always be one of 'nodeType.availableCustomCoreCounts'. If zero is provided max value from 'nodeType.availableCustomCoreCounts' will be used. Once the customer is created then corecount cannot be changed.
- Node
Count int - The number of nodes of this type in the cluster.
- Node
Type stringId
- custom
Core IntegerCount - Customized number of cores available to each node of the type. This number must always be one of 'nodeType.availableCustomCoreCounts'. If zero is provided max value from 'nodeType.availableCustomCoreCounts' will be used. Once the customer is created then corecount cannot be changed.
- node
Count Integer - The number of nodes of this type in the cluster.
- node
Type StringId
- custom
Core numberCount - Customized number of cores available to each node of the type. This number must always be one of 'nodeType.availableCustomCoreCounts'. If zero is provided max value from 'nodeType.availableCustomCoreCounts' will be used. Once the customer is created then corecount cannot be changed.
- node
Count number - The number of nodes of this type in the cluster.
- node
Type stringId
- custom_
core_ intcount - Customized number of cores available to each node of the type. This number must always be one of 'nodeType.availableCustomCoreCounts'. If zero is provided max value from 'nodeType.availableCustomCoreCounts' will be used. Once the customer is created then corecount cannot be changed.
- node_
count int - The number of nodes of this type in the cluster.
- node_
type_ strid
- custom
Core NumberCount - Customized number of cores available to each node of the type. This number must always be one of 'nodeType.availableCustomCoreCounts'. If zero is provided max value from 'nodeType.availableCustomCoreCounts' will be used. Once the customer is created then corecount cannot be changed.
- node
Count Number - The number of nodes of this type in the cluster.
- node
Type StringId
Package Details
- Repository
- Google Cloud (GCP) Classic pulumi/pulumi-gcp
- License
- Apache-2.0
- Notes
- This Pulumi package is based on the
google-beta Terraform Provider.
published on Thursday, Mar 12, 2026 by Pulumi
