1. Packages
  2. Google Cloud (GCP) Classic
  3. API Docs
  4. hypercomputecluster
  5. Cluster
Viewing docs for Google Cloud v9.15.0
published on Thursday, Mar 12, 2026 by Pulumi
gcp logo
Viewing docs for Google Cloud v9.15.0
published on Thursday, Mar 12, 2026 by Pulumi

    A collection of virtual machines and connected resources forming a high-performance computing cluster capable of running large-scale, tightly coupled workloads. A cluster combines a set of compute resources that perform computations, storage resources that contain inputs and store outputs, an orchestrator that is responsible for assigning jobs to compute resources, and network resources that connect everything together.

    Example Usage

    Hypercomputecluster Cluster Basic

    import * as pulumi from "@pulumi/pulumi";
    import * as gcp from "@pulumi/gcp";
    
    const project = gcp.organizations.getProject({});
    const projectId = project.then(project => project.name);
    const cluster = new gcp.hypercomputecluster.Cluster("cluster", {
        clusterId: "my-cluster",
        location: "us-central1",
        description: "Cluster Director instance created through Pulumi",
        networkResources: [{
            id: "network1",
            config: {
                newNetwork: {
                    description: "Network one",
                    network: projectId.then(projectId => `projects/${projectId}/global/networks/cluster-net1`),
                },
            },
        }],
        computeResources: [{
            id: "compute1",
            config: {
                newOnDemandInstances: {
                    machineType: "n2-standard-2",
                    zone: "us-central1-a",
                },
            },
        }],
        orchestrator: {
            slurm: {
                loginNodes: {
                    machineType: "n2-standard-2",
                    count: "1",
                    zone: "us-central1-a",
                    bootDisk: {
                        sizeGb: "100",
                        type: "pd-balanced",
                    },
                },
                nodeSets: [{
                    id: "nodeset1",
                    computeId: "compute1",
                    staticNodeCount: "1",
                    computeInstance: {
                        bootDisk: {
                            sizeGb: "100",
                            type: "pd-balanced",
                        },
                    },
                }],
                partitions: [{
                    id: "partition1",
                    nodeSetIds: ["nodeset1"],
                }],
                defaultPartition: "partition1",
            },
        },
    });
    
    import pulumi
    import pulumi_gcp as gcp
    
    project = gcp.organizations.get_project()
    project_id = project.name
    cluster = gcp.hypercomputecluster.Cluster("cluster",
        cluster_id="my-cluster",
        location="us-central1",
        description="Cluster Director instance created through Pulumi",
        network_resources=[{
            "id": "network1",
            "config": {
                "new_network": {
                    "description": "Network one",
                    "network": f"projects/{project_id}/global/networks/cluster-net1",
                },
            },
        }],
        compute_resources=[{
            "id": "compute1",
            "config": {
                "new_on_demand_instances": {
                    "machine_type": "n2-standard-2",
                    "zone": "us-central1-a",
                },
            },
        }],
        orchestrator={
            "slurm": {
                "login_nodes": {
                    "machine_type": "n2-standard-2",
                    "count": "1",
                    "zone": "us-central1-a",
                    "boot_disk": {
                        "size_gb": "100",
                        "type": "pd-balanced",
                    },
                },
                "node_sets": [{
                    "id": "nodeset1",
                    "compute_id": "compute1",
                    "static_node_count": "1",
                    "compute_instance": {
                        "boot_disk": {
                            "size_gb": "100",
                            "type": "pd-balanced",
                        },
                    },
                }],
                "partitions": [{
                    "id": "partition1",
                    "node_set_ids": ["nodeset1"],
                }],
                "default_partition": "partition1",
            },
        })
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/hypercomputecluster"
    	"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/organizations"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		project, err := organizations.LookupProject(ctx, &organizations.LookupProjectArgs{}, nil)
    		if err != nil {
    			return err
    		}
    		projectId := project.Name
    		_, err = hypercomputecluster.NewCluster(ctx, "cluster", &hypercomputecluster.ClusterArgs{
    			ClusterId:   pulumi.String("my-cluster"),
    			Location:    pulumi.String("us-central1"),
    			Description: pulumi.String("Cluster Director instance created through Pulumi"),
    			NetworkResources: hypercomputecluster.ClusterNetworkResourceArray{
    				&hypercomputecluster.ClusterNetworkResourceArgs{
    					Id: pulumi.String("network1"),
    					Config: &hypercomputecluster.ClusterNetworkResourceConfigArgs{
    						NewNetwork: &hypercomputecluster.ClusterNetworkResourceConfigNewNetworkArgs{
    							Description: pulumi.String("Network one"),
    							Network:     pulumi.Sprintf("projects/%v/global/networks/cluster-net1", projectId),
    						},
    					},
    				},
    			},
    			ComputeResources: hypercomputecluster.ClusterComputeResourceArray{
    				&hypercomputecluster.ClusterComputeResourceArgs{
    					Id: pulumi.String("compute1"),
    					Config: &hypercomputecluster.ClusterComputeResourceConfigArgs{
    						NewOnDemandInstances: &hypercomputecluster.ClusterComputeResourceConfigNewOnDemandInstancesArgs{
    							MachineType: pulumi.String("n2-standard-2"),
    							Zone:        pulumi.String("us-central1-a"),
    						},
    					},
    				},
    			},
    			Orchestrator: &hypercomputecluster.ClusterOrchestratorArgs{
    				Slurm: &hypercomputecluster.ClusterOrchestratorSlurmArgs{
    					LoginNodes: &hypercomputecluster.ClusterOrchestratorSlurmLoginNodesArgs{
    						MachineType: pulumi.String("n2-standard-2"),
    						Count:       pulumi.String("1"),
    						Zone:        pulumi.String("us-central1-a"),
    						BootDisk: &hypercomputecluster.ClusterOrchestratorSlurmLoginNodesBootDiskArgs{
    							SizeGb: pulumi.String("100"),
    							Type:   pulumi.String("pd-balanced"),
    						},
    					},
    					NodeSets: hypercomputecluster.ClusterOrchestratorSlurmNodeSetArray{
    						&hypercomputecluster.ClusterOrchestratorSlurmNodeSetArgs{
    							Id:              pulumi.String("nodeset1"),
    							ComputeId:       pulumi.String("compute1"),
    							StaticNodeCount: pulumi.String("1"),
    							ComputeInstance: &hypercomputecluster.ClusterOrchestratorSlurmNodeSetComputeInstanceArgs{
    								BootDisk: &hypercomputecluster.ClusterOrchestratorSlurmNodeSetComputeInstanceBootDiskArgs{
    									SizeGb: pulumi.String("100"),
    									Type:   pulumi.String("pd-balanced"),
    								},
    							},
    						},
    					},
    					Partitions: hypercomputecluster.ClusterOrchestratorSlurmPartitionArray{
    						&hypercomputecluster.ClusterOrchestratorSlurmPartitionArgs{
    							Id: pulumi.String("partition1"),
    							NodeSetIds: pulumi.StringArray{
    								pulumi.String("nodeset1"),
    							},
    						},
    					},
    					DefaultPartition: pulumi.String("partition1"),
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Gcp = Pulumi.Gcp;
    
    return await Deployment.RunAsync(() => 
    {
        var project = Gcp.Organizations.GetProject.Invoke();
    
        var projectId = project.Apply(getProjectResult => getProjectResult.Name);
    
        var cluster = new Gcp.HyperComputeCluster.Cluster("cluster", new()
        {
            ClusterId = "my-cluster",
            Location = "us-central1",
            Description = "Cluster Director instance created through Pulumi",
            NetworkResources = new[]
            {
                new Gcp.HyperComputeCluster.Inputs.ClusterNetworkResourceArgs
                {
                    Id = "network1",
                    Config = new Gcp.HyperComputeCluster.Inputs.ClusterNetworkResourceConfigArgs
                    {
                        NewNetwork = new Gcp.HyperComputeCluster.Inputs.ClusterNetworkResourceConfigNewNetworkArgs
                        {
                            Description = "Network one",
                            Network = projectId.Apply(projectId => $"projects/{projectId}/global/networks/cluster-net1"),
                        },
                    },
                },
            },
            ComputeResources = new[]
            {
                new Gcp.HyperComputeCluster.Inputs.ClusterComputeResourceArgs
                {
                    Id = "compute1",
                    Config = new Gcp.HyperComputeCluster.Inputs.ClusterComputeResourceConfigArgs
                    {
                        NewOnDemandInstances = new Gcp.HyperComputeCluster.Inputs.ClusterComputeResourceConfigNewOnDemandInstancesArgs
                        {
                            MachineType = "n2-standard-2",
                            Zone = "us-central1-a",
                        },
                    },
                },
            },
            Orchestrator = new Gcp.HyperComputeCluster.Inputs.ClusterOrchestratorArgs
            {
                Slurm = new Gcp.HyperComputeCluster.Inputs.ClusterOrchestratorSlurmArgs
                {
                    LoginNodes = new Gcp.HyperComputeCluster.Inputs.ClusterOrchestratorSlurmLoginNodesArgs
                    {
                        MachineType = "n2-standard-2",
                        Count = "1",
                        Zone = "us-central1-a",
                        BootDisk = new Gcp.HyperComputeCluster.Inputs.ClusterOrchestratorSlurmLoginNodesBootDiskArgs
                        {
                            SizeGb = "100",
                            Type = "pd-balanced",
                        },
                    },
                    NodeSets = new[]
                    {
                        new Gcp.HyperComputeCluster.Inputs.ClusterOrchestratorSlurmNodeSetArgs
                        {
                            Id = "nodeset1",
                            ComputeId = "compute1",
                            StaticNodeCount = "1",
                            ComputeInstance = new Gcp.HyperComputeCluster.Inputs.ClusterOrchestratorSlurmNodeSetComputeInstanceArgs
                            {
                                BootDisk = new Gcp.HyperComputeCluster.Inputs.ClusterOrchestratorSlurmNodeSetComputeInstanceBootDiskArgs
                                {
                                    SizeGb = "100",
                                    Type = "pd-balanced",
                                },
                            },
                        },
                    },
                    Partitions = new[]
                    {
                        new Gcp.HyperComputeCluster.Inputs.ClusterOrchestratorSlurmPartitionArgs
                        {
                            Id = "partition1",
                            NodeSetIds = new[]
                            {
                                "nodeset1",
                            },
                        },
                    },
                    DefaultPartition = "partition1",
                },
            },
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.gcp.organizations.OrganizationsFunctions;
    import com.pulumi.gcp.organizations.inputs.GetProjectArgs;
    import com.pulumi.gcp.hypercomputecluster.Cluster;
    import com.pulumi.gcp.hypercomputecluster.ClusterArgs;
    import com.pulumi.gcp.hypercomputecluster.inputs.ClusterNetworkResourceArgs;
    import com.pulumi.gcp.hypercomputecluster.inputs.ClusterNetworkResourceConfigArgs;
    import com.pulumi.gcp.hypercomputecluster.inputs.ClusterNetworkResourceConfigNewNetworkArgs;
    import com.pulumi.gcp.hypercomputecluster.inputs.ClusterComputeResourceArgs;
    import com.pulumi.gcp.hypercomputecluster.inputs.ClusterComputeResourceConfigArgs;
    import com.pulumi.gcp.hypercomputecluster.inputs.ClusterComputeResourceConfigNewOnDemandInstancesArgs;
    import com.pulumi.gcp.hypercomputecluster.inputs.ClusterOrchestratorArgs;
    import com.pulumi.gcp.hypercomputecluster.inputs.ClusterOrchestratorSlurmArgs;
    import com.pulumi.gcp.hypercomputecluster.inputs.ClusterOrchestratorSlurmLoginNodesArgs;
    import com.pulumi.gcp.hypercomputecluster.inputs.ClusterOrchestratorSlurmLoginNodesBootDiskArgs;
    import com.pulumi.gcp.hypercomputecluster.inputs.ClusterOrchestratorSlurmNodeSetArgs;
    import com.pulumi.gcp.hypercomputecluster.inputs.ClusterOrchestratorSlurmNodeSetComputeInstanceArgs;
    import com.pulumi.gcp.hypercomputecluster.inputs.ClusterOrchestratorSlurmNodeSetComputeInstanceBootDiskArgs;
    import com.pulumi.gcp.hypercomputecluster.inputs.ClusterOrchestratorSlurmPartitionArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            final var project = OrganizationsFunctions.getProject(GetProjectArgs.builder()
                .build());
    
            final var projectId = project.name();
    
            var cluster = new Cluster("cluster", ClusterArgs.builder()
                .clusterId("my-cluster")
                .location("us-central1")
                .description("Cluster Director instance created through Pulumi")
                .networkResources(ClusterNetworkResourceArgs.builder()
                    .id("network1")
                    .config(ClusterNetworkResourceConfigArgs.builder()
                        .newNetwork(ClusterNetworkResourceConfigNewNetworkArgs.builder()
                            .description("Network one")
                            .network(String.format("projects/%s/global/networks/cluster-net1", projectId))
                            .build())
                        .build())
                    .build())
                .computeResources(ClusterComputeResourceArgs.builder()
                    .id("compute1")
                    .config(ClusterComputeResourceConfigArgs.builder()
                        .newOnDemandInstances(ClusterComputeResourceConfigNewOnDemandInstancesArgs.builder()
                            .machineType("n2-standard-2")
                            .zone("us-central1-a")
                            .build())
                        .build())
                    .build())
                .orchestrator(ClusterOrchestratorArgs.builder()
                    .slurm(ClusterOrchestratorSlurmArgs.builder()
                        .loginNodes(ClusterOrchestratorSlurmLoginNodesArgs.builder()
                            .machineType("n2-standard-2")
                            .count("1")
                            .zone("us-central1-a")
                            .bootDisk(ClusterOrchestratorSlurmLoginNodesBootDiskArgs.builder()
                                .sizeGb("100")
                                .type("pd-balanced")
                                .build())
                            .build())
                        .nodeSets(ClusterOrchestratorSlurmNodeSetArgs.builder()
                            .id("nodeset1")
                            .computeId("compute1")
                            .staticNodeCount("1")
                            .computeInstance(ClusterOrchestratorSlurmNodeSetComputeInstanceArgs.builder()
                                .bootDisk(ClusterOrchestratorSlurmNodeSetComputeInstanceBootDiskArgs.builder()
                                    .sizeGb("100")
                                    .type("pd-balanced")
                                    .build())
                                .build())
                            .build())
                        .partitions(ClusterOrchestratorSlurmPartitionArgs.builder()
                            .id("partition1")
                            .nodeSetIds("nodeset1")
                            .build())
                        .defaultPartition("partition1")
                        .build())
                    .build())
                .build());
    
        }
    }
    
    resources:
      cluster:
        type: gcp:hypercomputecluster:Cluster
        properties:
          clusterId: my-cluster
          location: us-central1
          description: Cluster Director instance created through Pulumi
          networkResources:
            - id: network1
              config:
                newNetwork:
                  description: Network one
                  network: projects/${projectId}/global/networks/cluster-net1
          computeResources:
            - id: compute1
              config:
                newOnDemandInstances:
                  machineType: n2-standard-2
                  zone: us-central1-a
          orchestrator:
            slurm:
              loginNodes:
                machineType: n2-standard-2
                count: '1'
                zone: us-central1-a
                bootDisk:
                  sizeGb: '100'
                  type: pd-balanced
              nodeSets:
                - id: nodeset1
                  computeId: compute1
                  staticNodeCount: '1'
                  computeInstance:
                    bootDisk:
                      sizeGb: '100'
                      type: pd-balanced
              partitions:
                - id: partition1
                  nodeSetIds:
                    - nodeset1
              defaultPartition: partition1
    variables:
      project:
        fn::invoke:
          function: gcp:organizations:getProject
          arguments: {}
      projectId: ${project.name}
    

    Create Cluster Resource

    Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.

    Constructor syntax

    new Cluster(name: string, args: ClusterArgs, opts?: CustomResourceOptions);
    @overload
    def Cluster(resource_name: str,
                args: ClusterArgs,
                opts: Optional[ResourceOptions] = None)
    
    @overload
    def Cluster(resource_name: str,
                opts: Optional[ResourceOptions] = None,
                cluster_id: Optional[str] = None,
                location: Optional[str] = None,
                compute_resources: Optional[Sequence[ClusterComputeResourceArgs]] = None,
                description: Optional[str] = None,
                labels: Optional[Mapping[str, str]] = None,
                network_resources: Optional[Sequence[ClusterNetworkResourceArgs]] = None,
                orchestrator: Optional[ClusterOrchestratorArgs] = None,
                project: Optional[str] = None,
                storage_resources: Optional[Sequence[ClusterStorageResourceArgs]] = None)
    func NewCluster(ctx *Context, name string, args ClusterArgs, opts ...ResourceOption) (*Cluster, error)
    public Cluster(string name, ClusterArgs args, CustomResourceOptions? opts = null)
    public Cluster(String name, ClusterArgs args)
    public Cluster(String name, ClusterArgs args, CustomResourceOptions options)
    
    type: gcp:hypercomputecluster:Cluster
    properties: # The arguments to resource properties.
    options: # Bag of options to control resource's behavior.
    
    

    Parameters

    name string
    The unique name of the resource.
    args ClusterArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    resource_name str
    The unique name of the resource.
    args ClusterArgs
    The arguments to resource properties.
    opts ResourceOptions
    Bag of options to control resource's behavior.
    ctx Context
    Context object for the current deployment.
    name string
    The unique name of the resource.
    args ClusterArgs
    The arguments to resource properties.
    opts ResourceOption
    Bag of options to control resource's behavior.
    name string
    The unique name of the resource.
    args ClusterArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    name String
    The unique name of the resource.
    args ClusterArgs
    The arguments to resource properties.
    options CustomResourceOptions
    Bag of options to control resource's behavior.

    Constructor example

    The following reference example uses placeholder values for all input properties.

    var exampleclusterResourceResourceFromHypercomputeclustercluster = new Gcp.HyperComputeCluster.Cluster("exampleclusterResourceResourceFromHypercomputeclustercluster", new()
    {
        ClusterId = "string",
        Location = "string",
        ComputeResources = new[]
        {
            new Gcp.HyperComputeCluster.Inputs.ClusterComputeResourceArgs
            {
                Config = new Gcp.HyperComputeCluster.Inputs.ClusterComputeResourceConfigArgs
                {
                    NewFlexStartInstances = new Gcp.HyperComputeCluster.Inputs.ClusterComputeResourceConfigNewFlexStartInstancesArgs
                    {
                        MachineType = "string",
                        MaxDuration = "string",
                        Zone = "string",
                    },
                    NewOnDemandInstances = new Gcp.HyperComputeCluster.Inputs.ClusterComputeResourceConfigNewOnDemandInstancesArgs
                    {
                        MachineType = "string",
                        Zone = "string",
                    },
                    NewReservedInstances = new Gcp.HyperComputeCluster.Inputs.ClusterComputeResourceConfigNewReservedInstancesArgs
                    {
                        Reservation = "string",
                    },
                    NewSpotInstances = new Gcp.HyperComputeCluster.Inputs.ClusterComputeResourceConfigNewSpotInstancesArgs
                    {
                        MachineType = "string",
                        Zone = "string",
                        TerminationAction = "string",
                    },
                },
                Id = "string",
            },
        },
        Description = "string",
        Labels = 
        {
            { "string", "string" },
        },
        NetworkResources = new[]
        {
            new Gcp.HyperComputeCluster.Inputs.ClusterNetworkResourceArgs
            {
                Id = "string",
                Config = new Gcp.HyperComputeCluster.Inputs.ClusterNetworkResourceConfigArgs
                {
                    ExistingNetwork = new Gcp.HyperComputeCluster.Inputs.ClusterNetworkResourceConfigExistingNetworkArgs
                    {
                        Network = "string",
                        Subnetwork = "string",
                    },
                    NewNetwork = new Gcp.HyperComputeCluster.Inputs.ClusterNetworkResourceConfigNewNetworkArgs
                    {
                        Network = "string",
                        Description = "string",
                    },
                },
                Networks = new[]
                {
                    new Gcp.HyperComputeCluster.Inputs.ClusterNetworkResourceNetworkArgs
                    {
                        Network = "string",
                        Subnetwork = "string",
                    },
                },
            },
        },
        Orchestrator = new Gcp.HyperComputeCluster.Inputs.ClusterOrchestratorArgs
        {
            Slurm = new Gcp.HyperComputeCluster.Inputs.ClusterOrchestratorSlurmArgs
            {
                LoginNodes = new Gcp.HyperComputeCluster.Inputs.ClusterOrchestratorSlurmLoginNodesArgs
                {
                    Count = "string",
                    MachineType = "string",
                    Zone = "string",
                    BootDisk = new Gcp.HyperComputeCluster.Inputs.ClusterOrchestratorSlurmLoginNodesBootDiskArgs
                    {
                        SizeGb = "string",
                        Type = "string",
                    },
                    EnableOsLogin = false,
                    EnablePublicIps = false,
                    Instances = new[]
                    {
                        new Gcp.HyperComputeCluster.Inputs.ClusterOrchestratorSlurmLoginNodesInstanceArgs
                        {
                            Instance = "string",
                        },
                    },
                    Labels = 
                    {
                        { "string", "string" },
                    },
                    StartupScript = "string",
                    StorageConfigs = new[]
                    {
                        new Gcp.HyperComputeCluster.Inputs.ClusterOrchestratorSlurmLoginNodesStorageConfigArgs
                        {
                            Id = "string",
                            LocalMount = "string",
                        },
                    },
                },
                NodeSets = new[]
                {
                    new Gcp.HyperComputeCluster.Inputs.ClusterOrchestratorSlurmNodeSetArgs
                    {
                        Id = "string",
                        ComputeId = "string",
                        ComputeInstance = new Gcp.HyperComputeCluster.Inputs.ClusterOrchestratorSlurmNodeSetComputeInstanceArgs
                        {
                            BootDisk = new Gcp.HyperComputeCluster.Inputs.ClusterOrchestratorSlurmNodeSetComputeInstanceBootDiskArgs
                            {
                                SizeGb = "string",
                                Type = "string",
                            },
                            Labels = 
                            {
                                { "string", "string" },
                            },
                            StartupScript = "string",
                        },
                        MaxDynamicNodeCount = "string",
                        StaticNodeCount = "string",
                        StorageConfigs = new[]
                        {
                            new Gcp.HyperComputeCluster.Inputs.ClusterOrchestratorSlurmNodeSetStorageConfigArgs
                            {
                                Id = "string",
                                LocalMount = "string",
                            },
                        },
                    },
                },
                Partitions = new[]
                {
                    new Gcp.HyperComputeCluster.Inputs.ClusterOrchestratorSlurmPartitionArgs
                    {
                        Id = "string",
                        NodeSetIds = new[]
                        {
                            "string",
                        },
                    },
                },
                DefaultPartition = "string",
                EpilogBashScripts = new[]
                {
                    "string",
                },
                PrologBashScripts = new[]
                {
                    "string",
                },
            },
        },
        Project = "string",
        StorageResources = new[]
        {
            new Gcp.HyperComputeCluster.Inputs.ClusterStorageResourceArgs
            {
                Config = new Gcp.HyperComputeCluster.Inputs.ClusterStorageResourceConfigArgs
                {
                    ExistingBucket = new Gcp.HyperComputeCluster.Inputs.ClusterStorageResourceConfigExistingBucketArgs
                    {
                        Bucket = "string",
                    },
                    ExistingFilestore = new Gcp.HyperComputeCluster.Inputs.ClusterStorageResourceConfigExistingFilestoreArgs
                    {
                        Filestore = "string",
                    },
                    ExistingLustre = new Gcp.HyperComputeCluster.Inputs.ClusterStorageResourceConfigExistingLustreArgs
                    {
                        Lustre = "string",
                    },
                    NewBucket = new Gcp.HyperComputeCluster.Inputs.ClusterStorageResourceConfigNewBucketArgs
                    {
                        Bucket = "string",
                        Autoclass = new Gcp.HyperComputeCluster.Inputs.ClusterStorageResourceConfigNewBucketAutoclassArgs
                        {
                            Enabled = false,
                        },
                        HierarchicalNamespace = new Gcp.HyperComputeCluster.Inputs.ClusterStorageResourceConfigNewBucketHierarchicalNamespaceArgs
                        {
                            Enabled = false,
                        },
                        StorageClass = "string",
                    },
                    NewFilestore = new Gcp.HyperComputeCluster.Inputs.ClusterStorageResourceConfigNewFilestoreArgs
                    {
                        FileShares = new[]
                        {
                            new Gcp.HyperComputeCluster.Inputs.ClusterStorageResourceConfigNewFilestoreFileShareArgs
                            {
                                CapacityGb = "string",
                                FileShare = "string",
                            },
                        },
                        Filestore = "string",
                        Tier = "string",
                        Description = "string",
                        Protocol = "string",
                    },
                    NewLustre = new Gcp.HyperComputeCluster.Inputs.ClusterStorageResourceConfigNewLustreArgs
                    {
                        CapacityGb = "string",
                        Filesystem = "string",
                        Lustre = "string",
                        Description = "string",
                    },
                },
                Id = "string",
                Buckets = new[]
                {
                    new Gcp.HyperComputeCluster.Inputs.ClusterStorageResourceBucketArgs
                    {
                        Bucket = "string",
                    },
                },
                Filestores = new[]
                {
                    new Gcp.HyperComputeCluster.Inputs.ClusterStorageResourceFilestoreArgs
                    {
                        Filestore = "string",
                    },
                },
                Lustres = new[]
                {
                    new Gcp.HyperComputeCluster.Inputs.ClusterStorageResourceLustreArgs
                    {
                        Lustre = "string",
                    },
                },
            },
        },
    });
    
    example, err := hypercomputecluster.NewCluster(ctx, "exampleclusterResourceResourceFromHypercomputeclustercluster", &hypercomputecluster.ClusterArgs{
    	ClusterId: pulumi.String("string"),
    	Location:  pulumi.String("string"),
    	ComputeResources: hypercomputecluster.ClusterComputeResourceArray{
    		&hypercomputecluster.ClusterComputeResourceArgs{
    			Config: &hypercomputecluster.ClusterComputeResourceConfigArgs{
    				NewFlexStartInstances: &hypercomputecluster.ClusterComputeResourceConfigNewFlexStartInstancesArgs{
    					MachineType: pulumi.String("string"),
    					MaxDuration: pulumi.String("string"),
    					Zone:        pulumi.String("string"),
    				},
    				NewOnDemandInstances: &hypercomputecluster.ClusterComputeResourceConfigNewOnDemandInstancesArgs{
    					MachineType: pulumi.String("string"),
    					Zone:        pulumi.String("string"),
    				},
    				NewReservedInstances: &hypercomputecluster.ClusterComputeResourceConfigNewReservedInstancesArgs{
    					Reservation: pulumi.String("string"),
    				},
    				NewSpotInstances: &hypercomputecluster.ClusterComputeResourceConfigNewSpotInstancesArgs{
    					MachineType:       pulumi.String("string"),
    					Zone:              pulumi.String("string"),
    					TerminationAction: pulumi.String("string"),
    				},
    			},
    			Id: pulumi.String("string"),
    		},
    	},
    	Description: pulumi.String("string"),
    	Labels: pulumi.StringMap{
    		"string": pulumi.String("string"),
    	},
    	NetworkResources: hypercomputecluster.ClusterNetworkResourceArray{
    		&hypercomputecluster.ClusterNetworkResourceArgs{
    			Id: pulumi.String("string"),
    			Config: &hypercomputecluster.ClusterNetworkResourceConfigArgs{
    				ExistingNetwork: &hypercomputecluster.ClusterNetworkResourceConfigExistingNetworkArgs{
    					Network:    pulumi.String("string"),
    					Subnetwork: pulumi.String("string"),
    				},
    				NewNetwork: &hypercomputecluster.ClusterNetworkResourceConfigNewNetworkArgs{
    					Network:     pulumi.String("string"),
    					Description: pulumi.String("string"),
    				},
    			},
    			Networks: hypercomputecluster.ClusterNetworkResourceNetworkArray{
    				&hypercomputecluster.ClusterNetworkResourceNetworkArgs{
    					Network:    pulumi.String("string"),
    					Subnetwork: pulumi.String("string"),
    				},
    			},
    		},
    	},
    	Orchestrator: &hypercomputecluster.ClusterOrchestratorArgs{
    		Slurm: &hypercomputecluster.ClusterOrchestratorSlurmArgs{
    			LoginNodes: &hypercomputecluster.ClusterOrchestratorSlurmLoginNodesArgs{
    				Count:       pulumi.String("string"),
    				MachineType: pulumi.String("string"),
    				Zone:        pulumi.String("string"),
    				BootDisk: &hypercomputecluster.ClusterOrchestratorSlurmLoginNodesBootDiskArgs{
    					SizeGb: pulumi.String("string"),
    					Type:   pulumi.String("string"),
    				},
    				EnableOsLogin:   pulumi.Bool(false),
    				EnablePublicIps: pulumi.Bool(false),
    				Instances: hypercomputecluster.ClusterOrchestratorSlurmLoginNodesInstanceArray{
    					&hypercomputecluster.ClusterOrchestratorSlurmLoginNodesInstanceArgs{
    						Instance: pulumi.String("string"),
    					},
    				},
    				Labels: pulumi.StringMap{
    					"string": pulumi.String("string"),
    				},
    				StartupScript: pulumi.String("string"),
    				StorageConfigs: hypercomputecluster.ClusterOrchestratorSlurmLoginNodesStorageConfigArray{
    					&hypercomputecluster.ClusterOrchestratorSlurmLoginNodesStorageConfigArgs{
    						Id:         pulumi.String("string"),
    						LocalMount: pulumi.String("string"),
    					},
    				},
    			},
    			NodeSets: hypercomputecluster.ClusterOrchestratorSlurmNodeSetArray{
    				&hypercomputecluster.ClusterOrchestratorSlurmNodeSetArgs{
    					Id:        pulumi.String("string"),
    					ComputeId: pulumi.String("string"),
    					ComputeInstance: &hypercomputecluster.ClusterOrchestratorSlurmNodeSetComputeInstanceArgs{
    						BootDisk: &hypercomputecluster.ClusterOrchestratorSlurmNodeSetComputeInstanceBootDiskArgs{
    							SizeGb: pulumi.String("string"),
    							Type:   pulumi.String("string"),
    						},
    						Labels: pulumi.StringMap{
    							"string": pulumi.String("string"),
    						},
    						StartupScript: pulumi.String("string"),
    					},
    					MaxDynamicNodeCount: pulumi.String("string"),
    					StaticNodeCount:     pulumi.String("string"),
    					StorageConfigs: hypercomputecluster.ClusterOrchestratorSlurmNodeSetStorageConfigArray{
    						&hypercomputecluster.ClusterOrchestratorSlurmNodeSetStorageConfigArgs{
    							Id:         pulumi.String("string"),
    							LocalMount: pulumi.String("string"),
    						},
    					},
    				},
    			},
    			Partitions: hypercomputecluster.ClusterOrchestratorSlurmPartitionArray{
    				&hypercomputecluster.ClusterOrchestratorSlurmPartitionArgs{
    					Id: pulumi.String("string"),
    					NodeSetIds: pulumi.StringArray{
    						pulumi.String("string"),
    					},
    				},
    			},
    			DefaultPartition: pulumi.String("string"),
    			EpilogBashScripts: pulumi.StringArray{
    				pulumi.String("string"),
    			},
    			PrologBashScripts: pulumi.StringArray{
    				pulumi.String("string"),
    			},
    		},
    	},
    	Project: pulumi.String("string"),
    	StorageResources: hypercomputecluster.ClusterStorageResourceArray{
    		&hypercomputecluster.ClusterStorageResourceArgs{
    			Config: &hypercomputecluster.ClusterStorageResourceConfigArgs{
    				ExistingBucket: &hypercomputecluster.ClusterStorageResourceConfigExistingBucketArgs{
    					Bucket: pulumi.String("string"),
    				},
    				ExistingFilestore: &hypercomputecluster.ClusterStorageResourceConfigExistingFilestoreArgs{
    					Filestore: pulumi.String("string"),
    				},
    				ExistingLustre: &hypercomputecluster.ClusterStorageResourceConfigExistingLustreArgs{
    					Lustre: pulumi.String("string"),
    				},
    				NewBucket: &hypercomputecluster.ClusterStorageResourceConfigNewBucketArgs{
    					Bucket: pulumi.String("string"),
    					Autoclass: &hypercomputecluster.ClusterStorageResourceConfigNewBucketAutoclassArgs{
    						Enabled: pulumi.Bool(false),
    					},
    					HierarchicalNamespace: &hypercomputecluster.ClusterStorageResourceConfigNewBucketHierarchicalNamespaceArgs{
    						Enabled: pulumi.Bool(false),
    					},
    					StorageClass: pulumi.String("string"),
    				},
    				NewFilestore: &hypercomputecluster.ClusterStorageResourceConfigNewFilestoreArgs{
    					FileShares: hypercomputecluster.ClusterStorageResourceConfigNewFilestoreFileShareArray{
    						&hypercomputecluster.ClusterStorageResourceConfigNewFilestoreFileShareArgs{
    							CapacityGb: pulumi.String("string"),
    							FileShare:  pulumi.String("string"),
    						},
    					},
    					Filestore:   pulumi.String("string"),
    					Tier:        pulumi.String("string"),
    					Description: pulumi.String("string"),
    					Protocol:    pulumi.String("string"),
    				},
    				NewLustre: &hypercomputecluster.ClusterStorageResourceConfigNewLustreArgs{
    					CapacityGb:  pulumi.String("string"),
    					Filesystem:  pulumi.String("string"),
    					Lustre:      pulumi.String("string"),
    					Description: pulumi.String("string"),
    				},
    			},
    			Id: pulumi.String("string"),
    			Buckets: hypercomputecluster.ClusterStorageResourceBucketArray{
    				&hypercomputecluster.ClusterStorageResourceBucketArgs{
    					Bucket: pulumi.String("string"),
    				},
    			},
    			Filestores: hypercomputecluster.ClusterStorageResourceFilestoreArray{
    				&hypercomputecluster.ClusterStorageResourceFilestoreArgs{
    					Filestore: pulumi.String("string"),
    				},
    			},
    			Lustres: hypercomputecluster.ClusterStorageResourceLustreArray{
    				&hypercomputecluster.ClusterStorageResourceLustreArgs{
    					Lustre: pulumi.String("string"),
    				},
    			},
    		},
    	},
    })
    
    var exampleclusterResourceResourceFromHypercomputeclustercluster = new com.pulumi.gcp.hypercomputecluster.Cluster("exampleclusterResourceResourceFromHypercomputeclustercluster", com.pulumi.gcp.hypercomputecluster.ClusterArgs.builder()
        .clusterId("string")
        .location("string")
        .computeResources(ClusterComputeResourceArgs.builder()
            .config(ClusterComputeResourceConfigArgs.builder()
                .newFlexStartInstances(ClusterComputeResourceConfigNewFlexStartInstancesArgs.builder()
                    .machineType("string")
                    .maxDuration("string")
                    .zone("string")
                    .build())
                .newOnDemandInstances(ClusterComputeResourceConfigNewOnDemandInstancesArgs.builder()
                    .machineType("string")
                    .zone("string")
                    .build())
                .newReservedInstances(ClusterComputeResourceConfigNewReservedInstancesArgs.builder()
                    .reservation("string")
                    .build())
                .newSpotInstances(ClusterComputeResourceConfigNewSpotInstancesArgs.builder()
                    .machineType("string")
                    .zone("string")
                    .terminationAction("string")
                    .build())
                .build())
            .id("string")
            .build())
        .description("string")
        .labels(Map.of("string", "string"))
        .networkResources(ClusterNetworkResourceArgs.builder()
            .id("string")
            .config(ClusterNetworkResourceConfigArgs.builder()
                .existingNetwork(ClusterNetworkResourceConfigExistingNetworkArgs.builder()
                    .network("string")
                    .subnetwork("string")
                    .build())
                .newNetwork(ClusterNetworkResourceConfigNewNetworkArgs.builder()
                    .network("string")
                    .description("string")
                    .build())
                .build())
            .networks(ClusterNetworkResourceNetworkArgs.builder()
                .network("string")
                .subnetwork("string")
                .build())
            .build())
        .orchestrator(ClusterOrchestratorArgs.builder()
            .slurm(ClusterOrchestratorSlurmArgs.builder()
                .loginNodes(ClusterOrchestratorSlurmLoginNodesArgs.builder()
                    .count("string")
                    .machineType("string")
                    .zone("string")
                    .bootDisk(ClusterOrchestratorSlurmLoginNodesBootDiskArgs.builder()
                        .sizeGb("string")
                        .type("string")
                        .build())
                    .enableOsLogin(false)
                    .enablePublicIps(false)
                    .instances(ClusterOrchestratorSlurmLoginNodesInstanceArgs.builder()
                        .instance("string")
                        .build())
                    .labels(Map.of("string", "string"))
                    .startupScript("string")
                    .storageConfigs(ClusterOrchestratorSlurmLoginNodesStorageConfigArgs.builder()
                        .id("string")
                        .localMount("string")
                        .build())
                    .build())
                .nodeSets(ClusterOrchestratorSlurmNodeSetArgs.builder()
                    .id("string")
                    .computeId("string")
                    .computeInstance(ClusterOrchestratorSlurmNodeSetComputeInstanceArgs.builder()
                        .bootDisk(ClusterOrchestratorSlurmNodeSetComputeInstanceBootDiskArgs.builder()
                            .sizeGb("string")
                            .type("string")
                            .build())
                        .labels(Map.of("string", "string"))
                        .startupScript("string")
                        .build())
                    .maxDynamicNodeCount("string")
                    .staticNodeCount("string")
                    .storageConfigs(ClusterOrchestratorSlurmNodeSetStorageConfigArgs.builder()
                        .id("string")
                        .localMount("string")
                        .build())
                    .build())
                .partitions(ClusterOrchestratorSlurmPartitionArgs.builder()
                    .id("string")
                    .nodeSetIds("string")
                    .build())
                .defaultPartition("string")
                .epilogBashScripts("string")
                .prologBashScripts("string")
                .build())
            .build())
        .project("string")
        .storageResources(ClusterStorageResourceArgs.builder()
            .config(ClusterStorageResourceConfigArgs.builder()
                .existingBucket(ClusterStorageResourceConfigExistingBucketArgs.builder()
                    .bucket("string")
                    .build())
                .existingFilestore(ClusterStorageResourceConfigExistingFilestoreArgs.builder()
                    .filestore("string")
                    .build())
                .existingLustre(ClusterStorageResourceConfigExistingLustreArgs.builder()
                    .lustre("string")
                    .build())
                .newBucket(ClusterStorageResourceConfigNewBucketArgs.builder()
                    .bucket("string")
                    .autoclass(ClusterStorageResourceConfigNewBucketAutoclassArgs.builder()
                        .enabled(false)
                        .build())
                    .hierarchicalNamespace(ClusterStorageResourceConfigNewBucketHierarchicalNamespaceArgs.builder()
                        .enabled(false)
                        .build())
                    .storageClass("string")
                    .build())
                .newFilestore(ClusterStorageResourceConfigNewFilestoreArgs.builder()
                    .fileShares(ClusterStorageResourceConfigNewFilestoreFileShareArgs.builder()
                        .capacityGb("string")
                        .fileShare("string")
                        .build())
                    .filestore("string")
                    .tier("string")
                    .description("string")
                    .protocol("string")
                    .build())
                .newLustre(ClusterStorageResourceConfigNewLustreArgs.builder()
                    .capacityGb("string")
                    .filesystem("string")
                    .lustre("string")
                    .description("string")
                    .build())
                .build())
            .id("string")
            .buckets(ClusterStorageResourceBucketArgs.builder()
                .bucket("string")
                .build())
            .filestores(ClusterStorageResourceFilestoreArgs.builder()
                .filestore("string")
                .build())
            .lustres(ClusterStorageResourceLustreArgs.builder()
                .lustre("string")
                .build())
            .build())
        .build());
    
    examplecluster_resource_resource_from_hypercomputeclustercluster = gcp.hypercomputecluster.Cluster("exampleclusterResourceResourceFromHypercomputeclustercluster",
        cluster_id="string",
        location="string",
        compute_resources=[{
            "config": {
                "new_flex_start_instances": {
                    "machine_type": "string",
                    "max_duration": "string",
                    "zone": "string",
                },
                "new_on_demand_instances": {
                    "machine_type": "string",
                    "zone": "string",
                },
                "new_reserved_instances": {
                    "reservation": "string",
                },
                "new_spot_instances": {
                    "machine_type": "string",
                    "zone": "string",
                    "termination_action": "string",
                },
            },
            "id": "string",
        }],
        description="string",
        labels={
            "string": "string",
        },
        network_resources=[{
            "id": "string",
            "config": {
                "existing_network": {
                    "network": "string",
                    "subnetwork": "string",
                },
                "new_network": {
                    "network": "string",
                    "description": "string",
                },
            },
            "networks": [{
                "network": "string",
                "subnetwork": "string",
            }],
        }],
        orchestrator={
            "slurm": {
                "login_nodes": {
                    "count": "string",
                    "machine_type": "string",
                    "zone": "string",
                    "boot_disk": {
                        "size_gb": "string",
                        "type": "string",
                    },
                    "enable_os_login": False,
                    "enable_public_ips": False,
                    "instances": [{
                        "instance": "string",
                    }],
                    "labels": {
                        "string": "string",
                    },
                    "startup_script": "string",
                    "storage_configs": [{
                        "id": "string",
                        "local_mount": "string",
                    }],
                },
                "node_sets": [{
                    "id": "string",
                    "compute_id": "string",
                    "compute_instance": {
                        "boot_disk": {
                            "size_gb": "string",
                            "type": "string",
                        },
                        "labels": {
                            "string": "string",
                        },
                        "startup_script": "string",
                    },
                    "max_dynamic_node_count": "string",
                    "static_node_count": "string",
                    "storage_configs": [{
                        "id": "string",
                        "local_mount": "string",
                    }],
                }],
                "partitions": [{
                    "id": "string",
                    "node_set_ids": ["string"],
                }],
                "default_partition": "string",
                "epilog_bash_scripts": ["string"],
                "prolog_bash_scripts": ["string"],
            },
        },
        project="string",
        storage_resources=[{
            "config": {
                "existing_bucket": {
                    "bucket": "string",
                },
                "existing_filestore": {
                    "filestore": "string",
                },
                "existing_lustre": {
                    "lustre": "string",
                },
                "new_bucket": {
                    "bucket": "string",
                    "autoclass": {
                        "enabled": False,
                    },
                    "hierarchical_namespace": {
                        "enabled": False,
                    },
                    "storage_class": "string",
                },
                "new_filestore": {
                    "file_shares": [{
                        "capacity_gb": "string",
                        "file_share": "string",
                    }],
                    "filestore": "string",
                    "tier": "string",
                    "description": "string",
                    "protocol": "string",
                },
                "new_lustre": {
                    "capacity_gb": "string",
                    "filesystem": "string",
                    "lustre": "string",
                    "description": "string",
                },
            },
            "id": "string",
            "buckets": [{
                "bucket": "string",
            }],
            "filestores": [{
                "filestore": "string",
            }],
            "lustres": [{
                "lustre": "string",
            }],
        }])
    
    const exampleclusterResourceResourceFromHypercomputeclustercluster = new gcp.hypercomputecluster.Cluster("exampleclusterResourceResourceFromHypercomputeclustercluster", {
        clusterId: "string",
        location: "string",
        computeResources: [{
            config: {
                newFlexStartInstances: {
                    machineType: "string",
                    maxDuration: "string",
                    zone: "string",
                },
                newOnDemandInstances: {
                    machineType: "string",
                    zone: "string",
                },
                newReservedInstances: {
                    reservation: "string",
                },
                newSpotInstances: {
                    machineType: "string",
                    zone: "string",
                    terminationAction: "string",
                },
            },
            id: "string",
        }],
        description: "string",
        labels: {
            string: "string",
        },
        networkResources: [{
            id: "string",
            config: {
                existingNetwork: {
                    network: "string",
                    subnetwork: "string",
                },
                newNetwork: {
                    network: "string",
                    description: "string",
                },
            },
            networks: [{
                network: "string",
                subnetwork: "string",
            }],
        }],
        orchestrator: {
            slurm: {
                loginNodes: {
                    count: "string",
                    machineType: "string",
                    zone: "string",
                    bootDisk: {
                        sizeGb: "string",
                        type: "string",
                    },
                    enableOsLogin: false,
                    enablePublicIps: false,
                    instances: [{
                        instance: "string",
                    }],
                    labels: {
                        string: "string",
                    },
                    startupScript: "string",
                    storageConfigs: [{
                        id: "string",
                        localMount: "string",
                    }],
                },
                nodeSets: [{
                    id: "string",
                    computeId: "string",
                    computeInstance: {
                        bootDisk: {
                            sizeGb: "string",
                            type: "string",
                        },
                        labels: {
                            string: "string",
                        },
                        startupScript: "string",
                    },
                    maxDynamicNodeCount: "string",
                    staticNodeCount: "string",
                    storageConfigs: [{
                        id: "string",
                        localMount: "string",
                    }],
                }],
                partitions: [{
                    id: "string",
                    nodeSetIds: ["string"],
                }],
                defaultPartition: "string",
                epilogBashScripts: ["string"],
                prologBashScripts: ["string"],
            },
        },
        project: "string",
        storageResources: [{
            config: {
                existingBucket: {
                    bucket: "string",
                },
                existingFilestore: {
                    filestore: "string",
                },
                existingLustre: {
                    lustre: "string",
                },
                newBucket: {
                    bucket: "string",
                    autoclass: {
                        enabled: false,
                    },
                    hierarchicalNamespace: {
                        enabled: false,
                    },
                    storageClass: "string",
                },
                newFilestore: {
                    fileShares: [{
                        capacityGb: "string",
                        fileShare: "string",
                    }],
                    filestore: "string",
                    tier: "string",
                    description: "string",
                    protocol: "string",
                },
                newLustre: {
                    capacityGb: "string",
                    filesystem: "string",
                    lustre: "string",
                    description: "string",
                },
            },
            id: "string",
            buckets: [{
                bucket: "string",
            }],
            filestores: [{
                filestore: "string",
            }],
            lustres: [{
                lustre: "string",
            }],
        }],
    });
    
    type: gcp:hypercomputecluster:Cluster
    properties:
        clusterId: string
        computeResources:
            - config:
                newFlexStartInstances:
                    machineType: string
                    maxDuration: string
                    zone: string
                newOnDemandInstances:
                    machineType: string
                    zone: string
                newReservedInstances:
                    reservation: string
                newSpotInstances:
                    machineType: string
                    terminationAction: string
                    zone: string
              id: string
        description: string
        labels:
            string: string
        location: string
        networkResources:
            - config:
                existingNetwork:
                    network: string
                    subnetwork: string
                newNetwork:
                    description: string
                    network: string
              id: string
              networks:
                - network: string
                  subnetwork: string
        orchestrator:
            slurm:
                defaultPartition: string
                epilogBashScripts:
                    - string
                loginNodes:
                    bootDisk:
                        sizeGb: string
                        type: string
                    count: string
                    enableOsLogin: false
                    enablePublicIps: false
                    instances:
                        - instance: string
                    labels:
                        string: string
                    machineType: string
                    startupScript: string
                    storageConfigs:
                        - id: string
                          localMount: string
                    zone: string
                nodeSets:
                    - computeId: string
                      computeInstance:
                        bootDisk:
                            sizeGb: string
                            type: string
                        labels:
                            string: string
                        startupScript: string
                      id: string
                      maxDynamicNodeCount: string
                      staticNodeCount: string
                      storageConfigs:
                        - id: string
                          localMount: string
                partitions:
                    - id: string
                      nodeSetIds:
                        - string
                prologBashScripts:
                    - string
        project: string
        storageResources:
            - buckets:
                - bucket: string
              config:
                existingBucket:
                    bucket: string
                existingFilestore:
                    filestore: string
                existingLustre:
                    lustre: string
                newBucket:
                    autoclass:
                        enabled: false
                    bucket: string
                    hierarchicalNamespace:
                        enabled: false
                    storageClass: string
                newFilestore:
                    description: string
                    fileShares:
                        - capacityGb: string
                          fileShare: string
                    filestore: string
                    protocol: string
                    tier: string
                newLustre:
                    capacityGb: string
                    description: string
                    filesystem: string
                    lustre: string
              filestores:
                - filestore: string
              id: string
              lustres:
                - lustre: string
    

    Cluster Resource Properties

    To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

    Inputs

    In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.

    The Cluster resource accepts the following input properties:

    ClusterId string
    ID of the cluster to create. Must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters).
    Location string
    Resource ID segment making up resource name. It identifies the resource within its parent collection as described in https://google.aip.dev/122.
    ComputeResources List<ClusterComputeResource>
    Compute resources available to the cluster. Each entry's `id` specifies the ID of the compute resource by which it can be referenced elsewhere, and must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters). Structure is documented below.
    Description string
    User-provided description of the cluster.
    Labels Dictionary<string, string>
    Labels applied to the cluster. Labels can be used to organize clusters and to filter them in queries. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.
    NetworkResources List<ClusterNetworkResource>
    Network resources available to the cluster. Must contain at most one value. Each entry's `id` specifies the ID of the network resource by which it can be referenced elsewhere, and must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters). Structure is documented below.
    Orchestrator ClusterOrchestrator
    The component responsible for scheduling and running workloads on the cluster as well as providing the user interface for interacting with the cluster at runtime. Structure is documented below.
    Project string
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    StorageResources List<ClusterStorageResource>
    Storage resources available to the cluster. Each entry's `id` specifies the ID of the storage resource by which it can be referenced elsewhere, and must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters). Structure is documented below.
    ClusterId string
    ID of the cluster to create. Must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters).
    Location string
    Resource ID segment making up resource name. It identifies the resource within its parent collection as described in https://google.aip.dev/122.
    ComputeResources []ClusterComputeResourceArgs
    Compute resources available to the cluster. Each entry's `Id` specifies the ID of the compute resource by which it can be referenced elsewhere, and must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters). Structure is documented below.
    Description string
    User-provided description of the cluster.
    Labels map[string]string
    Labels applied to the cluster. Labels can be used to organize clusters and to filter them in queries. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.
    NetworkResources []ClusterNetworkResourceArgs
    Network resources available to the cluster. Must contain at most one value. Each entry's `Id` specifies the ID of the network resource by which it can be referenced elsewhere, and must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters). Structure is documented below.
    Orchestrator ClusterOrchestratorArgs
    The component responsible for scheduling and running workloads on the cluster as well as providing the user interface for interacting with the cluster at runtime. Structure is documented below.
    Project string
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    StorageResources []ClusterStorageResourceArgs
    Storage resources available to the cluster. Keys specify the ID of the storage resource by which it can be referenced elsewhere, and must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters). Structure is documented below.
    clusterId String
    ID of the cluster to create. Must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters).
    location String
    Resource ID segment making up resource name. It identifies the resource within its parent collection as described in https://google.aip.dev/122.
    computeResources List<ClusterComputeResource>
    Compute resources available to the cluster. Keys specify the ID of the compute resource by which it can be referenced elsewhere, and must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters). Structure is documented below.
    description String
    User-provided description of the cluster.
    labels Map<String,String>
    Labels applied to the cluster. Labels can be used to organize clusters and to filter them in queries. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.
    networkResources List<ClusterNetworkResource>
    Network resources available to the cluster. Must contain at most one value. Keys specify the ID of the network resource by which it can be referenced elsewhere, and must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters). Structure is documented below.
    orchestrator ClusterOrchestrator
    The component responsible for scheduling and running workloads on the cluster as well as providing the user interface for interacting with the cluster at runtime. Structure is documented below.
    project String
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    storageResources List<ClusterStorageResource>
    Storage resources available to the cluster. Keys specify the ID of the storage resource by which it can be referenced elsewhere, and must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters). Structure is documented below.
    clusterId string
    ID of the cluster to create. Must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters).
    location string
    Resource ID segment making up resource name. It identifies the resource within its parent collection as described in https://google.aip.dev/122.
    computeResources ClusterComputeResource[]
    Compute resources available to the cluster. Keys specify the ID of the compute resource by which it can be referenced elsewhere, and must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters). Structure is documented below.
    description string
    User-provided description of the cluster.
    labels {[key: string]: string}
    Labels applied to the cluster. Labels can be used to organize clusters and to filter them in queries. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.
    networkResources ClusterNetworkResource[]
    Network resources available to the cluster. Must contain at most one value. Keys specify the ID of the network resource by which it can be referenced elsewhere, and must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters). Structure is documented below.
    orchestrator ClusterOrchestrator
    The component responsible for scheduling and running workloads on the cluster as well as providing the user interface for interacting with the cluster at runtime. Structure is documented below.
    project string
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    storageResources ClusterStorageResource[]
    Storage resources available to the cluster. Keys specify the ID of the storage resource by which it can be referenced elsewhere, and must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters). Structure is documented below.
    cluster_id str
    ID of the cluster to create. Must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters).
    location str
    Resource ID segment making up resource name. It identifies the resource within its parent collection as described in https://google.aip.dev/122.
    compute_resources Sequence[ClusterComputeResourceArgs]
    Compute resources available to the cluster. Keys specify the ID of the compute resource by which it can be referenced elsewhere, and must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters). Structure is documented below.
    description str
    User-provided description of the cluster.
    labels Mapping[str, str]
    Labels applied to the cluster. Labels can be used to organize clusters and to filter them in queries. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.
    network_resources Sequence[ClusterNetworkResourceArgs]
    Network resources available to the cluster. Must contain at most one value. Keys specify the ID of the network resource by which it can be referenced elsewhere, and must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters). Structure is documented below.
    orchestrator ClusterOrchestratorArgs
    The component responsible for scheduling and running workloads on the cluster as well as providing the user interface for interacting with the cluster at runtime. Structure is documented below.
    project str
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    storage_resources Sequence[ClusterStorageResourceArgs]
    Storage resources available to the cluster. Keys specify the ID of the storage resource by which it can be referenced elsewhere, and must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters). Structure is documented below.
    clusterId String
    ID of the cluster to create. Must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters).
    location String
    Resource ID segment making up resource name. It identifies the resource within its parent collection as described in https://google.aip.dev/122.
    computeResources List<Property Map>
    Compute resources available to the cluster. Keys specify the ID of the compute resource by which it can be referenced elsewhere, and must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters). Structure is documented below.
    description String
    User-provided description of the cluster.
    labels Map<String>
    Labels applied to the cluster. Labels can be used to organize clusters and to filter them in queries. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.
    networkResources List<Property Map>
    Network resources available to the cluster. Must contain at most one value. Keys specify the ID of the network resource by which it can be referenced elsewhere, and must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters). Structure is documented below.
    orchestrator Property Map
    The component responsible for scheduling and running workloads on the cluster as well as providing the user interface for interacting with the cluster at runtime. Structure is documented below.
    project String
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    storageResources List<Property Map>
    Storage resources available to the cluster. Keys specify the ID of the storage resource by which it can be referenced elsewhere, and must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters). Structure is documented below.

    Outputs

    All input properties are implicitly available as output properties. Additionally, the Cluster resource produces the following output properties:

    CreateTime string
    Time that the cluster was originally created.
    EffectiveLabels Dictionary<string, string>
    All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
    Id string
    The provider-assigned unique ID for this managed resource.
    Name string
    Identifier. Relative resource name of the cluster, in the format projects/{project}/locations/{location}/clusters/{cluster}.
    PulumiLabels Dictionary<string, string>
    The combination of labels configured directly on the resource and default labels configured on the provider.
    Reconciling bool
    Indicates whether changes to the cluster are currently in flight. If this is true, then the current state might not match the cluster's intended state.
    UpdateTime string
    Time that the cluster was most recently updated.
    CreateTime string
    Time that the cluster was originally created.
    EffectiveLabels map[string]string
    All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
    Id string
    The provider-assigned unique ID for this managed resource.
    Name string
    Identifier. Relative resource name of the cluster, in the format projects/{project}/locations/{location}/clusters/{cluster}.
    PulumiLabels map[string]string
    The combination of labels configured directly on the resource and default labels configured on the provider.
    Reconciling bool
    Indicates whether changes to the cluster are currently in flight. If this is true, then the current state might not match the cluster's intended state.
    UpdateTime string
    Time that the cluster was most recently updated.
    createTime String
    Time that the cluster was originally created.
    effectiveLabels Map<String,String>
    All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
    id String
    The provider-assigned unique ID for this managed resource.
    name String
    Identifier. Relative resource name of the cluster, in the format projects/{project}/locations/{location}/clusters/{cluster}.
    pulumiLabels Map<String,String>
    The combination of labels configured directly on the resource and default labels configured on the provider.
    reconciling Boolean
    Indicates whether changes to the cluster are currently in flight. If this is true, then the current state might not match the cluster's intended state.
    updateTime String
    Time that the cluster was most recently updated.
    createTime string
    Time that the cluster was originally created.
    effectiveLabels {[key: string]: string}
    All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
    id string
    The provider-assigned unique ID for this managed resource.
    name string
    Identifier. Relative resource name of the cluster, in the format projects/{project}/locations/{location}/clusters/{cluster}.
    pulumiLabels {[key: string]: string}
    The combination of labels configured directly on the resource and default labels configured on the provider.
    reconciling boolean
    Indicates whether changes to the cluster are currently in flight. If this is true, then the current state might not match the cluster's intended state.
    updateTime string
    Time that the cluster was most recently updated.
    create_time str
    Time that the cluster was originally created.
    effective_labels Mapping[str, str]
    All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
    id str
    The provider-assigned unique ID for this managed resource.
    name str
    Identifier. Relative resource name of the cluster, in the format projects/{project}/locations/{location}/clusters/{cluster}.
    pulumi_labels Mapping[str, str]
    The combination of labels configured directly on the resource and default labels configured on the provider.
    reconciling bool
    Indicates whether changes to the cluster are currently in flight. If this is true, then the current state might not match the cluster's intended state.
    update_time str
    Time that the cluster was most recently updated.
    createTime String
    Time that the cluster was originally created.
    effectiveLabels Map<String>
    All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
    id String
    The provider-assigned unique ID for this managed resource.
    name String
    Identifier. Relative resource name of the cluster, in the format projects/{project}/locations/{location}/clusters/{cluster}.
    pulumiLabels Map<String>
    The combination of labels configured directly on the resource and default labels configured on the provider.
    reconciling Boolean
    Indicates whether changes to the cluster are currently in flight. If this is true, then the current state might not match the cluster's intended state.
    updateTime String
    Time that the cluster was most recently updated.

    Look up Existing Cluster Resource

    Get an existing Cluster resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

    public static get(name: string, id: Input<ID>, state?: ClusterState, opts?: CustomResourceOptions): Cluster
    @staticmethod
    def get(resource_name: str,
            id: str,
            opts: Optional[ResourceOptions] = None,
            cluster_id: Optional[str] = None,
            compute_resources: Optional[Sequence[ClusterComputeResourceArgs]] = None,
            create_time: Optional[str] = None,
            description: Optional[str] = None,
            effective_labels: Optional[Mapping[str, str]] = None,
            labels: Optional[Mapping[str, str]] = None,
            location: Optional[str] = None,
            name: Optional[str] = None,
            network_resources: Optional[Sequence[ClusterNetworkResourceArgs]] = None,
            orchestrator: Optional[ClusterOrchestratorArgs] = None,
            project: Optional[str] = None,
            pulumi_labels: Optional[Mapping[str, str]] = None,
            reconciling: Optional[bool] = None,
            storage_resources: Optional[Sequence[ClusterStorageResourceArgs]] = None,
            update_time: Optional[str] = None) -> Cluster
    func GetCluster(ctx *Context, name string, id IDInput, state *ClusterState, opts ...ResourceOption) (*Cluster, error)
    public static Cluster Get(string name, Input<string> id, ClusterState? state, CustomResourceOptions? opts = null)
    public static Cluster get(String name, Output<String> id, ClusterState state, CustomResourceOptions options)
    resources:
      _:
        type: gcp:hypercomputecluster:Cluster
        get:
          id: ${id}
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    resource_name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    The following state arguments are supported:
    ClusterId string
    ID of the cluster to create. Must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters).
    ComputeResources List<ClusterComputeResource>
    Compute resources available to the cluster. Keys specify the ID of the compute resource by which it can be referenced elsewhere, and must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters). Structure is documented below.
    CreateTime string
    Time that the cluster was originally created.
    Description string
    User-provided description of the cluster.
    EffectiveLabels Dictionary<string, string>
    All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
    Labels Dictionary<string, string>
    Labels applied to the cluster. Labels can be used to organize clusters and to filter them in queries. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.
    Location string
    Resource ID segment making up resource name. It identifies the resource within its parent collection as described in https://google.aip.dev/122.
    Name string
    Identifier. Relative resource name of the cluster, in the format projects/{project}/locations/{location}/clusters/{cluster}.
    NetworkResources List<ClusterNetworkResource>
    Network resources available to the cluster. Must contain at most one value. Keys specify the ID of the network resource by which it can be referenced elsewhere, and must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters). Structure is documented below.
    Orchestrator ClusterOrchestrator
    The component responsible for scheduling and running workloads on the cluster as well as providing the user interface for interacting with the cluster at runtime. Structure is documented below.
    Project string
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    PulumiLabels Dictionary<string, string>
    The combination of labels configured directly on the resource and default labels configured on the provider.
    Reconciling bool
    Indicates whether changes to the cluster are currently in flight. If this is true, then the current state might not match the cluster's intended state.
    StorageResources List<ClusterStorageResource>
    Storage resources available to the cluster. Keys specify the ID of the storage resource by which it can be referenced elsewhere, and must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters). Structure is documented below.
    UpdateTime string
    Time that the cluster was most recently updated.
    ClusterId string
    ID of the cluster to create. Must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters).
    ComputeResources []ClusterComputeResourceArgs
    Compute resources available to the cluster. Keys specify the ID of the compute resource by which it can be referenced elsewhere, and must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters). Structure is documented below.
    CreateTime string
    Time that the cluster was originally created.
    Description string
    User-provided description of the cluster.
    EffectiveLabels map[string]string
    All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
    Labels map[string]string
    Labels applied to the cluster. Labels can be used to organize clusters and to filter them in queries. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.
    Location string
    Resource ID segment making up resource name. It identifies the resource within its parent collection as described in https://google.aip.dev/122.
    Name string
    Identifier. Relative resource name of the cluster, in the format projects/{project}/locations/{location}/clusters/{cluster}.
    NetworkResources []ClusterNetworkResourceArgs
    Network resources available to the cluster. Must contain at most one value. Keys specify the ID of the network resource by which it can be referenced elsewhere, and must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters). Structure is documented below.
    Orchestrator ClusterOrchestratorArgs
    The component responsible for scheduling and running workloads on the cluster as well as providing the user interface for interacting with the cluster at runtime. Structure is documented below.
    Project string
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    PulumiLabels map[string]string
    The combination of labels configured directly on the resource and default labels configured on the provider.
    Reconciling bool
    Indicates whether changes to the cluster are currently in flight. If this is true, then the current state might not match the cluster's intended state.
    StorageResources []ClusterStorageResourceArgs
    Storage resources available to the cluster. Keys specify the ID of the storage resource by which it can be referenced elsewhere, and must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters). Structure is documented below.
    UpdateTime string
    Time that the cluster was most recently updated.
    clusterId String
    ID of the cluster to create. Must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters).
    computeResources List<ClusterComputeResource>
    Compute resources available to the cluster. Keys specify the ID of the compute resource by which it can be referenced elsewhere, and must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters). Structure is documented below.
    createTime String
    Time that the cluster was originally created.
    description String
    User-provided description of the cluster.
    effectiveLabels Map<String,String>
    All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
    labels Map<String,String>
    Labels applied to the cluster. Labels can be used to organize clusters and to filter them in queries. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.
    location String
    Resource ID segment making up resource name. It identifies the resource within its parent collection as described in https://google.aip.dev/122.
    name String
    Identifier. Relative resource name of the cluster, in the format projects/{project}/locations/{location}/clusters/{cluster}.
    networkResources List<ClusterNetworkResource>
    Network resources available to the cluster. Must contain at most one value. Keys specify the ID of the network resource by which it can be referenced elsewhere, and must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters). Structure is documented below.
    orchestrator ClusterOrchestrator
    The component responsible for scheduling and running workloads on the cluster as well as providing the user interface for interacting with the cluster at runtime. Structure is documented below.
    project String
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    pulumiLabels Map<String,String>
    The combination of labels configured directly on the resource and default labels configured on the provider.
    reconciling Boolean
    Indicates whether changes to the cluster are currently in flight. If this is true, then the current state might not match the cluster's intended state.
    storageResources List<ClusterStorageResource>
    Storage resources available to the cluster. Keys specify the ID of the storage resource by which it can be referenced elsewhere, and must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters). Structure is documented below.
    updateTime String
    Time that the cluster was most recently updated.
    clusterId string
    ID of the cluster to create. Must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters).
    computeResources ClusterComputeResource[]
    Compute resources available to the cluster. Keys specify the ID of the compute resource by which it can be referenced elsewhere, and must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters). Structure is documented below.
    createTime string
    Time that the cluster was originally created.
    description string
    User-provided description of the cluster.
    effectiveLabels {[key: string]: string}
    All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
    labels {[key: string]: string}
    Labels applied to the cluster. Labels can be used to organize clusters and to filter them in queries. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.
    location string
    Resource ID segment making up resource name. It identifies the resource within its parent collection as described in https://google.aip.dev/122.
    name string
    Identifier. Relative resource name of the cluster, in the format projects/{project}/locations/{location}/clusters/{cluster}.
    networkResources ClusterNetworkResource[]
    Network resources available to the cluster. Must contain at most one value. Keys specify the ID of the network resource by which it can be referenced elsewhere, and must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters). Structure is documented below.
    orchestrator ClusterOrchestrator
    The component responsible for scheduling and running workloads on the cluster as well as providing the user interface for interacting with the cluster at runtime. Structure is documented below.
    project string
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    pulumiLabels {[key: string]: string}
    The combination of labels configured directly on the resource and default labels configured on the provider.
    reconciling boolean
    Indicates whether changes to the cluster are currently in flight. If this is true, then the current state might not match the cluster's intended state.
    storageResources ClusterStorageResource[]
    Storage resources available to the cluster. Keys specify the ID of the storage resource by which it can be referenced elsewhere, and must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters). Structure is documented below.
    updateTime string
    Time that the cluster was most recently updated.
    cluster_id str
    ID of the cluster to create. Must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters).
    compute_resources Sequence[ClusterComputeResourceArgs]
    Compute resources available to the cluster. Keys specify the ID of the compute resource by which it can be referenced elsewhere, and must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters). Structure is documented below.
    create_time str
    Time that the cluster was originally created.
    description str
    User-provided description of the cluster.
    effective_labels Mapping[str, str]
    All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
    labels Mapping[str, str]
    Labels applied to the cluster. Labels can be used to organize clusters and to filter them in queries. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.
    location str
    Resource ID segment making up resource name. It identifies the resource within its parent collection as described in https://google.aip.dev/122.
    name str
    Identifier. Relative resource name of the cluster, in the format projects/{project}/locations/{location}/clusters/{cluster}.
    network_resources Sequence[ClusterNetworkResourceArgs]
    Network resources available to the cluster. Must contain at most one value. Keys specify the ID of the network resource by which it can be referenced elsewhere, and must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters). Structure is documented below.
    orchestrator ClusterOrchestratorArgs
    The component responsible for scheduling and running workloads on the cluster as well as providing the user interface for interacting with the cluster at runtime. Structure is documented below.
    project str
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    pulumi_labels Mapping[str, str]
    The combination of labels configured directly on the resource and default labels configured on the provider.
    reconciling bool
    Indicates whether changes to the cluster are currently in flight. If this is true, then the current state might not match the cluster's intended state.
    storage_resources Sequence[ClusterStorageResourceArgs]
    Storage resources available to the cluster. Keys specify the ID of the storage resource by which it can be referenced elsewhere, and must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters). Structure is documented below.
    update_time str
    Time that the cluster was most recently updated.
    clusterId String
    ID of the cluster to create. Must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters).
    computeResources List<Property Map>
    Compute resources available to the cluster. Keys specify the ID of the compute resource by which it can be referenced elsewhere, and must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters). Structure is documented below.
    createTime String
    Time that the cluster was originally created.
    description String
    User-provided description of the cluster.
    effectiveLabels Map<String>
    All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
    labels Map<String>
    Labels applied to the cluster. Labels can be used to organize clusters and to filter them in queries. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.
    location String
    Resource ID segment making up resource name. It identifies the resource within its parent collection as described in https://google.aip.dev/122.
    name String
    Identifier. Relative resource name of the cluster, in the format projects/{project}/locations/{location}/clusters/{cluster}.
    networkResources List<Property Map>
    Network resources available to the cluster. Must contain at most one value. Keys specify the ID of the network resource by which it can be referenced elsewhere, and must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters). Structure is documented below.
    orchestrator Property Map
    The component responsible for scheduling and running workloads on the cluster as well as providing the user interface for interacting with the cluster at runtime. Structure is documented below.
    project String
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    pulumiLabels Map<String>
    The combination of labels configured directly on the resource and default labels configured on the provider.
    reconciling Boolean
    Indicates whether changes to the cluster are currently in flight. If this is true, then the current state might not match the cluster's intended state.
    storageResources List<Property Map>
    Storage resources available to the cluster. Keys specify the ID of the storage resource by which it can be referenced elsewhere, and must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters). Structure is documented below.
    updateTime String
    Time that the cluster was most recently updated.

    Supporting Types

    ClusterComputeResource, ClusterComputeResourceArgs

    Config ClusterComputeResourceConfig
    Describes how a compute resource should be created at runtime. Structure is documented below.
    Id string
    The identifier for this object. Format specified above.
    Config ClusterComputeResourceConfig
    Describes how a compute resource should be created at runtime. Structure is documented below.
    Id string
    The identifier for this object. Format specified above.
    config ClusterComputeResourceConfig
    Describes how a compute resource should be created at runtime. Structure is documented below.
    id String
    The identifier for this object. Format specified above.
    config ClusterComputeResourceConfig
    Describes how a compute resource should be created at runtime. Structure is documented below.
    id string
    The identifier for this object. Format specified above.
    config ClusterComputeResourceConfig
    Describes how a compute resource should be created at runtime. Structure is documented below.
    id str
    The identifier for this object. Format specified above.
    config Property Map
    Describes how a compute resource should be created at runtime. Structure is documented below.
    id String
    The identifier for this object. Format specified above.

    ClusterComputeResourceConfig, ClusterComputeResourceConfigArgs

    NewFlexStartInstances ClusterComputeResourceConfigNewFlexStartInstances
    When set in a ComputeResourceConfig, indicates that VM instances should be created using Flex Start. Structure is documented below.
    NewOnDemandInstances ClusterComputeResourceConfigNewOnDemandInstances
    When set in a ComputeResourceConfig, indicates that on-demand (i.e., using the standard provisioning model) VM instances should be created. Structure is documented below.
    NewReservedInstances ClusterComputeResourceConfigNewReservedInstances
    When set in a ComputeResourceConfig, indicates that VM instances should be created from a reservation. Structure is documented below.
    NewSpotInstances ClusterComputeResourceConfigNewSpotInstances
    When set in a ComputeResourceConfig, indicates that spot VM instances should be created. Structure is documented below.
    NewFlexStartInstances ClusterComputeResourceConfigNewFlexStartInstances
    When set in a ComputeResourceConfig, indicates that VM instances should be created using Flex Start. Structure is documented below.
    NewOnDemandInstances ClusterComputeResourceConfigNewOnDemandInstances
    When set in a ComputeResourceConfig, indicates that on-demand (i.e., using the standard provisioning model) VM instances should be created. Structure is documented below.
    NewReservedInstances ClusterComputeResourceConfigNewReservedInstances
    When set in a ComputeResourceConfig, indicates that VM instances should be created from a reservation. Structure is documented below.
    NewSpotInstances ClusterComputeResourceConfigNewSpotInstances
    When set in a ComputeResourceConfig, indicates that spot VM instances should be created. Structure is documented below.
    newFlexStartInstances ClusterComputeResourceConfigNewFlexStartInstances
    When set in a ComputeResourceConfig, indicates that VM instances should be created using Flex Start. Structure is documented below.
    newOnDemandInstances ClusterComputeResourceConfigNewOnDemandInstances
    When set in a ComputeResourceConfig, indicates that on-demand (i.e., using the standard provisioning model) VM instances should be created. Structure is documented below.
    newReservedInstances ClusterComputeResourceConfigNewReservedInstances
    When set in a ComputeResourceConfig, indicates that VM instances should be created from a reservation. Structure is documented below.
    newSpotInstances ClusterComputeResourceConfigNewSpotInstances
    When set in a ComputeResourceConfig, indicates that spot VM instances should be created. Structure is documented below.
    newFlexStartInstances ClusterComputeResourceConfigNewFlexStartInstances
    When set in a ComputeResourceConfig, indicates that VM instances should be created using Flex Start. Structure is documented below.
    newOnDemandInstances ClusterComputeResourceConfigNewOnDemandInstances
    When set in a ComputeResourceConfig, indicates that on-demand (i.e., using the standard provisioning model) VM instances should be created. Structure is documented below.
    newReservedInstances ClusterComputeResourceConfigNewReservedInstances
    When set in a ComputeResourceConfig, indicates that VM instances should be created from a reservation. Structure is documented below.
    newSpotInstances ClusterComputeResourceConfigNewSpotInstances
    When set in a ComputeResourceConfig, indicates that spot VM instances should be created. Structure is documented below.
    new_flex_start_instances ClusterComputeResourceConfigNewFlexStartInstances
    When set in a ComputeResourceConfig, indicates that VM instances should be created using Flex Start. Structure is documented below.
    new_on_demand_instances ClusterComputeResourceConfigNewOnDemandInstances
    When set in a ComputeResourceConfig, indicates that on-demand (i.e., using the standard provisioning model) VM instances should be created. Structure is documented below.
    new_reserved_instances ClusterComputeResourceConfigNewReservedInstances
    When set in a ComputeResourceConfig, indicates that VM instances should be created from a reservation. Structure is documented below.
    new_spot_instances ClusterComputeResourceConfigNewSpotInstances
    When set in a ComputeResourceConfig, indicates that spot VM instances should be created. Structure is documented below.
    newFlexStartInstances Property Map
    When set in a ComputeResourceConfig, indicates that VM instances should be created using Flex Start. Structure is documented below.
    newOnDemandInstances Property Map
    When set in a ComputeResourceConfig, indicates that on-demand (i.e., using the standard provisioning model) VM instances should be created. Structure is documented below.
    newReservedInstances Property Map
    When set in a ComputeResourceConfig, indicates that VM instances should be created from a reservation. Structure is documented below.
    newSpotInstances Property Map
    When set in a ComputeResourceConfig, indicates that spot VM instances should be created. Structure is documented below.

    ClusterComputeResourceConfigNewFlexStartInstances, ClusterComputeResourceConfigNewFlexStartInstancesArgs

    MachineType string
    Name of the Compute Engine machine type to use, e.g. n2-standard-2.
    MaxDuration string
    Specifies the time limit for created instances. Instances will be terminated at the end of this duration.
    Zone string
    Name of the zone in which VM instances should run, e.g., us-central1-a. Must be in the same region as the cluster, and must match the zone of any other resources specified in the cluster.
    MachineType string
    Name of the Compute Engine machine type to use, e.g. n2-standard-2.
    MaxDuration string
    Specifies the time limit for created instances. Instances will be terminated at the end of this duration.
    Zone string
    Name of the zone in which VM instances should run, e.g., us-central1-a. Must be in the same region as the cluster, and must match the zone of any other resources specified in the cluster.
    machineType String
    Name of the Compute Engine machine type to use, e.g. n2-standard-2.
    maxDuration String
    Specifies the time limit for created instances. Instances will be terminated at the end of this duration.
    zone String
    Name of the zone in which VM instances should run, e.g., us-central1-a. Must be in the same region as the cluster, and must match the zone of any other resources specified in the cluster.
    machineType string
    Name of the Compute Engine machine type to use, e.g. n2-standard-2.
    maxDuration string
    Specifies the time limit for created instances. Instances will be terminated at the end of this duration.
    zone string
    Name of the zone in which VM instances should run, e.g., us-central1-a. Must be in the same region as the cluster, and must match the zone of any other resources specified in the cluster.
    machine_type str
    Name of the Compute Engine machine type to use, e.g. n2-standard-2.
    max_duration str
    Specifies the time limit for created instances. Instances will be terminated at the end of this duration.
    zone str
    Name of the zone in which VM instances should run, e.g., us-central1-a. Must be in the same region as the cluster, and must match the zone of any other resources specified in the cluster.
    machineType String
    Name of the Compute Engine machine type to use, e.g. n2-standard-2.
    maxDuration String
    Specifies the time limit for created instances. Instances will be terminated at the end of this duration.
    zone String
    Name of the zone in which VM instances should run, e.g., us-central1-a. Must be in the same region as the cluster, and must match the zone of any other resources specified in the cluster.

    ClusterComputeResourceConfigNewOnDemandInstances, ClusterComputeResourceConfigNewOnDemandInstancesArgs

    MachineType string
    Name of the Compute Engine machine type to use, e.g. n2-standard-2.
    Zone string
    Name of the zone in which VM instances should run, e.g., us-central1-a. Must be in the same region as the cluster, and must match the zone of any other resources specified in the cluster.
    MachineType string
    Name of the Compute Engine machine type to use, e.g. n2-standard-2.
    Zone string
    Name of the zone in which VM instances should run, e.g., us-central1-a. Must be in the same region as the cluster, and must match the zone of any other resources specified in the cluster.
    machineType String
    Name of the Compute Engine machine type to use, e.g. n2-standard-2.
    zone String
    Name of the zone in which VM instances should run, e.g., us-central1-a. Must be in the same region as the cluster, and must match the zone of any other resources specified in the cluster.
    machineType string
    Name of the Compute Engine machine type to use, e.g. n2-standard-2.
    zone string
    Name of the zone in which VM instances should run, e.g., us-central1-a. Must be in the same region as the cluster, and must match the zone of any other resources specified in the cluster.
    machine_type str
    Name of the Compute Engine machine type to use, e.g. n2-standard-2.
    zone str
    Name of the zone in which VM instances should run, e.g., us-central1-a. Must be in the same region as the cluster, and must match the zone of any other resources specified in the cluster.
    machineType String
    Name of the Compute Engine machine type to use, e.g. n2-standard-2.
    zone String
    Name of the zone in which VM instances should run, e.g., us-central1-a. Must be in the same region as the cluster, and must match the zone of any other resources specified in the cluster.

    ClusterComputeResourceConfigNewReservedInstances, ClusterComputeResourceConfigNewReservedInstancesArgs

    Reservation string
    Name of the reservation from which VM instances should be created, in the format projects/{project}/zones/{zone}/reservations/{reservation}.
    Reservation string
    Name of the reservation from which VM instances should be created, in the format projects/{project}/zones/{zone}/reservations/{reservation}.
    reservation String
    Name of the reservation from which VM instances should be created, in the format projects/{project}/zones/{zone}/reservations/{reservation}.
    reservation string
    Name of the reservation from which VM instances should be created, in the format projects/{project}/zones/{zone}/reservations/{reservation}.
    reservation str
    Name of the reservation from which VM instances should be created, in the format projects/{project}/zones/{zone}/reservations/{reservation}.
    reservation String
    Name of the reservation from which VM instances should be created, in the format projects/{project}/zones/{zone}/reservations/{reservation}.

    ClusterComputeResourceConfigNewSpotInstances, ClusterComputeResourceConfigNewSpotInstancesArgs

    MachineType string
    Name of the Compute Engine machine type to use, e.g. n2-standard-2.
    Zone string
    Name of the zone in which VM instances should run, e.g., us-central1-a. Must be in the same region as the cluster, and must match the zone of any other resources specified in the cluster.
    TerminationAction string
    Specifies the termination action of the instance. Possible values: STOP, DELETE.
    MachineType string
    Name of the Compute Engine machine type to use, e.g. n2-standard-2.
    Zone string
    Name of the zone in which VM instances should run, e.g., us-central1-a. Must be in the same region as the cluster, and must match the zone of any other resources specified in the cluster.
    TerminationAction string
    Specifies the termination action of the instance. Possible values: STOP, DELETE.
    machineType String
    Name of the Compute Engine machine type to use, e.g. n2-standard-2.
    zone String
    Name of the zone in which VM instances should run, e.g., us-central1-a. Must be in the same region as the cluster, and must match the zone of any other resources specified in the cluster.
    terminationAction String
    Specifies the termination action of the instance. Possible values: STOP, DELETE.
    machineType string
    Name of the Compute Engine machine type to use, e.g. n2-standard-2.
    zone string
    Name of the zone in which VM instances should run, e.g., us-central1-a. Must be in the same region as the cluster, and must match the zone of any other resources specified in the cluster.
    terminationAction string
    Specifies the termination action of the instance. Possible values: STOP, DELETE.
    machine_type str
    Name of the Compute Engine machine type to use, e.g. n2-standard-2.
    zone str
    Name of the zone in which VM instances should run, e.g., us-central1-a. Must be in the same region as the cluster, and must match the zone of any other resources specified in the cluster.
    termination_action str
    Specifies the termination action of the instance. Possible values: STOP, DELETE.
    machineType String
    Name of the Compute Engine machine type to use, e.g. n2-standard-2.
    zone String
    Name of the zone in which VM instances should run, e.g., us-central1-a. Must be in the same region as the cluster, and must match the zone of any other resources specified in the cluster.
    terminationAction String
    Specifies the termination action of the instance. Possible values: STOP, DELETE.

    ClusterNetworkResource, ClusterNetworkResourceArgs

    Id string
    The identifier for this object. Format specified above.
    Config ClusterNetworkResourceConfig
    Describes how a network resource should be initialized. Each network resource can either be imported from an existing Google Cloud resource or initialized when the cluster is created. Structure is documented below.
    Networks List<ClusterNetworkResourceNetwork>
    (Output) A reference to a VPC network in Google Compute Engine. Structure is documented below.
    Id string
    The identifier for this object. Format specified above.
    Config ClusterNetworkResourceConfig
    Describes how a network resource should be initialized. Each network resource can either be imported from an existing Google Cloud resource or initialized when the cluster is created. Structure is documented below.
    Networks []ClusterNetworkResourceNetwork
    (Output) A reference to a VPC network in Google Compute Engine. Structure is documented below.
    id String
    The identifier for this object. Format specified above.
    config ClusterNetworkResourceConfig
    Describes how a network resource should be initialized. Each network resource can either be imported from an existing Google Cloud resource or initialized when the cluster is created. Structure is documented below.
    networks List<ClusterNetworkResourceNetwork>
    (Output) A reference to a VPC network in Google Compute Engine. Structure is documented below.
    id string
    The identifier for this object. Format specified above.
    config ClusterNetworkResourceConfig
    Describes how a network resource should be initialized. Each network resource can either be imported from an existing Google Cloud resource or initialized when the cluster is created. Structure is documented below.
    networks ClusterNetworkResourceNetwork[]
    (Output) A reference to a VPC network in Google Compute Engine. Structure is documented below.
    id str
    The identifier for this object. Format specified above.
    config ClusterNetworkResourceConfig
    Describes how a network resource should be initialized. Each network resource can either be imported from an existing Google Cloud resource or initialized when the cluster is created. Structure is documented below.
    networks Sequence[ClusterNetworkResourceNetwork]
    (Output) A reference to a VPC network in Google Compute Engine. Structure is documented below.
    id String
    The identifier for this object. Format specified above.
    config Property Map
    Describes how a network resource should be initialized. Each network resource can either be imported from an existing Google Cloud resource or initialized when the cluster is created. Structure is documented below.
    networks List<Property Map>
    (Output) A reference to a VPC network in Google Compute Engine. Structure is documented below.

    ClusterNetworkResourceConfig, ClusterNetworkResourceConfigArgs

    ExistingNetwork ClusterNetworkResourceConfigExistingNetwork
    When set in a NetworkResourceConfig, indicates that an existing network should be imported. Structure is documented below.
    NewNetwork ClusterNetworkResourceConfigNewNetwork
    When set in a NetworkResourceConfig, indicates that a new network should be created. Structure is documented below.
    ExistingNetwork ClusterNetworkResourceConfigExistingNetwork
    When set in a NetworkResourceConfig, indicates that an existing network should be imported. Structure is documented below.
    NewNetwork ClusterNetworkResourceConfigNewNetwork
    When set in a NetworkResourceConfig, indicates that a new network should be created. Structure is documented below.
    existingNetwork ClusterNetworkResourceConfigExistingNetwork
    When set in a NetworkResourceConfig, indicates that an existing network should be imported. Structure is documented below.
    newNetwork ClusterNetworkResourceConfigNewNetwork
    When set in a NetworkResourceConfig, indicates that a new network should be created. Structure is documented below.
    existingNetwork ClusterNetworkResourceConfigExistingNetwork
    When set in a NetworkResourceConfig, indicates that an existing network should be imported. Structure is documented below.
    newNetwork ClusterNetworkResourceConfigNewNetwork
    When set in a NetworkResourceConfig, indicates that a new network should be created. Structure is documented below.
    existing_network ClusterNetworkResourceConfigExistingNetwork
    When set in a NetworkResourceConfig, indicates that an existing network should be imported. Structure is documented below.
    new_network ClusterNetworkResourceConfigNewNetwork
    When set in a NetworkResourceConfig, indicates that a new network should be created. Structure is documented below.
    existingNetwork Property Map
    When set in a NetworkResourceConfig, indicates that an existing network should be imported. Structure is documented below.
    newNetwork Property Map
    When set in a NetworkResourceConfig, indicates that a new network should be created. Structure is documented below.

    ClusterNetworkResourceConfigExistingNetwork, ClusterNetworkResourceConfigExistingNetworkArgs

    Network string
    Name of the network to import, in the format projects/{project}/global/networks/{network}.
    Subnetwork string
    Particular subnetwork to use, in the format projects/{project}/regions/{region}/subnetworks/{subnetwork}.
    Network string
    Name of the network to import, in the format projects/{project}/global/networks/{network}.
    Subnetwork string
    Particular subnetwork to use, in the format projects/{project}/regions/{region}/subnetworks/{subnetwork}.
    network String
    Name of the network to import, in the format projects/{project}/global/networks/{network}.
    subnetwork String
    Particular subnetwork to use, in the format projects/{project}/regions/{region}/subnetworks/{subnetwork}.
    network string
    Name of the network to import, in the format projects/{project}/global/networks/{network}.
    subnetwork string
    Particular subnetwork to use, in the format projects/{project}/regions/{region}/subnetworks/{subnetwork}.
    network str
    Name of the network to import, in the format projects/{project}/global/networks/{network}.
    subnetwork str
    Particular subnetwork to use, in the format projects/{project}/regions/{region}/subnetworks/{subnetwork}.
    network String
    Name of the network to import, in the format projects/{project}/global/networks/{network}.
    subnetwork String
    Particular subnetwork to use, in the format projects/{project}/regions/{region}/subnetworks/{subnetwork}.

    ClusterNetworkResourceConfigNewNetwork, ClusterNetworkResourceConfigNewNetworkArgs

    Network string
    (Output) Name of the network, in the format projects/{project}/global/networks/{network}.
    Description string
    Description of the network. Maximum of 2048 characters.
    Network string
    (Output) Name of the network, in the format projects/{project}/global/networks/{network}.
    Description string
    Description of the network. Maximum of 2048 characters.
    network String
    (Output) Name of the network, in the format projects/{project}/global/networks/{network}.
    description String
    Description of the network. Maximum of 2048 characters.
    network string
    (Output) Name of the network, in the format projects/{project}/global/networks/{network}.
    description string
    Description of the network. Maximum of 2048 characters.
    network str
    (Output) Name of the network, in the format projects/{project}/global/networks/{network}.
    description str
    Description of the network. Maximum of 2048 characters.
    network String
    (Output) Name of the network, in the format projects/{project}/global/networks/{network}.
    description String
    Description of the network. Maximum of 2048 characters.

    ClusterNetworkResourceNetwork, ClusterNetworkResourceNetworkArgs

    Network string
    Name of the network, in the format 'projects/{project}/global/networks/{network}'.
    Subnetwork string
    Name of the particular subnetwork being used by the cluster, in the format 'projects/{project}/regions/{region}/subnetworks/{subnetwork}'.
    Network string
    Name of the network, in the format 'projects/{project}/global/networks/{network}'.
    Subnetwork string
    Name of the particular subnetwork being used by the cluster, in the format 'projects/{project}/regions/{region}/subnetworks/{subnetwork}'.
    network String
    Name of the network, in the format 'projects/{project}/global/networks/{network}'.
    subnetwork String
    Name of the particular subnetwork being used by the cluster, in the format 'projects/{project}/regions/{region}/subnetworks/{subnetwork}'.
    network string
    Name of the network, in the format 'projects/{project}/global/networks/{network}'.
    subnetwork string
    Name of the particular subnetwork being used by the cluster, in the format 'projects/{project}/regions/{region}/subnetworks/{subnetwork}'.
    network str
    Name of the network, in the format 'projects/{project}/global/networks/{network}'.
    subnetwork str
    Name of the particular subnetwork being used by the cluster, in the format 'projects/{project}/regions/{region}/subnetworks/{subnetwork}'.
    network String
    Name of the network, in the format 'projects/{project}/global/networks/{network}'.
    subnetwork String
    Name of the particular subnetwork being used by the cluster, in the format 'projects/{project}/regions/{region}/subnetworks/{subnetwork}'.

    ClusterOrchestrator, ClusterOrchestratorArgs

    Slurm ClusterOrchestratorSlurm
    When set in Orchestrator, indicates that the cluster should use Slurm as the orchestrator. Structure is documented below.
    Slurm ClusterOrchestratorSlurm
    When set in Orchestrator, indicates that the cluster should use Slurm as the orchestrator. Structure is documented below.
    slurm ClusterOrchestratorSlurm
    When set in Orchestrator, indicates that the cluster should use Slurm as the orchestrator. Structure is documented below.
    slurm ClusterOrchestratorSlurm
    When set in Orchestrator, indicates that the cluster should use Slurm as the orchestrator. Structure is documented below.
    slurm ClusterOrchestratorSlurm
    When set in Orchestrator, indicates that the cluster should use Slurm as the orchestrator. Structure is documented below.
    slurm Property Map
    When set in Orchestrator, indicates that the cluster should use Slurm as the orchestrator. Structure is documented below.

    ClusterOrchestratorSlurm, ClusterOrchestratorSlurmArgs

    LoginNodes ClusterOrchestratorSlurmLoginNodes
    Configuration for Slurm login nodes in the cluster. Login nodes are Compute Engine VM instances that allow users to access the cluster over SSH. Structure is documented below.
    NodeSets List<ClusterOrchestratorSlurmNodeSet>
    Configuration of Slurm nodesets, which define groups of compute resources that can be used by Slurm. At least one compute node is required. Structure is documented below.
    Partitions List<ClusterOrchestratorSlurmPartition>
    Configuration of Slurm partitions, which group one or more nodesets. Acts as a queue against which jobs can be submitted. At least one partition is required. Structure is documented below.
    DefaultPartition string
    Default partition to use for submitted jobs that do not explicitly specify a partition. Required if and only if there is more than one partition, in which case it must match the id of one of the partitions.
    EpilogBashScripts List<string>
    Slurm epilog scripts, which will be executed by compute nodes whenever a node finishes running a job. Values must not be empty.
    PrologBashScripts List<string>
    Slurm prolog scripts, which will be executed by compute nodes before a node begins running a new job. Values must not be empty.
    LoginNodes ClusterOrchestratorSlurmLoginNodes
    Configuration for Slurm login nodes in the cluster. Login nodes are Compute Engine VM instances that allow users to access the cluster over SSH. Structure is documented below.
    NodeSets []ClusterOrchestratorSlurmNodeSet
    Configuration of Slurm nodesets, which define groups of compute resources that can be used by Slurm. At least one compute node is required. Structure is documented below.
    Partitions []ClusterOrchestratorSlurmPartition
    Configuration of Slurm partitions, which group one or more nodesets. Acts as a queue against which jobs can be submitted. At least one partition is required. Structure is documented below.
    DefaultPartition string
    Default partition to use for submitted jobs that do not explicitly specify a partition. Required if and only if there is more than one partition, in which case it must match the id of one of the partitions.
    EpilogBashScripts []string
    Slurm epilog scripts, which will be executed by compute nodes whenever a node finishes running a job. Values must not be empty.
    PrologBashScripts []string
    Slurm prolog scripts, which will be executed by compute nodes before a node begins running a new job. Values must not be empty.
    loginNodes ClusterOrchestratorSlurmLoginNodes
    Configuration for Slurm login nodes in the cluster. Login nodes are Compute Engine VM instances that allow users to access the cluster over SSH. Structure is documented below.
    nodeSets List<ClusterOrchestratorSlurmNodeSet>
    Configuration of Slurm nodesets, which define groups of compute resources that can be used by Slurm. At least one compute node is required. Structure is documented below.
    partitions List<ClusterOrchestratorSlurmPartition>
    Configuration of Slurm partitions, which group one or more nodesets. Acts as a queue against which jobs can be submitted. At least one partition is required. Structure is documented below.
    defaultPartition String
    Default partition to use for submitted jobs that do not explicitly specify a partition. Required if and only if there is more than one partition, in which case it must match the id of one of the partitions.
    epilogBashScripts List<String>
    Slurm epilog scripts, which will be executed by compute nodes whenever a node finishes running a job. Values must not be empty.
    prologBashScripts List<String>
    Slurm prolog scripts, which will be executed by compute nodes before a node begins running a new job. Values must not be empty.
    loginNodes ClusterOrchestratorSlurmLoginNodes
    Configuration for Slurm login nodes in the cluster. Login nodes are Compute Engine VM instances that allow users to access the cluster over SSH. Structure is documented below.
    nodeSets ClusterOrchestratorSlurmNodeSet[]
    Configuration of Slurm nodesets, which define groups of compute resources that can be used by Slurm. At least one compute node is required. Structure is documented below.
    partitions ClusterOrchestratorSlurmPartition[]
    Configuration of Slurm partitions, which group one or more nodesets. Acts as a queue against which jobs can be submitted. At least one partition is required. Structure is documented below.
    defaultPartition string
    Default partition to use for submitted jobs that do not explicitly specify a partition. Required if and only if there is more than one partition, in which case it must match the id of one of the partitions.
    epilogBashScripts string[]
    Slurm epilog scripts, which will be executed by compute nodes whenever a node finishes running a job. Values must not be empty.
    prologBashScripts string[]
    Slurm prolog scripts, which will be executed by compute nodes before a node begins running a new job. Values must not be empty.
    login_nodes ClusterOrchestratorSlurmLoginNodes
    Configuration for Slurm login nodes in the cluster. Login nodes are Compute Engine VM instances that allow users to access the cluster over SSH. Structure is documented below.
    node_sets Sequence[ClusterOrchestratorSlurmNodeSet]
    Configuration of Slurm nodesets, which define groups of compute resources that can be used by Slurm. At least one compute node is required. Structure is documented below.
    partitions Sequence[ClusterOrchestratorSlurmPartition]
    Configuration of Slurm partitions, which group one or more nodesets. Acts as a queue against which jobs can be submitted. At least one partition is required. Structure is documented below.
    default_partition str
    Default partition to use for submitted jobs that do not explicitly specify a partition. Required if and only if there is more than one partition, in which case it must match the id of one of the partitions.
    epilog_bash_scripts Sequence[str]
    Slurm epilog scripts, which will be executed by compute nodes whenever a node finishes running a job. Values must not be empty.
    prolog_bash_scripts Sequence[str]
    Slurm prolog scripts, which will be executed by compute nodes before a node begins running a new job. Values must not be empty.
    loginNodes Property Map
    Configuration for Slurm login nodes in the cluster. Login nodes are Compute Engine VM instances that allow users to access the cluster over SSH. Structure is documented below.
    nodeSets List<Property Map>
    Configuration of Slurm nodesets, which define groups of compute resources that can be used by Slurm. At least one compute node is required. Structure is documented below.
    partitions List<Property Map>
    Configuration of Slurm partitions, which group one or more nodesets. Acts as a queue against which jobs can be submitted. At least one partition is required. Structure is documented below.
    defaultPartition String
    Default partition to use for submitted jobs that do not explicitly specify a partition. Required if and only if there is more than one partition, in which case it must match the id of one of the partitions.
    epilogBashScripts List<String>
    Slurm epilog scripts, which will be executed by compute nodes whenever a node finishes running a job. Values must not be empty.
    prologBashScripts List<String>
    Slurm prolog scripts, which will be executed by compute nodes before a node begins running a new job. Values must not be empty.

    ClusterOrchestratorSlurmLoginNodes, ClusterOrchestratorSlurmLoginNodesArgs

    Count string
    Number of login node instances to create.
    MachineType string
    Name of the Compute Engine machine type to use for login nodes, e.g. n2-standard-2.
    Zone string
    Name of the zone in which login nodes should run, e.g., us-central1-a. Must be in the same region as the cluster, and must match the zone of any other resources specified in the cluster.
    BootDisk ClusterOrchestratorSlurmLoginNodesBootDisk
    A Persistent disk used as the boot disk for a Compute Engine VM instance. Structure is documented below.
    EnableOsLogin bool
    Whether OS Login should be enabled on login node instances.
    EnablePublicIps bool
    Whether login node instances should be assigned external IP addresses.
    Instances List<ClusterOrchestratorSlurmLoginNodesInstance>
    (Output) Information about the login node instances that were created in Compute Engine. Structure is documented below.
    Labels Dictionary<string, string>
    Labels that should be applied to each login node instance.
    StartupScript string
    Startup script to be run on each login node instance. Max 256KB. The script must complete within the system-defined default timeout of 5 minutes. For tasks that require more time, consider running them in the background using methods such as & or nohup.
    StorageConfigs List<ClusterOrchestratorSlurmLoginNodesStorageConfig>
    How storage resources should be mounted on each login node. Structure is documented below.
    Count string
    Number of login node instances to create.
    MachineType string
    Name of the Compute Engine machine type to use for login nodes, e.g. n2-standard-2.
    Zone string
    Name of the zone in which login nodes should run, e.g., us-central1-a. Must be in the same region as the cluster, and must match the zone of any other resources specified in the cluster.
    BootDisk ClusterOrchestratorSlurmLoginNodesBootDisk
    A Persistent disk used as the boot disk for a Compute Engine VM instance. Structure is documented below.
    EnableOsLogin bool
    Whether OS Login should be enabled on login node instances.
    EnablePublicIps bool
    Whether login node instances should be assigned external IP addresses.
    Instances []ClusterOrchestratorSlurmLoginNodesInstance
    (Output) Information about the login node instances that were created in Compute Engine. Structure is documented below.
    Labels map[string]string
    Labels that should be applied to each login node instance.
    StartupScript string
    Startup script to be run on each login node instance. Max 256KB. The script must complete within the system-defined default timeout of 5 minutes. For tasks that require more time, consider running them in the background using methods such as & or nohup.
    StorageConfigs []ClusterOrchestratorSlurmLoginNodesStorageConfig
    How storage resources should be mounted on each login node. Structure is documented below.
    count String
    Number of login node instances to create.
    machineType String
    Name of the Compute Engine machine type to use for login nodes, e.g. n2-standard-2.
    zone String
    Name of the zone in which login nodes should run, e.g., us-central1-a. Must be in the same region as the cluster, and must match the zone of any other resources specified in the cluster.
    bootDisk ClusterOrchestratorSlurmLoginNodesBootDisk
    A Persistent disk used as the boot disk for a Compute Engine VM instance. Structure is documented below.
    enableOsLogin Boolean
    Whether OS Login should be enabled on login node instances.
    enablePublicIps Boolean
    Whether login node instances should be assigned external IP addresses.
    instances List<ClusterOrchestratorSlurmLoginNodesInstance>
    (Output) Information about the login node instances that were created in Compute Engine. Structure is documented below.
    labels Map<String,String>
    Labels that should be applied to each login node instance.
    startupScript String
    Startup script to be run on each login node instance. Max 256KB. The script must complete within the system-defined default timeout of 5 minutes. For tasks that require more time, consider running them in the background using methods such as & or nohup.
    storageConfigs List<ClusterOrchestratorSlurmLoginNodesStorageConfig>
    How storage resources should be mounted on each login node. Structure is documented below.
    count string
    Number of login node instances to create.
    machineType string
    Name of the Compute Engine machine type to use for login nodes, e.g. n2-standard-2.
    zone string
    Name of the zone in which login nodes should run, e.g., us-central1-a. Must be in the same region as the cluster, and must match the zone of any other resources specified in the cluster.
    bootDisk ClusterOrchestratorSlurmLoginNodesBootDisk
    A Persistent disk used as the boot disk for a Compute Engine VM instance. Structure is documented below.
    enableOsLogin boolean
    Whether OS Login should be enabled on login node instances.
    enablePublicIps boolean
    Whether login node instances should be assigned external IP addresses.
    instances ClusterOrchestratorSlurmLoginNodesInstance[]
    (Output) Information about the login node instances that were created in Compute Engine. Structure is documented below.
    labels {[key: string]: string}
    Labels that should be applied to each login node instance.
    startupScript string
    Startup script to be run on each login node instance. Max 256KB. The script must complete within the system-defined default timeout of 5 minutes. For tasks that require more time, consider running them in the background using methods such as & or nohup.
    storageConfigs ClusterOrchestratorSlurmLoginNodesStorageConfig[]
    How storage resources should be mounted on each login node. Structure is documented below.
    count str
    Number of login node instances to create.
    machine_type str
    Name of the Compute Engine machine type to use for login nodes, e.g. n2-standard-2.
    zone str
    Name of the zone in which login nodes should run, e.g., us-central1-a. Must be in the same region as the cluster, and must match the zone of any other resources specified in the cluster.
    boot_disk ClusterOrchestratorSlurmLoginNodesBootDisk
    A Persistent disk used as the boot disk for a Compute Engine VM instance. Structure is documented below.
    enable_os_login bool
    Whether OS Login should be enabled on login node instances.
    enable_public_ips bool
    Whether login node instances should be assigned external IP addresses.
    instances Sequence[ClusterOrchestratorSlurmLoginNodesInstance]
    (Output) Information about the login node instances that were created in Compute Engine. Structure is documented below.
    labels Mapping[str, str]
    Labels that should be applied to each login node instance.
    startup_script str
    Startup script to be run on each login node instance. Max 256KB. The script must complete within the system-defined default timeout of 5 minutes. For tasks that require more time, consider running them in the background using methods such as & or nohup.
    storage_configs Sequence[ClusterOrchestratorSlurmLoginNodesStorageConfig]
    How storage resources should be mounted on each login node. Structure is documented below.
    count String
    Number of login node instances to create.
    machineType String
    Name of the Compute Engine machine type to use for login nodes, e.g. n2-standard-2.
    zone String
    Name of the zone in which login nodes should run, e.g., us-central1-a. Must be in the same region as the cluster, and must match the zone of any other resources specified in the cluster.
    bootDisk Property Map
    A Persistent disk used as the boot disk for a Compute Engine VM instance. Structure is documented below.
    enableOsLogin Boolean
    Whether OS Login should be enabled on login node instances.
    enablePublicIps Boolean
    Whether login node instances should be assigned external IP addresses.
    instances List<Property Map>
    (Output) Information about the login node instances that were created in Compute Engine. Structure is documented below.
    labels Map<String>
    Labels that should be applied to each login node instance.
    startupScript String
    Startup script to be run on each login node instance. Max 256KB. The script must complete within the system-defined default timeout of 5 minutes. For tasks that require more time, consider running them in the background using methods such as & or nohup.
    storageConfigs List<Property Map>
    How storage resources should be mounted on each login node. Structure is documented below.

    ClusterOrchestratorSlurmLoginNodesBootDisk, ClusterOrchestratorSlurmLoginNodesBootDiskArgs

    SizeGb string
    Size of the disk in gigabytes. Must be at least 10GB.
    Type string
    Persistent disk type, in the format projects/{project}/zones/{zone}/diskTypes/{disk_type}.
    SizeGb string
    Size of the disk in gigabytes. Must be at least 10GB.
    Type string
    Persistent disk type, in the format projects/{project}/zones/{zone}/diskTypes/{disk_type}.
    sizeGb String
    Size of the disk in gigabytes. Must be at least 10GB.
    type String
    Persistent disk type, in the format projects/{project}/zones/{zone}/diskTypes/{disk_type}.
    sizeGb string
    Size of the disk in gigabytes. Must be at least 10GB.
    type string
    Persistent disk type, in the format projects/{project}/zones/{zone}/diskTypes/{disk_type}.
    size_gb str
    Size of the disk in gigabytes. Must be at least 10GB.
    type str
    Persistent disk type, in the format projects/{project}/zones/{zone}/diskTypes/{disk_type}.
    sizeGb String
    Size of the disk in gigabytes. Must be at least 10GB.
    type String
    Persistent disk type, in the format projects/{project}/zones/{zone}/diskTypes/{disk_type}.

    ClusterOrchestratorSlurmLoginNodesInstance, ClusterOrchestratorSlurmLoginNodesInstanceArgs

    Instance string
    Name of the VM instance, in the format 'projects/{project}/zones/{zone}/instances/{instance}'.
    Instance string
    Name of the VM instance, in the format 'projects/{project}/zones/{zone}/instances/{instance}'.
    instance String
    Name of the VM instance, in the format 'projects/{project}/zones/{zone}/instances/{instance}'.
    instance string
    Name of the VM instance, in the format 'projects/{project}/zones/{zone}/instances/{instance}'.
    instance str
    Name of the VM instance, in the format 'projects/{project}/zones/{zone}/instances/{instance}'.
    instance String
    Name of the VM instance, in the format 'projects/{project}/zones/{zone}/instances/{instance}'.

    ClusterOrchestratorSlurmLoginNodesStorageConfig, ClusterOrchestratorSlurmLoginNodesStorageConfigArgs

    Id string
    ID of the storage resource to mount, which must match a key in the cluster's storage_resources.
    LocalMount string
    A directory inside the VM instance's file system where the storage resource should be mounted (e.g., /mnt/share).
    Id string
    ID of the storage resource to mount, which must match a key in the cluster's storage_resources.
    LocalMount string
    A directory inside the VM instance's file system where the storage resource should be mounted (e.g., /mnt/share).
    id String
    ID of the storage resource to mount, which must match a key in the cluster's storage_resources.
    localMount String
    A directory inside the VM instance's file system where the storage resource should be mounted (e.g., /mnt/share).
    id string
    ID of the storage resource to mount, which must match a key in the cluster's storage_resources.
    localMount string
    A directory inside the VM instance's file system where the storage resource should be mounted (e.g., /mnt/share).
    id str
    ID of the storage resource to mount, which must match a key in the cluster's storage_resources.
    local_mount str
    A directory inside the VM instance's file system where the storage resource should be mounted (e.g., /mnt/share).
    id String
    ID of the storage resource to mount, which must match a key in the cluster's storage_resources.
    localMount String
    A directory inside the VM instance's file system where the storage resource should be mounted (e.g., /mnt/share).

    ClusterOrchestratorSlurmNodeSet, ClusterOrchestratorSlurmNodeSetArgs

    Id string
    Identifier for the nodeset, which allows it to be referenced by partitions. Must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters).
    ComputeId string
    ID of the compute resource on which this nodeset will run. Must match a key in the cluster's compute_resources.
    ComputeInstance ClusterOrchestratorSlurmNodeSetComputeInstance
    When set in a SlurmNodeSet, indicates that the nodeset should be backed by Compute Engine VM instances. Structure is documented below.
    MaxDynamicNodeCount string
    Controls how many additional nodes a cluster can bring online to handle workloads. Set this value to enable dynamic node creation and limit the number of additional nodes the cluster can bring online. Leave empty if you do not want the cluster to create nodes dynamically, and instead rely only on static nodes.
    StaticNodeCount string
    Number of nodes to be statically created for this nodeset. The cluster will attempt to ensure that at least this many nodes exist at all times.
    StorageConfigs List<ClusterOrchestratorSlurmNodeSetStorageConfig>
    How storage resources should be mounted on each compute node. Structure is documented below.
    Id string
    Identifier for the nodeset, which allows it to be referenced by partitions. Must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters).
    ComputeId string
    ID of the compute resource on which this nodeset will run. Must match a key in the cluster's compute_resources.
    ComputeInstance ClusterOrchestratorSlurmNodeSetComputeInstance
    When set in a SlurmNodeSet, indicates that the nodeset should be backed by Compute Engine VM instances. Structure is documented below.
    MaxDynamicNodeCount string
    Controls how many additional nodes a cluster can bring online to handle workloads. Set this value to enable dynamic node creation and limit the number of additional nodes the cluster can bring online. Leave empty if you do not want the cluster to create nodes dynamically, and instead rely only on static nodes.
    StaticNodeCount string
    Number of nodes to be statically created for this nodeset. The cluster will attempt to ensure that at least this many nodes exist at all times.
    StorageConfigs []ClusterOrchestratorSlurmNodeSetStorageConfig
    How storage resources should be mounted on each compute node. Structure is documented below.
    id String
    Identifier for the nodeset, which allows it to be referenced by partitions. Must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters).
    computeId String
    ID of the compute resource on which this nodeset will run. Must match a key in the cluster's compute_resources.
    computeInstance ClusterOrchestratorSlurmNodeSetComputeInstance
    When set in a SlurmNodeSet, indicates that the nodeset should be backed by Compute Engine VM instances. Structure is documented below.
    maxDynamicNodeCount String
    Controls how many additional nodes a cluster can bring online to handle workloads. Set this value to enable dynamic node creation and limit the number of additional nodes the cluster can bring online. Leave empty if you do not want the cluster to create nodes dynamically, and instead rely only on static nodes.
    staticNodeCount String
    Number of nodes to be statically created for this nodeset. The cluster will attempt to ensure that at least this many nodes exist at all times.
    storageConfigs List<ClusterOrchestratorSlurmNodeSetStorageConfig>
    How storage resources should be mounted on each compute node. Structure is documented below.
    id string
    Identifier for the nodeset, which allows it to be referenced by partitions. Must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters).
    computeId string
    ID of the compute resource on which this nodeset will run. Must match a key in the cluster's compute_resources.
    computeInstance ClusterOrchestratorSlurmNodeSetComputeInstance
    When set in a SlurmNodeSet, indicates that the nodeset should be backed by Compute Engine VM instances. Structure is documented below.
    maxDynamicNodeCount string
    Controls how many additional nodes a cluster can bring online to handle workloads. Set this value to enable dynamic node creation and limit the number of additional nodes the cluster can bring online. Leave empty if you do not want the cluster to create nodes dynamically, and instead rely only on static nodes.
    staticNodeCount string
    Number of nodes to be statically created for this nodeset. The cluster will attempt to ensure that at least this many nodes exist at all times.
    storageConfigs ClusterOrchestratorSlurmNodeSetStorageConfig[]
    How storage resources should be mounted on each compute node. Structure is documented below.
    id str
    Identifier for the nodeset, which allows it to be referenced by partitions. Must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters).
    compute_id str
    ID of the compute resource on which this nodeset will run. Must match a key in the cluster's compute_resources.
    compute_instance ClusterOrchestratorSlurmNodeSetComputeInstance
    When set in a SlurmNodeSet, indicates that the nodeset should be backed by Compute Engine VM instances. Structure is documented below.
    max_dynamic_node_count str
    Controls how many additional nodes a cluster can bring online to handle workloads. Set this value to enable dynamic node creation and limit the number of additional nodes the cluster can bring online. Leave empty if you do not want the cluster to create nodes dynamically, and instead rely only on static nodes.
    static_node_count str
    Number of nodes to be statically created for this nodeset. The cluster will attempt to ensure that at least this many nodes exist at all times.
    storage_configs Sequence[ClusterOrchestratorSlurmNodeSetStorageConfig]
    How storage resources should be mounted on each compute node. Structure is documented below.
    id String
    Identifier for the nodeset, which allows it to be referenced by partitions. Must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters).
    computeId String
    ID of the compute resource on which this nodeset will run. Must match a key in the cluster's compute_resources.
    computeInstance Property Map
    When set in a SlurmNodeSet, indicates that the nodeset should be backed by Compute Engine VM instances. Structure is documented below.
    maxDynamicNodeCount String
    Controls how many additional nodes a cluster can bring online to handle workloads. Set this value to enable dynamic node creation and limit the number of additional nodes the cluster can bring online. Leave empty if you do not want the cluster to create nodes dynamically, and instead rely only on static nodes.
    staticNodeCount String
    Number of nodes to be statically created for this nodeset. The cluster will attempt to ensure that at least this many nodes exist at all times.
    storageConfigs List<Property Map>
    How storage resources should be mounted on each compute node. Structure is documented below.

    ClusterOrchestratorSlurmNodeSetComputeInstance, ClusterOrchestratorSlurmNodeSetComputeInstanceArgs

    BootDisk ClusterOrchestratorSlurmNodeSetComputeInstanceBootDisk
    A Persistent disk used as the boot disk for a Compute Engine VM instance. Structure is documented below.
    Labels Dictionary<string, string>
    Labels that should be applied to each VM instance in the nodeset.
    StartupScript string
    Startup script to be run on each VM instance in the nodeset. Max 256KB.
    BootDisk ClusterOrchestratorSlurmNodeSetComputeInstanceBootDisk
    A Persistent disk used as the boot disk for a Compute Engine VM instance. Structure is documented below.
    Labels map[string]string
    Labels that should be applied to each VM instance in the nodeset.
    StartupScript string
    Startup script to be run on each VM instance in the nodeset. Max 256KB.
    bootDisk ClusterOrchestratorSlurmNodeSetComputeInstanceBootDisk
    A Persistent disk used as the boot disk for a Compute Engine VM instance. Structure is documented below.
    labels Map<String,String>
    Labels that should be applied to each VM instance in the nodeset.
    startupScript String
    Startup script to be run on each VM instance in the nodeset. Max 256KB.
    bootDisk ClusterOrchestratorSlurmNodeSetComputeInstanceBootDisk
    A Persistent disk used as the boot disk for a Compute Engine VM instance. Structure is documented below.
    labels {[key: string]: string}
    Labels that should be applied to each VM instance in the nodeset.
    startupScript string
    Startup script to be run on each VM instance in the nodeset. Max 256KB.
    boot_disk ClusterOrchestratorSlurmNodeSetComputeInstanceBootDisk
    A Persistent disk used as the boot disk for a Compute Engine VM instance. Structure is documented below.
    labels Mapping[str, str]
    Labels that should be applied to each VM instance in the nodeset.
    startup_script str
    Startup script to be run on each VM instance in the nodeset. Max 256KB.
    bootDisk Property Map
    A Persistent disk used as the boot disk for a Compute Engine VM instance. Structure is documented below.
    labels Map<String>
    Labels that should be applied to each VM instance in the nodeset.
    startupScript String
    Startup script to be run on each VM instance in the nodeset. Max 256KB.

    ClusterOrchestratorSlurmNodeSetComputeInstanceBootDisk, ClusterOrchestratorSlurmNodeSetComputeInstanceBootDiskArgs

    SizeGb string
    Size of the disk in gigabytes. Must be at least 10GB.
    Type string
    Persistent disk type, in the format projects/{project}/zones/{zone}/diskTypes/{disk_type}.
    SizeGb string
    Size of the disk in gigabytes. Must be at least 10GB.
    Type string
    Persistent disk type, in the format projects/{project}/zones/{zone}/diskTypes/{disk_type}.
    sizeGb String
    Size of the disk in gigabytes. Must be at least 10GB.
    type String
    Persistent disk type, in the format projects/{project}/zones/{zone}/diskTypes/{disk_type}.
    sizeGb string
    Size of the disk in gigabytes. Must be at least 10GB.
    type string
    Persistent disk type, in the format projects/{project}/zones/{zone}/diskTypes/{disk_type}.
    size_gb str
    Size of the disk in gigabytes. Must be at least 10GB.
    type str
    Persistent disk type, in the format projects/{project}/zones/{zone}/diskTypes/{disk_type}.
    sizeGb String
    Size of the disk in gigabytes. Must be at least 10GB.
    type String
    Persistent disk type, in the format projects/{project}/zones/{zone}/diskTypes/{disk_type}.

    ClusterOrchestratorSlurmNodeSetStorageConfig, ClusterOrchestratorSlurmNodeSetStorageConfigArgs

    Id string
    ID of the storage resource to mount, which must match a key in the cluster's storage_resources.
    LocalMount string
    A directory inside the VM instance's file system where the storage resource should be mounted (e.g., /mnt/share).
    Id string
    ID of the storage resource to mount, which must match a key in the cluster's storage_resources.
    LocalMount string
    A directory inside the VM instance's file system where the storage resource should be mounted (e.g., /mnt/share).
    id String
    ID of the storage resource to mount, which must match a key in the cluster's storage_resources.
    localMount String
    A directory inside the VM instance's file system where the storage resource should be mounted (e.g., /mnt/share).
    id string
    ID of the storage resource to mount, which must match a key in the cluster's storage_resources.
    localMount string
    A directory inside the VM instance's file system where the storage resource should be mounted (e.g., /mnt/share).
    id str
    ID of the storage resource to mount, which must match a key in the cluster's storage_resources.
    local_mount str
    A directory inside the VM instance's file system where the storage resource should be mounted (e.g., /mnt/share).
    id String
    ID of the storage resource to mount, which must match a key in the cluster's storage_resources.
    localMount String
    A directory inside the VM instance's file system where the storage resource should be mounted (e.g., /mnt/share).

    ClusterOrchestratorSlurmPartition, ClusterOrchestratorSlurmPartitionArgs

    Id string
    ID of the partition, which is how users will identify it. Must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters).
    NodeSetIds List<string>
    IDs of the nodesets that make up this partition. Values must match SlurmNodeSet.id.
    Id string
    ID of the partition, which is how users will identify it. Must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters).
    NodeSetIds []string
    IDs of the nodesets that make up this partition. Values must match SlurmNodeSet.id.
    id String
    ID of the partition, which is how users will identify it. Must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters).
    nodeSetIds List<String>
    IDs of the nodesets that make up this partition. Values must match SlurmNodeSet.id.
    id string
    ID of the partition, which is how users will identify it. Must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters).
    nodeSetIds string[]
    IDs of the nodesets that make up this partition. Values must match SlurmNodeSet.id.
    id str
    ID of the partition, which is how users will identify it. Must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters).
    node_set_ids Sequence[str]
    IDs of the nodesets that make up this partition. Values must match SlurmNodeSet.id.
    id String
    ID of the partition, which is how users will identify it. Must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters).
    nodeSetIds List<String>
    IDs of the nodesets that make up this partition. Values must match SlurmNodeSet.id.

    ClusterStorageResource, ClusterStorageResourceArgs

    Config ClusterStorageResourceConfig
    Describes how a storage resource should be initialized. Each storage resource can either be imported from an existing Google Cloud resource or initialized when the cluster is created. Structure is documented below.
    Id string
    The identifier for this object. Format specified above.
    Buckets List<ClusterStorageResourceBucket>
    (Output) A reference to a Cloud Storage bucket. Structure is documented below.
    Filestores List<ClusterStorageResourceFilestore>
    (Output) A reference to a Filestore instance. Structure is documented below.
    Lustres List<ClusterStorageResourceLustre>

    (Output) A reference to a Managed Lustre instance. Structure is documented below.

    <a name="nested_storage_resources_bucket"></a>The bucket block contains:

    Config ClusterStorageResourceConfig
    Describes how a storage resource should be initialized. Each storage resource can either be imported from an existing Google Cloud resource or initialized when the cluster is created. Structure is documented below.
    Id string
    The identifier for this object. Format specified above.
    Buckets []ClusterStorageResourceBucket
    (Output) A reference to a Cloud Storage bucket. Structure is documented below.
    Filestores []ClusterStorageResourceFilestore
    (Output) A reference to a Filestore instance. Structure is documented below.
    Lustres []ClusterStorageResourceLustre

    (Output) A reference to a Managed Lustre instance. Structure is documented below.

    <a name="nested_storage_resources_bucket"></a>The bucket block contains:

    config ClusterStorageResourceConfig
    Describes how a storage resource should be initialized. Each storage resource can either be imported from an existing Google Cloud resource or initialized when the cluster is created. Structure is documented below.
    id String
    The identifier for this object. Format specified above.
    buckets List<ClusterStorageResourceBucket>
    (Output) A reference to a Cloud Storage bucket. Structure is documented below.
    filestores List<ClusterStorageResourceFilestore>
    (Output) A reference to a Filestore instance. Structure is documented below.
    lustres List<ClusterStorageResourceLustre>

    (Output) A reference to a Managed Lustre instance. Structure is documented below.

    <a name="nested_storage_resources_bucket"></a>The bucket block contains:

    config ClusterStorageResourceConfig
    Describes how a storage resource should be initialized. Each storage resource can either be imported from an existing Google Cloud resource or initialized when the cluster is created. Structure is documented below.
    id string
    The identifier for this object. Format specified above.
    buckets ClusterStorageResourceBucket[]
    (Output) Name of the bucket.
    filestores ClusterStorageResourceFilestore[]
    (Output) A reference to a Filestore instance. Structure is documented below.
    lustres ClusterStorageResourceLustre[]

    (Output) A reference to a Managed Lustre instance. Structure is documented below.

    <a name="nested_storage_resources_bucket"></a>The bucket block contains:

    config ClusterStorageResourceConfig
    Describes how a storage resource should be initialized. Each storage resource can either be imported from an existing Google Cloud resource or initialized when the cluster is created. Structure is documented below.
    id str
    The identifier for this object. Format specified above.
    buckets Sequence[ClusterStorageResourceBucket]
    (Output) Name of the bucket.
    filestores Sequence[ClusterStorageResourceFilestore]
    (Output) A reference to a Filestore instance. Structure is documented below.
    lustres Sequence[ClusterStorageResourceLustre]

    (Output) A reference to a Managed Lustre instance. Structure is documented below.

    <a name="nested_storage_resources_bucket"></a>The bucket block contains:

    config Property Map
    Describes how a storage resource should be initialized. Each storage resource can either be imported from an existing Google Cloud resource or initialized when the cluster is created. Structure is documented below.
    id String
    The identifier for this object. Format specified above.
    buckets List<Property Map>
    (Output) Name of the bucket.
    filestores List<Property Map>
    (Output) A reference to a Filestore instance. Structure is documented below.
    lustres List<Property Map>

    (Output) A reference to a Managed Lustre instance. Structure is documented below.

    <a name="nested_storage_resources_bucket"></a>The bucket block contains:

    ClusterStorageResourceBucket, ClusterStorageResourceBucketArgs

    Bucket string
    Name of the bucket.
    Bucket string
    Name of the bucket.
    bucket String
    Name of the bucket.
    bucket string
    Name of the bucket.
    bucket str
    Name of the bucket.
    bucket String
    Name of the bucket.

    ClusterStorageResourceConfig, ClusterStorageResourceConfigArgs

    ExistingBucket ClusterStorageResourceConfigExistingBucket
    When set in a StorageResourceConfig, indicates that an existing Google Cloud Storage bucket should be imported. Structure is documented below.
    ExistingFilestore ClusterStorageResourceConfigExistingFilestore
    When set in a StorageResourceConfig, indicates that an existing Filestore instance should be imported. Structure is documented below.
    ExistingLustre ClusterStorageResourceConfigExistingLustre
    When set in a StorageResourceConfig, indicates that an existing Managed Lustre instance should be imported. Structure is documented below.
    NewBucket ClusterStorageResourceConfigNewBucket
    When set in a StorageResourceConfig, indicates that a new Google Cloud Storage bucket should be created. Structure is documented below.
    NewFilestore ClusterStorageResourceConfigNewFilestore
    When set in a StorageResourceConfig, indicates that a new Filestore instance should be created. Structure is documented below.
    NewLustre ClusterStorageResourceConfigNewLustre
    When set in a StorageResourceConfig, indicates that a new Managed Lustre instance should be created. Structure is documented below.
    ExistingBucket ClusterStorageResourceConfigExistingBucket
    When set in a StorageResourceConfig, indicates that an existing Google Cloud Storage bucket should be imported. Structure is documented below.
    ExistingFilestore ClusterStorageResourceConfigExistingFilestore
    When set in a StorageResourceConfig, indicates that an existing Filestore instance should be imported. Structure is documented below.
    ExistingLustre ClusterStorageResourceConfigExistingLustre
    When set in a StorageResourceConfig, indicates that an existing Managed Lustre instance should be imported. Structure is documented below.
    NewBucket ClusterStorageResourceConfigNewBucket
    When set in a StorageResourceConfig, indicates that a new Google Cloud Storage bucket should be created. Structure is documented below.
    NewFilestore ClusterStorageResourceConfigNewFilestore
    When set in a StorageResourceConfig, indicates that a new Filestore instance should be created. Structure is documented below.
    NewLustre ClusterStorageResourceConfigNewLustre
    When set in a StorageResourceConfig, indicates that a new Managed Lustre instance should be created. Structure is documented below.
    existingBucket ClusterStorageResourceConfigExistingBucket
    When set in a StorageResourceConfig, indicates that an existing Google Cloud Storage bucket should be imported. Structure is documented below.
    existingFilestore ClusterStorageResourceConfigExistingFilestore
    When set in a StorageResourceConfig, indicates that an existing Filestore instance should be imported. Structure is documented below.
    existingLustre ClusterStorageResourceConfigExistingLustre
    When set in a StorageResourceConfig, indicates that an existing Managed Lustre instance should be imported. Structure is documented below.
    newBucket ClusterStorageResourceConfigNewBucket
    When set in a StorageResourceConfig, indicates that a new Google Cloud Storage bucket should be created. Structure is documented below.
    newFilestore ClusterStorageResourceConfigNewFilestore
    When set in a StorageResourceConfig, indicates that a new Filestore instance should be created. Structure is documented below.
    newLustre ClusterStorageResourceConfigNewLustre
    When set in a StorageResourceConfig, indicates that a new Managed Lustre instance should be created. Structure is documented below.
    existingBucket ClusterStorageResourceConfigExistingBucket
    When set in a StorageResourceConfig, indicates that an existing Google Cloud Storage bucket should be imported. Structure is documented below.
    existingFilestore ClusterStorageResourceConfigExistingFilestore
    When set in a StorageResourceConfig, indicates that an existing Filestore instance should be imported. Structure is documented below.
    existingLustre ClusterStorageResourceConfigExistingLustre
    When set in a StorageResourceConfig, indicates that an existing Managed Lustre instance should be imported. Structure is documented below.
    newBucket ClusterStorageResourceConfigNewBucket
    When set in a StorageResourceConfig, indicates that a new Google Cloud Storage bucket should be created. Structure is documented below.
    newFilestore ClusterStorageResourceConfigNewFilestore
    When set in a StorageResourceConfig, indicates that a new Filestore instance should be created. Structure is documented below.
    newLustre ClusterStorageResourceConfigNewLustre
    When set in a StorageResourceConfig, indicates that a new Managed Lustre instance should be created. Structure is documented below.
    existing_bucket ClusterStorageResourceConfigExistingBucket
    When set in a StorageResourceConfig, indicates that an existing Google Cloud Storage bucket should be imported. Structure is documented below.
    existing_filestore ClusterStorageResourceConfigExistingFilestore
    When set in a StorageResourceConfig, indicates that an existing Filestore instance should be imported. Structure is documented below.
    existing_lustre ClusterStorageResourceConfigExistingLustre
    When set in a StorageResourceConfig, indicates that an existing Managed Lustre instance should be imported. Structure is documented below.
    new_bucket ClusterStorageResourceConfigNewBucket
    When set in a StorageResourceConfig, indicates that a new Google Cloud Storage bucket should be created. Structure is documented below.
    new_filestore ClusterStorageResourceConfigNewFilestore
    When set in a StorageResourceConfig, indicates that a new Filestore instance should be created. Structure is documented below.
    new_lustre ClusterStorageResourceConfigNewLustre
    When set in a StorageResourceConfig, indicates that a new Managed Lustre instance should be created. Structure is documented below.
    existingBucket Property Map
    When set in a StorageResourceConfig, indicates that an existing Google Cloud Storage bucket should be imported. Structure is documented below.
    existingFilestore Property Map
    When set in a StorageResourceConfig, indicates that an existing Filestore instance should be imported. Structure is documented below.
    existingLustre Property Map
    When set in a StorageResourceConfig, indicates that an existing Managed Lustre instance should be imported. Structure is documented below.
    newBucket Property Map
    When set in a StorageResourceConfig, indicates that a new Google Cloud Storage bucket should be created. Structure is documented below.
    newFilestore Property Map
    When set in a StorageResourceConfig, indicates that a new Filestore instance should be created. Structure is documented below.
    newLustre Property Map
    When set in a StorageResourceConfig, indicates that a new Managed Lustre instance should be created. Structure is documented below.

    ClusterStorageResourceConfigExistingBucket, ClusterStorageResourceConfigExistingBucketArgs

    Bucket string
    Name of the Cloud Storage bucket to import.
    Bucket string
    Name of the Cloud Storage bucket to import.
    bucket String
    Name of the Cloud Storage bucket to import.
    bucket string
    Name of the Cloud Storage bucket to import.
    bucket str
    Name of the Cloud Storage bucket to import.
    bucket String
    Name of the Cloud Storage bucket to import.

    ClusterStorageResourceConfigExistingFilestore, ClusterStorageResourceConfigExistingFilestoreArgs

    Filestore string
    Name of the Filestore instance to import, in the format projects/{project}/locations/{location}/instances/{instance}
    Filestore string
    Name of the Filestore instance to import, in the format projects/{project}/locations/{location}/instances/{instance}
    filestore String
    Name of the Filestore instance to import, in the format projects/{project}/locations/{location}/instances/{instance}
    filestore string
    Name of the Filestore instance to import, in the format projects/{project}/locations/{location}/instances/{instance}
    filestore str
    Name of the Filestore instance to import, in the format projects/{project}/locations/{location}/instances/{instance}
    filestore String
    Name of the Filestore instance to import, in the format projects/{project}/locations/{location}/instances/{instance}

    ClusterStorageResourceConfigExistingLustre, ClusterStorageResourceConfigExistingLustreArgs

    Lustre string
    Name of the Managed Lustre instance to import, in the format projects/{project}/locations/{location}/instances/{instance}
    Lustre string
    Name of the Managed Lustre instance to import, in the format projects/{project}/locations/{location}/instances/{instance}
    lustre String
    Name of the Managed Lustre instance to import, in the format projects/{project}/locations/{location}/instances/{instance}
    lustre string
    Name of the Managed Lustre instance to import, in the format projects/{project}/locations/{location}/instances/{instance}
    lustre str
    Name of the Managed Lustre instance to import, in the format projects/{project}/locations/{location}/instances/{instance}
    lustre String
    Name of the Managed Lustre instance to import, in the format projects/{project}/locations/{location}/instances/{instance}

    ClusterStorageResourceConfigNewBucket, ClusterStorageResourceConfigNewBucketArgs

    Bucket string
    Name of the Cloud Storage bucket to create.
    Autoclass ClusterStorageResourceConfigNewBucketAutoclass
    Message describing Google Cloud Storage autoclass configuration. Structure is documented below.
    HierarchicalNamespace ClusterStorageResourceConfigNewBucketHierarchicalNamespace
    Message describing Google Cloud Storage hierarchical namespace configuration. Structure is documented below.
    StorageClass string
    If set, uses the provided storage class as the bucket's default storage class. Possible values: STANDARD NEARLINE COLDLINE ARCHIVE
    Bucket string
    Name of the Cloud Storage bucket to create.
    Autoclass ClusterStorageResourceConfigNewBucketAutoclass
    Message describing Google Cloud Storage autoclass configuration. Structure is documented below.
    HierarchicalNamespace ClusterStorageResourceConfigNewBucketHierarchicalNamespace
    Message describing Google Cloud Storage hierarchical namespace configuration. Structure is documented below.
    StorageClass string
    If set, uses the provided storage class as the bucket's default storage class. Possible values: STANDARD NEARLINE COLDLINE ARCHIVE
    bucket String
    Name of the Cloud Storage bucket to create.
    autoclass ClusterStorageResourceConfigNewBucketAutoclass
    Message describing Google Cloud Storage autoclass configuration. Structure is documented below.
    hierarchicalNamespace ClusterStorageResourceConfigNewBucketHierarchicalNamespace
    Message describing Google Cloud Storage hierarchical namespace configuration. Structure is documented below.
    storageClass String
    If set, uses the provided storage class as the bucket's default storage class. Possible values: STANDARD NEARLINE COLDLINE ARCHIVE
    bucket string
    Name of the Cloud Storage bucket to create.
    autoclass ClusterStorageResourceConfigNewBucketAutoclass
    Message describing Google Cloud Storage autoclass configuration. Structure is documented below.
    hierarchicalNamespace ClusterStorageResourceConfigNewBucketHierarchicalNamespace
    Message describing Google Cloud Storage hierarchical namespace configuration. Structure is documented below.
    storageClass string
    If set, uses the provided storage class as the bucket's default storage class. Possible values: STANDARD NEARLINE COLDLINE ARCHIVE
    bucket str
    Name of the Cloud Storage bucket to create.
    autoclass ClusterStorageResourceConfigNewBucketAutoclass
    Message describing Google Cloud Storage autoclass configuration. Structure is documented below.
    hierarchical_namespace ClusterStorageResourceConfigNewBucketHierarchicalNamespace
    Message describing Google Cloud Storage hierarchical namespace configuration. Structure is documented below.
    storage_class str
    If set, uses the provided storage class as the bucket's default storage class. Possible values: STANDARD NEARLINE COLDLINE ARCHIVE
    bucket String
    Name of the Cloud Storage bucket to create.
    autoclass Property Map
    Message describing Google Cloud Storage autoclass configuration. Structure is documented below.
    hierarchicalNamespace Property Map
    Message describing Google Cloud Storage hierarchical namespace configuration. Structure is documented below.
    storageClass String
    If set, uses the provided storage class as the bucket's default storage class. Possible values: STANDARD NEARLINE COLDLINE ARCHIVE

    ClusterStorageResourceConfigNewBucketAutoclass, ClusterStorageResourceConfigNewBucketAutoclassArgs

    Enabled bool
    Enables Auto-class feature.
    Enabled bool
    Enables Auto-class feature.
    enabled Boolean
    Enables Auto-class feature.
    enabled boolean
    Enables Auto-class feature.
    enabled bool
    Enables Auto-class feature.
    enabled Boolean
    Enables Auto-class feature.

    ClusterStorageResourceConfigNewBucketHierarchicalNamespace, ClusterStorageResourceConfigNewBucketHierarchicalNamespaceArgs

    Enabled bool
    Enables hierarchical namespace setup for the bucket.
    Enabled bool
    Enables hierarchical namespace setup for the bucket.
    enabled Boolean
    Enables hierarchical namespace setup for the bucket.
    enabled boolean
    Enables hierarchical namespace setup for the bucket.
    enabled bool
    Enables hierarchical namespace setup for the bucket.
    enabled Boolean
    Enables hierarchical namespace setup for the bucket.

    ClusterStorageResourceConfigNewFilestore, ClusterStorageResourceConfigNewFilestoreArgs

    FileShares List<ClusterStorageResourceConfigNewFilestoreFileShare>
    File system shares on the instance. Exactly one file share must be specified. Structure is documented below.
    Filestore string
    Name of the Filestore instance to create, in the format projects/{project}/locations/{location}/instances/{instance}
    Tier string
    Service tier to use for the instance. Possible values: ZONAL REGIONAL Possible values are: TIER_UNSPECIFIED, ZONAL, REGIONAL.
    Description string
    Description of the instance. Maximum of 2048 characters.
    Protocol string
    Access protocol to use for all file shares in the instance. Defaults to NFS V3 if not set. Possible values: NFSV3 NFSV41 Possible values are: PROTOCOL_UNSPECIFIED, NFSV3, NFSV41.
    FileShares []ClusterStorageResourceConfigNewFilestoreFileShare
    File system shares on the instance. Exactly one file share must be specified. Structure is documented below.
    Filestore string
    Name of the Filestore instance to create, in the format projects/{project}/locations/{location}/instances/{instance}
    Tier string
    Service tier to use for the instance. Possible values: ZONAL REGIONAL Possible values are: TIER_UNSPECIFIED, ZONAL, REGIONAL.
    Description string
    Description of the instance. Maximum of 2048 characters.
    Protocol string
    Access protocol to use for all file shares in the instance. Defaults to NFS V3 if not set. Possible values: NFSV3 NFSV41 Possible values are: PROTOCOL_UNSPECIFIED, NFSV3, NFSV41.
    fileShares List<ClusterStorageResourceConfigNewFilestoreFileShare>
    File system shares on the instance. Exactly one file share must be specified. Structure is documented below.
    filestore String
    Name of the Filestore instance to create, in the format projects/{project}/locations/{location}/instances/{instance}
    tier String
    Service tier to use for the instance. Possible values: ZONAL REGIONAL Possible values are: TIER_UNSPECIFIED, ZONAL, REGIONAL.
    description String
    Description of the instance. Maximum of 2048 characters.
    protocol String
    Access protocol to use for all file shares in the instance. Defaults to NFS V3 if not set. Possible values: NFSV3 NFSV41 Possible values are: PROTOCOL_UNSPECIFIED, NFSV3, NFSV41.
    fileShares ClusterStorageResourceConfigNewFilestoreFileShare[]
    File system shares on the instance. Exactly one file share must be specified. Structure is documented below.
    filestore string
    Name of the Filestore instance to create, in the format projects/{project}/locations/{location}/instances/{instance}
    tier string
    Service tier to use for the instance. Possible values: ZONAL REGIONAL Possible values are: TIER_UNSPECIFIED, ZONAL, REGIONAL.
    description string
    Description of the instance. Maximum of 2048 characters.
    protocol string
    Access protocol to use for all file shares in the instance. Defaults to NFS V3 if not set. Possible values: NFSV3 NFSV41 Possible values are: PROTOCOL_UNSPECIFIED, NFSV3, NFSV41.
    file_shares Sequence[ClusterStorageResourceConfigNewFilestoreFileShare]
    File system shares on the instance. Exactly one file share must be specified. Structure is documented below.
    filestore str
    Name of the Filestore instance to create, in the format projects/{project}/locations/{location}/instances/{instance}
    tier str
    Service tier to use for the instance. Possible values: ZONAL REGIONAL Possible values are: TIER_UNSPECIFIED, ZONAL, REGIONAL.
    description str
    Description of the instance. Maximum of 2048 characters.
    protocol str
    Access protocol to use for all file shares in the instance. Defaults to NFS V3 if not set. Possible values: NFSV3 NFSV41 Possible values are: PROTOCOL_UNSPECIFIED, NFSV3, NFSV41.
    fileShares List<Property Map>
    File system shares on the instance. Exactly one file share must be specified. Structure is documented below.
    filestore String
    Name of the Filestore instance to create, in the format projects/{project}/locations/{location}/instances/{instance}
    tier String
    Service tier to use for the instance. Possible values: ZONAL REGIONAL Possible values are: TIER_UNSPECIFIED, ZONAL, REGIONAL.
    description String
    Description of the instance. Maximum of 2048 characters.
    protocol String
    Access protocol to use for all file shares in the instance. Defaults to NFS V3 if not set. Possible values: NFSV3 NFSV41 Possible values are: PROTOCOL_UNSPECIFIED, NFSV3, NFSV41.

    ClusterStorageResourceConfigNewFilestoreFileShare, ClusterStorageResourceConfigNewFilestoreFileShareArgs

    CapacityGb string
    Size of the filestore in GB. Must be between 1024 and 102400, and must meet scalability requirements described at https://cloud.google.com/filestore/docs/service-tiers.
    FileShare string
    Filestore share location
    CapacityGb string
    Size of the filestore in GB. Must be between 1024 and 102400, and must meet scalability requirements described at https://cloud.google.com/filestore/docs/service-tiers.
    FileShare string
    Filestore share location
    capacityGb String
    Size of the filestore in GB. Must be between 1024 and 102400, and must meet scalability requirements described at https://cloud.google.com/filestore/docs/service-tiers.
    fileShare String
    Filestore share location
    capacityGb string
    Size of the filestore in GB. Must be between 1024 and 102400, and must meet scalability requirements described at https://cloud.google.com/filestore/docs/service-tiers.
    fileShare string
    Filestore share location
    capacity_gb str
    Size of the filestore in GB. Must be between 1024 and 102400, and must meet scalability requirements described at https://cloud.google.com/filestore/docs/service-tiers.
    file_share str
    Filestore share location
    capacityGb String
    Size of the filestore in GB. Must be between 1024 and 102400, and must meet scalability requirements described at https://cloud.google.com/filestore/docs/service-tiers.
    fileShare String
    Filestore share location

    ClusterStorageResourceConfigNewLustre, ClusterStorageResourceConfigNewLustreArgs

    CapacityGb string
    Storage capacity of the instance in gibibytes (GiB). Allowed values are between 18000 and 7632000.
    Filesystem string
    Filesystem name for this instance. This name is used by client-side tools, including when mounting the instance. Must be 8 characters or less and can only contain letters and numbers.
    Lustre string
    (Output) Name of the Managed Lustre instance, in the format projects/{project}/locations/{location}/instances/{instance}
    Description string
    Description of the Managed Lustre instance. Maximum of 2048 characters.
    CapacityGb string
    Storage capacity of the instance in gibibytes (GiB). Allowed values are between 18000 and 7632000.
    Filesystem string
    Filesystem name for this instance. This name is used by client-side tools, including when mounting the instance. Must be 8 characters or less and can only contain letters and numbers.
    Lustre string
    (Output) Name of the Managed Lustre instance, in the format projects/{project}/locations/{location}/instances/{instance}
    Description string
    Description of the Managed Lustre instance. Maximum of 2048 characters.
    capacityGb String
    Storage capacity of the instance in gibibytes (GiB). Allowed values are between 18000 and 7632000.
    filesystem String
    Filesystem name for this instance. This name is used by client-side tools, including when mounting the instance. Must be 8 characters or less and can only contain letters and numbers.
    lustre String
    (Output) Name of the Managed Lustre instance, in the format projects/{project}/locations/{location}/instances/{instance}
    description String
    Description of the Managed Lustre instance. Maximum of 2048 characters.
    capacityGb string
    Storage capacity of the instance in gibibytes (GiB). Allowed values are between 18000 and 7632000.
    filesystem string
    Filesystem name for this instance. This name is used by client-side tools, including when mounting the instance. Must be 8 characters or less and can only contain letters and numbers.
    lustre string
    (Output) Name of the Managed Lustre instance, in the format projects/{project}/locations/{location}/instances/{instance}
    description string
    Description of the Managed Lustre instance. Maximum of 2048 characters.
    capacity_gb str
    Storage capacity of the instance in gibibytes (GiB). Allowed values are between 18000 and 7632000.
    filesystem str
    Filesystem name for this instance. This name is used by client-side tools, including when mounting the instance. Must be 8 characters or less and can only contain letters and numbers.
    lustre str
    (Output) Name of the Managed Lustre instance, in the format projects/{project}/locations/{location}/instances/{instance}
    description str
    Description of the Managed Lustre instance. Maximum of 2048 characters.
    capacityGb String
    Storage capacity of the instance in gibibytes (GiB). Allowed values are between 18000 and 7632000.
    filesystem String
    Filesystem name for this instance. This name is used by client-side tools, including when mounting the instance. Must be 8 characters or less and can only contain letters and numbers.
    lustre String
    (Output) Name of the Managed Lustre instance, in the format projects/{project}/locations/{location}/instances/{instance}
    description String
    Description of the Managed Lustre instance. Maximum of 2048 characters.

    ClusterStorageResourceFilestore, ClusterStorageResourceFilestoreArgs

    Filestore string
    Name of the Filestore instance, in the format 'projects/{project}/locations/{location}/instances/{instance}'
    Filestore string
    Name of the Filestore instance, in the format 'projects/{project}/locations/{location}/instances/{instance}'
    filestore String
    Name of the Filestore instance, in the format 'projects/{project}/locations/{location}/instances/{instance}'
    filestore string
    Name of the Filestore instance, in the format 'projects/{project}/locations/{location}/instances/{instance}'
    filestore str
    Name of the Filestore instance, in the format 'projects/{project}/locations/{location}/instances/{instance}'
    filestore String
    Name of the Filestore instance, in the format 'projects/{project}/locations/{location}/instances/{instance}'

    ClusterStorageResourceLustre, ClusterStorageResourceLustreArgs

    Lustre string
    Name of the Managed Lustre instance, in the format 'projects/{project}/locations/{location}/instances/{instance}'
    Lustre string
    Name of the Managed Lustre instance, in the format 'projects/{project}/locations/{location}/instances/{instance}'
    lustre String
    Name of the Managed Lustre instance, in the format 'projects/{project}/locations/{location}/instances/{instance}'
    lustre string
    Name of the Managed Lustre instance, in the format 'projects/{project}/locations/{location}/instances/{instance}'
    lustre str
    Name of the Managed Lustre instance, in the format 'projects/{project}/locations/{location}/instances/{instance}'
    lustre String
    Name of the Managed Lustre instance, in the format 'projects/{project}/locations/{location}/instances/{instance}'

    Import

    Cluster can be imported using any of these accepted formats:

    • projects/{{project}}/locations/{{location}}/clusters/{{cluster_id}}
    • {{project}}/{{location}}/{{cluster_id}}
    • {{location}}/{{cluster_id}}

    When using the pulumi import command, Cluster can be imported using one of the formats above. For example:

    $ pulumi import gcp:hypercomputecluster/cluster:Cluster default projects/{{project}}/locations/{{location}}/clusters/{{cluster_id}}
    $ pulumi import gcp:hypercomputecluster/cluster:Cluster default {{project}}/{{location}}/{{cluster_id}}
    $ pulumi import gcp:hypercomputecluster/cluster:Cluster default {{location}}/{{cluster_id}}
    

    To learn more about importing existing cloud resources, see Importing resources.

    Package Details

    Repository
    Google Cloud (GCP) Classic pulumi/pulumi-gcp
    License
    Apache-2.0
    Notes
    This Pulumi package is based on the google-beta Terraform Provider.
    gcp logo
    Viewing docs for Google Cloud v9.15.0
    published on Thursday, Mar 12, 2026 by Pulumi
      Try Pulumi Cloud free. Your team will thank you.