published on Thursday, Mar 12, 2026 by Pulumi
A collection of virtual machines and connected resources forming a high-performance computing cluster capable of running large-scale, tightly coupled workloads. A cluster combines a set of compute resources that perform computations, storage resources that contain inputs and store outputs, an orchestrator that is responsible for assigning jobs to compute resources, and network resources that connect everything together.
Example Usage
Hypercomputecluster Cluster Basic
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const project = gcp.organizations.getProject({});
const projectId = project.then(project => project.name);
const cluster = new gcp.hypercomputecluster.Cluster("cluster", {
clusterId: "my-cluster",
location: "us-central1",
description: "Cluster Director instance created through Terraform",
networkResources: [{
id: "network1",
config: {
newNetwork: {
description: "Network one",
network: projectId.then(projectId => `projects/${projectId}/global/networks/cluster-net1`),
},
},
}],
computeResources: [{
id: "compute1",
config: {
newOnDemandInstances: {
machineType: "n2-standard-2",
zone: "us-central1-a",
},
},
}],
orchestrator: {
slurm: {
loginNodes: {
machineType: "n2-standard-2",
count: "1",
zone: "us-central1-a",
bootDisk: {
sizeGb: "100",
type: "pd-balanced",
},
},
nodeSets: [{
id: "nodeset1",
computeId: "compute1",
staticNodeCount: "1",
computeInstance: {
bootDisk: {
sizeGb: "100",
type: "pd-balanced",
},
},
}],
partitions: [{
id: "partition1",
nodeSetIds: ["nodeset1"],
}],
defaultPartition: "partition1",
},
},
});
import pulumi
import pulumi_gcp as gcp
project = gcp.organizations.get_project()
project_id = project.name
cluster = gcp.hypercomputecluster.Cluster("cluster",
cluster_id="my-cluster",
location="us-central1",
description="Cluster Director instance created through Terraform",
network_resources=[{
"id": "network1",
"config": {
"new_network": {
"description": "Network one",
"network": f"projects/{project_id}/global/networks/cluster-net1",
},
},
}],
compute_resources=[{
"id": "compute1",
"config": {
"new_on_demand_instances": {
"machine_type": "n2-standard-2",
"zone": "us-central1-a",
},
},
}],
orchestrator={
"slurm": {
"login_nodes": {
"machine_type": "n2-standard-2",
"count": "1",
"zone": "us-central1-a",
"boot_disk": {
"size_gb": "100",
"type": "pd-balanced",
},
},
"node_sets": [{
"id": "nodeset1",
"compute_id": "compute1",
"static_node_count": "1",
"compute_instance": {
"boot_disk": {
"size_gb": "100",
"type": "pd-balanced",
},
},
}],
"partitions": [{
"id": "partition1",
"node_set_ids": ["nodeset1"],
}],
"default_partition": "partition1",
},
})
package main
import (
"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/hypercomputecluster"
"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/organizations"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
project, err := organizations.LookupProject(ctx, &organizations.LookupProjectArgs{}, nil)
if err != nil {
return err
}
projectId := project.Name
_, err = hypercomputecluster.NewCluster(ctx, "cluster", &hypercomputecluster.ClusterArgs{
ClusterId: pulumi.String("my-cluster"),
Location: pulumi.String("us-central1"),
Description: pulumi.String("Cluster Director instance created through Terraform"),
NetworkResources: hypercomputecluster.ClusterNetworkResourceArray{
&hypercomputecluster.ClusterNetworkResourceArgs{
Id: pulumi.String("network1"),
Config: &hypercomputecluster.ClusterNetworkResourceConfigArgs{
NewNetwork: &hypercomputecluster.ClusterNetworkResourceConfigNewNetworkArgs{
Description: pulumi.String("Network one"),
Network: pulumi.Sprintf("projects/%v/global/networks/cluster-net1", projectId),
},
},
},
},
ComputeResources: hypercomputecluster.ClusterComputeResourceArray{
&hypercomputecluster.ClusterComputeResourceArgs{
Id: pulumi.String("compute1"),
Config: &hypercomputecluster.ClusterComputeResourceConfigArgs{
NewOnDemandInstances: &hypercomputecluster.ClusterComputeResourceConfigNewOnDemandInstancesArgs{
MachineType: pulumi.String("n2-standard-2"),
Zone: pulumi.String("us-central1-a"),
},
},
},
},
Orchestrator: &hypercomputecluster.ClusterOrchestratorArgs{
Slurm: &hypercomputecluster.ClusterOrchestratorSlurmArgs{
LoginNodes: &hypercomputecluster.ClusterOrchestratorSlurmLoginNodesArgs{
MachineType: pulumi.String("n2-standard-2"),
Count: pulumi.String("1"),
Zone: pulumi.String("us-central1-a"),
BootDisk: &hypercomputecluster.ClusterOrchestratorSlurmLoginNodesBootDiskArgs{
SizeGb: pulumi.String("100"),
Type: pulumi.String("pd-balanced"),
},
},
NodeSets: hypercomputecluster.ClusterOrchestratorSlurmNodeSetArray{
&hypercomputecluster.ClusterOrchestratorSlurmNodeSetArgs{
Id: pulumi.String("nodeset1"),
ComputeId: pulumi.String("compute1"),
StaticNodeCount: pulumi.String("1"),
ComputeInstance: &hypercomputecluster.ClusterOrchestratorSlurmNodeSetComputeInstanceArgs{
BootDisk: &hypercomputecluster.ClusterOrchestratorSlurmNodeSetComputeInstanceBootDiskArgs{
SizeGb: pulumi.String("100"),
Type: pulumi.String("pd-balanced"),
},
},
},
},
Partitions: hypercomputecluster.ClusterOrchestratorSlurmPartitionArray{
&hypercomputecluster.ClusterOrchestratorSlurmPartitionArgs{
Id: pulumi.String("partition1"),
NodeSetIds: pulumi.StringArray{
pulumi.String("nodeset1"),
},
},
},
DefaultPartition: pulumi.String("partition1"),
},
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() =>
{
var project = Gcp.Organizations.GetProject.Invoke();
var projectId = project.Apply(getProjectResult => getProjectResult.Name);
var cluster = new Gcp.HyperComputeCluster.Cluster("cluster", new()
{
ClusterId = "my-cluster",
Location = "us-central1",
Description = "Cluster Director instance created through Terraform",
NetworkResources = new[]
{
new Gcp.HyperComputeCluster.Inputs.ClusterNetworkResourceArgs
{
Id = "network1",
Config = new Gcp.HyperComputeCluster.Inputs.ClusterNetworkResourceConfigArgs
{
NewNetwork = new Gcp.HyperComputeCluster.Inputs.ClusterNetworkResourceConfigNewNetworkArgs
{
Description = "Network one",
Network = projectId.Apply(projectId => $"projects/{projectId}/global/networks/cluster-net1"),
},
},
},
},
ComputeResources = new[]
{
new Gcp.HyperComputeCluster.Inputs.ClusterComputeResourceArgs
{
Id = "compute1",
Config = new Gcp.HyperComputeCluster.Inputs.ClusterComputeResourceConfigArgs
{
NewOnDemandInstances = new Gcp.HyperComputeCluster.Inputs.ClusterComputeResourceConfigNewOnDemandInstancesArgs
{
MachineType = "n2-standard-2",
Zone = "us-central1-a",
},
},
},
},
Orchestrator = new Gcp.HyperComputeCluster.Inputs.ClusterOrchestratorArgs
{
Slurm = new Gcp.HyperComputeCluster.Inputs.ClusterOrchestratorSlurmArgs
{
LoginNodes = new Gcp.HyperComputeCluster.Inputs.ClusterOrchestratorSlurmLoginNodesArgs
{
MachineType = "n2-standard-2",
Count = "1",
Zone = "us-central1-a",
BootDisk = new Gcp.HyperComputeCluster.Inputs.ClusterOrchestratorSlurmLoginNodesBootDiskArgs
{
SizeGb = "100",
Type = "pd-balanced",
},
},
NodeSets = new[]
{
new Gcp.HyperComputeCluster.Inputs.ClusterOrchestratorSlurmNodeSetArgs
{
Id = "nodeset1",
ComputeId = "compute1",
StaticNodeCount = "1",
ComputeInstance = new Gcp.HyperComputeCluster.Inputs.ClusterOrchestratorSlurmNodeSetComputeInstanceArgs
{
BootDisk = new Gcp.HyperComputeCluster.Inputs.ClusterOrchestratorSlurmNodeSetComputeInstanceBootDiskArgs
{
SizeGb = "100",
Type = "pd-balanced",
},
},
},
},
Partitions = new[]
{
new Gcp.HyperComputeCluster.Inputs.ClusterOrchestratorSlurmPartitionArgs
{
Id = "partition1",
NodeSetIds = new[]
{
"nodeset1",
},
},
},
DefaultPartition = "partition1",
},
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.organizations.OrganizationsFunctions;
import com.pulumi.gcp.organizations.inputs.GetProjectArgs;
import com.pulumi.gcp.hypercomputecluster.Cluster;
import com.pulumi.gcp.hypercomputecluster.ClusterArgs;
import com.pulumi.gcp.hypercomputecluster.inputs.ClusterNetworkResourceArgs;
import com.pulumi.gcp.hypercomputecluster.inputs.ClusterNetworkResourceConfigArgs;
import com.pulumi.gcp.hypercomputecluster.inputs.ClusterNetworkResourceConfigNewNetworkArgs;
import com.pulumi.gcp.hypercomputecluster.inputs.ClusterComputeResourceArgs;
import com.pulumi.gcp.hypercomputecluster.inputs.ClusterComputeResourceConfigArgs;
import com.pulumi.gcp.hypercomputecluster.inputs.ClusterComputeResourceConfigNewOnDemandInstancesArgs;
import com.pulumi.gcp.hypercomputecluster.inputs.ClusterOrchestratorArgs;
import com.pulumi.gcp.hypercomputecluster.inputs.ClusterOrchestratorSlurmArgs;
import com.pulumi.gcp.hypercomputecluster.inputs.ClusterOrchestratorSlurmLoginNodesArgs;
import com.pulumi.gcp.hypercomputecluster.inputs.ClusterOrchestratorSlurmLoginNodesBootDiskArgs;
import com.pulumi.gcp.hypercomputecluster.inputs.ClusterOrchestratorSlurmNodeSetArgs;
import com.pulumi.gcp.hypercomputecluster.inputs.ClusterOrchestratorSlurmNodeSetComputeInstanceArgs;
import com.pulumi.gcp.hypercomputecluster.inputs.ClusterOrchestratorSlurmNodeSetComputeInstanceBootDiskArgs;
import com.pulumi.gcp.hypercomputecluster.inputs.ClusterOrchestratorSlurmPartitionArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
final var project = OrganizationsFunctions.getProject(GetProjectArgs.builder()
.build());
final var projectId = project.name();
var cluster = new Cluster("cluster", ClusterArgs.builder()
.clusterId("my-cluster")
.location("us-central1")
.description("Cluster Director instance created through Terraform")
.networkResources(ClusterNetworkResourceArgs.builder()
.id("network1")
.config(ClusterNetworkResourceConfigArgs.builder()
.newNetwork(ClusterNetworkResourceConfigNewNetworkArgs.builder()
.description("Network one")
.network(String.format("projects/%s/global/networks/cluster-net1", projectId))
.build())
.build())
.build())
.computeResources(ClusterComputeResourceArgs.builder()
.id("compute1")
.config(ClusterComputeResourceConfigArgs.builder()
.newOnDemandInstances(ClusterComputeResourceConfigNewOnDemandInstancesArgs.builder()
.machineType("n2-standard-2")
.zone("us-central1-a")
.build())
.build())
.build())
.orchestrator(ClusterOrchestratorArgs.builder()
.slurm(ClusterOrchestratorSlurmArgs.builder()
.loginNodes(ClusterOrchestratorSlurmLoginNodesArgs.builder()
.machineType("n2-standard-2")
.count("1")
.zone("us-central1-a")
.bootDisk(ClusterOrchestratorSlurmLoginNodesBootDiskArgs.builder()
.sizeGb("100")
.type("pd-balanced")
.build())
.build())
.nodeSets(ClusterOrchestratorSlurmNodeSetArgs.builder()
.id("nodeset1")
.computeId("compute1")
.staticNodeCount("1")
.computeInstance(ClusterOrchestratorSlurmNodeSetComputeInstanceArgs.builder()
.bootDisk(ClusterOrchestratorSlurmNodeSetComputeInstanceBootDiskArgs.builder()
.sizeGb("100")
.type("pd-balanced")
.build())
.build())
.build())
.partitions(ClusterOrchestratorSlurmPartitionArgs.builder()
.id("partition1")
.nodeSetIds("nodeset1")
.build())
.defaultPartition("partition1")
.build())
.build())
.build());
}
}
resources:
cluster:
type: gcp:hypercomputecluster:Cluster
properties:
clusterId: my-cluster
location: us-central1
description: Cluster Director instance created through Terraform
networkResources:
- id: network1
config:
newNetwork:
description: Network one
network: projects/${projectId}/global/networks/cluster-net1
computeResources:
- id: compute1
config:
newOnDemandInstances:
machineType: n2-standard-2
zone: us-central1-a
orchestrator:
slurm:
loginNodes:
machineType: n2-standard-2
count: 1
zone: us-central1-a
bootDisk:
sizeGb: '100'
type: pd-balanced
nodeSets:
- id: nodeset1
computeId: compute1
staticNodeCount: 1
computeInstance:
bootDisk:
sizeGb: '100'
type: pd-balanced
partitions:
- id: partition1
nodeSetIds:
- nodeset1
defaultPartition: partition1
variables:
project:
fn::invoke:
function: gcp:organizations:getProject
arguments: {}
projectId: ${project.name}
Create Cluster Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new Cluster(name: string, args: ClusterArgs, opts?: CustomResourceOptions);@overload
def Cluster(resource_name: str,
args: ClusterArgs,
opts: Optional[ResourceOptions] = None)
@overload
def Cluster(resource_name: str,
opts: Optional[ResourceOptions] = None,
cluster_id: Optional[str] = None,
location: Optional[str] = None,
compute_resources: Optional[Sequence[ClusterComputeResourceArgs]] = None,
description: Optional[str] = None,
labels: Optional[Mapping[str, str]] = None,
network_resources: Optional[Sequence[ClusterNetworkResourceArgs]] = None,
orchestrator: Optional[ClusterOrchestratorArgs] = None,
project: Optional[str] = None,
storage_resources: Optional[Sequence[ClusterStorageResourceArgs]] = None)func NewCluster(ctx *Context, name string, args ClusterArgs, opts ...ResourceOption) (*Cluster, error)public Cluster(string name, ClusterArgs args, CustomResourceOptions? opts = null)
public Cluster(String name, ClusterArgs args)
public Cluster(String name, ClusterArgs args, CustomResourceOptions options)
type: gcp:hypercomputecluster:Cluster
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args ClusterArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args ClusterArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args ClusterArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args ClusterArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args ClusterArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
Constructor example
The following reference example uses placeholder values for all input properties.
var exampleclusterResourceResourceFromHypercomputeclustercluster = new Gcp.HyperComputeCluster.Cluster("exampleclusterResourceResourceFromHypercomputeclustercluster", new()
{
ClusterId = "string",
Location = "string",
ComputeResources = new[]
{
new Gcp.HyperComputeCluster.Inputs.ClusterComputeResourceArgs
{
Config = new Gcp.HyperComputeCluster.Inputs.ClusterComputeResourceConfigArgs
{
NewFlexStartInstances = new Gcp.HyperComputeCluster.Inputs.ClusterComputeResourceConfigNewFlexStartInstancesArgs
{
MachineType = "string",
MaxDuration = "string",
Zone = "string",
},
NewOnDemandInstances = new Gcp.HyperComputeCluster.Inputs.ClusterComputeResourceConfigNewOnDemandInstancesArgs
{
MachineType = "string",
Zone = "string",
},
NewReservedInstances = new Gcp.HyperComputeCluster.Inputs.ClusterComputeResourceConfigNewReservedInstancesArgs
{
Reservation = "string",
},
NewSpotInstances = new Gcp.HyperComputeCluster.Inputs.ClusterComputeResourceConfigNewSpotInstancesArgs
{
MachineType = "string",
Zone = "string",
TerminationAction = "string",
},
},
Id = "string",
},
},
Description = "string",
Labels =
{
{ "string", "string" },
},
NetworkResources = new[]
{
new Gcp.HyperComputeCluster.Inputs.ClusterNetworkResourceArgs
{
Id = "string",
Config = new Gcp.HyperComputeCluster.Inputs.ClusterNetworkResourceConfigArgs
{
ExistingNetwork = new Gcp.HyperComputeCluster.Inputs.ClusterNetworkResourceConfigExistingNetworkArgs
{
Network = "string",
Subnetwork = "string",
},
NewNetwork = new Gcp.HyperComputeCluster.Inputs.ClusterNetworkResourceConfigNewNetworkArgs
{
Network = "string",
Description = "string",
},
},
Networks = new[]
{
new Gcp.HyperComputeCluster.Inputs.ClusterNetworkResourceNetworkArgs
{
Network = "string",
Subnetwork = "string",
},
},
},
},
Orchestrator = new Gcp.HyperComputeCluster.Inputs.ClusterOrchestratorArgs
{
Slurm = new Gcp.HyperComputeCluster.Inputs.ClusterOrchestratorSlurmArgs
{
LoginNodes = new Gcp.HyperComputeCluster.Inputs.ClusterOrchestratorSlurmLoginNodesArgs
{
Count = "string",
MachineType = "string",
Zone = "string",
BootDisk = new Gcp.HyperComputeCluster.Inputs.ClusterOrchestratorSlurmLoginNodesBootDiskArgs
{
SizeGb = "string",
Type = "string",
},
EnableOsLogin = false,
EnablePublicIps = false,
Instances = new[]
{
new Gcp.HyperComputeCluster.Inputs.ClusterOrchestratorSlurmLoginNodesInstanceArgs
{
Instance = "string",
},
},
Labels =
{
{ "string", "string" },
},
StartupScript = "string",
StorageConfigs = new[]
{
new Gcp.HyperComputeCluster.Inputs.ClusterOrchestratorSlurmLoginNodesStorageConfigArgs
{
Id = "string",
LocalMount = "string",
},
},
},
NodeSets = new[]
{
new Gcp.HyperComputeCluster.Inputs.ClusterOrchestratorSlurmNodeSetArgs
{
Id = "string",
ComputeId = "string",
ComputeInstance = new Gcp.HyperComputeCluster.Inputs.ClusterOrchestratorSlurmNodeSetComputeInstanceArgs
{
BootDisk = new Gcp.HyperComputeCluster.Inputs.ClusterOrchestratorSlurmNodeSetComputeInstanceBootDiskArgs
{
SizeGb = "string",
Type = "string",
},
Labels =
{
{ "string", "string" },
},
StartupScript = "string",
},
MaxDynamicNodeCount = "string",
StaticNodeCount = "string",
StorageConfigs = new[]
{
new Gcp.HyperComputeCluster.Inputs.ClusterOrchestratorSlurmNodeSetStorageConfigArgs
{
Id = "string",
LocalMount = "string",
},
},
},
},
Partitions = new[]
{
new Gcp.HyperComputeCluster.Inputs.ClusterOrchestratorSlurmPartitionArgs
{
Id = "string",
NodeSetIds = new[]
{
"string",
},
},
},
DefaultPartition = "string",
EpilogBashScripts = new[]
{
"string",
},
PrologBashScripts = new[]
{
"string",
},
},
},
Project = "string",
StorageResources = new[]
{
new Gcp.HyperComputeCluster.Inputs.ClusterStorageResourceArgs
{
Config = new Gcp.HyperComputeCluster.Inputs.ClusterStorageResourceConfigArgs
{
ExistingBucket = new Gcp.HyperComputeCluster.Inputs.ClusterStorageResourceConfigExistingBucketArgs
{
Bucket = "string",
},
ExistingFilestore = new Gcp.HyperComputeCluster.Inputs.ClusterStorageResourceConfigExistingFilestoreArgs
{
Filestore = "string",
},
ExistingLustre = new Gcp.HyperComputeCluster.Inputs.ClusterStorageResourceConfigExistingLustreArgs
{
Lustre = "string",
},
NewBucket = new Gcp.HyperComputeCluster.Inputs.ClusterStorageResourceConfigNewBucketArgs
{
Bucket = "string",
Autoclass = new Gcp.HyperComputeCluster.Inputs.ClusterStorageResourceConfigNewBucketAutoclassArgs
{
Enabled = false,
},
HierarchicalNamespace = new Gcp.HyperComputeCluster.Inputs.ClusterStorageResourceConfigNewBucketHierarchicalNamespaceArgs
{
Enabled = false,
},
StorageClass = "string",
},
NewFilestore = new Gcp.HyperComputeCluster.Inputs.ClusterStorageResourceConfigNewFilestoreArgs
{
FileShares = new[]
{
new Gcp.HyperComputeCluster.Inputs.ClusterStorageResourceConfigNewFilestoreFileShareArgs
{
CapacityGb = "string",
FileShare = "string",
},
},
Filestore = "string",
Tier = "string",
Description = "string",
Protocol = "string",
},
NewLustre = new Gcp.HyperComputeCluster.Inputs.ClusterStorageResourceConfigNewLustreArgs
{
CapacityGb = "string",
Filesystem = "string",
Lustre = "string",
Description = "string",
},
},
Id = "string",
Buckets = new[]
{
new Gcp.HyperComputeCluster.Inputs.ClusterStorageResourceBucketArgs
{
Bucket = "string",
},
},
Filestores = new[]
{
new Gcp.HyperComputeCluster.Inputs.ClusterStorageResourceFilestoreArgs
{
Filestore = "string",
},
},
Lustres = new[]
{
new Gcp.HyperComputeCluster.Inputs.ClusterStorageResourceLustreArgs
{
Lustre = "string",
},
},
},
},
});
example, err := hypercomputecluster.NewCluster(ctx, "exampleclusterResourceResourceFromHypercomputeclustercluster", &hypercomputecluster.ClusterArgs{
ClusterId: pulumi.String("string"),
Location: pulumi.String("string"),
ComputeResources: hypercomputecluster.ClusterComputeResourceArray{
&hypercomputecluster.ClusterComputeResourceArgs{
Config: &hypercomputecluster.ClusterComputeResourceConfigArgs{
NewFlexStartInstances: &hypercomputecluster.ClusterComputeResourceConfigNewFlexStartInstancesArgs{
MachineType: pulumi.String("string"),
MaxDuration: pulumi.String("string"),
Zone: pulumi.String("string"),
},
NewOnDemandInstances: &hypercomputecluster.ClusterComputeResourceConfigNewOnDemandInstancesArgs{
MachineType: pulumi.String("string"),
Zone: pulumi.String("string"),
},
NewReservedInstances: &hypercomputecluster.ClusterComputeResourceConfigNewReservedInstancesArgs{
Reservation: pulumi.String("string"),
},
NewSpotInstances: &hypercomputecluster.ClusterComputeResourceConfigNewSpotInstancesArgs{
MachineType: pulumi.String("string"),
Zone: pulumi.String("string"),
TerminationAction: pulumi.String("string"),
},
},
Id: pulumi.String("string"),
},
},
Description: pulumi.String("string"),
Labels: pulumi.StringMap{
"string": pulumi.String("string"),
},
NetworkResources: hypercomputecluster.ClusterNetworkResourceArray{
&hypercomputecluster.ClusterNetworkResourceArgs{
Id: pulumi.String("string"),
Config: &hypercomputecluster.ClusterNetworkResourceConfigArgs{
ExistingNetwork: &hypercomputecluster.ClusterNetworkResourceConfigExistingNetworkArgs{
Network: pulumi.String("string"),
Subnetwork: pulumi.String("string"),
},
NewNetwork: &hypercomputecluster.ClusterNetworkResourceConfigNewNetworkArgs{
Network: pulumi.String("string"),
Description: pulumi.String("string"),
},
},
Networks: hypercomputecluster.ClusterNetworkResourceNetworkArray{
&hypercomputecluster.ClusterNetworkResourceNetworkArgs{
Network: pulumi.String("string"),
Subnetwork: pulumi.String("string"),
},
},
},
},
Orchestrator: &hypercomputecluster.ClusterOrchestratorArgs{
Slurm: &hypercomputecluster.ClusterOrchestratorSlurmArgs{
LoginNodes: &hypercomputecluster.ClusterOrchestratorSlurmLoginNodesArgs{
Count: pulumi.String("string"),
MachineType: pulumi.String("string"),
Zone: pulumi.String("string"),
BootDisk: &hypercomputecluster.ClusterOrchestratorSlurmLoginNodesBootDiskArgs{
SizeGb: pulumi.String("string"),
Type: pulumi.String("string"),
},
EnableOsLogin: pulumi.Bool(false),
EnablePublicIps: pulumi.Bool(false),
Instances: hypercomputecluster.ClusterOrchestratorSlurmLoginNodesInstanceArray{
&hypercomputecluster.ClusterOrchestratorSlurmLoginNodesInstanceArgs{
Instance: pulumi.String("string"),
},
},
Labels: pulumi.StringMap{
"string": pulumi.String("string"),
},
StartupScript: pulumi.String("string"),
StorageConfigs: hypercomputecluster.ClusterOrchestratorSlurmLoginNodesStorageConfigArray{
&hypercomputecluster.ClusterOrchestratorSlurmLoginNodesStorageConfigArgs{
Id: pulumi.String("string"),
LocalMount: pulumi.String("string"),
},
},
},
NodeSets: hypercomputecluster.ClusterOrchestratorSlurmNodeSetArray{
&hypercomputecluster.ClusterOrchestratorSlurmNodeSetArgs{
Id: pulumi.String("string"),
ComputeId: pulumi.String("string"),
ComputeInstance: &hypercomputecluster.ClusterOrchestratorSlurmNodeSetComputeInstanceArgs{
BootDisk: &hypercomputecluster.ClusterOrchestratorSlurmNodeSetComputeInstanceBootDiskArgs{
SizeGb: pulumi.String("string"),
Type: pulumi.String("string"),
},
Labels: pulumi.StringMap{
"string": pulumi.String("string"),
},
StartupScript: pulumi.String("string"),
},
MaxDynamicNodeCount: pulumi.String("string"),
StaticNodeCount: pulumi.String("string"),
StorageConfigs: hypercomputecluster.ClusterOrchestratorSlurmNodeSetStorageConfigArray{
&hypercomputecluster.ClusterOrchestratorSlurmNodeSetStorageConfigArgs{
Id: pulumi.String("string"),
LocalMount: pulumi.String("string"),
},
},
},
},
Partitions: hypercomputecluster.ClusterOrchestratorSlurmPartitionArray{
&hypercomputecluster.ClusterOrchestratorSlurmPartitionArgs{
Id: pulumi.String("string"),
NodeSetIds: pulumi.StringArray{
pulumi.String("string"),
},
},
},
DefaultPartition: pulumi.String("string"),
EpilogBashScripts: pulumi.StringArray{
pulumi.String("string"),
},
PrologBashScripts: pulumi.StringArray{
pulumi.String("string"),
},
},
},
Project: pulumi.String("string"),
StorageResources: hypercomputecluster.ClusterStorageResourceArray{
&hypercomputecluster.ClusterStorageResourceArgs{
Config: &hypercomputecluster.ClusterStorageResourceConfigArgs{
ExistingBucket: &hypercomputecluster.ClusterStorageResourceConfigExistingBucketArgs{
Bucket: pulumi.String("string"),
},
ExistingFilestore: &hypercomputecluster.ClusterStorageResourceConfigExistingFilestoreArgs{
Filestore: pulumi.String("string"),
},
ExistingLustre: &hypercomputecluster.ClusterStorageResourceConfigExistingLustreArgs{
Lustre: pulumi.String("string"),
},
NewBucket: &hypercomputecluster.ClusterStorageResourceConfigNewBucketArgs{
Bucket: pulumi.String("string"),
Autoclass: &hypercomputecluster.ClusterStorageResourceConfigNewBucketAutoclassArgs{
Enabled: pulumi.Bool(false),
},
HierarchicalNamespace: &hypercomputecluster.ClusterStorageResourceConfigNewBucketHierarchicalNamespaceArgs{
Enabled: pulumi.Bool(false),
},
StorageClass: pulumi.String("string"),
},
NewFilestore: &hypercomputecluster.ClusterStorageResourceConfigNewFilestoreArgs{
FileShares: hypercomputecluster.ClusterStorageResourceConfigNewFilestoreFileShareArray{
&hypercomputecluster.ClusterStorageResourceConfigNewFilestoreFileShareArgs{
CapacityGb: pulumi.String("string"),
FileShare: pulumi.String("string"),
},
},
Filestore: pulumi.String("string"),
Tier: pulumi.String("string"),
Description: pulumi.String("string"),
Protocol: pulumi.String("string"),
},
NewLustre: &hypercomputecluster.ClusterStorageResourceConfigNewLustreArgs{
CapacityGb: pulumi.String("string"),
Filesystem: pulumi.String("string"),
Lustre: pulumi.String("string"),
Description: pulumi.String("string"),
},
},
Id: pulumi.String("string"),
Buckets: hypercomputecluster.ClusterStorageResourceBucketArray{
&hypercomputecluster.ClusterStorageResourceBucketArgs{
Bucket: pulumi.String("string"),
},
},
Filestores: hypercomputecluster.ClusterStorageResourceFilestoreArray{
&hypercomputecluster.ClusterStorageResourceFilestoreArgs{
Filestore: pulumi.String("string"),
},
},
Lustres: hypercomputecluster.ClusterStorageResourceLustreArray{
&hypercomputecluster.ClusterStorageResourceLustreArgs{
Lustre: pulumi.String("string"),
},
},
},
},
})
var exampleclusterResourceResourceFromHypercomputeclustercluster = new com.pulumi.gcp.hypercomputecluster.Cluster("exampleclusterResourceResourceFromHypercomputeclustercluster", com.pulumi.gcp.hypercomputecluster.ClusterArgs.builder()
.clusterId("string")
.location("string")
.computeResources(ClusterComputeResourceArgs.builder()
.config(ClusterComputeResourceConfigArgs.builder()
.newFlexStartInstances(ClusterComputeResourceConfigNewFlexStartInstancesArgs.builder()
.machineType("string")
.maxDuration("string")
.zone("string")
.build())
.newOnDemandInstances(ClusterComputeResourceConfigNewOnDemandInstancesArgs.builder()
.machineType("string")
.zone("string")
.build())
.newReservedInstances(ClusterComputeResourceConfigNewReservedInstancesArgs.builder()
.reservation("string")
.build())
.newSpotInstances(ClusterComputeResourceConfigNewSpotInstancesArgs.builder()
.machineType("string")
.zone("string")
.terminationAction("string")
.build())
.build())
.id("string")
.build())
.description("string")
.labels(Map.of("string", "string"))
.networkResources(ClusterNetworkResourceArgs.builder()
.id("string")
.config(ClusterNetworkResourceConfigArgs.builder()
.existingNetwork(ClusterNetworkResourceConfigExistingNetworkArgs.builder()
.network("string")
.subnetwork("string")
.build())
.newNetwork(ClusterNetworkResourceConfigNewNetworkArgs.builder()
.network("string")
.description("string")
.build())
.build())
.networks(ClusterNetworkResourceNetworkArgs.builder()
.network("string")
.subnetwork("string")
.build())
.build())
.orchestrator(ClusterOrchestratorArgs.builder()
.slurm(ClusterOrchestratorSlurmArgs.builder()
.loginNodes(ClusterOrchestratorSlurmLoginNodesArgs.builder()
.count("string")
.machineType("string")
.zone("string")
.bootDisk(ClusterOrchestratorSlurmLoginNodesBootDiskArgs.builder()
.sizeGb("string")
.type("string")
.build())
.enableOsLogin(false)
.enablePublicIps(false)
.instances(ClusterOrchestratorSlurmLoginNodesInstanceArgs.builder()
.instance("string")
.build())
.labels(Map.of("string", "string"))
.startupScript("string")
.storageConfigs(ClusterOrchestratorSlurmLoginNodesStorageConfigArgs.builder()
.id("string")
.localMount("string")
.build())
.build())
.nodeSets(ClusterOrchestratorSlurmNodeSetArgs.builder()
.id("string")
.computeId("string")
.computeInstance(ClusterOrchestratorSlurmNodeSetComputeInstanceArgs.builder()
.bootDisk(ClusterOrchestratorSlurmNodeSetComputeInstanceBootDiskArgs.builder()
.sizeGb("string")
.type("string")
.build())
.labels(Map.of("string", "string"))
.startupScript("string")
.build())
.maxDynamicNodeCount("string")
.staticNodeCount("string")
.storageConfigs(ClusterOrchestratorSlurmNodeSetStorageConfigArgs.builder()
.id("string")
.localMount("string")
.build())
.build())
.partitions(ClusterOrchestratorSlurmPartitionArgs.builder()
.id("string")
.nodeSetIds("string")
.build())
.defaultPartition("string")
.epilogBashScripts("string")
.prologBashScripts("string")
.build())
.build())
.project("string")
.storageResources(ClusterStorageResourceArgs.builder()
.config(ClusterStorageResourceConfigArgs.builder()
.existingBucket(ClusterStorageResourceConfigExistingBucketArgs.builder()
.bucket("string")
.build())
.existingFilestore(ClusterStorageResourceConfigExistingFilestoreArgs.builder()
.filestore("string")
.build())
.existingLustre(ClusterStorageResourceConfigExistingLustreArgs.builder()
.lustre("string")
.build())
.newBucket(ClusterStorageResourceConfigNewBucketArgs.builder()
.bucket("string")
.autoclass(ClusterStorageResourceConfigNewBucketAutoclassArgs.builder()
.enabled(false)
.build())
.hierarchicalNamespace(ClusterStorageResourceConfigNewBucketHierarchicalNamespaceArgs.builder()
.enabled(false)
.build())
.storageClass("string")
.build())
.newFilestore(ClusterStorageResourceConfigNewFilestoreArgs.builder()
.fileShares(ClusterStorageResourceConfigNewFilestoreFileShareArgs.builder()
.capacityGb("string")
.fileShare("string")
.build())
.filestore("string")
.tier("string")
.description("string")
.protocol("string")
.build())
.newLustre(ClusterStorageResourceConfigNewLustreArgs.builder()
.capacityGb("string")
.filesystem("string")
.lustre("string")
.description("string")
.build())
.build())
.id("string")
.buckets(ClusterStorageResourceBucketArgs.builder()
.bucket("string")
.build())
.filestores(ClusterStorageResourceFilestoreArgs.builder()
.filestore("string")
.build())
.lustres(ClusterStorageResourceLustreArgs.builder()
.lustre("string")
.build())
.build())
.build());
examplecluster_resource_resource_from_hypercomputeclustercluster = gcp.hypercomputecluster.Cluster("exampleclusterResourceResourceFromHypercomputeclustercluster",
cluster_id="string",
location="string",
compute_resources=[{
"config": {
"new_flex_start_instances": {
"machine_type": "string",
"max_duration": "string",
"zone": "string",
},
"new_on_demand_instances": {
"machine_type": "string",
"zone": "string",
},
"new_reserved_instances": {
"reservation": "string",
},
"new_spot_instances": {
"machine_type": "string",
"zone": "string",
"termination_action": "string",
},
},
"id": "string",
}],
description="string",
labels={
"string": "string",
},
network_resources=[{
"id": "string",
"config": {
"existing_network": {
"network": "string",
"subnetwork": "string",
},
"new_network": {
"network": "string",
"description": "string",
},
},
"networks": [{
"network": "string",
"subnetwork": "string",
}],
}],
orchestrator={
"slurm": {
"login_nodes": {
"count": "string",
"machine_type": "string",
"zone": "string",
"boot_disk": {
"size_gb": "string",
"type": "string",
},
"enable_os_login": False,
"enable_public_ips": False,
"instances": [{
"instance": "string",
}],
"labels": {
"string": "string",
},
"startup_script": "string",
"storage_configs": [{
"id": "string",
"local_mount": "string",
}],
},
"node_sets": [{
"id": "string",
"compute_id": "string",
"compute_instance": {
"boot_disk": {
"size_gb": "string",
"type": "string",
},
"labels": {
"string": "string",
},
"startup_script": "string",
},
"max_dynamic_node_count": "string",
"static_node_count": "string",
"storage_configs": [{
"id": "string",
"local_mount": "string",
}],
}],
"partitions": [{
"id": "string",
"node_set_ids": ["string"],
}],
"default_partition": "string",
"epilog_bash_scripts": ["string"],
"prolog_bash_scripts": ["string"],
},
},
project="string",
storage_resources=[{
"config": {
"existing_bucket": {
"bucket": "string",
},
"existing_filestore": {
"filestore": "string",
},
"existing_lustre": {
"lustre": "string",
},
"new_bucket": {
"bucket": "string",
"autoclass": {
"enabled": False,
},
"hierarchical_namespace": {
"enabled": False,
},
"storage_class": "string",
},
"new_filestore": {
"file_shares": [{
"capacity_gb": "string",
"file_share": "string",
}],
"filestore": "string",
"tier": "string",
"description": "string",
"protocol": "string",
},
"new_lustre": {
"capacity_gb": "string",
"filesystem": "string",
"lustre": "string",
"description": "string",
},
},
"id": "string",
"buckets": [{
"bucket": "string",
}],
"filestores": [{
"filestore": "string",
}],
"lustres": [{
"lustre": "string",
}],
}])
const exampleclusterResourceResourceFromHypercomputeclustercluster = new gcp.hypercomputecluster.Cluster("exampleclusterResourceResourceFromHypercomputeclustercluster", {
clusterId: "string",
location: "string",
computeResources: [{
config: {
newFlexStartInstances: {
machineType: "string",
maxDuration: "string",
zone: "string",
},
newOnDemandInstances: {
machineType: "string",
zone: "string",
},
newReservedInstances: {
reservation: "string",
},
newSpotInstances: {
machineType: "string",
zone: "string",
terminationAction: "string",
},
},
id: "string",
}],
description: "string",
labels: {
string: "string",
},
networkResources: [{
id: "string",
config: {
existingNetwork: {
network: "string",
subnetwork: "string",
},
newNetwork: {
network: "string",
description: "string",
},
},
networks: [{
network: "string",
subnetwork: "string",
}],
}],
orchestrator: {
slurm: {
loginNodes: {
count: "string",
machineType: "string",
zone: "string",
bootDisk: {
sizeGb: "string",
type: "string",
},
enableOsLogin: false,
enablePublicIps: false,
instances: [{
instance: "string",
}],
labels: {
string: "string",
},
startupScript: "string",
storageConfigs: [{
id: "string",
localMount: "string",
}],
},
nodeSets: [{
id: "string",
computeId: "string",
computeInstance: {
bootDisk: {
sizeGb: "string",
type: "string",
},
labels: {
string: "string",
},
startupScript: "string",
},
maxDynamicNodeCount: "string",
staticNodeCount: "string",
storageConfigs: [{
id: "string",
localMount: "string",
}],
}],
partitions: [{
id: "string",
nodeSetIds: ["string"],
}],
defaultPartition: "string",
epilogBashScripts: ["string"],
prologBashScripts: ["string"],
},
},
project: "string",
storageResources: [{
config: {
existingBucket: {
bucket: "string",
},
existingFilestore: {
filestore: "string",
},
existingLustre: {
lustre: "string",
},
newBucket: {
bucket: "string",
autoclass: {
enabled: false,
},
hierarchicalNamespace: {
enabled: false,
},
storageClass: "string",
},
newFilestore: {
fileShares: [{
capacityGb: "string",
fileShare: "string",
}],
filestore: "string",
tier: "string",
description: "string",
protocol: "string",
},
newLustre: {
capacityGb: "string",
filesystem: "string",
lustre: "string",
description: "string",
},
},
id: "string",
buckets: [{
bucket: "string",
}],
filestores: [{
filestore: "string",
}],
lustres: [{
lustre: "string",
}],
}],
});
type: gcp:hypercomputecluster:Cluster
properties:
clusterId: string
computeResources:
- config:
newFlexStartInstances:
machineType: string
maxDuration: string
zone: string
newOnDemandInstances:
machineType: string
zone: string
newReservedInstances:
reservation: string
newSpotInstances:
machineType: string
terminationAction: string
zone: string
id: string
description: string
labels:
string: string
location: string
networkResources:
- config:
existingNetwork:
network: string
subnetwork: string
newNetwork:
description: string
network: string
id: string
networks:
- network: string
subnetwork: string
orchestrator:
slurm:
defaultPartition: string
epilogBashScripts:
- string
loginNodes:
bootDisk:
sizeGb: string
type: string
count: string
enableOsLogin: false
enablePublicIps: false
instances:
- instance: string
labels:
string: string
machineType: string
startupScript: string
storageConfigs:
- id: string
localMount: string
zone: string
nodeSets:
- computeId: string
computeInstance:
bootDisk:
sizeGb: string
type: string
labels:
string: string
startupScript: string
id: string
maxDynamicNodeCount: string
staticNodeCount: string
storageConfigs:
- id: string
localMount: string
partitions:
- id: string
nodeSetIds:
- string
prologBashScripts:
- string
project: string
storageResources:
- buckets:
- bucket: string
config:
existingBucket:
bucket: string
existingFilestore:
filestore: string
existingLustre:
lustre: string
newBucket:
autoclass:
enabled: false
bucket: string
hierarchicalNamespace:
enabled: false
storageClass: string
newFilestore:
description: string
fileShares:
- capacityGb: string
fileShare: string
filestore: string
protocol: string
tier: string
newLustre:
capacityGb: string
description: string
filesystem: string
lustre: string
filestores:
- filestore: string
id: string
lustres:
- lustre: string
Cluster Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
The Cluster resource accepts the following input properties:
- Cluster
Id string - ID of the cluster to create. Must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters).
- Location string
- Resource ID segment making up resource
name. It identifies the resource within its parent collection as described in https://google.aip.dev/122. - Compute
Resources List<ClusterCompute Resource> - Compute resources available to the cluster. Keys specify the ID of the compute resource by which it can be referenced elsewhere, and must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters). Structure is documented below.
- Description string
- User-provided description of the cluster.
- Labels Dictionary<string, string>
- Labels applied
to the cluster. Labels can be used to organize clusters and to filter them
in queries.
Note: This field is non-authoritative, and will only manage the labels present in your configuration.
Please refer to the field
effective_labels for all of the labels present on the resource. - Network
Resources List<ClusterNetwork Resource> - Network resources available to the cluster. Must contain at most one value. Keys specify the ID of the network resource by which it can be referenced elsewhere, and must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters). Structure is documented below.
- Orchestrator
Cluster
Orchestrator - The component responsible for scheduling and running workloads on the cluster as well as providing the user interface for interacting with the cluster at runtime. Structure is documented below.
- Project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- Storage
Resources List<ClusterStorage Resource> - Storage resources available to the cluster. Keys specify the ID of the storage resource by which it can be referenced elsewhere, and must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters). Structure is documented below.
- Cluster
Id string - ID of the cluster to create. Must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters).
- Location string
- Resource ID segment making up resource
name. It identifies the resource within its parent collection as described in https://google.aip.dev/122. - Compute
Resources []ClusterCompute Resource Args - Compute resources available to the cluster. Keys specify the ID of the compute resource by which it can be referenced elsewhere, and must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters). Structure is documented below.
- Description string
- User-provided description of the cluster.
- Labels map[string]string
- Labels applied
to the cluster. Labels can be used to organize clusters and to filter them
in queries.
Note: This field is non-authoritative, and will only manage the labels present in your configuration.
Please refer to the field
effective_labels for all of the labels present on the resource. - Network
Resources []ClusterNetwork Resource Args - Network resources available to the cluster. Must contain at most one value. Keys specify the ID of the network resource by which it can be referenced elsewhere, and must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters). Structure is documented below.
- Orchestrator
Cluster
Orchestrator Args - The component responsible for scheduling and running workloads on the cluster as well as providing the user interface for interacting with the cluster at runtime. Structure is documented below.
- Project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- Storage
Resources []ClusterStorage Resource Args - Storage resources available to the cluster. Keys specify the ID of the storage resource by which it can be referenced elsewhere, and must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters). Structure is documented below.
- cluster
Id String - ID of the cluster to create. Must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters).
- location String
- Resource ID segment making up resource
name. It identifies the resource within its parent collection as described in https://google.aip.dev/122. - compute
Resources List<ClusterCompute Resource> - Compute resources available to the cluster. Keys specify the ID of the compute resource by which it can be referenced elsewhere, and must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters). Structure is documented below.
- description String
- User-provided description of the cluster.
- labels Map<String,String>
- Labels applied
to the cluster. Labels can be used to organize clusters and to filter them
in queries.
Note: This field is non-authoritative, and will only manage the labels present in your configuration.
Please refer to the field
effective_labels for all of the labels present on the resource. - network
Resources List<ClusterNetwork Resource> - Network resources available to the cluster. Must contain at most one value. Keys specify the ID of the network resource by which it can be referenced elsewhere, and must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters). Structure is documented below.
- orchestrator
Cluster
Orchestrator - The component responsible for scheduling and running workloads on the cluster as well as providing the user interface for interacting with the cluster at runtime. Structure is documented below.
- project String
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- storage
Resources List<ClusterStorage Resource> - Storage resources available to the cluster. Keys specify the ID of the storage resource by which it can be referenced elsewhere, and must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters). Structure is documented below.
- cluster
Id string - ID of the cluster to create. Must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters).
- location string
- Resource ID segment making up resource
name. It identifies the resource within its parent collection as described in https://google.aip.dev/122. - compute
Resources ClusterCompute Resource[] - Compute resources available to the cluster. Keys specify the ID of the compute resource by which it can be referenced elsewhere, and must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters). Structure is documented below.
- description string
- User-provided description of the cluster.
- labels {[key: string]: string}
- Labels applied
to the cluster. Labels can be used to organize clusters and to filter them
in queries.
Note: This field is non-authoritative, and will only manage the labels present in your configuration.
Please refer to the field
effective_labels for all of the labels present on the resource. - network
Resources ClusterNetwork Resource[] - Network resources available to the cluster. Must contain at most one value. Keys specify the ID of the network resource by which it can be referenced elsewhere, and must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters). Structure is documented below.
- orchestrator
Cluster
Orchestrator - The component responsible for scheduling and running workloads on the cluster as well as providing the user interface for interacting with the cluster at runtime. Structure is documented below.
- project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- storage
Resources ClusterStorage Resource[] - Storage resources available to the cluster. Keys specify the ID of the storage resource by which it can be referenced elsewhere, and must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters). Structure is documented below.
- cluster_
id str - ID of the cluster to create. Must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters).
- location str
- Resource ID segment making up resource
name. It identifies the resource within its parent collection as described in https://google.aip.dev/122. - compute_
resources Sequence[ClusterCompute Resource Args] - Compute resources available to the cluster. Keys specify the ID of the compute resource by which it can be referenced elsewhere, and must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters). Structure is documented below.
- description str
- User-provided description of the cluster.
- labels Mapping[str, str]
- Labels applied
to the cluster. Labels can be used to organize clusters and to filter them
in queries.
Note: This field is non-authoritative, and will only manage the labels present in your configuration.
Please refer to the field
effective_labels for all of the labels present on the resource. - network_
resources Sequence[ClusterNetwork Resource Args] - Network resources available to the cluster. Must contain at most one value. Keys specify the ID of the network resource by which it can be referenced elsewhere, and must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters). Structure is documented below.
- orchestrator
Cluster
Orchestrator Args - The component responsible for scheduling and running workloads on the cluster as well as providing the user interface for interacting with the cluster at runtime. Structure is documented below.
- project str
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- storage_
resources Sequence[ClusterStorage Resource Args] - Storage resources available to the cluster. Keys specify the ID of the storage resource by which it can be referenced elsewhere, and must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters). Structure is documented below.
- cluster
Id String - ID of the cluster to create. Must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters).
- location String
- Resource ID segment making up resource
name. It identifies the resource within its parent collection as described in https://google.aip.dev/122. - compute
Resources List<Property Map> - Compute resources available to the cluster. Keys specify the ID of the compute resource by which it can be referenced elsewhere, and must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters). Structure is documented below.
- description String
- User-provided description of the cluster.
- labels Map<String>
- Labels applied
to the cluster. Labels can be used to organize clusters and to filter them
in queries.
Note: This field is non-authoritative, and will only manage the labels present in your configuration.
Please refer to the field
effective_labels for all of the labels present on the resource. - network
Resources List<Property Map> - Network resources available to the cluster. Must contain at most one value. Keys specify the ID of the network resource by which it can be referenced elsewhere, and must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters). Structure is documented below.
- orchestrator Property Map
- The component responsible for scheduling and running workloads on the cluster as well as providing the user interface for interacting with the cluster at runtime. Structure is documented below.
- project String
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- storage
Resources List<Property Map> - Storage resources available to the cluster. Keys specify the ID of the storage resource by which it can be referenced elsewhere, and must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters). Structure is documented below.
Outputs
All input properties are implicitly available as output properties. Additionally, the Cluster resource produces the following output properties:
- Create
Time string - Time that the cluster was originally created.
- Effective
Labels Dictionary<string, string> - All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- Id string
- The provider-assigned unique ID for this managed resource.
- Name string
- Identifier. Relative resource name of the cluster, in the
format
projects/{project}/locations/{location}/clusters/{cluster}. - Pulumi
Labels Dictionary<string, string> - The combination of labels configured directly on the resource and default labels configured on the provider.
- Reconciling bool
- Indicates whether changes to the cluster are currently in flight. If this
is
true, then the current state might not match the cluster's intended state. - Update
Time string - Time that the cluster was most recently updated.
- Create
Time string - Time that the cluster was originally created.
- Effective
Labels map[string]string - All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- Id string
- The provider-assigned unique ID for this managed resource.
- Name string
- Identifier. Relative resource name of the cluster, in the
format
projects/{project}/locations/{location}/clusters/{cluster}. - Pulumi
Labels map[string]string - The combination of labels configured directly on the resource and default labels configured on the provider.
- Reconciling bool
- Indicates whether changes to the cluster are currently in flight. If this
is
true, then the current state might not match the cluster's intended state. - Update
Time string - Time that the cluster was most recently updated.
- create
Time String - Time that the cluster was originally created.
- effective
Labels Map<String,String> - All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- id String
- The provider-assigned unique ID for this managed resource.
- name String
- Identifier. Relative resource name of the cluster, in the
format
projects/{project}/locations/{location}/clusters/{cluster}. - pulumi
Labels Map<String,String> - The combination of labels configured directly on the resource and default labels configured on the provider.
- reconciling Boolean
- Indicates whether changes to the cluster are currently in flight. If this
is
true, then the current state might not match the cluster's intended state. - update
Time String - Time that the cluster was most recently updated.
- create
Time string - Time that the cluster was originally created.
- effective
Labels {[key: string]: string} - All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- id string
- The provider-assigned unique ID for this managed resource.
- name string
- Identifier. Relative resource name of the cluster, in the
format
projects/{project}/locations/{location}/clusters/{cluster}. - pulumi
Labels {[key: string]: string} - The combination of labels configured directly on the resource and default labels configured on the provider.
- reconciling boolean
- Indicates whether changes to the cluster are currently in flight. If this
is
true, then the current state might not match the cluster's intended state. - update
Time string - Time that the cluster was most recently updated.
- create_
time str - Time that the cluster was originally created.
- effective_
labels Mapping[str, str] - All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- id str
- The provider-assigned unique ID for this managed resource.
- name str
- Identifier. Relative resource name of the cluster, in the
format
projects/{project}/locations/{location}/clusters/{cluster}. - pulumi_
labels Mapping[str, str] - The combination of labels configured directly on the resource and default labels configured on the provider.
- reconciling bool
- Indicates whether changes to the cluster are currently in flight. If this
is
true, then the current state might not match the cluster's intended state. - update_
time str - Time that the cluster was most recently updated.
- create
Time String - Time that the cluster was originally created.
- effective
Labels Map<String> - All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- id String
- The provider-assigned unique ID for this managed resource.
- name String
- Identifier. Relative resource name of the cluster, in the
format
projects/{project}/locations/{location}/clusters/{cluster}. - pulumi
Labels Map<String> - The combination of labels configured directly on the resource and default labels configured on the provider.
- reconciling Boolean
- Indicates whether changes to the cluster are currently in flight. If this
is
true, then the current state might not match the cluster's intended state. - update
Time String - Time that the cluster was most recently updated.
Look up Existing Cluster Resource
Get an existing Cluster resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: ClusterState, opts?: CustomResourceOptions): Cluster@staticmethod
def get(resource_name: str,
id: str,
opts: Optional[ResourceOptions] = None,
cluster_id: Optional[str] = None,
compute_resources: Optional[Sequence[ClusterComputeResourceArgs]] = None,
create_time: Optional[str] = None,
description: Optional[str] = None,
effective_labels: Optional[Mapping[str, str]] = None,
labels: Optional[Mapping[str, str]] = None,
location: Optional[str] = None,
name: Optional[str] = None,
network_resources: Optional[Sequence[ClusterNetworkResourceArgs]] = None,
orchestrator: Optional[ClusterOrchestratorArgs] = None,
project: Optional[str] = None,
pulumi_labels: Optional[Mapping[str, str]] = None,
reconciling: Optional[bool] = None,
storage_resources: Optional[Sequence[ClusterStorageResourceArgs]] = None,
update_time: Optional[str] = None) -> Clusterfunc GetCluster(ctx *Context, name string, id IDInput, state *ClusterState, opts ...ResourceOption) (*Cluster, error)public static Cluster Get(string name, Input<string> id, ClusterState? state, CustomResourceOptions? opts = null)public static Cluster get(String name, Output<String> id, ClusterState state, CustomResourceOptions options)resources: _: type: gcp:hypercomputecluster:Cluster get: id: ${id}- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- Cluster
Id string - ID of the cluster to create. Must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters).
- Compute
Resources List<ClusterCompute Resource> - Compute resources available to the cluster. Keys specify the ID of the compute resource by which it can be referenced elsewhere, and must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters). Structure is documented below.
- Create
Time string - Time that the cluster was originally created.
- Description string
- User-provided description of the cluster.
- Effective
Labels Dictionary<string, string> - All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- Labels Dictionary<string, string>
- Labels applied
to the cluster. Labels can be used to organize clusters and to filter them
in queries.
Note: This field is non-authoritative, and will only manage the labels present in your configuration.
Please refer to the field
effective_labels for all of the labels present on the resource. - Location string
- Resource ID segment making up resource
name. It identifies the resource within its parent collection as described in https://google.aip.dev/122. - Name string
- Identifier. Relative resource name of the cluster, in the
format
projects/{project}/locations/{location}/clusters/{cluster}. - Network
Resources List<ClusterNetwork Resource> - Network resources available to the cluster. Must contain at most one value. Keys specify the ID of the network resource by which it can be referenced elsewhere, and must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters). Structure is documented below.
- Orchestrator
Cluster
Orchestrator - The component responsible for scheduling and running workloads on the cluster as well as providing the user interface for interacting with the cluster at runtime. Structure is documented below.
- Project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- Pulumi
Labels Dictionary<string, string> - The combination of labels configured directly on the resource and default labels configured on the provider.
- Reconciling bool
- Indicates whether changes to the cluster are currently in flight. If this
is
true, then the current state might not match the cluster's intended state. - Storage
Resources List<ClusterStorage Resource> - Storage resources available to the cluster. Keys specify the ID of the storage resource by which it can be referenced elsewhere, and must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters). Structure is documented below.
- Update
Time string - Time that the cluster was most recently updated.
- Cluster
Id string - ID of the cluster to create. Must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters).
- Compute
Resources []ClusterCompute Resource Args - Compute resources available to the cluster. Keys specify the ID of the compute resource by which it can be referenced elsewhere, and must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters). Structure is documented below.
- Create
Time string - Time that the cluster was originally created.
- Description string
- User-provided description of the cluster.
- Effective
Labels map[string]string - All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- Labels map[string]string
- Labels applied
to the cluster. Labels can be used to organize clusters and to filter them
in queries.
Note: This field is non-authoritative, and will only manage the labels present in your configuration.
Please refer to the field
effective_labels for all of the labels present on the resource. - Location string
- Resource ID segment making up resource
name. It identifies the resource within its parent collection as described in https://google.aip.dev/122. - Name string
- Identifier. Relative resource name of the cluster, in the
format
projects/{project}/locations/{location}/clusters/{cluster}. - Network
Resources []ClusterNetwork Resource Args - Network resources available to the cluster. Must contain at most one value. Keys specify the ID of the network resource by which it can be referenced elsewhere, and must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters). Structure is documented below.
- Orchestrator
Cluster
Orchestrator Args - The component responsible for scheduling and running workloads on the cluster as well as providing the user interface for interacting with the cluster at runtime. Structure is documented below.
- Project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- Pulumi
Labels map[string]string - The combination of labels configured directly on the resource and default labels configured on the provider.
- Reconciling bool
- Indicates whether changes to the cluster are currently in flight. If this
is
true, then the current state might not match the cluster's intended state. - Storage
Resources []ClusterStorage Resource Args - Storage resources available to the cluster. Keys specify the ID of the storage resource by which it can be referenced elsewhere, and must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters). Structure is documented below.
- Update
Time string - Time that the cluster was most recently updated.
- cluster
Id String - ID of the cluster to create. Must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters).
- compute
Resources List<ClusterCompute Resource> - Compute resources available to the cluster. Keys specify the ID of the compute resource by which it can be referenced elsewhere, and must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters). Structure is documented below.
- create
Time String - Time that the cluster was originally created.
- description String
- User-provided description of the cluster.
- effective
Labels Map<String,String> - All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- labels Map<String,String>
- Labels applied
to the cluster. Labels can be used to organize clusters and to filter them
in queries.
Note: This field is non-authoritative, and will only manage the labels present in your configuration.
Please refer to the field
effective_labels for all of the labels present on the resource. - location String
- Resource ID segment making up resource
name. It identifies the resource within its parent collection as described in https://google.aip.dev/122. - name String
- Identifier. Relative resource name of the cluster, in the
format
projects/{project}/locations/{location}/clusters/{cluster}. - network
Resources List<ClusterNetwork Resource> - Network resources available to the cluster. Must contain at most one value. Keys specify the ID of the network resource by which it can be referenced elsewhere, and must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters). Structure is documented below.
- orchestrator
Cluster
Orchestrator - The component responsible for scheduling and running workloads on the cluster as well as providing the user interface for interacting with the cluster at runtime. Structure is documented below.
- project String
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- pulumi
Labels Map<String,String> - The combination of labels configured directly on the resource and default labels configured on the provider.
- reconciling Boolean
- Indicates whether changes to the cluster are currently in flight. If this
is
true, then the current state might not match the cluster's intended state. - storage
Resources List<ClusterStorage Resource> - Storage resources available to the cluster. Keys specify the ID of the storage resource by which it can be referenced elsewhere, and must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters). Structure is documented below.
- update
Time String - Time that the cluster was most recently updated.
- cluster
Id string - ID of the cluster to create. Must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters).
- compute
Resources ClusterCompute Resource[] - Compute resources available to the cluster. Keys specify the ID of the compute resource by which it can be referenced elsewhere, and must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters). Structure is documented below.
- create
Time string - Time that the cluster was originally created.
- description string
- User-provided description of the cluster.
- effective
Labels {[key: string]: string} - All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- labels {[key: string]: string}
- Labels applied
to the cluster. Labels can be used to organize clusters and to filter them
in queries.
Note: This field is non-authoritative, and will only manage the labels present in your configuration.
Please refer to the field
effective_labels for all of the labels present on the resource. - location string
- Resource ID segment making up resource
name. It identifies the resource within its parent collection as described in https://google.aip.dev/122. - name string
- Identifier. Relative resource name of the cluster, in the
format
projects/{project}/locations/{location}/clusters/{cluster}. - network
Resources ClusterNetwork Resource[] - Network resources available to the cluster. Must contain at most one value. Keys specify the ID of the network resource by which it can be referenced elsewhere, and must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters). Structure is documented below.
- orchestrator
Cluster
Orchestrator - The component responsible for scheduling and running workloads on the cluster as well as providing the user interface for interacting with the cluster at runtime. Structure is documented below.
- project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- pulumi
Labels {[key: string]: string} - The combination of labels configured directly on the resource and default labels configured on the provider.
- reconciling boolean
- Indicates whether changes to the cluster are currently in flight. If this
is
true, then the current state might not match the cluster's intended state. - storage
Resources ClusterStorage Resource[] - Storage resources available to the cluster. Keys specify the ID of the storage resource by which it can be referenced elsewhere, and must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters). Structure is documented below.
- update
Time string - Time that the cluster was most recently updated.
- cluster_
id str - ID of the cluster to create. Must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters).
- compute_
resources Sequence[ClusterCompute Resource Args] - Compute resources available to the cluster. Keys specify the ID of the compute resource by which it can be referenced elsewhere, and must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters). Structure is documented below.
- create_
time str - Time that the cluster was originally created.
- description str
- User-provided description of the cluster.
- effective_
labels Mapping[str, str] - All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- labels Mapping[str, str]
- Labels applied
to the cluster. Labels can be used to organize clusters and to filter them
in queries.
Note: This field is non-authoritative, and will only manage the labels present in your configuration.
Please refer to the field
effective_labels for all of the labels present on the resource. - location str
- Resource ID segment making up resource
name. It identifies the resource within its parent collection as described in https://google.aip.dev/122. - name str
- Identifier. Relative resource name of the cluster, in the
format
projects/{project}/locations/{location}/clusters/{cluster}. - network_
resources Sequence[ClusterNetwork Resource Args] - Network resources available to the cluster. Must contain at most one value. Keys specify the ID of the network resource by which it can be referenced elsewhere, and must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters). Structure is documented below.
- orchestrator
Cluster
Orchestrator Args - The component responsible for scheduling and running workloads on the cluster as well as providing the user interface for interacting with the cluster at runtime. Structure is documented below.
- project str
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- pulumi_
labels Mapping[str, str] - The combination of labels configured directly on the resource and default labels configured on the provider.
- reconciling bool
- Indicates whether changes to the cluster are currently in flight. If this
is
true, then the current state might not match the cluster's intended state. - storage_
resources Sequence[ClusterStorage Resource Args] - Storage resources available to the cluster. Keys specify the ID of the storage resource by which it can be referenced elsewhere, and must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters). Structure is documented below.
- update_
time str - Time that the cluster was most recently updated.
- cluster
Id String - ID of the cluster to create. Must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters).
- compute
Resources List<Property Map> - Compute resources available to the cluster. Keys specify the ID of the compute resource by which it can be referenced elsewhere, and must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters). Structure is documented below.
- create
Time String - Time that the cluster was originally created.
- description String
- User-provided description of the cluster.
- effective
Labels Map<String> - All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- labels Map<String>
- Labels applied
to the cluster. Labels can be used to organize clusters and to filter them
in queries.
Note: This field is non-authoritative, and will only manage the labels present in your configuration.
Please refer to the field
effective_labels for all of the labels present on the resource. - location String
- Resource ID segment making up resource
name. It identifies the resource within its parent collection as described in https://google.aip.dev/122. - name String
- Identifier. Relative resource name of the cluster, in the
format
projects/{project}/locations/{location}/clusters/{cluster}. - network
Resources List<Property Map> - Network resources available to the cluster. Must contain at most one value. Keys specify the ID of the network resource by which it can be referenced elsewhere, and must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters). Structure is documented below.
- orchestrator Property Map
- The component responsible for scheduling and running workloads on the cluster as well as providing the user interface for interacting with the cluster at runtime. Structure is documented below.
- project String
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- pulumi
Labels Map<String> - The combination of labels configured directly on the resource and default labels configured on the provider.
- reconciling Boolean
- Indicates whether changes to the cluster are currently in flight. If this
is
true, then the current state might not match the cluster's intended state. - storage
Resources List<Property Map> - Storage resources available to the cluster. Keys specify the ID of the storage resource by which it can be referenced elsewhere, and must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters). Structure is documented below.
- update
Time String - Time that the cluster was most recently updated.
Supporting Types
ClusterComputeResource, ClusterComputeResourceArgs
- Config
Cluster
Compute Resource Config - Describes how a compute resource should be created at runtime. Structure is documented below.
- Id string
- The identifier for this object. Format specified above.
- Config
Cluster
Compute Resource Config - Describes how a compute resource should be created at runtime. Structure is documented below.
- Id string
- The identifier for this object. Format specified above.
- config
Cluster
Compute Resource Config - Describes how a compute resource should be created at runtime. Structure is documented below.
- id String
- The identifier for this object. Format specified above.
- config
Cluster
Compute Resource Config - Describes how a compute resource should be created at runtime. Structure is documented below.
- id string
- The identifier for this object. Format specified above.
- config
Cluster
Compute Resource Config - Describes how a compute resource should be created at runtime. Structure is documented below.
- id str
- The identifier for this object. Format specified above.
- config Property Map
- Describes how a compute resource should be created at runtime. Structure is documented below.
- id String
- The identifier for this object. Format specified above.
ClusterComputeResourceConfig, ClusterComputeResourceConfigArgs
- NewFlexStartInstances ClusterComputeResourceConfigNewFlexStartInstances - When set in a ComputeResourceConfig, indicates that VM instances should be created using Flex Start. Structure is documented below.
- NewOnDemandInstances ClusterComputeResourceConfigNewOnDemandInstances - When set in a ComputeResourceConfig, indicates that on-demand (i.e., using the standard provisioning model) VM instances should be created. Structure is documented below.
- NewReservedInstances ClusterComputeResourceConfigNewReservedInstances - When set in a ComputeResourceConfig, indicates that VM instances should be created from a reservation. Structure is documented below.
- NewSpotInstances ClusterComputeResourceConfigNewSpotInstances - When set in a ComputeResourceConfig, indicates that spot VM instances should be created. Structure is documented below.
- NewFlexStartInstances ClusterComputeResourceConfigNewFlexStartInstances - When set in a ComputeResourceConfig, indicates that VM instances should be created using Flex Start. Structure is documented below.
- NewOnDemandInstances ClusterComputeResourceConfigNewOnDemandInstances - When set in a ComputeResourceConfig, indicates that on-demand (i.e., using the standard provisioning model) VM instances should be created. Structure is documented below.
- NewReservedInstances ClusterComputeResourceConfigNewReservedInstances - When set in a ComputeResourceConfig, indicates that VM instances should be created from a reservation. Structure is documented below.
- NewSpotInstances ClusterComputeResourceConfigNewSpotInstances - When set in a ComputeResourceConfig, indicates that spot VM instances should be created. Structure is documented below.
- newFlexStartInstances ClusterComputeResourceConfigNewFlexStartInstances - When set in a ComputeResourceConfig, indicates that VM instances should be created using Flex Start. Structure is documented below.
- newOnDemandInstances ClusterComputeResourceConfigNewOnDemandInstances - When set in a ComputeResourceConfig, indicates that on-demand (i.e., using the standard provisioning model) VM instances should be created. Structure is documented below.
- newReservedInstances ClusterComputeResourceConfigNewReservedInstances - When set in a ComputeResourceConfig, indicates that VM instances should be created from a reservation. Structure is documented below.
- newSpotInstances ClusterComputeResourceConfigNewSpotInstances - When set in a ComputeResourceConfig, indicates that spot VM instances should be created. Structure is documented below.
- newFlexStartInstances ClusterComputeResourceConfigNewFlexStartInstances - When set in a ComputeResourceConfig, indicates that VM instances should be created using Flex Start. Structure is documented below.
- newOnDemandInstances ClusterComputeResourceConfigNewOnDemandInstances - When set in a ComputeResourceConfig, indicates that on-demand (i.e., using the standard provisioning model) VM instances should be created. Structure is documented below.
- newReservedInstances ClusterComputeResourceConfigNewReservedInstances - When set in a ComputeResourceConfig, indicates that VM instances should be created from a reservation. Structure is documented below.
- newSpotInstances ClusterComputeResourceConfigNewSpotInstances - When set in a ComputeResourceConfig, indicates that spot VM instances should be created. Structure is documented below.
- new_flex_start_instances ClusterComputeResourceConfigNewFlexStartInstances - When set in a ComputeResourceConfig, indicates that VM instances should be created using Flex Start. Structure is documented below.
- new_on_demand_instances ClusterComputeResourceConfigNewOnDemandInstances - When set in a ComputeResourceConfig, indicates that on-demand (i.e., using the standard provisioning model) VM instances should be created. Structure is documented below.
- new_reserved_instances ClusterComputeResourceConfigNewReservedInstances - When set in a ComputeResourceConfig, indicates that VM instances should be created from a reservation. Structure is documented below.
- new_spot_instances ClusterComputeResourceConfigNewSpotInstances - When set in a ComputeResourceConfig, indicates that spot VM instances should be created. Structure is documented below.
- newFlexStartInstances Property Map - When set in a ComputeResourceConfig, indicates that VM instances should be created using Flex Start. Structure is documented below.
- newOnDemandInstances Property Map - When set in a ComputeResourceConfig, indicates that on-demand (i.e., using the standard provisioning model) VM instances should be created. Structure is documented below.
- newReservedInstances Property Map - When set in a ComputeResourceConfig, indicates that VM instances should be created from a reservation. Structure is documented below.
- newSpotInstances Property Map - When set in a ComputeResourceConfig, indicates that spot VM instances should be created. Structure is documented below.
ClusterComputeResourceConfigNewFlexStartInstances, ClusterComputeResourceConfigNewFlexStartInstancesArgs
- Machine
Type string - Name of the Compute Engine machine
type to use, e.g.
n2-standard-2. - Max
Duration string - Specifies the time limit for created instances. Instances will be terminated at the end of this duration.
- Zone string
- Name of the zone in which VM instances should run, e.g.,
us-central1-a. Must be in the same region as the cluster, and must match the zone of any other resources specified in the cluster.
- Machine
Type string - Name of the Compute Engine machine
type to use, e.g.
n2-standard-2. - Max
Duration string - Specifies the time limit for created instances. Instances will be terminated at the end of this duration.
- Zone string
- Name of the zone in which VM instances should run, e.g.,
us-central1-a. Must be in the same region as the cluster, and must match the zone of any other resources specified in the cluster.
- machine
Type String - Name of the Compute Engine machine
type to use, e.g.
n2-standard-2. - max
Duration String - Specifies the time limit for created instances. Instances will be terminated at the end of this duration.
- zone String
- Name of the zone in which VM instances should run, e.g.,
us-central1-a. Must be in the same region as the cluster, and must match the zone of any other resources specified in the cluster.
- machine
Type string - Name of the Compute Engine machine
type to use, e.g.
n2-standard-2. - max
Duration string - Specifies the time limit for created instances. Instances will be terminated at the end of this duration.
- zone string
- Name of the zone in which VM instances should run, e.g.,
us-central1-a. Must be in the same region as the cluster, and must match the zone of any other resources specified in the cluster.
- machine_
type str - Name of the Compute Engine machine
type to use, e.g.
n2-standard-2. - max_
duration str - Specifies the time limit for created instances. Instances will be terminated at the end of this duration.
- zone str
- Name of the zone in which VM instances should run, e.g.,
us-central1-a. Must be in the same region as the cluster, and must match the zone of any other resources specified in the cluster.
- machine
Type String - Name of the Compute Engine machine
type to use, e.g.
n2-standard-2. - max
Duration String - Specifies the time limit for created instances. Instances will be terminated at the end of this duration.
- zone String
- Name of the zone in which VM instances should run, e.g.,
us-central1-a. Must be in the same region as the cluster, and must match the zone of any other resources specified in the cluster.
ClusterComputeResourceConfigNewOnDemandInstances, ClusterComputeResourceConfigNewOnDemandInstancesArgs
- Machine
Type string - Name of the Compute Engine machine
type to use, e.g.
n2-standard-2. - Zone string
- Name of the zone in which VM instances should run, e.g.,
us-central1-a. Must be in the same region as the cluster, and must match the zone of any other resources specified in the cluster.
- Machine
Type string - Name of the Compute Engine machine
type to use, e.g.
n2-standard-2. - Zone string
- Name of the zone in which VM instances should run, e.g.,
us-central1-a. Must be in the same region as the cluster, and must match the zone of any other resources specified in the cluster.
- machine
Type String - Name of the Compute Engine machine
type to use, e.g.
n2-standard-2. - zone String
- Name of the zone in which VM instances should run, e.g.,
us-central1-a. Must be in the same region as the cluster, and must match the zone of any other resources specified in the cluster.
- machine
Type string - Name of the Compute Engine machine
type to use, e.g.
n2-standard-2. - zone string
- Name of the zone in which VM instances should run, e.g.,
us-central1-a. Must be in the same region as the cluster, and must match the zone of any other resources specified in the cluster.
- machine_
type str - Name of the Compute Engine machine
type to use, e.g.
n2-standard-2. - zone str
- Name of the zone in which VM instances should run, e.g.,
us-central1-a. Must be in the same region as the cluster, and must match the zone of any other resources specified in the cluster.
- machine
Type String - Name of the Compute Engine machine
type to use, e.g.
n2-standard-2. - zone String
- Name of the zone in which VM instances should run, e.g.,
us-central1-a. Must be in the same region as the cluster, and must match the zone of any other resources specified in the cluster.
ClusterComputeResourceConfigNewReservedInstances, ClusterComputeResourceConfigNewReservedInstancesArgs
- Reservation string
- Name of the reservation from which VM instances should be created, in the
format
projects/{project}/zones/{zone}/reservations/{reservation}.
- Reservation string
- Name of the reservation from which VM instances should be created, in the
format
projects/{project}/zones/{zone}/reservations/{reservation}.
- reservation String
- Name of the reservation from which VM instances should be created, in the
format
projects/{project}/zones/{zone}/reservations/{reservation}.
- reservation string
- Name of the reservation from which VM instances should be created, in the
format
projects/{project}/zones/{zone}/reservations/{reservation}.
- reservation str
- Name of the reservation from which VM instances should be created, in the
format
projects/{project}/zones/{zone}/reservations/{reservation}.
- reservation String
- Name of the reservation from which VM instances should be created, in the
format
projects/{project}/zones/{zone}/reservations/{reservation}.
ClusterComputeResourceConfigNewSpotInstances, ClusterComputeResourceConfigNewSpotInstancesArgs
- Machine
Type string - Name of the Compute Engine machine
type to use, e.g.
n2-standard-2. - Zone string
- Name of the zone in which VM instances should run, e.g.,
us-central1-a. Must be in the same region as the cluster, and must match the zone of any other resources specified in the cluster. - Termination
Action string - Specifies the termination action of the instance. Possible values: STOP, DELETE.
- Machine
Type string - Name of the Compute Engine machine
type to use, e.g.
n2-standard-2. - Zone string
- Name of the zone in which VM instances should run, e.g.,
us-central1-a. Must be in the same region as the cluster, and must match the zone of any other resources specified in the cluster. - Termination
Action string - Specifies the termination action of the instance. Possible values: STOP, DELETE.
- machine
Type String - Name of the Compute Engine machine
type to use, e.g.
n2-standard-2. - zone String
- Name of the zone in which VM instances should run, e.g.,
us-central1-a. Must be in the same region as the cluster, and must match the zone of any other resources specified in the cluster. - termination
Action String - Specifies the termination action of the instance. Possible values: STOP, DELETE.
- machine
Type string - Name of the Compute Engine machine
type to use, e.g.
n2-standard-2. - zone string
- Name of the zone in which VM instances should run, e.g.,
us-central1-a. Must be in the same region as the cluster, and must match the zone of any other resources specified in the cluster. - termination
Action string - Specifies the termination action of the instance. Possible values: STOP, DELETE.
- machine_
type str - Name of the Compute Engine machine
type to use, e.g.
n2-standard-2. - zone str
- Name of the zone in which VM instances should run, e.g.,
us-central1-a. Must be in the same region as the cluster, and must match the zone of any other resources specified in the cluster. - termination_
action str - Specifies the termination action of the instance. Possible values: STOP, DELETE.
- machine
Type String - Name of the Compute Engine machine
type to use, e.g.
n2-standard-2. - zone String
- Name of the zone in which VM instances should run, e.g.,
us-central1-a. Must be in the same region as the cluster, and must match the zone of any other resources specified in the cluster. - termination
Action String - Specifies the termination action of the instance. Possible values: STOP, DELETE.
ClusterNetworkResource, ClusterNetworkResourceArgs
- Id string
- The identifier for this object. Format specified above.
- Config
Cluster
Network Resource Config - Describes how a network resource should be initialized. Each network resource can either be imported from an existing Google Cloud resource or initialized when the cluster is created. Structure is documented below.
- Networks
List<Cluster
Network Resource Network> - (Output) A reference to a VPC network in Google Compute Engine. Structure is documented below.
- Id string
- The identifier for this object. Format specified above.
- Config
Cluster
Network Resource Config - Describes how a network resource should be initialized. Each network resource can either be imported from an existing Google Cloud resource or initialized when the cluster is created. Structure is documented below.
- Networks
[]Cluster
Network Resource Network - (Output) A reference to a VPC network in Google Compute Engine. Structure is documented below.
- id String
- The identifier for this object. Format specified above.
- config
Cluster
Network Resource Config - Describes how a network resource should be initialized. Each network resource can either be imported from an existing Google Cloud resource or initialized when the cluster is created. Structure is documented below.
- networks
List<Cluster
Network Resource Network> - (Output) A reference to a VPC network in Google Compute Engine. Structure is documented below.
- id string
- The identifier for this object. Format specified above.
- config
Cluster
Network Resource Config - Describes how a network resource should be initialized. Each network resource can either be imported from an existing Google Cloud resource or initialized when the cluster is created. Structure is documented below.
- networks
Cluster
Network Resource Network[] - (Output) A reference to a VPC network in Google Compute Engine. Structure is documented below.
- id str
- The identifier for this object. Format specified above.
- config
Cluster
Network Resource Config - Describes how a network resource should be initialized. Each network resource can either be imported from an existing Google Cloud resource or initialized when the cluster is created. Structure is documented below.
- networks
Sequence[Cluster
Network Resource Network] - (Output) A reference to a VPC network in Google Compute Engine. Structure is documented below.
- id String
- The identifier for this object. Format specified above.
- config Property Map
- Describes how a network resource should be initialized. Each network resource can either be imported from an existing Google Cloud resource or initialized when the cluster is created. Structure is documented below.
- networks List<Property Map>
- (Output) A reference to a VPC network in Google Compute Engine. Structure is documented below.
ClusterNetworkResourceConfig, ClusterNetworkResourceConfigArgs
- ExistingNetwork ClusterNetworkResourceConfigExistingNetwork - When set in a NetworkResourceConfig, indicates that an existing network should be imported. Structure is documented below.
- NewNetwork ClusterNetworkResourceConfigNewNetwork - When set in a NetworkResourceConfig, indicates that a new network should be created. Structure is documented below.
- ExistingNetwork ClusterNetworkResourceConfigExistingNetwork - When set in a NetworkResourceConfig, indicates that an existing network should be imported. Structure is documented below.
- NewNetwork ClusterNetworkResourceConfigNewNetwork - When set in a NetworkResourceConfig, indicates that a new network should be created. Structure is documented below.
- existingNetwork ClusterNetworkResourceConfigExistingNetwork - When set in a NetworkResourceConfig, indicates that an existing network should be imported. Structure is documented below.
- newNetwork ClusterNetworkResourceConfigNewNetwork - When set in a NetworkResourceConfig, indicates that a new network should be created. Structure is documented below.
- existingNetwork ClusterNetworkResourceConfigExistingNetwork - When set in a NetworkResourceConfig, indicates that an existing network should be imported. Structure is documented below.
- newNetwork ClusterNetworkResourceConfigNewNetwork - When set in a NetworkResourceConfig, indicates that a new network should be created. Structure is documented below.
- existing_network ClusterNetworkResourceConfigExistingNetwork - When set in a NetworkResourceConfig, indicates that an existing network should be imported. Structure is documented below.
- new_network ClusterNetworkResourceConfigNewNetwork - When set in a NetworkResourceConfig, indicates that a new network should be created. Structure is documented below.
- existing
Network Property Map - When set in a NetworkResourceConfig, indicates that an existing network should be imported. Structure is documented below.
- new
Network Property Map - When set in a NetworkResourceConfig, indicates that a new network should be created. Structure is documented below.
ClusterNetworkResourceConfigExistingNetwork, ClusterNetworkResourceConfigExistingNetworkArgs
- Network string
- Name of the network to import, in the format
projects/{project}/global/networks/{network}. - Subnetwork string
- Particular subnetwork to use, in the format
projects/{project}/regions/{region}/subnetworks/{subnetwork}.
- Network string
- Name of the network to import, in the format
projects/{project}/global/networks/{network}. - Subnetwork string
- Particular subnetwork to use, in the format
projects/{project}/regions/{region}/subnetworks/{subnetwork}.
- network String
- Name of the network to import, in the format
projects/{project}/global/networks/{network}. - subnetwork String
- Particular subnetwork to use, in the format
projects/{project}/regions/{region}/subnetworks/{subnetwork}.
- network string
- Name of the network to import, in the format
projects/{project}/global/networks/{network}. - subnetwork string
- Particular subnetwork to use, in the format
projects/{project}/regions/{region}/subnetworks/{subnetwork}.
- network str
- Name of the network to import, in the format
projects/{project}/global/networks/{network}. - subnetwork str
- Particular subnetwork to use, in the format
projects/{project}/regions/{region}/subnetworks/{subnetwork}.
- network String
- Name of the network to import, in the format
projects/{project}/global/networks/{network}. - subnetwork String
- Particular subnetwork to use, in the format
projects/{project}/regions/{region}/subnetworks/{subnetwork}.
ClusterNetworkResourceConfigNewNetwork, ClusterNetworkResourceConfigNewNetworkArgs
- Network string
- (Output)
Name of the network, in the format
projects/{project}/global/networks/{network}. - Description string
- Description of the network. Maximum of 2048 characters.
- Network string
- (Output)
Name of the network, in the format
projects/{project}/global/networks/{network}. - Description string
- Description of the network. Maximum of 2048 characters.
- network String
- (Output)
Name of the network, in the format
projects/{project}/global/networks/{network}. - description String
- Description of the network. Maximum of 2048 characters.
- network string
- (Output)
Name of the network, in the format
projects/{project}/global/networks/{network}. - description string
- Description of the network. Maximum of 2048 characters.
- network str
- (Output)
Name of the network, in the format
projects/{project}/global/networks/{network}. - description str
- Description of the network. Maximum of 2048 characters.
- network String
- (Output)
Name of the network, in the format
projects/{project}/global/networks/{network}. - description String
- Description of the network. Maximum of 2048 characters.
ClusterNetworkResourceNetwork, ClusterNetworkResourceNetworkArgs
- Network string
- Name of the network, in the format 'projects/{project}/global/networks/{network}'.
- Subnetwork string
- Name of the particular subnetwork being used by the cluster, in the format 'projects/{project}/regions/{region}/subnetworks/{subnetwork}'.
- Network string
- Name of the network, in the format 'projects/{project}/global/networks/{network}'.
- Subnetwork string
- Name of the particular subnetwork being used by the cluster, in the format 'projects/{project}/regions/{region}/subnetworks/{subnetwork}'.
- network String
- Name of the network, in the format 'projects/{project}/global/networks/{network}'.
- subnetwork String
- Name of the particular subnetwork being used by the cluster, in the format 'projects/{project}/regions/{region}/subnetworks/{subnetwork}'.
- network string
- Name of the network, in the format 'projects/{project}/global/networks/{network}'.
- subnetwork string
- Name of the particular subnetwork being used by the cluster, in the format 'projects/{project}/regions/{region}/subnetworks/{subnetwork}'.
- network str
- Name of the network, in the format 'projects/{project}/global/networks/{network}'.
- subnetwork str
- Name of the particular subnetwork being used by the cluster, in the format 'projects/{project}/regions/{region}/subnetworks/{subnetwork}'.
- network String
- Name of the network, in the format 'projects/{project}/global/networks/{network}'.
- subnetwork String
- Name of the particular subnetwork being used by the cluster, in the format 'projects/{project}/regions/{region}/subnetworks/{subnetwork}'.
ClusterOrchestrator, ClusterOrchestratorArgs
- Slurm
Cluster
Orchestrator Slurm - When set in Orchestrator, indicates that the cluster should use Slurm as the orchestrator. Structure is documented below.
- Slurm
Cluster
Orchestrator Slurm - When set in Orchestrator, indicates that the cluster should use Slurm as the orchestrator. Structure is documented below.
- slurm
Cluster
Orchestrator Slurm - When set in Orchestrator, indicates that the cluster should use Slurm as the orchestrator. Structure is documented below.
- slurm
Cluster
Orchestrator Slurm - When set in Orchestrator, indicates that the cluster should use Slurm as the orchestrator. Structure is documented below.
- slurm
Cluster
Orchestrator Slurm - When set in Orchestrator, indicates that the cluster should use Slurm as the orchestrator. Structure is documented below.
- slurm Property Map
- When set in Orchestrator, indicates that the cluster should use Slurm as the orchestrator. Structure is documented below.
ClusterOrchestratorSlurm, ClusterOrchestratorSlurmArgs
- Login
Nodes ClusterOrchestrator Slurm Login Nodes - Configuration for Slurm login nodes in the cluster. Login nodes are Compute Engine VM instances that allow users to access the cluster over SSH. Structure is documented below.
- Node
Sets List<ClusterOrchestrator Slurm Node Set> - Configuration of Slurm nodesets, which define groups of compute resources that can be used by Slurm. At least one compute node is required. Structure is documented below.
- Partitions
List<Cluster
Orchestrator Slurm Partition> - Configuration of Slurm partitions, which group one or more nodesets. Acts as a queue against which jobs can be submitted. At least one partition is required. Structure is documented below.
- Default
Partition string - Default partition to use for submitted jobs that do not explicitly specify a partition. Required if and only if there is more than one partition, in which case it must match the id of one of the partitions.
- Epilog
Bash Scripts List<string> - Slurm epilog scripts, which will be executed by compute nodes whenever a node finishes running a job. Values must not be empty.
- Prolog
Bash Scripts List<string> - Slurm prolog scripts, which will be executed by compute nodes before a node begins running a new job. Values must not be empty.
- Login
Nodes ClusterOrchestrator Slurm Login Nodes - Configuration for Slurm login nodes in the cluster. Login nodes are Compute Engine VM instances that allow users to access the cluster over SSH. Structure is documented below.
- Node
Sets []ClusterOrchestrator Slurm Node Set - Configuration of Slurm nodesets, which define groups of compute resources that can be used by Slurm. At least one compute node is required. Structure is documented below.
- Partitions
[]Cluster
Orchestrator Slurm Partition - Configuration of Slurm partitions, which group one or more nodesets. Acts as a queue against which jobs can be submitted. At least one partition is required. Structure is documented below.
- Default
Partition string - Default partition to use for submitted jobs that do not explicitly specify a partition. Required if and only if there is more than one partition, in which case it must match the id of one of the partitions.
- Epilog
Bash Scripts []string - Slurm epilog scripts, which will be executed by compute nodes whenever a node finishes running a job. Values must not be empty.
- Prolog
Bash Scripts []string - Slurm prolog scripts, which will be executed by compute nodes before a node begins running a new job. Values must not be empty.
- login
Nodes ClusterOrchestrator Slurm Login Nodes - Configuration for Slurm login nodes in the cluster. Login nodes are Compute Engine VM instances that allow users to access the cluster over SSH. Structure is documented below.
- node
Sets List<ClusterOrchestrator Slurm Node Set> - Configuration of Slurm nodesets, which define groups of compute resources that can be used by Slurm. At least one compute node is required. Structure is documented below.
- partitions
List<Cluster
Orchestrator Slurm Partition> - Configuration of Slurm partitions, which group one or more nodesets. Acts as a queue against which jobs can be submitted. At least one partition is required. Structure is documented below.
- default
Partition String - Default partition to use for submitted jobs that do not explicitly specify a partition. Required if and only if there is more than one partition, in which case it must match the id of one of the partitions.
- epilog
Bash Scripts List<String> - Slurm epilog scripts, which will be executed by compute nodes whenever a node finishes running a job. Values must not be empty.
- prolog
Bash Scripts List<String> - Slurm prolog scripts, which will be executed by compute nodes before a node begins running a new job. Values must not be empty.
- login
Nodes ClusterOrchestrator Slurm Login Nodes - Configuration for Slurm login nodes in the cluster. Login nodes are Compute Engine VM instances that allow users to access the cluster over SSH. Structure is documented below.
- node
Sets ClusterOrchestrator Slurm Node Set[] - Configuration of Slurm nodesets, which define groups of compute resources that can be used by Slurm. At least one compute node is required. Structure is documented below.
- partitions
Cluster
Orchestrator Slurm Partition[] - Configuration of Slurm partitions, which group one or more nodesets. Acts as a queue against which jobs can be submitted. At least one partition is required. Structure is documented below.
- default
Partition string - Default partition to use for submitted jobs that do not explicitly specify a partition. Required if and only if there is more than one partition, in which case it must match the id of one of the partitions.
- epilog
Bash Scripts string[] - Slurm epilog scripts, which will be executed by compute nodes whenever a node finishes running a job. Values must not be empty.
- prolog
Bash Scripts string[] - Slurm prolog scripts, which will be executed by compute nodes before a node begins running a new job. Values must not be empty.
- login_
nodes ClusterOrchestrator Slurm Login Nodes - Configuration for Slurm login nodes in the cluster. Login nodes are Compute Engine VM instances that allow users to access the cluster over SSH. Structure is documented below.
- node_
sets Sequence[ClusterOrchestrator Slurm Node Set] - Configuration of Slurm nodesets, which define groups of compute resources that can be used by Slurm. At least one compute node is required. Structure is documented below.
- partitions
Sequence[Cluster
Orchestrator Slurm Partition] - Configuration of Slurm partitions, which group one or more nodesets. Acts as a queue against which jobs can be submitted. At least one partition is required. Structure is documented below.
- default_
partition str - Default partition to use for submitted jobs that do not explicitly specify a partition. Required if and only if there is more than one partition, in which case it must match the id of one of the partitions.
- epilog_
bash_scripts Sequence[str] - Slurm epilog scripts, which will be executed by compute nodes whenever a node finishes running a job. Values must not be empty.
- prolog_
bash_scripts Sequence[str] - Slurm prolog scripts, which will be executed by compute nodes before a node begins running a new job. Values must not be empty.
- login
Nodes Property Map - Configuration for Slurm login nodes in the cluster. Login nodes are Compute Engine VM instances that allow users to access the cluster over SSH. Structure is documented below.
- node
Sets List<Property Map> - Configuration of Slurm nodesets, which define groups of compute resources that can be used by Slurm. At least one compute node is required. Structure is documented below.
- partitions List<Property Map>
- Configuration of Slurm partitions, which group one or more nodesets. Acts as a queue against which jobs can be submitted. At least one partition is required. Structure is documented below.
- default
Partition String - Default partition to use for submitted jobs that do not explicitly specify a partition. Required if and only if there is more than one partition, in which case it must match the id of one of the partitions.
- epilog
Bash Scripts List<String> - Slurm epilog scripts, which will be executed by compute nodes whenever a node finishes running a job. Values must not be empty.
- prolog
Bash Scripts List<String> - Slurm prolog scripts, which will be executed by compute nodes before a node begins running a new job. Values must not be empty.
ClusterOrchestratorSlurmLoginNodes, ClusterOrchestratorSlurmLoginNodesArgs
- Count string
- Number of login node instances to create.
- Machine
Type string - Name of the Compute Engine machine
type to use for
login nodes, e.g.
n2-standard-2. - Zone string
- Name of the zone in which login nodes should run, e.g.,
us-central1-a. Must be in the same region as the cluster, and must match the zone of any other resources specified in the cluster. - Boot
Disk ClusterOrchestrator Slurm Login Nodes Boot Disk - A Persistent disk used as the boot disk for a Compute Engine VM instance. Structure is documented below.
- Enable
Os Login bool - Whether OS Login should be enabled on login node instances.
- Enable
Public Ips bool - Whether login node instances should be assigned external IP addresses.
- Instances
List<Cluster
Orchestrator Slurm Login Nodes Instance> - (Output) Information about the login node instances that were created in Compute Engine. Structure is documented below.
- Labels Dictionary<string, string>
- Labels that should be applied to each login node instance.
- Startup
Script string - Startup
script
to be run on each login node instance. Max 256KB.
The script must complete within the system-defined default timeout of 5
minutes. For tasks that require more time, consider running them in the
background using methods such as
`&` or `nohup`. - Storage
Configs List<ClusterOrchestrator Slurm Login Nodes Storage Config> - How storage resources should be mounted on each login node. Structure is documented below.
- Count string
- Number of login node instances to create.
- Machine
Type string - Name of the Compute Engine machine
type to use for
login nodes, e.g.
n2-standard-2. - Zone string
- Name of the zone in which login nodes should run, e.g.,
us-central1-a. Must be in the same region as the cluster, and must match the zone of any other resources specified in the cluster. - Boot
Disk ClusterOrchestrator Slurm Login Nodes Boot Disk - A Persistent disk used as the boot disk for a Compute Engine VM instance. Structure is documented below.
- Enable
Os Login bool - Whether OS Login should be enabled on login node instances.
- Enable
Public Ips bool - Whether login node instances should be assigned external IP addresses.
- Instances
[]Cluster
Orchestrator Slurm Login Nodes Instance - (Output) Information about the login node instances that were created in Compute Engine. Structure is documented below.
- Labels map[string]string
- Labels that should be applied to each login node instance.
- Startup
Script string - Startup
script
to be run on each login node instance. Max 256KB.
The script must complete within the system-defined default timeout of 5
minutes. For tasks that require more time, consider running them in the
background using methods such as
`&` or `nohup`. - Storage
Configs []ClusterOrchestrator Slurm Login Nodes Storage Config - How storage resources should be mounted on each login node. Structure is documented below.
- count String
- Number of login node instances to create.
- machine
Type String - Name of the Compute Engine machine
type to use for
login nodes, e.g.
n2-standard-2. - zone String
- Name of the zone in which login nodes should run, e.g.,
us-central1-a. Must be in the same region as the cluster, and must match the zone of any other resources specified in the cluster. - boot
Disk ClusterOrchestrator Slurm Login Nodes Boot Disk - A Persistent disk used as the boot disk for a Compute Engine VM instance. Structure is documented below.
- enable
Os Login Boolean - Whether OS Login should be enabled on login node instances.
- enable
Public Ips Boolean - Whether login node instances should be assigned external IP addresses.
- instances
List<Cluster
Orchestrator Slurm Login Nodes Instance> - (Output) Information about the login node instances that were created in Compute Engine. Structure is documented below.
- labels Map<String,String>
- Labels that should be applied to each login node instance.
- startup
Script String - Startup
script
to be run on each login node instance. Max 256KB.
The script must complete within the system-defined default timeout of 5
minutes. For tasks that require more time, consider running them in the
background using methods such as
`&` or `nohup`. - storage
Configs List<ClusterOrchestrator Slurm Login Nodes Storage Config> - How storage resources should be mounted on each login node. Structure is documented below.
- count string
- Number of login node instances to create.
- machine
Type string - Name of the Compute Engine machine
type to use for
login nodes, e.g.
n2-standard-2. - zone string
- Name of the zone in which login nodes should run, e.g.,
us-central1-a. Must be in the same region as the cluster, and must match the zone of any other resources specified in the cluster. - boot
Disk ClusterOrchestrator Slurm Login Nodes Boot Disk - A Persistent disk used as the boot disk for a Compute Engine VM instance. Structure is documented below.
- enable
Os Login boolean - Whether OS Login should be enabled on login node instances.
- enable
Public Ips boolean - Whether login node instances should be assigned external IP addresses.
- instances
Cluster
Orchestrator Slurm Login Nodes Instance[] - (Output) Information about the login node instances that were created in Compute Engine. Structure is documented below.
- labels {[key: string]: string}
- Labels that should be applied to each login node instance.
- startup
Script string - Startup
script
to be run on each login node instance. Max 256KB.
The script must complete within the system-defined default timeout of 5
minutes. For tasks that require more time, consider running them in the
background using methods such as
`&` or `nohup`. - storage
Configs ClusterOrchestrator Slurm Login Nodes Storage Config[] - How storage resources should be mounted on each login node. Structure is documented below.
- count str
- Number of login node instances to create.
- machine_
type str - Name of the Compute Engine machine
type to use for
login nodes, e.g.
n2-standard-2. - zone str
- Name of the zone in which login nodes should run, e.g.,
us-central1-a. Must be in the same region as the cluster, and must match the zone of any other resources specified in the cluster. - boot_
disk ClusterOrchestrator Slurm Login Nodes Boot Disk - A Persistent disk used as the boot disk for a Compute Engine VM instance. Structure is documented below.
- enable_
os_login bool - Whether OS Login should be enabled on login node instances.
- enable_
public_ips bool - Whether login node instances should be assigned external IP addresses.
- instances
Sequence[Cluster
Orchestrator Slurm Login Nodes Instance] - (Output) Information about the login node instances that were created in Compute Engine. Structure is documented below.
- labels Mapping[str, str]
- Labels that should be applied to each login node instance.
- startup_
script str - Startup
script
to be run on each login node instance. Max 256KB.
The script must complete within the system-defined default timeout of 5
minutes. For tasks that require more time, consider running them in the
background using methods such as
`&` or `nohup`. - storage_
configs Sequence[ClusterOrchestrator Slurm Login Nodes Storage Config] - How storage resources should be mounted on each login node. Structure is documented below.
- count String
- Number of login node instances to create.
- machine
Type String - Name of the Compute Engine machine
type to use for
login nodes, e.g.
n2-standard-2. - zone String
- Name of the zone in which login nodes should run, e.g.,
us-central1-a. Must be in the same region as the cluster, and must match the zone of any other resources specified in the cluster. - boot
Disk Property Map - A Persistent disk used as the boot disk for a Compute Engine VM instance. Structure is documented below.
- enable
Os Login Boolean - Whether OS Login should be enabled on login node instances.
- enable
Public Ips Boolean - Whether login node instances should be assigned external IP addresses.
- instances List<Property Map>
- (Output) Information about the login node instances that were created in Compute Engine. Structure is documented below.
- labels Map<String>
- Labels that should be applied to each login node instance.
- startup
Script String - Startup
script
to be run on each login node instance. Max 256KB.
The script must complete within the system-defined default timeout of 5
minutes. For tasks that require more time, consider running them in the
background using methods such as
`&` or `nohup`. - storage
Configs List<Property Map> - How storage resources should be mounted on each login node. Structure is documented below.
ClusterOrchestratorSlurmLoginNodesBootDisk, ClusterOrchestratorSlurmLoginNodesBootDiskArgs
- Size
Gb string - Size of the disk in gigabytes. Must be at least 10GB.
- Type string
- Persistent disk
type, in the
format
projects/{project}/zones/{zone}/diskTypes/{disk_type}.
- Size
Gb string - Size of the disk in gigabytes. Must be at least 10GB.
- Type string
- Persistent disk
type, in the
format
projects/{project}/zones/{zone}/diskTypes/{disk_type}.
- size
Gb String - Size of the disk in gigabytes. Must be at least 10GB.
- type String
- Persistent disk
type, in the
format
projects/{project}/zones/{zone}/diskTypes/{disk_type}.
- size
Gb string - Size of the disk in gigabytes. Must be at least 10GB.
- type string
- Persistent disk
type, in the
format
projects/{project}/zones/{zone}/diskTypes/{disk_type}.
- size_
gb str - Size of the disk in gigabytes. Must be at least 10GB.
- type str
- Persistent disk
type, in the
format
projects/{project}/zones/{zone}/diskTypes/{disk_type}.
- size
Gb String - Size of the disk in gigabytes. Must be at least 10GB.
- type String
- Persistent disk
type, in the
format
projects/{project}/zones/{zone}/diskTypes/{disk_type}.
ClusterOrchestratorSlurmLoginNodesInstance, ClusterOrchestratorSlurmLoginNodesInstanceArgs
- Instance string
- Name of the VM instance, in the format 'projects/{project}/zones/{zone}/instances/{instance}'.
- Instance string
- Name of the VM instance, in the format 'projects/{project}/zones/{zone}/instances/{instance}'.
- instance String
- Name of the VM instance, in the format 'projects/{project}/zones/{zone}/instances/{instance}'.
- instance string
- Name of the VM instance, in the format 'projects/{project}/zones/{zone}/instances/{instance}'.
- instance str
- Name of the VM instance, in the format 'projects/{project}/zones/{zone}/instances/{instance}'.
- instance String
- Name of the VM instance, in the format 'projects/{project}/zones/{zone}/instances/{instance}'.
ClusterOrchestratorSlurmLoginNodesStorageConfig, ClusterOrchestratorSlurmLoginNodesStorageConfigArgs
- Id string
- ID of the storage resource to mount, which must match a key in the cluster's storage_resources.
- Local
Mount string - A directory inside the VM instance's file system where the storage resource
should be mounted (e.g.,
/mnt/share).
- Id string
- ID of the storage resource to mount, which must match a key in the cluster's storage_resources.
- Local
Mount string - A directory inside the VM instance's file system where the storage resource
should be mounted (e.g.,
/mnt/share).
- id String
- ID of the storage resource to mount, which must match a key in the cluster's storage_resources.
- local
Mount String - A directory inside the VM instance's file system where the storage resource
should be mounted (e.g.,
/mnt/share).
- id string
- ID of the storage resource to mount, which must match a key in the cluster's storage_resources.
- local
Mount string - A directory inside the VM instance's file system where the storage resource
should be mounted (e.g.,
/mnt/share).
- id str
- ID of the storage resource to mount, which must match a key in the cluster's storage_resources.
- local_
mount str - A directory inside the VM instance's file system where the storage resource
should be mounted (e.g.,
/mnt/share).
- id String
- ID of the storage resource to mount, which must match a key in the cluster's storage_resources.
- local
Mount String - A directory inside the VM instance's file system where the storage resource
should be mounted (e.g.,
/mnt/share).
ClusterOrchestratorSlurmNodeSet, ClusterOrchestratorSlurmNodeSetArgs
- Id string
- Identifier for the nodeset, which allows it to be referenced by partitions. Must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters).
- Compute
Id string - ID of the compute resource on which this nodeset will run. Must match a key in the cluster's compute_resources.
- Compute
Instance ClusterOrchestrator Slurm Node Set Compute Instance - When set in a SlurmNodeSet, indicates that the nodeset should be backed by Compute Engine VM instances. Structure is documented below.
- Max
Dynamic Node Count string - Controls how many additional nodes a cluster can bring online to handle workloads. Set this value to enable dynamic node creation and limit the number of additional nodes the cluster can bring online. Leave empty if you do not want the cluster to create nodes dynamically, and instead rely only on static nodes.
- Static
Node Count string - Number of nodes to be statically created for this nodeset. The cluster will attempt to ensure that at least this many nodes exist at all times.
- Storage
Configs List<ClusterOrchestrator Slurm Node Set Storage Config> - How storage resources should be mounted on each compute node. Structure is documented below.
- Id string
- Identifier for the nodeset, which allows it to be referenced by partitions. Must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters).
- Compute
Id string - ID of the compute resource on which this nodeset will run. Must match a key in the cluster's compute_resources.
- Compute
Instance ClusterOrchestrator Slurm Node Set Compute Instance - When set in a SlurmNodeSet, indicates that the nodeset should be backed by Compute Engine VM instances. Structure is documented below.
- Max
Dynamic Node Count string - Controls how many additional nodes a cluster can bring online to handle workloads. Set this value to enable dynamic node creation and limit the number of additional nodes the cluster can bring online. Leave empty if you do not want the cluster to create nodes dynamically, and instead rely only on static nodes.
- Static
Node Count string - Number of nodes to be statically created for this nodeset. The cluster will attempt to ensure that at least this many nodes exist at all times.
- Storage
Configs []ClusterOrchestrator Slurm Node Set Storage Config - How storage resources should be mounted on each compute node. Structure is documented below.
- id String
- Identifier for the nodeset, which allows it to be referenced by partitions. Must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters).
- compute
Id String - ID of the compute resource on which this nodeset will run. Must match a key in the cluster's compute_resources.
- compute
Instance ClusterOrchestrator Slurm Node Set Compute Instance - When set in a SlurmNodeSet, indicates that the nodeset should be backed by Compute Engine VM instances. Structure is documented below.
- max
Dynamic Node Count String - Controls how many additional nodes a cluster can bring online to handle workloads. Set this value to enable dynamic node creation and limit the number of additional nodes the cluster can bring online. Leave empty if you do not want the cluster to create nodes dynamically, and instead rely only on static nodes.
- static
Node Count String - Number of nodes to be statically created for this nodeset. The cluster will attempt to ensure that at least this many nodes exist at all times.
- storage
Configs List<ClusterOrchestrator Slurm Node Set Storage Config> - How storage resources should be mounted on each compute node. Structure is documented below.
- id string
- Identifier for the nodeset, which allows it to be referenced by partitions. Must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters).
- compute
Id string - ID of the compute resource on which this nodeset will run. Must match a key in the cluster's compute_resources.
- compute
Instance ClusterOrchestrator Slurm Node Set Compute Instance - When set in a SlurmNodeSet, indicates that the nodeset should be backed by Compute Engine VM instances. Structure is documented below.
- max
Dynamic Node Count string - Controls how many additional nodes a cluster can bring online to handle workloads. Set this value to enable dynamic node creation and limit the number of additional nodes the cluster can bring online. Leave empty if you do not want the cluster to create nodes dynamically, and instead rely only on static nodes.
- static
Node Count string - Number of nodes to be statically created for this nodeset. The cluster will attempt to ensure that at least this many nodes exist at all times.
- storage
Configs ClusterOrchestrator Slurm Node Set Storage Config[] - How storage resources should be mounted on each compute node. Structure is documented below.
- id str
- Identifier for the nodeset, which allows it to be referenced by partitions. Must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters).
- compute_
id str - ID of the compute resource on which this nodeset will run. Must match a key in the cluster's compute_resources.
- compute_
instance ClusterOrchestrator Slurm Node Set Compute Instance - When set in a SlurmNodeSet, indicates that the nodeset should be backed by Compute Engine VM instances. Structure is documented below.
- max_
dynamic_node_count str - Controls how many additional nodes a cluster can bring online to handle workloads. Set this value to enable dynamic node creation and limit the number of additional nodes the cluster can bring online. Leave empty if you do not want the cluster to create nodes dynamically, and instead rely only on static nodes.
- static_
node_count str - Number of nodes to be statically created for this nodeset. The cluster will attempt to ensure that at least this many nodes exist at all times.
- storage_
configs Sequence[ClusterOrchestrator Slurm Node Set Storage Config] - How storage resources should be mounted on each compute node. Structure is documented below.
- id String
- Identifier for the nodeset, which allows it to be referenced by partitions. Must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters).
- compute
Id String - ID of the compute resource on which this nodeset will run. Must match a key in the cluster's compute_resources.
- compute
Instance Property Map - When set in a SlurmNodeSet, indicates that the nodeset should be backed by Compute Engine VM instances. Structure is documented below.
- max
Dynamic Node Count String - Controls how many additional nodes a cluster can bring online to handle workloads. Set this value to enable dynamic node creation and limit the number of additional nodes the cluster can bring online. Leave empty if you do not want the cluster to create nodes dynamically, and instead rely only on static nodes.
- static
Node Count String - Number of nodes to be statically created for this nodeset. The cluster will attempt to ensure that at least this many nodes exist at all times.
- storage
Configs List<Property Map> - How storage resources should be mounted on each compute node. Structure is documented below.
ClusterOrchestratorSlurmNodeSetComputeInstance, ClusterOrchestratorSlurmNodeSetComputeInstanceArgs
- Boot
Disk ClusterOrchestrator Slurm Node Set Compute Instance Boot Disk - A Persistent disk used as the boot disk for a Compute Engine VM instance. Structure is documented below.
- Labels Dictionary<string, string>
- Labels that should be applied to each VM instance in the nodeset.
- Startup
Script string - Startup script to be run on each VM instance in the nodeset. Max 256KB.
- Boot
Disk ClusterOrchestrator Slurm Node Set Compute Instance Boot Disk - A Persistent disk used as the boot disk for a Compute Engine VM instance. Structure is documented below.
- Labels map[string]string
- Labels that should be applied to each VM instance in the nodeset.
- Startup
Script string - Startup script to be run on each VM instance in the nodeset. Max 256KB.
- boot
Disk ClusterOrchestrator Slurm Node Set Compute Instance Boot Disk - A Persistent disk used as the boot disk for a Compute Engine VM instance. Structure is documented below.
- labels Map<String,String>
- Labels that should be applied to each VM instance in the nodeset.
- startup
Script String - Startup script to be run on each VM instance in the nodeset. Max 256KB.
- boot
Disk ClusterOrchestrator Slurm Node Set Compute Instance Boot Disk - A Persistent disk used as the boot disk for a Compute Engine VM instance. Structure is documented below.
- labels {[key: string]: string}
- Labels that should be applied to each VM instance in the nodeset.
- startup
Script string - Startup script to be run on each VM instance in the nodeset. Max 256KB.
- boot_
disk ClusterOrchestrator Slurm Node Set Compute Instance Boot Disk - A Persistent disk used as the boot disk for a Compute Engine VM instance. Structure is documented below.
- labels Mapping[str, str]
- Labels that should be applied to each VM instance in the nodeset.
- startup_
script str - Startup script to be run on each VM instance in the nodeset. Max 256KB.
- boot
Disk Property Map - A Persistent disk used as the boot disk for a Compute Engine VM instance. Structure is documented below.
- labels Map<String>
- Labels that should be applied to each VM instance in the nodeset.
- startup
Script String - Startup script to be run on each VM instance in the nodeset. Max 256KB.
ClusterOrchestratorSlurmNodeSetComputeInstanceBootDisk, ClusterOrchestratorSlurmNodeSetComputeInstanceBootDiskArgs
- Size
Gb string - Size of the disk in gigabytes. Must be at least 10GB.
- Type string
- Persistent disk
type, in the
format
projects/{project}/zones/{zone}/diskTypes/{disk_type}.
- Size
Gb string - Size of the disk in gigabytes. Must be at least 10GB.
- Type string
- Persistent disk
type, in the
format
projects/{project}/zones/{zone}/diskTypes/{disk_type}.
- size
Gb String - Size of the disk in gigabytes. Must be at least 10GB.
- type String
- Persistent disk
type, in the
format
projects/{project}/zones/{zone}/diskTypes/{disk_type}.
- size
Gb string - Size of the disk in gigabytes. Must be at least 10GB.
- type string
- Persistent disk
type, in the
format
projects/{project}/zones/{zone}/diskTypes/{disk_type}.
- size_
gb str - Size of the disk in gigabytes. Must be at least 10GB.
- type str
- Persistent disk
type, in the
format
projects/{project}/zones/{zone}/diskTypes/{disk_type}.
- size
Gb String - Size of the disk in gigabytes. Must be at least 10GB.
- type String
- Persistent disk
type, in the
format
projects/{project}/zones/{zone}/diskTypes/{disk_type}.
ClusterOrchestratorSlurmNodeSetStorageConfig, ClusterOrchestratorSlurmNodeSetStorageConfigArgs
- Id string
- ID of the storage resource to mount, which must match a key in the cluster's storage_resources.
- Local
Mount string - A directory inside the VM instance's file system where the storage resource
should be mounted (e.g.,
/mnt/share).
- Id string
- ID of the storage resource to mount, which must match a key in the cluster's storage_resources.
- Local
Mount string - A directory inside the VM instance's file system where the storage resource
should be mounted (e.g.,
/mnt/share).
- id String
- ID of the storage resource to mount, which must match a key in the cluster's storage_resources.
- local
Mount String - A directory inside the VM instance's file system where the storage resource
should be mounted (e.g.,
/mnt/share).
- id string
- ID of the storage resource to mount, which must match a key in the cluster's storage_resources.
- local
Mount string - A directory inside the VM instance's file system where the storage resource
should be mounted (e.g.,
/mnt/share).
- id str
- ID of the storage resource to mount, which must match a key in the cluster's storage_resources.
- local_
mount str - A directory inside the VM instance's file system where the storage resource
should be mounted (e.g.,
/mnt/share).
- id String
- ID of the storage resource to mount, which must match a key in the cluster's storage_resources.
- local
Mount String - A directory inside the VM instance's file system where the storage resource
should be mounted (e.g.,
/mnt/share).
ClusterOrchestratorSlurmPartition, ClusterOrchestratorSlurmPartitionArgs
- Id string
- ID of the partition, which is how users will identify it. Must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters).
- Node
Set Ids List<string> - IDs of the nodesets that make up this partition. Values must match SlurmNodeSet.id.
- Id string
- ID of the partition, which is how users will identify it. Must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters).
- Node
Set Ids []string - IDs of the nodesets that make up this partition. Values must match SlurmNodeSet.id.
- id String
- ID of the partition, which is how users will identify it. Must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters).
- node
Set Ids List<String> - IDs of the nodesets that make up this partition. Values must match SlurmNodeSet.id.
- id string
- ID of the partition, which is how users will identify it. Must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters).
- node
Set Ids string[] - IDs of the nodesets that make up this partition. Values must match SlurmNodeSet.id.
- id str
- ID of the partition, which is how users will identify it. Must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters).
- node_
set_ ids Sequence[str] - IDs of the nodesets that make up this partition. Values must match SlurmNodeSet.id.
- id String
- ID of the partition, which is how users will identify it. Must conform to RFC-1034 (lower-case, alphanumeric, and at most 63 characters).
- node
Set Ids List<String> - IDs of the nodesets that make up this partition. Values must match SlurmNodeSet.id.
ClusterStorageResource, ClusterStorageResourceArgs
- Config
Cluster
Storage Resource Config - Describes how a storage resource should be initialized. Each storage resource can either be imported from an existing Google Cloud resource or initialized when the cluster is created. Structure is documented below.
- Id string
- The identifier for this object. Format specified above.
- Buckets
List<Cluster
Storage Resource Bucket> - (Output) A reference to a Cloud Storage bucket. Structure is documented below.
- Filestores
List<Cluster
Storage Resource Filestore> - (Output) A reference to a Filestore instance. Structure is documented below.
- Lustres
List<Cluster
Storage Resource Lustre> - (Output) A reference to a Managed Lustre instance. Structure is documented below.
The bucket block contains:
- Config
Cluster
Storage Resource Config - Describes how a storage resource should be initialized. Each storage resource can either be imported from an existing Google Cloud resource or initialized when the cluster is created. Structure is documented below.
- Id string
- The identifier for this object. Format specified above.
- Buckets
[]Cluster
Storage Resource Bucket - (Output) A reference to a Cloud Storage bucket. Structure is documented below.
- Filestores
[]Cluster
Storage Resource Filestore - (Output) A reference to a Filestore instance. Structure is documented below.
- Lustres
[]Cluster
Storage Resource Lustre - (Output) A reference to a Managed Lustre instance. Structure is documented below.
The bucket block contains:
- config
Cluster
Storage Resource Config - Describes how a storage resource should be initialized. Each storage resource can either be imported from an existing Google Cloud resource or initialized when the cluster is created. Structure is documented below.
- id String
- The identifier for this object. Format specified above.
- buckets
List<Cluster
Storage Resource Bucket> - (Output) A reference to a Cloud Storage bucket. Structure is documented below.
- filestores
List<Cluster
Storage Resource Filestore> - (Output) A reference to a Filestore instance. Structure is documented below.
- lustres
List<Cluster
Storage Resource Lustre> - (Output) A reference to a Managed Lustre instance. Structure is documented below.
The bucket block contains:
- config
Cluster
Storage Resource Config - Describes how a storage resource should be initialized. Each storage resource can either be imported from an existing Google Cloud resource or initialized when the cluster is created. Structure is documented below.
- id string
- The identifier for this object. Format specified above.
- buckets
Cluster
Storage Resource Bucket[] - (Output) A reference to a Cloud Storage bucket. Structure is documented below.
- filestores
Cluster
Storage Resource Filestore[] - (Output) A reference to a Filestore instance. Structure is documented below.
- lustres
Cluster
Storage Resource Lustre[] - (Output) A reference to a Managed Lustre instance. Structure is documented below.
The bucket block contains:
- config
Cluster
Storage Resource Config - Describes how a storage resource should be initialized. Each storage resource can either be imported from an existing Google Cloud resource or initialized when the cluster is created. Structure is documented below.
- id str
- The identifier for this object. Format specified above.
- buckets
Sequence[Cluster
Storage Resource Bucket] - (Output) A reference to a Cloud Storage bucket. Structure is documented below.
- filestores
Sequence[Cluster
Storage Resource Filestore] - (Output) A reference to a Filestore instance. Structure is documented below.
- lustres
Sequence[Cluster
Storage Resource Lustre] - (Output) A reference to a Managed Lustre instance. Structure is documented below.
The bucket block contains:
- config Property Map
- Describes how a storage resource should be initialized. Each storage resource can either be imported from an existing Google Cloud resource or initialized when the cluster is created. Structure is documented below.
- id String
- The identifier for this object. Format specified above.
- buckets List<Property Map>
- (Output) A reference to a Cloud Storage bucket. Structure is documented below.
- filestores List<Property Map>
- (Output) A reference to a Filestore instance. Structure is documented below.
- lustres List<Property Map>
- (Output) A reference to a Managed Lustre instance. Structure is documented below.
The bucket block contains:
ClusterStorageResourceBucket, ClusterStorageResourceBucketArgs
- Bucket string
- Name of the bucket.
- Bucket string
- Name of the bucket.
- bucket String
- Name of the bucket.
- bucket string
- Name of the bucket.
- bucket str
- Name of the bucket.
- bucket String
- Name of the bucket.
ClusterStorageResourceConfig, ClusterStorageResourceConfigArgs
- Existing
Bucket ClusterStorage Resource Config Existing Bucket - When set in a StorageResourceConfig, indicates that an existing Google Cloud Storage bucket should be imported. Structure is documented below.
- Existing
Filestore ClusterStorage Resource Config Existing Filestore - When set in a StorageResourceConfig, indicates that an existing Filestore instance should be imported. Structure is documented below.
- Existing
Lustre ClusterStorage Resource Config Existing Lustre - When set in a StorageResourceConfig, indicates that an existing Managed Lustre instance should be imported. Structure is documented below.
- New
Bucket ClusterStorage Resource Config New Bucket - When set in a StorageResourceConfig, indicates that a new Google Cloud Storage bucket should be created. Structure is documented below.
- New
Filestore ClusterStorage Resource Config New Filestore - When set in a StorageResourceConfig, indicates that a new Filestore instance should be created. Structure is documented below.
- New
Lustre ClusterStorage Resource Config New Lustre - When set in a StorageResourceConfig, indicates that a new Managed Lustre instance should be created. Structure is documented below.
- Existing
Bucket ClusterStorage Resource Config Existing Bucket - When set in a StorageResourceConfig, indicates that an existing Google Cloud Storage bucket should be imported. Structure is documented below.
- Existing
Filestore ClusterStorage Resource Config Existing Filestore - When set in a StorageResourceConfig, indicates that an existing Filestore instance should be imported. Structure is documented below.
- Existing
Lustre ClusterStorage Resource Config Existing Lustre - When set in a StorageResourceConfig, indicates that an existing Managed Lustre instance should be imported. Structure is documented below.
- New
Bucket ClusterStorage Resource Config New Bucket - When set in a StorageResourceConfig, indicates that a new Google Cloud Storage bucket should be created. Structure is documented below.
- New
Filestore ClusterStorage Resource Config New Filestore - When set in a StorageResourceConfig, indicates that a new Filestore instance should be created. Structure is documented below.
- New
Lustre ClusterStorage Resource Config New Lustre - When set in a StorageResourceConfig, indicates that a new Managed Lustre instance should be created. Structure is documented below.
- existing
Bucket ClusterStorage Resource Config Existing Bucket - When set in a StorageResourceConfig, indicates that an existing Google Cloud Storage bucket should be imported. Structure is documented below.
- existing
Filestore ClusterStorage Resource Config Existing Filestore - When set in a StorageResourceConfig, indicates that an existing Filestore instance should be imported. Structure is documented below.
- existing
Lustre ClusterStorage Resource Config Existing Lustre - When set in a StorageResourceConfig, indicates that an existing Managed Lustre instance should be imported. Structure is documented below.
- new
Bucket ClusterStorage Resource Config New Bucket - When set in a StorageResourceConfig, indicates that a new Google Cloud Storage bucket should be created. Structure is documented below.
- new
Filestore ClusterStorage Resource Config New Filestore - When set in a StorageResourceConfig, indicates that a new Filestore instance should be created. Structure is documented below.
- new
Lustre ClusterStorage Resource Config New Lustre - When set in a StorageResourceConfig, indicates that a new Managed Lustre instance should be created. Structure is documented below.
- existing
Bucket ClusterStorage Resource Config Existing Bucket - When set in a StorageResourceConfig, indicates that an existing Google Cloud Storage bucket should be imported. Structure is documented below.
- existing
Filestore ClusterStorage Resource Config Existing Filestore - When set in a StorageResourceConfig, indicates that an existing Filestore instance should be imported. Structure is documented below.
- existing
Lustre ClusterStorage Resource Config Existing Lustre - When set in a StorageResourceConfig, indicates that an existing Managed Lustre instance should be imported. Structure is documented below.
- new
Bucket ClusterStorage Resource Config New Bucket - When set in a StorageResourceConfig, indicates that a new Google Cloud Storage bucket should be created. Structure is documented below.
- new
Filestore ClusterStorage Resource Config New Filestore - When set in a StorageResourceConfig, indicates that a new Filestore instance should be created. Structure is documented below.
- new
Lustre ClusterStorage Resource Config New Lustre - When set in a StorageResourceConfig, indicates that a new Managed Lustre instance should be created. Structure is documented below.
- existing_
bucket ClusterStorage Resource Config Existing Bucket - When set in a StorageResourceConfig, indicates that an existing Google Cloud Storage bucket should be imported. Structure is documented below.
- existing_
filestore ClusterStorage Resource Config Existing Filestore - When set in a StorageResourceConfig, indicates that an existing Filestore instance should be imported. Structure is documented below.
- existing_
lustre ClusterStorage Resource Config Existing Lustre - When set in a StorageResourceConfig, indicates that an existing Managed Lustre instance should be imported. Structure is documented below.
- new_
bucket ClusterStorage Resource Config New Bucket - When set in a StorageResourceConfig, indicates that a new Google Cloud Storage bucket should be created. Structure is documented below.
- new_
filestore ClusterStorage Resource Config New Filestore - When set in a StorageResourceConfig, indicates that a new Filestore instance should be created. Structure is documented below.
- new_
lustre ClusterStorage Resource Config New Lustre - When set in a StorageResourceConfig, indicates that a new Managed Lustre instance should be created. Structure is documented below.
- existing
Bucket Property Map - When set in a StorageResourceConfig, indicates that an existing Google Cloud Storage bucket should be imported. Structure is documented below.
- existing
Filestore Property Map - When set in a StorageResourceConfig, indicates that an existing Filestore instance should be imported. Structure is documented below.
- existing
Lustre Property Map - When set in a StorageResourceConfig, indicates that an existing Managed Lustre instance should be imported. Structure is documented below.
- new
Bucket Property Map - When set in a StorageResourceConfig, indicates that a new Google Cloud Storage bucket should be created. Structure is documented below.
- new
Filestore Property Map - When set in a StorageResourceConfig, indicates that a new Filestore instance should be created. Structure is documented below.
- new
Lustre Property Map - When set in a StorageResourceConfig, indicates that a new Managed Lustre instance should be created. Structure is documented below.
ClusterStorageResourceConfigExistingBucket, ClusterStorageResourceConfigExistingBucketArgs
- Bucket string
- Name of the Cloud Storage bucket to import.
- Bucket string
- Name of the Cloud Storage bucket to import.
- bucket String
- Name of the Cloud Storage bucket to import.
- bucket string
- Name of the Cloud Storage bucket to import.
- bucket str
- Name of the Cloud Storage bucket to import.
- bucket String
- Name of the Cloud Storage bucket to import.
ClusterStorageResourceConfigExistingFilestore, ClusterStorageResourceConfigExistingFilestoreArgs
- Filestore string
- Name of the Filestore instance to import, in the format
projects/{project}/locations/{location}/instances/{instance}
- Filestore string
- Name of the Filestore instance to import, in the format
projects/{project}/locations/{location}/instances/{instance}
- filestore String
- Name of the Filestore instance to import, in the format
projects/{project}/locations/{location}/instances/{instance}
- filestore string
- Name of the Filestore instance to import, in the format
projects/{project}/locations/{location}/instances/{instance}
- filestore str
- Name of the Filestore instance to import, in the format
projects/{project}/locations/{location}/instances/{instance}
- filestore String
- Name of the Filestore instance to import, in the format
projects/{project}/locations/{location}/instances/{instance}
ClusterStorageResourceConfigExistingLustre, ClusterStorageResourceConfigExistingLustreArgs
- Lustre string
- Name of the Managed Lustre instance to import, in the format
projects/{project}/locations/{location}/instances/{instance}
- Lustre string
- Name of the Managed Lustre instance to import, in the format
projects/{project}/locations/{location}/instances/{instance}
- lustre String
- Name of the Managed Lustre instance to import, in the format
projects/{project}/locations/{location}/instances/{instance}
- lustre string
- Name of the Managed Lustre instance to import, in the format
projects/{project}/locations/{location}/instances/{instance}
- lustre str
- Name of the Managed Lustre instance to import, in the format
projects/{project}/locations/{location}/instances/{instance}
- lustre String
- Name of the Managed Lustre instance to import, in the format
projects/{project}/locations/{location}/instances/{instance}
ClusterStorageResourceConfigNewBucket, ClusterStorageResourceConfigNewBucketArgs
- Bucket string
- Name of the Cloud Storage bucket to create.
- Autoclass
Cluster
Storage Resource Config New Bucket Autoclass - Message describing Google Cloud Storage autoclass configuration Structure is documented below.
- Hierarchical
Namespace ClusterStorage Resource Config New Bucket Hierarchical Namespace - Message describing Google Cloud Storage hierarchical namespace configuration Structure is documented below.
- Storage
Class string - If set, uses the provided storage class as the bucket's default storage class. Possible values: STANDARD NEARLINE COLDLINE ARCHIVE
- Bucket string
- Name of the Cloud Storage bucket to create.
- Autoclass
Cluster
Storage Resource Config New Bucket Autoclass - Message describing Google Cloud Storage autoclass configuration Structure is documented below.
- Hierarchical
Namespace ClusterStorage Resource Config New Bucket Hierarchical Namespace - Message describing Google Cloud Storage hierarchical namespace configuration Structure is documented below.
- Storage
Class string - If set, uses the provided storage class as the bucket's default storage class. Possible values: STANDARD NEARLINE COLDLINE ARCHIVE
- bucket String
- Name of the Cloud Storage bucket to create.
- autoclass
Cluster
Storage Resource Config New Bucket Autoclass - Message describing Google Cloud Storage autoclass configuration Structure is documented below.
- hierarchical
Namespace ClusterStorage Resource Config New Bucket Hierarchical Namespace - Message describing Google Cloud Storage hierarchical namespace configuration Structure is documented below.
- storage
Class String - If set, uses the provided storage class as the bucket's default storage class. Possible values: STANDARD NEARLINE COLDLINE ARCHIVE
- bucket string
- Name of the Cloud Storage bucket to create.
- autoclass
Cluster
Storage Resource Config New Bucket Autoclass - Message describing Google Cloud Storage autoclass configuration Structure is documented below.
- hierarchical
Namespace ClusterStorage Resource Config New Bucket Hierarchical Namespace - Message describing Google Cloud Storage hierarchical namespace configuration Structure is documented below.
- storage
Class string - If set, uses the provided storage class as the bucket's default storage class. Possible values: STANDARD NEARLINE COLDLINE ARCHIVE
- bucket str
- Name of the Cloud Storage bucket to create.
- autoclass
Cluster
Storage Resource Config New Bucket Autoclass - Message describing Google Cloud Storage autoclass configuration Structure is documented below.
- hierarchical_
namespace ClusterStorage Resource Config New Bucket Hierarchical Namespace - Message describing Google Cloud Storage hierarchical namespace configuration Structure is documented below.
- storage_
class str - If set, uses the provided storage class as the bucket's default storage class. Possible values: STANDARD NEARLINE COLDLINE ARCHIVE
- bucket String
- Name of the Cloud Storage bucket to create.
- autoclass Property Map
- Message describing Google Cloud Storage autoclass configuration Structure is documented below.
- hierarchical
Namespace Property Map - Message describing Google Cloud Storage hierarchical namespace configuration Structure is documented below.
- storage
Class String - If set, uses the provided storage class as the bucket's default storage class. Possible values: STANDARD NEARLINE COLDLINE ARCHIVE
ClusterStorageResourceConfigNewBucketAutoclass, ClusterStorageResourceConfigNewBucketAutoclassArgs
- Enabled bool
- Enables Auto-class feature.
- Enabled bool
- Enables Auto-class feature.
- enabled Boolean
- Enables Auto-class feature.
- enabled boolean
- Enables Auto-class feature.
- enabled bool
- Enables Auto-class feature.
- enabled Boolean
- Enables Auto-class feature.
ClusterStorageResourceConfigNewBucketHierarchicalNamespace, ClusterStorageResourceConfigNewBucketHierarchicalNamespaceArgs
- Enabled bool
- Enables hierarchical namespace setup for the bucket.
- Enabled bool
- Enables hierarchical namespace setup for the bucket.
- enabled Boolean
- Enables hierarchical namespace setup for the bucket.
- enabled boolean
- Enables hierarchical namespace setup for the bucket.
- enabled bool
- Enables hierarchical namespace setup for the bucket.
- enabled Boolean
- Enables hierarchical namespace setup for the bucket.
ClusterStorageResourceConfigNewFilestore, ClusterStorageResourceConfigNewFilestoreArgs
- File
Shares
List<Cluster
Storage Resource Config New Filestore File Share> - File system shares on the instance. Exactly one file share must be specified. Structure is documented below.
- Filestore string
- Name of the Filestore instance to create, in the format
projects/{project}/locations/{location}/instances/{instance} - Tier string
- Service tier to use for the instance.
Possible values:
ZONAL
REGIONAL
Possible values are:
TIER_UNSPECIFIED,ZONAL,REGIONAL. - Description string
- Description of the instance. Maximum of 2048 characters.
- Protocol string
- Access protocol to use for all file shares in the instance. Defaults to NFS
V3 if not set.
Possible values:
NFSV3
NFSV41
Possible values are:
PROTOCOL_UNSPECIFIED,NFSV3,NFSV41.
- File
Shares
[]Cluster
Storage Resource Config New Filestore File Share - File system shares on the instance. Exactly one file share must be specified. Structure is documented below.
- Filestore string
- Name of the Filestore instance to create, in the format
projects/{project}/locations/{location}/instances/{instance} - Tier string
- Service tier to use for the instance.
Possible values:
ZONAL
REGIONAL
Possible values are:
TIER_UNSPECIFIED,ZONAL,REGIONAL. - Description string
- Description of the instance. Maximum of 2048 characters.
- Protocol string
- Access protocol to use for all file shares in the instance. Defaults to NFS
V3 if not set.
Possible values:
NFSV3
NFSV41
Possible values are:
PROTOCOL_UNSPECIFIED,NFSV3,NFSV41.
- file
Shares
List<Cluster
Storage Resource Config New Filestore File Share> - File system shares on the instance. Exactly one file share must be specified. Structure is documented below.
- filestore String
- Name of the Filestore instance to create, in the format
projects/{project}/locations/{location}/instances/{instance} - tier String
- Service tier to use for the instance.
Possible values:
ZONAL
REGIONAL
Possible values are:
TIER_UNSPECIFIED,ZONAL,REGIONAL. - description String
- Description of the instance. Maximum of 2048 characters.
- protocol String
- Access protocol to use for all file shares in the instance. Defaults to NFS
V3 if not set.
Possible values:
NFSV3
NFSV41
Possible values are:
PROTOCOL_UNSPECIFIED,NFSV3,NFSV41.
- file
Shares
Cluster
Storage Resource Config New Filestore File Share[] - File system shares on the instance. Exactly one file share must be specified. Structure is documented below.
- filestore string
- Name of the Filestore instance to create, in the format
projects/{project}/locations/{location}/instances/{instance} - tier string
- Service tier to use for the instance.
Possible values:
ZONAL
REGIONAL
Possible values are:
TIER_UNSPECIFIED,ZONAL,REGIONAL. - description string
- Description of the instance. Maximum of 2048 characters.
- protocol string
- Access protocol to use for all file shares in the instance. Defaults to NFS
V3 if not set.
Possible values:
NFSV3
NFSV41
Possible values are:
PROTOCOL_UNSPECIFIED,NFSV3,NFSV41.
- file_
shares
Sequence[Cluster
Storage Resource Config New Filestore File Share] - File system shares on the instance. Exactly one file share must be specified. Structure is documented below.
- filestore str
- Name of the Filestore instance to create, in the format
projects/{project}/locations/{location}/instances/{instance} - tier str
- Service tier to use for the instance.
Possible values:
ZONAL
REGIONAL
Possible values are:
TIER_UNSPECIFIED,ZONAL,REGIONAL. - description str
- Description of the instance. Maximum of 2048 characters.
- protocol str
- Access protocol to use for all file shares in the instance. Defaults to NFS
V3 if not set.
Possible values:
NFSV3
NFSV41
Possible values are:
PROTOCOL_UNSPECIFIED,NFSV3,NFSV41.
- file
Shares List<Property Map>
- File system shares on the instance. Exactly one file share must be specified. Structure is documented below.
- filestore String
- Name of the Filestore instance to create, in the format
projects/{project}/locations/{location}/instances/{instance} - tier String
- Service tier to use for the instance.
Possible values:
ZONAL
REGIONAL
Possible values are:
TIER_UNSPECIFIED,ZONAL,REGIONAL. - description String
- Description of the instance. Maximum of 2048 characters.
- protocol String
- Access protocol to use for all file shares in the instance. Defaults to NFS
V3 if not set.
Possible values:
NFSV3
NFSV41
Possible values are:
PROTOCOL_UNSPECIFIED,NFSV3,NFSV41.
ClusterStorageResourceConfigNewFilestoreFileShare, ClusterStorageResourceConfigNewFilestoreFileShareArgs
- Capacity
Gb string - Size of the filestore in GB. Must be between 1024 and 102400, and must meet scalability requirements described at https://cloud.google.com/filestore/docs/service-tiers.
- File
Share string
- Filestore share location
- Capacity
Gb string - Size of the filestore in GB. Must be between 1024 and 102400, and must meet scalability requirements described at https://cloud.google.com/filestore/docs/service-tiers.
- File
Share string
- Filestore share location
- capacity
Gb String - Size of the filestore in GB. Must be between 1024 and 102400, and must meet scalability requirements described at https://cloud.google.com/filestore/docs/service-tiers.
- file
Share String
- Filestore share location
- capacity
Gb string - Size of the filestore in GB. Must be between 1024 and 102400, and must meet scalability requirements described at https://cloud.google.com/filestore/docs/service-tiers.
- file
Share string
- Filestore share location
- capacity_
gb str - Size of the filestore in GB. Must be between 1024 and 102400, and must meet scalability requirements described at https://cloud.google.com/filestore/docs/service-tiers.
- file_
share str
- Filestore share location
- capacity
Gb String - Size of the filestore in GB. Must be between 1024 and 102400, and must meet scalability requirements described at https://cloud.google.com/filestore/docs/service-tiers.
- file
Share String
- Filestore share location
ClusterStorageResourceConfigNewLustre, ClusterStorageResourceConfigNewLustreArgs
- Capacity
Gb string - Storage capacity of the instance in gibibytes (GiB). Allowed values are between 18000 and 7632000.
- Filesystem string
- Filesystem name for this instance. This name is used by client-side tools, including when mounting the instance. Must be 8 characters or less and can only contain letters and numbers.
- Lustre string
- (Output)
Name of the Managed Lustre instance, in the format
projects/{project}/locations/{location}/instances/{instance} - Description string
- Description of the Managed Lustre instance. Maximum of 2048 characters.
- Capacity
Gb string - Storage capacity of the instance in gibibytes (GiB). Allowed values are between 18000 and 7632000.
- Filesystem string
- Filesystem name for this instance. This name is used by client-side tools, including when mounting the instance. Must be 8 characters or less and can only contain letters and numbers.
- Lustre string
- (Output)
Name of the Managed Lustre instance, in the format
projects/{project}/locations/{location}/instances/{instance} - Description string
- Description of the Managed Lustre instance. Maximum of 2048 characters.
- capacity
Gb String - Storage capacity of the instance in gibibytes (GiB). Allowed values are between 18000 and 7632000.
- filesystem String
- Filesystem name for this instance. This name is used by client-side tools, including when mounting the instance. Must be 8 characters or less and can only contain letters and numbers.
- lustre String
- (Output)
Name of the Managed Lustre instance, in the format
projects/{project}/locations/{location}/instances/{instance} - description String
- Description of the Managed Lustre instance. Maximum of 2048 characters.
- capacity
Gb string - Storage capacity of the instance in gibibytes (GiB). Allowed values are between 18000 and 7632000.
- filesystem string
- Filesystem name for this instance. This name is used by client-side tools, including when mounting the instance. Must be 8 characters or less and can only contain letters and numbers.
- lustre string
- (Output)
Name of the Managed Lustre instance, in the format
projects/{project}/locations/{location}/instances/{instance} - description string
- Description of the Managed Lustre instance. Maximum of 2048 characters.
- capacity_
gb str - Storage capacity of the instance in gibibytes (GiB). Allowed values are between 18000 and 7632000.
- filesystem str
- Filesystem name for this instance. This name is used by client-side tools, including when mounting the instance. Must be 8 characters or less and can only contain letters and numbers.
- lustre str
- (Output)
Name of the Managed Lustre instance, in the format
projects/{project}/locations/{location}/instances/{instance} - description str
- Description of the Managed Lustre instance. Maximum of 2048 characters.
- capacity
Gb String - Storage capacity of the instance in gibibytes (GiB). Allowed values are between 18000 and 7632000.
- filesystem String
- Filesystem name for this instance. This name is used by client-side tools, including when mounting the instance. Must be 8 characters or less and can only contain letters and numbers.
- lustre String
- (Output)
Name of the Managed Lustre instance, in the format
projects/{project}/locations/{location}/instances/{instance}
- description String
- Description of the Managed Lustre instance. Maximum of 2048 characters.
ClusterStorageResourceFilestore, ClusterStorageResourceFilestoreArgs
- Filestore string
- Name of the Filestore instance, in the format 'projects/{project}/locations/{location}/instances/{instance}'
- Filestore string
- Name of the Filestore instance, in the format 'projects/{project}/locations/{location}/instances/{instance}'
- filestore String
- Name of the Filestore instance, in the format 'projects/{project}/locations/{location}/instances/{instance}'
- filestore string
- Name of the Filestore instance, in the format 'projects/{project}/locations/{location}/instances/{instance}'
- filestore str
- Name of the Filestore instance, in the format 'projects/{project}/locations/{location}/instances/{instance}'
- filestore String
- Name of the Filestore instance, in the format 'projects/{project}/locations/{location}/instances/{instance}'
ClusterStorageResourceLustre, ClusterStorageResourceLustreArgs
- Lustre string
- Name of the Managed Lustre instance, in the format 'projects/{project}/locations/{location}/instances/{instance}'
- Lustre string
- Name of the Managed Lustre instance, in the format 'projects/{project}/locations/{location}/instances/{instance}'
- lustre String
- Name of the Managed Lustre instance, in the format 'projects/{project}/locations/{location}/instances/{instance}'
- lustre string
- Name of the Managed Lustre instance, in the format 'projects/{project}/locations/{location}/instances/{instance}'
- lustre str
- Name of the Managed Lustre instance, in the format 'projects/{project}/locations/{location}/instances/{instance}'
- lustre String
- Name of the Managed Lustre instance, in the format 'projects/{project}/locations/{location}/instances/{instance}'
Import
Cluster can be imported using any of these accepted formats:
projects/{{project}}/locations/{{location}}/clusters/{{cluster_id}}
{{project}}/{{location}}/{{cluster_id}}
{{location}}/{{cluster_id}}
When using the pulumi import command, Cluster can be imported using one of the formats above. For example:
$ pulumi import gcp:hypercomputecluster/cluster:Cluster default projects/{{project}}/locations/{{location}}/clusters/{{cluster_id}}
$ pulumi import gcp:hypercomputecluster/cluster:Cluster default {{project}}/{{location}}/{{cluster_id}}
$ pulumi import gcp:hypercomputecluster/cluster:Cluster default {{location}}/{{cluster_id}}
To learn more about importing existing cloud resources, see Importing resources.
Package Details
- Repository
- Google Cloud (GCP) Classic pulumi/pulumi-gcp
- License
- Apache-2.0
- Notes
- This Pulumi package is based on the
google-beta Terraform Provider.
published on Thursday, Mar 12, 2026 by Pulumi
