Google Cloud (GCP) Classic
Cluster
Manages a Cloud Dataproc cluster resource within GCP.
- API documentation
- How-to Guides
!> Warning: Due to limitations of the API, all arguments except labels, cluster_config.worker_config.num_instances, and cluster_config.preemptible_worker_config.num_instances are non-updatable. Changing any other argument will cause recreation of the whole cluster!
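For example, here is a minimal TypeScript sketch (the resource name and values are illustrative, not from this page) of which fields can change between updates without replacing the cluster:
import * as gcp from "@pulumi/gcp";
// After the first deployment, editing only `labels` or the two numInstances
// fields below and re-running `pulumi up` updates the cluster in place;
// changing any other argument (e.g. machineType) replaces the whole cluster.
const cluster = new gcp.dataproc.Cluster("update-demo", {
    region: "us-central1",
    labels: {
        env: "dev", // updatable in place
    },
    clusterConfig: {
        workerConfig: {
            numInstances: 2, // updatable in place
            machineType: "e2-medium", // non-updatable: forces recreation
        },
        preemptibleWorkerConfig: {
            numInstances: 0, // updatable in place
        },
    },
});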
Example Usage
Basic
using Pulumi;
using Gcp = Pulumi.Gcp;
class MyStack : Stack
{
public MyStack()
{
var simplecluster = new Gcp.Dataproc.Cluster("simplecluster", new Gcp.Dataproc.ClusterArgs
{
Region = "us-central1",
});
}
}
package main
import (
"github.com/pulumi/pulumi-gcp/sdk/v6/go/gcp/dataproc"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := dataproc.NewCluster(ctx, "simplecluster", &dataproc.ClusterArgs{
Region: pulumi.String("us-central1"),
})
if err != nil {
return err
}
return nil
})
}
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.gcp.dataproc.Cluster;
import com.pulumi.gcp.dataproc.ClusterArgs;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var simplecluster = new Cluster("simplecluster", ClusterArgs.builder()
.region("us-central1")
.build());
}
}
import pulumi
import pulumi_gcp as gcp
simplecluster = gcp.dataproc.Cluster("simplecluster", region="us-central1")
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const simplecluster = new gcp.dataproc.Cluster("simplecluster", {
region: "us-central1",
});
resources:
simplecluster:
type: gcp:dataproc:Cluster
properties:
region: us-central1
Advanced
using Pulumi;
using Gcp = Pulumi.Gcp;
class MyStack : Stack
{
public MyStack()
{
var @default = new Gcp.ServiceAccount.Account("default", new Gcp.ServiceAccount.AccountArgs
{
AccountId = "service-account-id",
DisplayName = "Service Account",
});
var mycluster = new Gcp.Dataproc.Cluster("mycluster", new Gcp.Dataproc.ClusterArgs
{
Region = "us-central1",
GracefulDecommissionTimeout = "120s",
Labels =
{
{ "foo", "bar" },
},
ClusterConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigArgs
{
StagingBucket = "dataproc-staging-bucket",
MasterConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigMasterConfigArgs
{
NumInstances = 1,
MachineType = "e2-medium",
DiskConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigMasterConfigDiskConfigArgs
{
BootDiskType = "pd-ssd",
BootDiskSizeGb = 30,
},
},
WorkerConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigWorkerConfigArgs
{
NumInstances = 2,
MachineType = "e2-medium",
MinCpuPlatform = "Intel Skylake",
DiskConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigWorkerConfigDiskConfigArgs
{
BootDiskSizeGb = 30,
NumLocalSsds = 1,
},
},
PreemptibleWorkerConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigPreemptibleWorkerConfigArgs
{
NumInstances = 0,
},
SoftwareConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigSoftwareConfigArgs
{
ImageVersion = "2.0.35-debian10",
OverrideProperties =
{
{ "dataproc:dataproc.allow.zero.workers", "true" },
},
},
GceClusterConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigGceClusterConfigArgs
{
Tags =
{
"foo",
"bar",
},
ServiceAccount = @default.Email,
ServiceAccountScopes =
{
"cloud-platform",
},
},
InitializationActions =
{
new Gcp.Dataproc.Inputs.ClusterClusterConfigInitializationActionArgs
{
Script = "gs://dataproc-initialization-actions/stackdriver/stackdriver.sh",
TimeoutSec = 500,
},
},
},
});
}
}
package main
import (
"github.com/pulumi/pulumi-gcp/sdk/v6/go/gcp/dataproc"
"github.com/pulumi/pulumi-gcp/sdk/v6/go/gcp/serviceAccount"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_default, err := serviceAccount.NewAccount(ctx, "default", &serviceAccount.AccountArgs{
AccountId: pulumi.String("service-account-id"),
DisplayName: pulumi.String("Service Account"),
})
if err != nil {
return err
}
_, err = dataproc.NewCluster(ctx, "mycluster", &dataproc.ClusterArgs{
Region: pulumi.String("us-central1"),
GracefulDecommissionTimeout: pulumi.String("120s"),
Labels: pulumi.StringMap{
"foo": pulumi.String("bar"),
},
ClusterConfig: &dataproc.ClusterClusterConfigArgs{
StagingBucket: pulumi.String("dataproc-staging-bucket"),
MasterConfig: &dataproc.ClusterClusterConfigMasterConfigArgs{
NumInstances: pulumi.Int(1),
MachineType: pulumi.String("e2-medium"),
DiskConfig: &dataproc.ClusterClusterConfigMasterConfigDiskConfigArgs{
BootDiskType: pulumi.String("pd-ssd"),
BootDiskSizeGb: pulumi.Int(30),
},
},
WorkerConfig: &dataproc.ClusterClusterConfigWorkerConfigArgs{
NumInstances: pulumi.Int(2),
MachineType: pulumi.String("e2-medium"),
MinCpuPlatform: pulumi.String("Intel Skylake"),
DiskConfig: &dataproc.ClusterClusterConfigWorkerConfigDiskConfigArgs{
BootDiskSizeGb: pulumi.Int(30),
NumLocalSsds: pulumi.Int(1),
},
},
PreemptibleWorkerConfig: &dataproc.ClusterClusterConfigPreemptibleWorkerConfigArgs{
NumInstances: pulumi.Int(0),
},
SoftwareConfig: &dataproc.ClusterClusterConfigSoftwareConfigArgs{
ImageVersion: pulumi.String("2.0.35-debian10"),
OverrideProperties: pulumi.StringMap{
"dataproc:dataproc.allow.zero.workers": pulumi.String("true"),
},
},
GceClusterConfig: &dataproc.ClusterClusterConfigGceClusterConfigArgs{
Tags: pulumi.StringArray{
pulumi.String("foo"),
pulumi.String("bar"),
},
ServiceAccount: _default.Email,
ServiceAccountScopes: pulumi.StringArray{
pulumi.String("cloud-platform"),
},
},
InitializationActions: dataproc.ClusterClusterConfigInitializationActionArray{
&dataproc.ClusterClusterConfigInitializationActionArgs{
Script: pulumi.String("gs://dataproc-initialization-actions/stackdriver/stackdriver.sh"),
TimeoutSec: pulumi.Int(500),
},
},
},
})
if err != nil {
return err
}
return nil
})
}
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.gcp.serviceaccount.Account;
import com.pulumi.gcp.serviceaccount.AccountArgs;
import com.pulumi.gcp.dataproc.Cluster;
import com.pulumi.gcp.dataproc.ClusterArgs;
import com.pulumi.gcp.dataproc.inputs.ClusterClusterConfigArgs;
import com.pulumi.gcp.dataproc.inputs.ClusterClusterConfigMasterConfigArgs;
import com.pulumi.gcp.dataproc.inputs.ClusterClusterConfigMasterConfigDiskConfigArgs;
import com.pulumi.gcp.dataproc.inputs.ClusterClusterConfigWorkerConfigArgs;
import com.pulumi.gcp.dataproc.inputs.ClusterClusterConfigWorkerConfigDiskConfigArgs;
import com.pulumi.gcp.dataproc.inputs.ClusterClusterConfigPreemptibleWorkerConfigArgs;
import com.pulumi.gcp.dataproc.inputs.ClusterClusterConfigSoftwareConfigArgs;
import com.pulumi.gcp.dataproc.inputs.ClusterClusterConfigGceClusterConfigArgs;
import com.pulumi.gcp.dataproc.inputs.ClusterClusterConfigInitializationActionArgs;
import java.util.Map;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var default_ = new Account("default", AccountArgs.builder()
.accountId("service-account-id")
.displayName("Service Account")
.build());
var mycluster = new Cluster("mycluster", ClusterArgs.builder()
.region("us-central1")
.gracefulDecommissionTimeout("120s")
.labels(Map.of("foo", "bar"))
.clusterConfig(ClusterClusterConfigArgs.builder()
.stagingBucket("dataproc-staging-bucket")
.masterConfig(ClusterClusterConfigMasterConfigArgs.builder()
.numInstances(1)
.machineType("e2-medium")
.diskConfig(ClusterClusterConfigMasterConfigDiskConfigArgs.builder()
.bootDiskType("pd-ssd")
.bootDiskSizeGb(30)
.build())
.build())
.workerConfig(ClusterClusterConfigWorkerConfigArgs.builder()
.numInstances(2)
.machineType("e2-medium")
.minCpuPlatform("Intel Skylake")
.diskConfig(ClusterClusterConfigWorkerConfigDiskConfigArgs.builder()
.bootDiskSizeGb(30)
.numLocalSsds(1)
.build())
.build())
.preemptibleWorkerConfig(ClusterClusterConfigPreemptibleWorkerConfigArgs.builder()
.numInstances(0)
.build())
.softwareConfig(ClusterClusterConfigSoftwareConfigArgs.builder()
.imageVersion("2.0.35-debian10")
.overrideProperties(Map.of("dataproc:dataproc.allow.zero.workers", "true"))
.build())
.gceClusterConfig(ClusterClusterConfigGceClusterConfigArgs.builder()
.tags(
"foo",
"bar")
.serviceAccount(default_.email())
.serviceAccountScopes("cloud-platform")
.build())
.initializationActions(ClusterClusterConfigInitializationActionArgs.builder()
.script("gs://dataproc-initialization-actions/stackdriver/stackdriver.sh")
.timeoutSec(500)
.build())
.build())
.build());
}
}
import pulumi
import pulumi_gcp as gcp
default = gcp.service_account.Account("default",
account_id="service-account-id",
display_name="Service Account")
mycluster = gcp.dataproc.Cluster("mycluster",
region="us-central1",
graceful_decommission_timeout="120s",
labels={
"foo": "bar",
},
cluster_config=gcp.dataproc.ClusterClusterConfigArgs(
staging_bucket="dataproc-staging-bucket",
master_config=gcp.dataproc.ClusterClusterConfigMasterConfigArgs(
num_instances=1,
machine_type="e2-medium",
disk_config=gcp.dataproc.ClusterClusterConfigMasterConfigDiskConfigArgs(
boot_disk_type="pd-ssd",
boot_disk_size_gb=30,
),
),
worker_config=gcp.dataproc.ClusterClusterConfigWorkerConfigArgs(
num_instances=2,
machine_type="e2-medium",
min_cpu_platform="Intel Skylake",
disk_config=gcp.dataproc.ClusterClusterConfigWorkerConfigDiskConfigArgs(
boot_disk_size_gb=30,
num_local_ssds=1,
),
),
preemptible_worker_config=gcp.dataproc.ClusterClusterConfigPreemptibleWorkerConfigArgs(
num_instances=0,
),
software_config=gcp.dataproc.ClusterClusterConfigSoftwareConfigArgs(
image_version="2.0.35-debian10",
override_properties={
"dataproc:dataproc.allow.zero.workers": "true",
},
),
gce_cluster_config=gcp.dataproc.ClusterClusterConfigGceClusterConfigArgs(
tags=[
"foo",
"bar",
],
service_account=default.email,
service_account_scopes=["cloud-platform"],
),
initialization_actions=[gcp.dataproc.ClusterClusterConfigInitializationActionArgs(
script="gs://dataproc-initialization-actions/stackdriver/stackdriver.sh",
timeout_sec=500,
)],
))
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const _default = new gcp.serviceaccount.Account("default", {
accountId: "service-account-id",
displayName: "Service Account",
});
const mycluster = new gcp.dataproc.Cluster("mycluster", {
region: "us-central1",
gracefulDecommissionTimeout: "120s",
labels: {
foo: "bar",
},
clusterConfig: {
stagingBucket: "dataproc-staging-bucket",
masterConfig: {
numInstances: 1,
machineType: "e2-medium",
diskConfig: {
bootDiskType: "pd-ssd",
bootDiskSizeGb: 30,
},
},
workerConfig: {
numInstances: 2,
machineType: "e2-medium",
minCpuPlatform: "Intel Skylake",
diskConfig: {
bootDiskSizeGb: 30,
numLocalSsds: 1,
},
},
preemptibleWorkerConfig: {
numInstances: 0,
},
softwareConfig: {
imageVersion: "2.0.35-debian10",
overrideProperties: {
"dataproc:dataproc.allow.zero.workers": "true",
},
},
gceClusterConfig: {
tags: [
"foo",
"bar",
],
serviceAccount: _default.email,
serviceAccountScopes: ["cloud-platform"],
},
initializationActions: [{
script: "gs://dataproc-initialization-actions/stackdriver/stackdriver.sh",
timeoutSec: 500,
}],
},
});
resources:
default:
type: gcp:serviceAccount:Account
properties:
accountId: service-account-id
displayName: Service Account
mycluster:
type: gcp:dataproc:Cluster
properties:
region: us-central1
gracefulDecommissionTimeout: 120s
labels:
foo: bar
clusterConfig:
stagingBucket: dataproc-staging-bucket
masterConfig:
numInstances: 1
machineType: e2-medium
diskConfig:
bootDiskType: pd-ssd
bootDiskSizeGb: 30
workerConfig:
numInstances: 2
machineType: e2-medium
minCpuPlatform: Intel Skylake
diskConfig:
bootDiskSizeGb: 30
numLocalSsds: 1
preemptibleWorkerConfig:
numInstances: 0
softwareConfig:
imageVersion: 2.0.35-debian10
overrideProperties:
dataproc:dataproc.allow.zero.workers: true
gceClusterConfig:
tags:
- foo
- bar
serviceAccount: ${default.email}
serviceAccountScopes:
- cloud-platform
initializationActions:
- script: gs://dataproc-initialization-actions/stackdriver/stackdriver.sh
timeoutSec: 500
Using A GPU Accelerator
using Pulumi;
using Gcp = Pulumi.Gcp;
class MyStack : Stack
{
public MyStack()
{
var acceleratedCluster = new Gcp.Dataproc.Cluster("acceleratedCluster", new Gcp.Dataproc.ClusterArgs
{
ClusterConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigArgs
{
GceClusterConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigGceClusterConfigArgs
{
Zone = "us-central1-a",
},
MasterConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigMasterConfigArgs
{
Accelerators =
{
new Gcp.Dataproc.Inputs.ClusterClusterConfigMasterConfigAcceleratorArgs
{
AcceleratorCount = 1,
AcceleratorType = "nvidia-tesla-k80",
},
},
},
},
Region = "us-central1",
});
}
}
package main
import (
"github.com/pulumi/pulumi-gcp/sdk/v6/go/gcp/dataproc"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := dataproc.NewCluster(ctx, "acceleratedCluster", &dataproc.ClusterArgs{
ClusterConfig: &dataproc.ClusterClusterConfigArgs{
GceClusterConfig: &dataproc.ClusterClusterConfigGceClusterConfigArgs{
Zone: pulumi.String("us-central1-a"),
},
MasterConfig: &dataproc.ClusterClusterConfigMasterConfigArgs{
Accelerators: dataproc.ClusterClusterConfigMasterConfigAcceleratorArray{
&dataproc.ClusterClusterConfigMasterConfigAcceleratorArgs{
AcceleratorCount: pulumi.Int(1),
AcceleratorType: pulumi.String("nvidia-tesla-k80"),
},
},
},
},
Region: pulumi.String("us-central1"),
})
if err != nil {
return err
}
return nil
})
}
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.gcp.dataproc.Cluster;
import com.pulumi.gcp.dataproc.ClusterArgs;
import com.pulumi.gcp.dataproc.inputs.ClusterClusterConfigArgs;
import com.pulumi.gcp.dataproc.inputs.ClusterClusterConfigGceClusterConfigArgs;
import com.pulumi.gcp.dataproc.inputs.ClusterClusterConfigMasterConfigArgs;
import com.pulumi.gcp.dataproc.inputs.ClusterClusterConfigMasterConfigAcceleratorArgs;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var acceleratedCluster = new Cluster("acceleratedCluster", ClusterArgs.builder()
.clusterConfig(ClusterClusterConfigArgs.builder()
.gceClusterConfig(ClusterClusterConfigGceClusterConfigArgs.builder()
.zone("us-central1-a")
.build())
.masterConfig(ClusterClusterConfigMasterConfigArgs.builder()
.accelerators(ClusterClusterConfigMasterConfigAcceleratorArgs.builder()
.acceleratorCount(1)
.acceleratorType("nvidia-tesla-k80")
.build())
.build())
.build())
.region("us-central1")
.build());
}
}
import pulumi
import pulumi_gcp as gcp
accelerated_cluster = gcp.dataproc.Cluster("acceleratedCluster",
cluster_config=gcp.dataproc.ClusterClusterConfigArgs(
gce_cluster_config=gcp.dataproc.ClusterClusterConfigGceClusterConfigArgs(
zone="us-central1-a",
),
master_config=gcp.dataproc.ClusterClusterConfigMasterConfigArgs(
accelerators=[gcp.dataproc.ClusterClusterConfigMasterConfigAcceleratorArgs(
accelerator_count=1,
accelerator_type="nvidia-tesla-k80",
)],
),
),
region="us-central1")
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const acceleratedCluster = new gcp.dataproc.Cluster("acceleratedCluster", {
clusterConfig: {
gceClusterConfig: {
zone: "us-central1-a",
},
masterConfig: {
accelerators: [{
acceleratorCount: 1,
acceleratorType: "nvidia-tesla-k80",
}],
},
},
region: "us-central1",
});
resources:
acceleratedCluster:
type: gcp:dataproc:Cluster
properties:
clusterConfig:
gceClusterConfig:
zone: us-central1-a
masterConfig:
accelerators:
- acceleratorCount: 1
acceleratorType: nvidia-tesla-k80
region: us-central1
Create a Cluster Resource
new Cluster(name: string, args?: ClusterArgs, opts?: CustomResourceOptions);
@overload
def Cluster(resource_name: str,
opts: Optional[ResourceOptions] = None,
cluster_config: Optional[ClusterClusterConfigArgs] = None,
graceful_decommission_timeout: Optional[str] = None,
labels: Optional[Mapping[str, str]] = None,
name: Optional[str] = None,
project: Optional[str] = None,
region: Optional[str] = None)
@overload
def Cluster(resource_name: str,
args: Optional[ClusterArgs] = None,
opts: Optional[ResourceOptions] = None)
func NewCluster(ctx *Context, name string, args *ClusterArgs, opts ...ResourceOption) (*Cluster, error)
public Cluster(string name, ClusterArgs? args = null, CustomResourceOptions? opts = null)
public Cluster(String name, ClusterArgs args)
public Cluster(String name, ClusterArgs args, CustomResourceOptions options)
type: gcp:dataproc:Cluster
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args ClusterArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args ClusterArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args ClusterArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args ClusterArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
Cluster Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
The Cluster resource accepts the following input properties:
- ClusterConfig ClusterClusterConfigArgs
  Allows you to configure various aspects of the cluster. Structure defined below.
- GracefulDecommissionTimeout string
  The timeout duration which allows graceful decommissioning when you change the number of worker nodes directly through a pulumi up.
- Labels Dictionary<string, string>
  The list of labels (key/value pairs) to be applied to instances in the cluster. GCP generates some itself, including goog-dataproc-cluster-name, which is the name of the cluster.
- Name string
  The name of the cluster, unique within the project and zone.
- Project string
  The ID of the project in which the cluster will exist. If it is not provided, the provider project is used.
- Region string
  The region in which the cluster and associated nodes will be created. Defaults to global.
Outputs
All input properties are implicitly available as output properties. Additionally, the Cluster resource produces the following output properties:
- Id string
The provider-assigned unique ID for this managed resource.
Look up an Existing Cluster Resource
Get an existing Cluster resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: ClusterState, opts?: CustomResourceOptions): Cluster
@staticmethod
def get(resource_name: str,
id: str,
opts: Optional[ResourceOptions] = None,
cluster_config: Optional[ClusterClusterConfigArgs] = None,
graceful_decommission_timeout: Optional[str] = None,
labels: Optional[Mapping[str, str]] = None,
name: Optional[str] = None,
project: Optional[str] = None,
region: Optional[str] = None) -> Cluster
func GetCluster(ctx *Context, name string, id IDInput, state *ClusterState, opts ...ResourceOption) (*Cluster, error)
public static Cluster Get(string name, Input<string> id, ClusterState? state, CustomResourceOptions? opts = null)
public static Cluster get(String name, Output<String> id, ClusterState state, CustomResourceOptions options)
Resource lookup is not supported in YAML
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
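For example, a minimal TypeScript sketch of adopting an existing cluster's state (the project, region, and cluster names are placeholders):
import * as gcp from "@pulumi/gcp";
// Look up a cluster that already exists by its provider ID.
const existing = gcp.dataproc.Cluster.get("imported-cluster",
    "projects/my-project/regions/us-central1/clusters/mycluster");
// State properties are then available as outputs, e.g. the cluster's bucket.
export const bucket = existing.clusterConfig.apply(c => c.bucket);
The following state properties are supported: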
- ClusterConfig ClusterClusterConfigArgs
  Allows you to configure various aspects of the cluster. Structure defined below.
- GracefulDecommissionTimeout string
  The timeout duration which allows graceful decommissioning when you change the number of worker nodes directly through a pulumi up.
- Labels Dictionary<string, string>
  The list of labels (key/value pairs) to be applied to instances in the cluster. GCP generates some itself, including goog-dataproc-cluster-name, which is the name of the cluster.
- Name string
  The name of the cluster, unique within the project and zone.
- Project string
  The ID of the project in which the cluster will exist. If it is not provided, the provider project is used.
- Region string
  The region in which the cluster and associated nodes will be created. Defaults to global.
Supporting Types
ClusterClusterConfig
- AutoscalingConfig ClusterClusterConfigAutoscalingConfig
  The autoscaling policy config associated with the cluster. Note that once set, if autoscaling_config is the only field set in cluster_config, it can only be removed by setting policy_uri = "", rather than removing the whole block. Structure defined below.
- Bucket string
- EncryptionConfig ClusterClusterConfigEncryptionConfig
  The customer-managed encryption key settings for the cluster. Structure defined below.
- EndpointConfig ClusterClusterConfigEndpointConfig
  The config settings for port access on the cluster. Structure defined below.
- GceClusterConfig ClusterClusterConfigGceClusterConfig
  Common config settings for resources of Google Compute Engine cluster instances, applicable to all instances in the cluster. Structure defined below.
- InitializationActions List<ClusterClusterConfigInitializationAction>
  Commands to execute on each node after config is completed. You can specify multiple versions of these. Structure defined below.
- LifecycleConfig ClusterClusterConfigLifecycleConfig
  The settings for the cluster auto-deletion schedule. Structure defined below.
- MasterConfig ClusterClusterConfigMasterConfig
  The Google Compute Engine config settings for the master instances in a cluster. Structure defined below.
- MetastoreConfig ClusterClusterConfigMetastoreConfig
  The config settings for the metastore service used with the cluster. Structure defined below.
- PreemptibleWorkerConfig ClusterClusterConfigPreemptibleWorkerConfig
  The Google Compute Engine config settings for the additional instances in a cluster. Structure defined below. NOTE: preemptible_worker_config is an alias for the API's secondaryWorkerConfig. The name doesn't necessarily mean these instances are preemptible; it is kept for legacy/compatibility reasons.
- SecurityConfig ClusterClusterConfigSecurityConfig
  Security-related configuration. Structure defined below.
- SoftwareConfig ClusterClusterConfigSoftwareConfig
  The config settings for software inside the cluster. Structure defined below.
- StagingBucket string
  The Cloud Storage staging bucket used to stage files, such as Hadoop jars, between client machines and the cluster. Note: If you don't explicitly specify a staging_bucket, GCP will auto-create/assign one for you. However, you are not guaranteed an auto-generated bucket solely dedicated to your cluster; it may be shared with other clusters in the same region/zone that also use the auto-generation option.
- TempBucket string
  The Cloud Storage temp bucket used to store ephemeral cluster and job data, such as Spark and MapReduce history files. Note: If you don't explicitly specify a temp_bucket, GCP will auto-create/assign one for you.
- WorkerConfig ClusterClusterConfigWorkerConfig
  The Google Compute Engine config settings for the worker instances in a cluster. Structure defined below.
ClusterClusterConfigAutoscalingConfig
- PolicyUri string
  The autoscaling policy used by the cluster.
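As an illustration (the policy name and scaling values below are assumptions, not from this page), the policy is typically created with gcp.dataproc.AutoscalingPolicy and referenced via its name:
import * as gcp from "@pulumi/gcp";
// A minimal sketch: create an autoscaling policy and attach it to a cluster.
const asp = new gcp.dataproc.AutoscalingPolicy("asp", {
    policyId: "dataproc-policy",
    location: "us-central1",
    workerConfig: {
        maxInstances: 3,
    },
    basicAlgorithm: {
        yarnConfig: {
            gracefulDecommissionTimeout: "30s",
            scaleUpFactor: 0.5,
            scaleDownFactor: 0.5,
        },
    },
});
const autoscaledCluster = new gcp.dataproc.Cluster("autoscaled", {
    region: "us-central1",
    clusterConfig: {
        // Per the note above, remove autoscaling later by setting
        // policyUri to "" rather than deleting this block.
        autoscalingConfig: {
            policyUri: asp.name,
        },
    },
});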
ClusterClusterConfigEncryptionConfig
- KmsKeyName string
  The Cloud KMS key name to use for PD disk encryption for all instances in the cluster.
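For illustration, a minimal sketch (the key path is a placeholder, and granting the Dataproc service agent access to the key is not shown):
import * as gcp from "@pulumi/gcp";
const encryptedCluster = new gcp.dataproc.Cluster("encrypted", {
    region: "us-central1",
    clusterConfig: {
        encryptionConfig: {
            // Full resource name of an existing Cloud KMS key (placeholder).
            kmsKeyName: "projects/my-project/locations/us-central1/keyRings/my-ring/cryptoKeys/my-key",
        },
    },
});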
ClusterClusterConfigEndpointConfig
- EnableHttpPortAccess bool
  The flag to enable HTTP access to specific ports on the cluster from external sources (aka Component Gateway). Defaults to false.
- HttpPorts Dictionary<string, object>
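A minimal TypeScript sketch of enabling the Component Gateway (resource name assumed):
import * as gcp from "@pulumi/gcp";
const gatewayCluster = new gcp.dataproc.Cluster("gateway-cluster", {
    region: "us-central1",
    clusterConfig: {
        endpointConfig: {
            enableHttpPortAccess: true, // defaults to false
        },
    },
});
// When enabled, httpPorts is populated with gateway URLs for the
// cluster's web UIs (e.g. the YARN ResourceManager).
export const uiPorts = gatewayCluster.clusterConfig.apply(c => c.endpointConfig?.httpPorts);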
ClusterClusterConfigGceClusterConfig
- InternalIpOnly bool
  By default, clusters are not restricted to internal IP addresses and will have ephemeral external IP addresses assigned to each instance. If set to true, all instances in the cluster will only have internal IP addresses. Note: Private Google Access (also known as privateIpGoogleAccess) must be enabled on the subnetwork that the cluster will be launched in.
- Metadata Dictionary<string, string>
  A map of the Compute Engine metadata entries to add to all instances (see Project and instance metadata).
- Network string
  The name or self_link of the Google Compute Engine network the cluster will be part of. Conflicts with subnetwork. If neither is specified, this defaults to the "default" network.
- ServiceAccount string
  The service account to be used by the node VMs. If not specified, the "default" service account is used.
- ServiceAccountScopes List<string>
  The set of Google API scopes to be made available on all of the node VMs under the service_account specified. Both OAuth2 URLs and gcloud short names are supported. To allow full access to all Cloud APIs, use the cloud-platform scope. See a complete list of scopes here.
- ShieldedInstanceConfig ClusterClusterConfigGceClusterConfigShieldedInstanceConfig
  Shielded Instance Config for clusters using Compute Engine Shielded VMs.
- Subnetwork string
  The name or self_link of the Google Compute Engine subnetwork the cluster will be part of. Conflicts with network.
- Tags List<string>
  The list of instance tags applied to instances in the cluster. Tags are used to identify valid sources or targets for network firewalls.
- Zone string
  The GCP zone where your data is stored and used (i.e. where the master and the worker nodes will be created). If region is set to global (default), then zone is mandatory; otherwise GCP is able to use Auto Zone Placement to determine this automatically for you. Note: This setting additionally determines and restricts which computing resources are available for use with other configs such as cluster_config.master_config.machine_type and cluster_config.worker_config.machine_type.
- Internal
Ip boolOnly By default, clusters are not restricted to internal IP addresses, and will have ephemeral external IP addresses assigned to each instance. If set to true, all instances in the cluster will only have internal IP addresses. Note: Private Google Access (also known as
privateIpGoogleAccess
) must be enabled on the subnetwork that the cluster will be launched in.- Metadata map[string]string
A map of the Compute Engine metadata entries to add to all instances (see Project and instance metadata).
- Network string
The name or self_link of the Google Compute Engine network to the cluster will be part of. Conflicts with
subnetwork
. If neither is specified, this defaults to the "default" network.- Service
Account string The service account to be used by the Node VMs. If not specified, the "default" service account is used.
- Service
Account []stringScopes The set of Google API scopes to be made available on all of the node VMs under the
service_account
specified. Both OAuth2 URLs and gcloud short names are supported. To allow full access to all Cloud APIs, use thecloud-platform
scope. See a complete list of scopes here.- Shielded
Instance ClusterConfig Cluster Config Gce Cluster Config Shielded Instance Config Shielded Instance Config for clusters using Compute Engine Shielded VMs.
- Subnetwork string
The name or self_link of the Google Compute Engine subnetwork the cluster will be part of. Conflicts with
network
.- []string
The list of instance tags applied to instances in the cluster. Tags are used to identify valid sources or targets for network firewalls.
- Zone string
The GCP zone where your data is stored and used (i.e. where the master and the worker nodes will be created in). If
region
is set to 'global' (default) thenzone
is mandatory, otherwise GCP is able to make use of Auto Zone Placement to determine this automatically for you. Note: This setting additionally determines and restricts which computing resources are available for use with other configs such ascluster_config.master_config.machine_type
andcluster_config.worker_config.machine_type
.
- internal
Ip BooleanOnly By default, clusters are not restricted to internal IP addresses, and will have ephemeral external IP addresses assigned to each instance. If set to true, all instances in the cluster will only have internal IP addresses. Note: Private Google Access (also known as
privateIpGoogleAccess
) must be enabled on the subnetwork that the cluster will be launched in.- metadata Map<String,String>
A map of the Compute Engine metadata entries to add to all instances (see Project and instance metadata).
- network String
The name or self_link of the Google Compute Engine network to the cluster will be part of. Conflicts with
subnetwork
. If neither is specified, this defaults to the "default" network.- service
Account String The service account to be used by the Node VMs. If not specified, the "default" service account is used.
- service
Account List<String>Scopes The set of Google API scopes to be made available on all of the node VMs under the
service_account
specified. Both OAuth2 URLs and gcloud short names are supported. To allow full access to all Cloud APIs, use thecloud-platform
scope. See a complete list of scopes here.- shielded
Instance ClusterConfig Cluster Config Gce Cluster Config Shielded Instance Config Shielded Instance Config for clusters using Compute Engine Shielded VMs.
- subnetwork String
The name or self_link of the Google Compute Engine subnetwork the cluster will be part of. Conflicts with
network
.- List<String>
The list of instance tags applied to instances in the cluster. Tags are used to identify valid sources or targets for network firewalls.
- zone String
The GCP zone where your data is stored and used (i.e. where the master and the worker nodes will be created in). If
region
is set to 'global' (default) thenzone
is mandatory, otherwise GCP is able to make use of Auto Zone Placement to determine this automatically for you. Note: This setting additionally determines and restricts which computing resources are available for use with other configs such ascluster_config.master_config.machine_type
andcluster_config.worker_config.machine_type
.
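
To ground the fields above, here is a minimal Python sketch (in the style of the examples at the top of this page) that sets several gce_cluster_config options. The subnetwork, service account, and zone values are placeholders, and the ...Args class names assume the pulumi_gcp Python SDK's generated input types for the types documented here.

import pulumi_gcp as gcp

cluster = gcp.dataproc.Cluster("gce-configured",
    region="us-central1",
    cluster_config=gcp.dataproc.ClusterClusterConfigArgs(
        gce_cluster_config=gcp.dataproc.ClusterClusterConfigGceClusterConfigArgs(
            # Requires Private Google Access on the chosen subnetwork.
            internal_ip_only=True,
            # `subnetwork` conflicts with `network`; placeholder name.
            subnetwork="my-subnetwork",
            service_account="my-sa@my-project.iam.gserviceaccount.com",
            service_account_scopes=["cloud-platform"],
            tags=["foo", "bar"],
            # Mandatory when `region` is "global"; otherwise optional.
            zone="us-central1-a",
        ),
    ))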
ClusterClusterConfigGceClusterConfigShieldedInstanceConfig
- enableIntegrityMonitoring Boolean
Defines whether instances have integrity monitoring enabled.
- enableSecureBoot Boolean
Defines whether instances have Secure Boot enabled.
- enableVtpm Boolean
Defines whether instances have the vTPM enabled.
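
A minimal Python sketch of the same pattern for Shielded VMs, assuming the generated ClusterClusterConfigGceClusterConfigShieldedInstanceConfigArgs input type; all three flags are optional booleans.

import pulumi_gcp as gcp

cluster = gcp.dataproc.Cluster("shielded",
    region="us-central1",
    cluster_config=gcp.dataproc.ClusterClusterConfigArgs(
        gce_cluster_config=gcp.dataproc.ClusterClusterConfigGceClusterConfigArgs(
            shielded_instance_config=gcp.dataproc.ClusterClusterConfigGceClusterConfigShieldedInstanceConfigArgs(
                enable_secure_boot=True,
                enable_vtpm=True,
                enable_integrity_monitoring=True,
            ),
        ),
    ))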
ClusterClusterConfigInitializationAction
- script String
The script to be executed during initialization of the cluster. The script must be a GCS file with a gs:// prefix.
- timeoutSec Integer
The maximum duration (in seconds) which script is allowed to take to execute its action. GCP will default to a predetermined computed value if not set (currently 300).
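
In Python this corresponds to the initialization_actions list on cluster_config, as in the Advanced example near the top of this page:

import pulumi_gcp as gcp

cluster = gcp.dataproc.Cluster("with-init-action",
    region="us-central1",
    cluster_config=gcp.dataproc.ClusterClusterConfigArgs(
        initialization_actions=[
            gcp.dataproc.ClusterClusterConfigInitializationActionArgs(
                # Must be a Cloud Storage object with a gs:// prefix.
                script="gs://dataproc-initialization-actions/stackdriver/stackdriver.sh",
                # GCP defaults to 300 seconds if unset.
                timeout_sec=500,
            ),
        ],
    ))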
ClusterClusterConfigLifecycleConfig
- autoDeleteTime String
The time when the cluster will be auto-deleted. A timestamp in RFC3339 UTC "Zulu" format, accurate to nanoseconds. Example: "2014-10-02T15:01:23.045123456Z".
- idleDeleteTtl String
The duration to keep the cluster alive while idling (no jobs running). After this TTL, the cluster will be deleted. Valid range: [10m, 14d].
- idleStartTime String
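
As a sketch, scheduled deletion in Python might look like the following; the "s"-suffixed Duration string for the TTL and the RFC3339 timestamp are assumptions consistent with the field descriptions above, and the values are placeholders.

import pulumi_gcp as gcp

cluster = gcp.dataproc.Cluster("ephemeral",
    region="us-central1",
    cluster_config=gcp.dataproc.ClusterClusterConfigArgs(
        lifecycle_config=gcp.dataproc.ClusterClusterConfigLifecycleConfigArgs(
            # Delete after 30 idle minutes (valid range [10m, 14d]).
            idle_delete_ttl="1800s",
            # Hard deadline in RFC3339 UTC "Zulu" format.
            auto_delete_time="2024-10-02T15:01:23.045123456Z",
        ),
    ))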
ClusterClusterConfigMasterConfig
- accelerators List<ClusterClusterConfigMasterConfigAccelerator>
The Compute Engine accelerator configuration for these instances. Can be specified multiple times.
- diskConfig ClusterClusterConfigMasterConfigDiskConfig
Disk Config
- imageUri String
The URI for the image to use for the master node(s). See the guide for more information.
- instanceNames List<String>
- machineType String
The name of a Google Compute Engine machine type to create for the master node(s). If not specified, GCP will default to a predetermined computed value (currently n1-standard-4).
- minCpuPlatform String
The name of a minimum generation of CPU family for the master. If not specified, GCP will default to a predetermined computed value for each zone. See the guide for details about which CPU families are available (and defaulted) for each zone.
- numInstances Integer
Specifies the number of master nodes to create. If not specified, GCP will default to a predetermined computed value (currently 1).
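
The master shape from the Advanced example at the top of this page, reduced to a Python sketch:

import pulumi_gcp as gcp

cluster = gcp.dataproc.Cluster("custom-master",
    region="us-central1",
    cluster_config=gcp.dataproc.ClusterClusterConfigArgs(
        master_config=gcp.dataproc.ClusterClusterConfigMasterConfigArgs(
            num_instances=1,
            machine_type="e2-medium",
            disk_config=gcp.dataproc.ClusterClusterConfigMasterConfigDiskConfigArgs(
                boot_disk_type="pd-ssd",
                boot_disk_size_gb=30,
            ),
        ),
    ))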
ClusterClusterConfigMasterConfigAccelerator
- acceleratorCount Integer
The number of the accelerator cards of this type exposed to this instance. Often restricted to one of 1, 2, 4, or 8.
- acceleratorType String
The short name of the accelerator type to expose to this instance. For example, nvidia-tesla-k80.
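
A sketch attaching a GPU to the master node; accelerator availability is zonal, so a zone is pinned explicitly here, and all values are placeholders.

import pulumi_gcp as gcp

cluster = gcp.dataproc.Cluster("gpu-master",
    region="us-central1",
    cluster_config=gcp.dataproc.ClusterClusterConfigArgs(
        gce_cluster_config=gcp.dataproc.ClusterClusterConfigGceClusterConfigArgs(
            # Pin a zone where the chosen accelerator type is available.
            zone="us-central1-a",
        ),
        master_config=gcp.dataproc.ClusterClusterConfigMasterConfigArgs(
            accelerators=[
                gcp.dataproc.ClusterClusterConfigMasterConfigAcceleratorArgs(
                    accelerator_type="nvidia-tesla-k80",
                    accelerator_count=1,
                ),
            ],
        ),
    ))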
ClusterClusterConfigMasterConfigDiskConfig
- bootDiskSizeGb Integer
Size of the primary disk attached to each master node, specified in GB. The smallest allowed disk size is 10GB. GCP will default to a predetermined computed value if not set (currently 500GB). Note: If SSDs are not attached, it also contains the HDFS data blocks and Hadoop working directories.
- bootDiskType String
The disk type of the primary disk attached to each master node. One of "pd-ssd" or "pd-standard". Defaults to "pd-standard".
- numLocalSsds Integer
The amount of local SSD disks that will be attached to each master node. Defaults to 0.
ClusterClusterConfigMetastoreConfig
- dataprocMetastoreService String
Resource name of an existing Dataproc Metastore service.
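
A sketch pointing the cluster at an existing Dataproc Metastore service; the projects/<project>/locations/<location>/services/<name> resource-name format is an assumption, and all names are placeholders.

import pulumi_gcp as gcp

cluster = gcp.dataproc.Cluster("with-metastore",
    region="us-central1",
    cluster_config=gcp.dataproc.ClusterClusterConfigArgs(
        metastore_config=gcp.dataproc.ClusterClusterConfigMetastoreConfigArgs(
            dataproc_metastore_service="projects/my-project/locations/us-central1/services/my-metastore",
        ),
    ))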
ClusterClusterConfigPreemptibleWorkerConfig
- diskConfig ClusterClusterConfigPreemptibleWorkerConfigDiskConfig
Disk Config
- instanceNames List<String>
- numInstances Integer
Specifies the number of preemptible nodes to create. Defaults to 0.
- preemptibility String
Specifies the preemptibility of the secondary workers. The default value is PREEMPTIBLE. Accepted values are:
- PREEMPTIBILITY_UNSPECIFIED
- NON_PREEMPTIBLE
- PREEMPTIBLE
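
A sketch adding two secondary workers; recall from the warning at the top of this page that num_instances here is one of the few fields that can change without recreating the cluster.

import pulumi_gcp as gcp

cluster = gcp.dataproc.Cluster("with-secondary-workers",
    region="us-central1",
    cluster_config=gcp.dataproc.ClusterClusterConfigArgs(
        preemptible_worker_config=gcp.dataproc.ClusterClusterConfigPreemptibleWorkerConfigArgs(
            # Updatable in place, unlike most other cluster arguments.
            num_instances=2,
            preemptibility="PREEMPTIBLE",
            disk_config=gcp.dataproc.ClusterClusterConfigPreemptibleWorkerConfigDiskConfigArgs(
                boot_disk_size_gb=30,
            ),
        ),
    ))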
ClusterClusterConfigPreemptibleWorkerConfigDiskConfig
- bootDiskSizeGb Integer
Size of the primary disk attached to each preemptible worker node, specified in GB. The smallest allowed disk size is 10GB. GCP will default to a predetermined computed value if not set (currently 500GB). Note: If SSDs are not attached, it also contains the HDFS data blocks and Hadoop working directories.
- bootDiskType String
The disk type of the primary disk attached to each preemptible worker node. One of "pd-ssd" or "pd-standard". Defaults to "pd-standard".
- numLocalSsds Integer
The amount of local SSD disks that will be attached to each preemptible worker node. Defaults to 0.
ClusterClusterConfigSecurityConfig
- kerberosConfig ClusterClusterConfigSecurityConfigKerberosConfig
Kerberos Configuration
ClusterClusterConfigSecurityConfigKerberosConfig
- kmsKeyUri String
The URI of the KMS key used to encrypt various sensitive files.
- rootPrincipalPasswordUri String
The Cloud Storage URI of a KMS encrypted file containing the root principal password.
- crossRealmTrustAdminServer String
The admin server (IP or hostname) for the remote trusted realm in a cross realm trust relationship.
- crossRealmTrustKdc String
The KDC (IP or hostname) for the remote trusted realm in a cross realm trust relationship.
- crossRealmTrustRealm String
The remote realm the Dataproc on-cluster KDC will trust, should the user enable cross realm trust.
- crossRealmTrustSharedPasswordUri String
The Cloud Storage URI of a KMS encrypted file containing the shared password between the on-cluster Kerberos realm and the remote trusted realm, in a cross realm trust relationship.
- enableKerberos Boolean
Flag to indicate whether to Kerberize the cluster.
- kdcDbKeyUri String
The Cloud Storage URI of a KMS encrypted file containing the master key of the KDC database.
- keyPasswordUri String
The Cloud Storage URI of a KMS encrypted file containing the password to the user provided key. For the self-signed certificate, this password is generated by Dataproc.
- keystorePasswordUri String
The Cloud Storage URI of a KMS encrypted file containing the password to the user provided keystore. For the self-signed certificate, the password is generated by Dataproc.
- keystoreUri String
The Cloud Storage URI of the keystore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.
- realm String
The name of the on-cluster Kerberos realm. If not specified, the uppercased domain of hostnames will be the realm.
- tgtLifetimeHours Integer
The lifetime of the ticket granting ticket, in hours.
- truststorePasswordUri String
The Cloud Storage URI of a KMS encrypted file containing the password to the user provided truststore. For the self-signed certificate, this password is generated by Dataproc.
- truststoreUri String
The Cloud Storage URI of the truststore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.
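
A minimal Kerberization sketch using the KMS key and root-principal password fields described above; the KMS key resource name and Cloud Storage URI are placeholders.

import pulumi_gcp as gcp

cluster = gcp.dataproc.Cluster("kerberized",
    region="us-central1",
    cluster_config=gcp.dataproc.ClusterClusterConfigArgs(
        security_config=gcp.dataproc.ClusterClusterConfigSecurityConfigArgs(
            kerberos_config=gcp.dataproc.ClusterClusterConfigSecurityConfigKerberosConfigArgs(
                enable_kerberos=True,
                # KMS key used to encrypt the password file (placeholder).
                kms_key_uri="projects/my-project/locations/global/keyRings/my-ring/cryptoKeys/my-key",
                # KMS-encrypted root principal password in GCS (placeholder).
                root_principal_password_uri="gs://my-bucket/kerberos-root-password.encrypted",
            ),
        ),
    ))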
ClusterClusterConfigSoftwareConfig
- imageVersion String
The Cloud Dataproc image version to use for the cluster - this controls the sets of software versions installed onto the nodes when you create clusters. If not specified, defaults to the latest version. For a list of valid versions see Cloud Dataproc versions
- optionalComponents List<String>
The set of optional components to activate on the cluster. Accepted values are:
- ANACONDA
- DRUID
- FLINK
- HBASE
- HIVE_WEBHCAT
- JUPYTER
- PRESTO
- RANGER
- SOLR
- ZEPPELIN
- ZOOKEEPER
- overrideProperties Map<String,String>
A list of override and additional properties (key/value pairs) used to modify various aspects of the common configuration files used when creating a cluster. For a list of valid properties please see Cluster properties
- properties Map<String,Object>
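
A Python sketch combining the three commonly used fields above; the image version and override property come from the Advanced example at the top of this page, and the optional components are drawn from the accepted values list.

import pulumi_gcp as gcp

cluster = gcp.dataproc.Cluster("tuned-software",
    region="us-central1",
    cluster_config=gcp.dataproc.ClusterClusterConfigArgs(
        software_config=gcp.dataproc.ClusterClusterConfigSoftwareConfigArgs(
            image_version="2.0.35-debian10",
            optional_components=["JUPYTER", "ZEPPELIN"],
            override_properties={
                "dataproc:dataproc.allow.zero.workers": "true",
            },
        ),
    ))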
ClusterClusterConfigWorkerConfig
- accelerators List<ClusterClusterConfigWorkerConfigAccelerator>
The Compute Engine accelerator configuration for these instances. Can be specified multiple times.
- diskConfig ClusterClusterConfigWorkerConfigDiskConfig
Disk Config
- imageUri String
The URI for the image to use for this worker. See the guide for more information.
- instanceNames List<String>
- machineType String
The name of a Google Compute Engine machine type to create for the worker nodes. If not specified, GCP will default to a predetermined computed value (currently n1-standard-4).
- minCpuPlatform String
The name of a minimum generation of CPU family for the worker nodes. If not specified, GCP will default to a predetermined computed value for each zone. See the guide for details about which CPU families are available (and defaulted) for each zone.
- numInstances Integer
Specifies the number of worker nodes to create. If not specified, GCP will default to a predetermined computed value (currently 2).
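
The worker shape from the Advanced example at the top of this page, reduced to a Python sketch; as with secondary workers, num_instances can be updated without recreating the cluster.

import pulumi_gcp as gcp

cluster = gcp.dataproc.Cluster("sized-workers",
    region="us-central1",
    cluster_config=gcp.dataproc.ClusterClusterConfigArgs(
        worker_config=gcp.dataproc.ClusterClusterConfigWorkerConfigArgs(
            num_instances=2,
            machine_type="e2-medium",
            min_cpu_platform="Intel Skylake",
            disk_config=gcp.dataproc.ClusterClusterConfigWorkerConfigDiskConfigArgs(
                boot_disk_size_gb=30,
                # When local SSDs are attached, they hold the HDFS data blocks
                # instead of the boot disk.
                num_local_ssds=1,
            ),
        ),
    ))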
ClusterClusterConfigWorkerConfigAccelerator
- acceleratorCount Integer
The number of the accelerator cards of this type exposed to this instance. Often restricted to one of 1, 2, 4, or 8.
- acceleratorType String
The short name of the accelerator type to expose to this instance. For example, nvidia-tesla-k80.
ClusterClusterConfigWorkerConfigDiskConfig
- bootDiskSizeGb Integer
Size of the primary disk attached to each worker node, specified in GB. The smallest allowed disk size is 10GB. GCP will default to a predetermined computed value if not set (currently 500GB). Note: If SSDs are not attached, it also contains the HDFS data blocks and Hadoop working directories.
- bootDiskType String
The disk type of the primary disk attached to each worker node. One of "pd-ssd" or "pd-standard". Defaults to "pd-standard".
- numLocalSsds Integer
The amount of local SSD disks that will be attached to each worker node. Defaults to 0.
Import
This resource does not support import.
Package Details
- Repository
- https://github.com/pulumi/pulumi-gcp
- License
- Apache-2.0
- Notes
This Pulumi package is based on the google-beta Terraform Provider.