1. Packages
  2. Google Cloud (GCP) Classic
  3. API Docs
  4. dataproc
  5. Cluster
Google Cloud Classic v7.19.0 published on Thursday, Apr 18, 2024 by Pulumi

gcp.dataproc.Cluster

Explore with Pulumi AI

gcp logo
Google Cloud Classic v7.19.0 published on Thursday, Apr 18, 2024 by Pulumi

    Manages a Cloud Dataproc cluster resource within GCP.

    !> Warning: Due to limitations of the API, all arguments except labels, cluster_config.worker_config.num_instances and cluster_config.preemptible_worker_config.num_instances are non-updatable. Changing cluster_config.worker_config.min_num_instances will be ignored. Changing others will cause recreation of the whole cluster!

    Example Usage

    Basic

    import * as pulumi from "@pulumi/pulumi";
    import * as gcp from "@pulumi/gcp";
    
    const simplecluster = new gcp.dataproc.Cluster("simplecluster", {
        name: "simplecluster",
        region: "us-central1",
    });
    
    import pulumi
    import pulumi_gcp as gcp
    
    simplecluster = gcp.dataproc.Cluster("simplecluster",
        name="simplecluster",
        region="us-central1")
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/dataproc"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := dataproc.NewCluster(ctx, "simplecluster", &dataproc.ClusterArgs{
    			Name:   pulumi.String("simplecluster"),
    			Region: pulumi.String("us-central1"),
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Gcp = Pulumi.Gcp;
    
    return await Deployment.RunAsync(() => 
    {
        var simplecluster = new Gcp.Dataproc.Cluster("simplecluster", new()
        {
            Name = "simplecluster",
            Region = "us-central1",
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.gcp.dataproc.Cluster;
    import com.pulumi.gcp.dataproc.ClusterArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var simplecluster = new Cluster("simplecluster", ClusterArgs.builder()        
                .name("simplecluster")
                .region("us-central1")
                .build());
    
        }
    }
    
    resources:
      simplecluster:
        type: gcp:dataproc:Cluster
        properties:
          name: simplecluster
          region: us-central1
    

    Advanced

    import * as pulumi from "@pulumi/pulumi";
    import * as gcp from "@pulumi/gcp";
    
    const _default = new gcp.serviceaccount.Account("default", {
        accountId: "service-account-id",
        displayName: "Service Account",
    });
    const mycluster = new gcp.dataproc.Cluster("mycluster", {
        name: "mycluster",
        region: "us-central1",
        gracefulDecommissionTimeout: "120s",
        labels: {
            foo: "bar",
        },
        clusterConfig: {
            stagingBucket: "dataproc-staging-bucket",
            masterConfig: {
                numInstances: 1,
                machineType: "e2-medium",
                diskConfig: {
                    bootDiskType: "pd-ssd",
                    bootDiskSizeGb: 30,
                },
            },
            workerConfig: {
                numInstances: 2,
                machineType: "e2-medium",
                minCpuPlatform: "Intel Skylake",
                diskConfig: {
                    bootDiskSizeGb: 30,
                    numLocalSsds: 1,
                },
            },
            preemptibleWorkerConfig: {
                numInstances: 0,
            },
            softwareConfig: {
                imageVersion: "2.0.35-debian10",
                overrideProperties: {
                    "dataproc:dataproc.allow.zero.workers": "true",
                },
            },
            gceClusterConfig: {
                tags: [
                    "foo",
                    "bar",
                ],
                serviceAccount: _default.email,
                serviceAccountScopes: ["cloud-platform"],
            },
            initializationActions: [{
                script: "gs://dataproc-initialization-actions/stackdriver/stackdriver.sh",
                timeoutSec: 500,
            }],
        },
    });
    
    import pulumi
    import pulumi_gcp as gcp
    
    default = gcp.serviceaccount.Account("default",
        account_id="service-account-id",
        display_name="Service Account")
    mycluster = gcp.dataproc.Cluster("mycluster",
        name="mycluster",
        region="us-central1",
        graceful_decommission_timeout="120s",
        labels={
            "foo": "bar",
        },
        cluster_config=gcp.dataproc.ClusterClusterConfigArgs(
            staging_bucket="dataproc-staging-bucket",
            master_config=gcp.dataproc.ClusterClusterConfigMasterConfigArgs(
                num_instances=1,
                machine_type="e2-medium",
                disk_config=gcp.dataproc.ClusterClusterConfigMasterConfigDiskConfigArgs(
                    boot_disk_type="pd-ssd",
                    boot_disk_size_gb=30,
                ),
            ),
            worker_config=gcp.dataproc.ClusterClusterConfigWorkerConfigArgs(
                num_instances=2,
                machine_type="e2-medium",
                min_cpu_platform="Intel Skylake",
                disk_config=gcp.dataproc.ClusterClusterConfigWorkerConfigDiskConfigArgs(
                    boot_disk_size_gb=30,
                    num_local_ssds=1,
                ),
            ),
            preemptible_worker_config=gcp.dataproc.ClusterClusterConfigPreemptibleWorkerConfigArgs(
                num_instances=0,
            ),
            software_config=gcp.dataproc.ClusterClusterConfigSoftwareConfigArgs(
                image_version="2.0.35-debian10",
                override_properties={
                    "dataproc:dataproc.allow.zero.workers": "true",
                },
            ),
            gce_cluster_config=gcp.dataproc.ClusterClusterConfigGceClusterConfigArgs(
                tags=[
                    "foo",
                    "bar",
                ],
                service_account=default.email,
                service_account_scopes=["cloud-platform"],
            ),
            initialization_actions=[gcp.dataproc.ClusterClusterConfigInitializationActionArgs(
                script="gs://dataproc-initialization-actions/stackdriver/stackdriver.sh",
                timeout_sec=500,
            )],
        ))
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/dataproc"
    	"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/serviceaccount"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := serviceaccount.NewAccount(ctx, "default", &serviceaccount.AccountArgs{
    			AccountId:   pulumi.String("service-account-id"),
    			DisplayName: pulumi.String("Service Account"),
    		})
    		if err != nil {
    			return err
    		}
    		_, err = dataproc.NewCluster(ctx, "mycluster", &dataproc.ClusterArgs{
    			Name:                        pulumi.String("mycluster"),
    			Region:                      pulumi.String("us-central1"),
    			GracefulDecommissionTimeout: pulumi.String("120s"),
    			Labels: pulumi.StringMap{
    				"foo": pulumi.String("bar"),
    			},
    			ClusterConfig: &dataproc.ClusterClusterConfigArgs{
    				StagingBucket: pulumi.String("dataproc-staging-bucket"),
    				MasterConfig: &dataproc.ClusterClusterConfigMasterConfigArgs{
    					NumInstances: pulumi.Int(1),
    					MachineType:  pulumi.String("e2-medium"),
    					DiskConfig: &dataproc.ClusterClusterConfigMasterConfigDiskConfigArgs{
    						BootDiskType:   pulumi.String("pd-ssd"),
    						BootDiskSizeGb: pulumi.Int(30),
    					},
    				},
    				WorkerConfig: &dataproc.ClusterClusterConfigWorkerConfigArgs{
    					NumInstances:   pulumi.Int(2),
    					MachineType:    pulumi.String("e2-medium"),
    					MinCpuPlatform: pulumi.String("Intel Skylake"),
    					DiskConfig: &dataproc.ClusterClusterConfigWorkerConfigDiskConfigArgs{
    						BootDiskSizeGb: pulumi.Int(30),
    						NumLocalSsds:   pulumi.Int(1),
    					},
    				},
    				PreemptibleWorkerConfig: &dataproc.ClusterClusterConfigPreemptibleWorkerConfigArgs{
    					NumInstances: pulumi.Int(0),
    				},
    				SoftwareConfig: &dataproc.ClusterClusterConfigSoftwareConfigArgs{
    					ImageVersion: pulumi.String("2.0.35-debian10"),
    					OverrideProperties: pulumi.StringMap{
    						"dataproc:dataproc.allow.zero.workers": pulumi.String("true"),
    					},
    				},
    				GceClusterConfig: &dataproc.ClusterClusterConfigGceClusterConfigArgs{
    					Tags: pulumi.StringArray{
    						pulumi.String("foo"),
    						pulumi.String("bar"),
    					},
    					ServiceAccount: _default.Email,
    					ServiceAccountScopes: pulumi.StringArray{
    						pulumi.String("cloud-platform"),
    					},
    				},
    				InitializationActions: dataproc.ClusterClusterConfigInitializationActionArray{
    					&dataproc.ClusterClusterConfigInitializationActionArgs{
    						Script:     pulumi.String("gs://dataproc-initialization-actions/stackdriver/stackdriver.sh"),
    						TimeoutSec: pulumi.Int(500),
    					},
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Gcp = Pulumi.Gcp;
    
    return await Deployment.RunAsync(() => 
    {
        var @default = new Gcp.ServiceAccount.Account("default", new()
        {
            AccountId = "service-account-id",
            DisplayName = "Service Account",
        });
    
        var mycluster = new Gcp.Dataproc.Cluster("mycluster", new()
        {
            Name = "mycluster",
            Region = "us-central1",
            GracefulDecommissionTimeout = "120s",
            Labels = 
            {
                { "foo", "bar" },
            },
            ClusterConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigArgs
            {
                StagingBucket = "dataproc-staging-bucket",
                MasterConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigMasterConfigArgs
                {
                    NumInstances = 1,
                    MachineType = "e2-medium",
                    DiskConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigMasterConfigDiskConfigArgs
                    {
                        BootDiskType = "pd-ssd",
                        BootDiskSizeGb = 30,
                    },
                },
                WorkerConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigWorkerConfigArgs
                {
                    NumInstances = 2,
                    MachineType = "e2-medium",
                    MinCpuPlatform = "Intel Skylake",
                    DiskConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigWorkerConfigDiskConfigArgs
                    {
                        BootDiskSizeGb = 30,
                        NumLocalSsds = 1,
                    },
                },
                PreemptibleWorkerConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigPreemptibleWorkerConfigArgs
                {
                    NumInstances = 0,
                },
                SoftwareConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigSoftwareConfigArgs
                {
                    ImageVersion = "2.0.35-debian10",
                    OverrideProperties = 
                    {
                        { "dataproc:dataproc.allow.zero.workers", "true" },
                    },
                },
                GceClusterConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigGceClusterConfigArgs
                {
                    Tags = new[]
                    {
                        "foo",
                        "bar",
                    },
                    ServiceAccount = @default.Email,
                    ServiceAccountScopes = new[]
                    {
                        "cloud-platform",
                    },
                },
                InitializationActions = new[]
                {
                    new Gcp.Dataproc.Inputs.ClusterClusterConfigInitializationActionArgs
                    {
                        Script = "gs://dataproc-initialization-actions/stackdriver/stackdriver.sh",
                        TimeoutSec = 500,
                    },
                },
            },
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.gcp.serviceaccount.Account;
    import com.pulumi.gcp.serviceaccount.AccountArgs;
    import com.pulumi.gcp.dataproc.Cluster;
    import com.pulumi.gcp.dataproc.ClusterArgs;
    import com.pulumi.gcp.dataproc.inputs.ClusterClusterConfigArgs;
    import com.pulumi.gcp.dataproc.inputs.ClusterClusterConfigMasterConfigArgs;
    import com.pulumi.gcp.dataproc.inputs.ClusterClusterConfigMasterConfigDiskConfigArgs;
    import com.pulumi.gcp.dataproc.inputs.ClusterClusterConfigWorkerConfigArgs;
    import com.pulumi.gcp.dataproc.inputs.ClusterClusterConfigWorkerConfigDiskConfigArgs;
    import com.pulumi.gcp.dataproc.inputs.ClusterClusterConfigPreemptibleWorkerConfigArgs;
    import com.pulumi.gcp.dataproc.inputs.ClusterClusterConfigSoftwareConfigArgs;
    import com.pulumi.gcp.dataproc.inputs.ClusterClusterConfigGceClusterConfigArgs;
    import com.pulumi.gcp.dataproc.inputs.ClusterClusterConfigInitializationActionArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var default_ = new Account("default", AccountArgs.builder()        
                .accountId("service-account-id")
                .displayName("Service Account")
                .build());
    
            var mycluster = new Cluster("mycluster", ClusterArgs.builder()        
                .name("mycluster")
                .region("us-central1")
                .gracefulDecommissionTimeout("120s")
                .labels(Map.of("foo", "bar"))
                .clusterConfig(ClusterClusterConfigArgs.builder()
                    .stagingBucket("dataproc-staging-bucket")
                    .masterConfig(ClusterClusterConfigMasterConfigArgs.builder()
                        .numInstances(1)
                        .machineType("e2-medium")
                        .diskConfig(ClusterClusterConfigMasterConfigDiskConfigArgs.builder()
                            .bootDiskType("pd-ssd")
                            .bootDiskSizeGb(30)
                            .build())
                        .build())
                    .workerConfig(ClusterClusterConfigWorkerConfigArgs.builder()
                        .numInstances(2)
                        .machineType("e2-medium")
                        .minCpuPlatform("Intel Skylake")
                        .diskConfig(ClusterClusterConfigWorkerConfigDiskConfigArgs.builder()
                            .bootDiskSizeGb(30)
                            .numLocalSsds(1)
                            .build())
                        .build())
                    .preemptibleWorkerConfig(ClusterClusterConfigPreemptibleWorkerConfigArgs.builder()
                        .numInstances(0)
                        .build())
                    .softwareConfig(ClusterClusterConfigSoftwareConfigArgs.builder()
                        .imageVersion("2.0.35-debian10")
                        .overrideProperties(Map.of("dataproc:dataproc.allow.zero.workers", "true"))
                        .build())
                    .gceClusterConfig(ClusterClusterConfigGceClusterConfigArgs.builder()
                        .tags(                    
                            "foo",
                            "bar")
                        .serviceAccount(default_.email())
                        .serviceAccountScopes("cloud-platform")
                        .build())
                    .initializationActions(ClusterClusterConfigInitializationActionArgs.builder()
                        .script("gs://dataproc-initialization-actions/stackdriver/stackdriver.sh")
                        .timeoutSec(500)
                        .build())
                    .build())
                .build());
    
        }
    }
    
    resources:
      default:
        type: gcp:serviceaccount:Account
        properties:
          accountId: service-account-id
          displayName: Service Account
      mycluster:
        type: gcp:dataproc:Cluster
        properties:
          name: mycluster
          region: us-central1
          gracefulDecommissionTimeout: 120s
          labels:
            foo: bar
          clusterConfig:
            stagingBucket: dataproc-staging-bucket
            masterConfig:
              numInstances: 1
              machineType: e2-medium
              diskConfig:
                bootDiskType: pd-ssd
                bootDiskSizeGb: 30
            workerConfig:
              numInstances: 2
              machineType: e2-medium
              minCpuPlatform: Intel Skylake
              diskConfig:
                bootDiskSizeGb: 30
                numLocalSsds: 1
            preemptibleWorkerConfig:
              numInstances: 0
            softwareConfig:
              imageVersion: 2.0.35-debian10
              overrideProperties:
                dataproc:dataproc.allow.zero.workers: 'true'
            gceClusterConfig:
              tags:
                - foo
                - bar
              serviceAccount: ${default.email}
              serviceAccountScopes:
                - cloud-platform
            initializationActions:
              - script: gs://dataproc-initialization-actions/stackdriver/stackdriver.sh
                timeoutSec: 500
    

    Using A GPU Accelerator

    import * as pulumi from "@pulumi/pulumi";
    import * as gcp from "@pulumi/gcp";
    
    const acceleratedCluster = new gcp.dataproc.Cluster("accelerated_cluster", {
        name: "my-cluster-with-gpu",
        region: "us-central1",
        clusterConfig: {
            gceClusterConfig: {
                zone: "us-central1-a",
            },
            masterConfig: {
                accelerators: [{
                    acceleratorType: "nvidia-tesla-k80",
                    acceleratorCount: 1,
                }],
            },
        },
    });
    
    import pulumi
    import pulumi_gcp as gcp
    
    accelerated_cluster = gcp.dataproc.Cluster("accelerated_cluster",
        name="my-cluster-with-gpu",
        region="us-central1",
        cluster_config=gcp.dataproc.ClusterClusterConfigArgs(
            gce_cluster_config=gcp.dataproc.ClusterClusterConfigGceClusterConfigArgs(
                zone="us-central1-a",
            ),
            master_config=gcp.dataproc.ClusterClusterConfigMasterConfigArgs(
                accelerators=[gcp.dataproc.ClusterClusterConfigMasterConfigAcceleratorArgs(
                    accelerator_type="nvidia-tesla-k80",
                    accelerator_count=1,
                )],
            ),
        ))
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/dataproc"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := dataproc.NewCluster(ctx, "accelerated_cluster", &dataproc.ClusterArgs{
    			Name:   pulumi.String("my-cluster-with-gpu"),
    			Region: pulumi.String("us-central1"),
    			ClusterConfig: &dataproc.ClusterClusterConfigArgs{
    				GceClusterConfig: &dataproc.ClusterClusterConfigGceClusterConfigArgs{
    					Zone: pulumi.String("us-central1-a"),
    				},
    				MasterConfig: &dataproc.ClusterClusterConfigMasterConfigArgs{
    					Accelerators: dataproc.ClusterClusterConfigMasterConfigAcceleratorArray{
    						&dataproc.ClusterClusterConfigMasterConfigAcceleratorArgs{
    							AcceleratorType:  pulumi.String("nvidia-tesla-k80"),
    							AcceleratorCount: pulumi.Int(1),
    						},
    					},
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Gcp = Pulumi.Gcp;
    
    return await Deployment.RunAsync(() => 
    {
        var acceleratedCluster = new Gcp.Dataproc.Cluster("accelerated_cluster", new()
        {
            Name = "my-cluster-with-gpu",
            Region = "us-central1",
            ClusterConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigArgs
            {
                GceClusterConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigGceClusterConfigArgs
                {
                    Zone = "us-central1-a",
                },
                MasterConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigMasterConfigArgs
                {
                    Accelerators = new[]
                    {
                        new Gcp.Dataproc.Inputs.ClusterClusterConfigMasterConfigAcceleratorArgs
                        {
                            AcceleratorType = "nvidia-tesla-k80",
                            AcceleratorCount = 1,
                        },
                    },
                },
            },
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.gcp.dataproc.Cluster;
    import com.pulumi.gcp.dataproc.ClusterArgs;
    import com.pulumi.gcp.dataproc.inputs.ClusterClusterConfigArgs;
    import com.pulumi.gcp.dataproc.inputs.ClusterClusterConfigGceClusterConfigArgs;
    import com.pulumi.gcp.dataproc.inputs.ClusterClusterConfigMasterConfigArgs;
    import com.pulumi.gcp.dataproc.inputs.ClusterClusterConfigMasterConfigAcceleratorArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var acceleratedCluster = new Cluster("acceleratedCluster", ClusterArgs.builder()        
                .name("my-cluster-with-gpu")
                .region("us-central1")
                .clusterConfig(ClusterClusterConfigArgs.builder()
                    .gceClusterConfig(ClusterClusterConfigGceClusterConfigArgs.builder()
                        .zone("us-central1-a")
                        .build())
                    .masterConfig(ClusterClusterConfigMasterConfigArgs.builder()
                        .accelerators(ClusterClusterConfigMasterConfigAcceleratorArgs.builder()
                            .acceleratorType("nvidia-tesla-k80")
                            .acceleratorCount(1)
                            .build())
                        .build())
                    .build())
                .build());
    
        }
    }
    
    resources:
      acceleratedCluster:
        type: gcp:dataproc:Cluster
        name: accelerated_cluster
        properties:
          name: my-cluster-with-gpu
          region: us-central1
          clusterConfig:
            gceClusterConfig:
              zone: us-central1-a
            masterConfig:
              accelerators:
                - acceleratorType: nvidia-tesla-k80
                  acceleratorCount: 1
    

    Create Cluster Resource

    Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.

    Constructor syntax

    new Cluster(name: string, args?: ClusterArgs, opts?: CustomResourceOptions);
    @overload
    def Cluster(resource_name: str,
                args: Optional[ClusterArgs] = None,
                opts: Optional[ResourceOptions] = None)
    
    @overload
    def Cluster(resource_name: str,
                opts: Optional[ResourceOptions] = None,
                cluster_config: Optional[ClusterClusterConfigArgs] = None,
                graceful_decommission_timeout: Optional[str] = None,
                labels: Optional[Mapping[str, str]] = None,
                name: Optional[str] = None,
                project: Optional[str] = None,
                region: Optional[str] = None,
                virtual_cluster_config: Optional[ClusterVirtualClusterConfigArgs] = None)
    func NewCluster(ctx *Context, name string, args *ClusterArgs, opts ...ResourceOption) (*Cluster, error)
    public Cluster(string name, ClusterArgs? args = null, CustomResourceOptions? opts = null)
    public Cluster(String name, ClusterArgs args)
    public Cluster(String name, ClusterArgs args, CustomResourceOptions options)
    
    type: gcp:dataproc:Cluster
    properties: # The arguments to resource properties.
    options: # Bag of options to control resource's behavior.
    
    

    Parameters

    name string
    The unique name of the resource.
    args ClusterArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    resource_name str
    The unique name of the resource.
    args ClusterArgs
    The arguments to resource properties.
    opts ResourceOptions
    Bag of options to control resource's behavior.
    ctx Context
    Context object for the current deployment.
    name string
    The unique name of the resource.
    args ClusterArgs
    The arguments to resource properties.
    opts ResourceOption
    Bag of options to control resource's behavior.
    name string
    The unique name of the resource.
    args ClusterArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    name String
    The unique name of the resource.
    args ClusterArgs
    The arguments to resource properties.
    options CustomResourceOptions
    Bag of options to control resource's behavior.

    Example

    The following reference example uses placeholder values for all input properties.

    var exampleclusterResourceResourceFromDataproccluster = new Gcp.Dataproc.Cluster("exampleclusterResourceResourceFromDataproccluster", new()
    {
        ClusterConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigArgs
        {
            AutoscalingConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigAutoscalingConfigArgs
            {
                PolicyUri = "string",
            },
            AuxiliaryNodeGroups = new[]
            {
                new Gcp.Dataproc.Inputs.ClusterClusterConfigAuxiliaryNodeGroupArgs
                {
                    NodeGroups = new[]
                    {
                        new Gcp.Dataproc.Inputs.ClusterClusterConfigAuxiliaryNodeGroupNodeGroupArgs
                        {
                            Roles = new[]
                            {
                                "string",
                            },
                            Name = "string",
                            NodeGroupConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigAuxiliaryNodeGroupNodeGroupNodeGroupConfigArgs
                            {
                                Accelerators = new[]
                                {
                                    new Gcp.Dataproc.Inputs.ClusterClusterConfigAuxiliaryNodeGroupNodeGroupNodeGroupConfigAcceleratorArgs
                                    {
                                        AcceleratorCount = 0,
                                        AcceleratorType = "string",
                                    },
                                },
                                DiskConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigAuxiliaryNodeGroupNodeGroupNodeGroupConfigDiskConfigArgs
                                {
                                    BootDiskSizeGb = 0,
                                    BootDiskType = "string",
                                    NumLocalSsds = 0,
                                },
                                InstanceNames = new[]
                                {
                                    "string",
                                },
                                MachineType = "string",
                                MinCpuPlatform = "string",
                                NumInstances = 0,
                            },
                        },
                    },
                    NodeGroupId = "string",
                },
            },
            Bucket = "string",
            DataprocMetricConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigDataprocMetricConfigArgs
            {
                Metrics = new[]
                {
                    new Gcp.Dataproc.Inputs.ClusterClusterConfigDataprocMetricConfigMetricArgs
                    {
                        MetricSource = "string",
                        MetricOverrides = new[]
                        {
                            "string",
                        },
                    },
                },
            },
            EncryptionConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigEncryptionConfigArgs
            {
                KmsKeyName = "string",
            },
            EndpointConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigEndpointConfigArgs
            {
                EnableHttpPortAccess = false,
                HttpPorts = 
                {
                    { "string", "any" },
                },
            },
            GceClusterConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigGceClusterConfigArgs
            {
                InternalIpOnly = false,
                Metadata = 
                {
                    { "string", "string" },
                },
                Network = "string",
                NodeGroupAffinity = new Gcp.Dataproc.Inputs.ClusterClusterConfigGceClusterConfigNodeGroupAffinityArgs
                {
                    NodeGroupUri = "string",
                },
                ReservationAffinity = new Gcp.Dataproc.Inputs.ClusterClusterConfigGceClusterConfigReservationAffinityArgs
                {
                    ConsumeReservationType = "string",
                    Key = "string",
                    Values = new[]
                    {
                        "string",
                    },
                },
                ServiceAccount = "string",
                ServiceAccountScopes = new[]
                {
                    "string",
                },
                ShieldedInstanceConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigGceClusterConfigShieldedInstanceConfigArgs
                {
                    EnableIntegrityMonitoring = false,
                    EnableSecureBoot = false,
                    EnableVtpm = false,
                },
                Subnetwork = "string",
                Tags = new[]
                {
                    "string",
                },
                Zone = "string",
            },
            InitializationActions = new[]
            {
                new Gcp.Dataproc.Inputs.ClusterClusterConfigInitializationActionArgs
                {
                    Script = "string",
                    TimeoutSec = 0,
                },
            },
            LifecycleConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigLifecycleConfigArgs
            {
                AutoDeleteTime = "string",
                IdleDeleteTtl = "string",
                IdleStartTime = "string",
            },
            MasterConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigMasterConfigArgs
            {
                Accelerators = new[]
                {
                    new Gcp.Dataproc.Inputs.ClusterClusterConfigMasterConfigAcceleratorArgs
                    {
                        AcceleratorCount = 0,
                        AcceleratorType = "string",
                    },
                },
                DiskConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigMasterConfigDiskConfigArgs
                {
                    BootDiskSizeGb = 0,
                    BootDiskType = "string",
                    NumLocalSsds = 0,
                },
                ImageUri = "string",
                InstanceNames = new[]
                {
                    "string",
                },
                MachineType = "string",
                MinCpuPlatform = "string",
                NumInstances = 0,
            },
            MetastoreConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigMetastoreConfigArgs
            {
                DataprocMetastoreService = "string",
            },
            PreemptibleWorkerConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigPreemptibleWorkerConfigArgs
            {
                DiskConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigPreemptibleWorkerConfigDiskConfigArgs
                {
                    BootDiskSizeGb = 0,
                    BootDiskType = "string",
                    NumLocalSsds = 0,
                },
                InstanceFlexibilityPolicy = new Gcp.Dataproc.Inputs.ClusterClusterConfigPreemptibleWorkerConfigInstanceFlexibilityPolicyArgs
                {
                    InstanceSelectionLists = new[]
                    {
                        new Gcp.Dataproc.Inputs.ClusterClusterConfigPreemptibleWorkerConfigInstanceFlexibilityPolicyInstanceSelectionListArgs
                        {
                            MachineTypes = new[]
                            {
                                "string",
                            },
                            Rank = 0,
                        },
                    },
                    InstanceSelectionResults = new[]
                    {
                        new Gcp.Dataproc.Inputs.ClusterClusterConfigPreemptibleWorkerConfigInstanceFlexibilityPolicyInstanceSelectionResultArgs
                        {
                            MachineType = "string",
                            VmCount = 0,
                        },
                    },
                },
                InstanceNames = new[]
                {
                    "string",
                },
                NumInstances = 0,
                Preemptibility = "string",
            },
            SecurityConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigSecurityConfigArgs
            {
                KerberosConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigSecurityConfigKerberosConfigArgs
                {
                    KmsKeyUri = "string",
                    RootPrincipalPasswordUri = "string",
                    CrossRealmTrustSharedPasswordUri = "string",
                    CrossRealmTrustAdminServer = "string",
                    EnableKerberos = false,
                    KdcDbKeyUri = "string",
                    KeyPasswordUri = "string",
                    KeystorePasswordUri = "string",
                    KeystoreUri = "string",
                    CrossRealmTrustRealm = "string",
                    Realm = "string",
                    CrossRealmTrustKdc = "string",
                    TgtLifetimeHours = 0,
                    TruststorePasswordUri = "string",
                    TruststoreUri = "string",
                },
            },
            SoftwareConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigSoftwareConfigArgs
            {
                ImageVersion = "string",
                OptionalComponents = new[]
                {
                    "string",
                },
                OverrideProperties = 
                {
                    { "string", "string" },
                },
                Properties = 
                {
                    { "string", "any" },
                },
            },
            StagingBucket = "string",
            TempBucket = "string",
            WorkerConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigWorkerConfigArgs
            {
                Accelerators = new[]
                {
                    new Gcp.Dataproc.Inputs.ClusterClusterConfigWorkerConfigAcceleratorArgs
                    {
                        AcceleratorCount = 0,
                        AcceleratorType = "string",
                    },
                },
                DiskConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigWorkerConfigDiskConfigArgs
                {
                    BootDiskSizeGb = 0,
                    BootDiskType = "string",
                    NumLocalSsds = 0,
                },
                ImageUri = "string",
                InstanceNames = new[]
                {
                    "string",
                },
                MachineType = "string",
                MinCpuPlatform = "string",
                MinNumInstances = 0,
                NumInstances = 0,
            },
        },
        GracefulDecommissionTimeout = "string",
        Labels = 
        {
            { "string", "string" },
        },
        Name = "string",
        Project = "string",
        Region = "string",
        VirtualClusterConfig = new Gcp.Dataproc.Inputs.ClusterVirtualClusterConfigArgs
        {
            AuxiliaryServicesConfig = new Gcp.Dataproc.Inputs.ClusterVirtualClusterConfigAuxiliaryServicesConfigArgs
            {
                MetastoreConfig = new Gcp.Dataproc.Inputs.ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfigArgs
                {
                    DataprocMetastoreService = "string",
                },
                SparkHistoryServerConfig = new Gcp.Dataproc.Inputs.ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfigArgs
                {
                    DataprocCluster = "string",
                },
            },
            KubernetesClusterConfig = new Gcp.Dataproc.Inputs.ClusterVirtualClusterConfigKubernetesClusterConfigArgs
            {
                GkeClusterConfig = new Gcp.Dataproc.Inputs.ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigArgs
                {
                    GkeClusterTarget = "string",
                    NodePoolTargets = new[]
                    {
                        new Gcp.Dataproc.Inputs.ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetArgs
                        {
                            NodePool = "string",
                            Roles = new[]
                            {
                                "string",
                            },
                            NodePoolConfig = new Gcp.Dataproc.Inputs.ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigArgs
                            {
                                Locations = new[]
                                {
                                    "string",
                                },
                                Autoscaling = new Gcp.Dataproc.Inputs.ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscalingArgs
                                {
                                    MaxNodeCount = 0,
                                    MinNodeCount = 0,
                                },
                                Config = new Gcp.Dataproc.Inputs.ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigArgs
                                {
                                    LocalSsdCount = 0,
                                    MachineType = "string",
                                    MinCpuPlatform = "string",
                                    Preemptible = false,
                                    Spot = false,
                                },
                            },
                        },
                    },
                },
                KubernetesSoftwareConfig = new Gcp.Dataproc.Inputs.ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfigArgs
                {
                    ComponentVersion = 
                    {
                        { "string", "string" },
                    },
                    Properties = 
                    {
                        { "string", "string" },
                    },
                },
                KubernetesNamespace = "string",
            },
            StagingBucket = "string",
        },
    });
    
    example, err := dataproc.NewCluster(ctx, "exampleclusterResourceResourceFromDataproccluster", &dataproc.ClusterArgs{
    	ClusterConfig: &dataproc.ClusterClusterConfigArgs{
    		AutoscalingConfig: &dataproc.ClusterClusterConfigAutoscalingConfigArgs{
    			PolicyUri: pulumi.String("string"),
    		},
    		AuxiliaryNodeGroups: dataproc.ClusterClusterConfigAuxiliaryNodeGroupArray{
    			&dataproc.ClusterClusterConfigAuxiliaryNodeGroupArgs{
    				NodeGroups: dataproc.ClusterClusterConfigAuxiliaryNodeGroupNodeGroupArray{
    					&dataproc.ClusterClusterConfigAuxiliaryNodeGroupNodeGroupArgs{
    						Roles: pulumi.StringArray{
    							pulumi.String("string"),
    						},
    						Name: pulumi.String("string"),
    						NodeGroupConfig: &dataproc.ClusterClusterConfigAuxiliaryNodeGroupNodeGroupNodeGroupConfigArgs{
    							Accelerators: dataproc.ClusterClusterConfigAuxiliaryNodeGroupNodeGroupNodeGroupConfigAcceleratorArray{
    								&dataproc.ClusterClusterConfigAuxiliaryNodeGroupNodeGroupNodeGroupConfigAcceleratorArgs{
    									AcceleratorCount: pulumi.Int(0),
    									AcceleratorType:  pulumi.String("string"),
    								},
    							},
    							DiskConfig: &dataproc.ClusterClusterConfigAuxiliaryNodeGroupNodeGroupNodeGroupConfigDiskConfigArgs{
    								BootDiskSizeGb: pulumi.Int(0),
    								BootDiskType:   pulumi.String("string"),
    								NumLocalSsds:   pulumi.Int(0),
    							},
    							InstanceNames: pulumi.StringArray{
    								pulumi.String("string"),
    							},
    							MachineType:    pulumi.String("string"),
    							MinCpuPlatform: pulumi.String("string"),
    							NumInstances:   pulumi.Int(0),
    						},
    					},
    				},
    				NodeGroupId: pulumi.String("string"),
    			},
    		},
    		Bucket: pulumi.String("string"),
    		DataprocMetricConfig: &dataproc.ClusterClusterConfigDataprocMetricConfigArgs{
    			Metrics: dataproc.ClusterClusterConfigDataprocMetricConfigMetricArray{
    				&dataproc.ClusterClusterConfigDataprocMetricConfigMetricArgs{
    					MetricSource: pulumi.String("string"),
    					MetricOverrides: pulumi.StringArray{
    						pulumi.String("string"),
    					},
    				},
    			},
    		},
    		EncryptionConfig: &dataproc.ClusterClusterConfigEncryptionConfigArgs{
    			KmsKeyName: pulumi.String("string"),
    		},
    		EndpointConfig: &dataproc.ClusterClusterConfigEndpointConfigArgs{
    			EnableHttpPortAccess: pulumi.Bool(false),
    			HttpPorts: pulumi.Map{
    				"string": pulumi.Any("any"),
    			},
    		},
    		GceClusterConfig: &dataproc.ClusterClusterConfigGceClusterConfigArgs{
    			InternalIpOnly: pulumi.Bool(false),
    			Metadata: pulumi.StringMap{
    				"string": pulumi.String("string"),
    			},
    			Network: pulumi.String("string"),
    			NodeGroupAffinity: &dataproc.ClusterClusterConfigGceClusterConfigNodeGroupAffinityArgs{
    				NodeGroupUri: pulumi.String("string"),
    			},
    			ReservationAffinity: &dataproc.ClusterClusterConfigGceClusterConfigReservationAffinityArgs{
    				ConsumeReservationType: pulumi.String("string"),
    				Key:                    pulumi.String("string"),
    				Values: pulumi.StringArray{
    					pulumi.String("string"),
    				},
    			},
    			ServiceAccount: pulumi.String("string"),
    			ServiceAccountScopes: pulumi.StringArray{
    				pulumi.String("string"),
    			},
    			ShieldedInstanceConfig: &dataproc.ClusterClusterConfigGceClusterConfigShieldedInstanceConfigArgs{
    				EnableIntegrityMonitoring: pulumi.Bool(false),
    				EnableSecureBoot:          pulumi.Bool(false),
    				EnableVtpm:                pulumi.Bool(false),
    			},
    			Subnetwork: pulumi.String("string"),
    			Tags: pulumi.StringArray{
    				pulumi.String("string"),
    			},
    			Zone: pulumi.String("string"),
    		},
    		InitializationActions: dataproc.ClusterClusterConfigInitializationActionArray{
    			&dataproc.ClusterClusterConfigInitializationActionArgs{
    				Script:     pulumi.String("string"),
    				TimeoutSec: pulumi.Int(0),
    			},
    		},
    		LifecycleConfig: &dataproc.ClusterClusterConfigLifecycleConfigArgs{
    			AutoDeleteTime: pulumi.String("string"),
    			IdleDeleteTtl:  pulumi.String("string"),
    			IdleStartTime:  pulumi.String("string"),
    		},
    		MasterConfig: &dataproc.ClusterClusterConfigMasterConfigArgs{
    			Accelerators: dataproc.ClusterClusterConfigMasterConfigAcceleratorArray{
    				&dataproc.ClusterClusterConfigMasterConfigAcceleratorArgs{
    					AcceleratorCount: pulumi.Int(0),
    					AcceleratorType:  pulumi.String("string"),
    				},
    			},
    			DiskConfig: &dataproc.ClusterClusterConfigMasterConfigDiskConfigArgs{
    				BootDiskSizeGb: pulumi.Int(0),
    				BootDiskType:   pulumi.String("string"),
    				NumLocalSsds:   pulumi.Int(0),
    			},
    			ImageUri: pulumi.String("string"),
    			InstanceNames: pulumi.StringArray{
    				pulumi.String("string"),
    			},
    			MachineType:    pulumi.String("string"),
    			MinCpuPlatform: pulumi.String("string"),
    			NumInstances:   pulumi.Int(0),
    		},
    		MetastoreConfig: &dataproc.ClusterClusterConfigMetastoreConfigArgs{
    			DataprocMetastoreService: pulumi.String("string"),
    		},
    		PreemptibleWorkerConfig: &dataproc.ClusterClusterConfigPreemptibleWorkerConfigArgs{
    			DiskConfig: &dataproc.ClusterClusterConfigPreemptibleWorkerConfigDiskConfigArgs{
    				BootDiskSizeGb: pulumi.Int(0),
    				BootDiskType:   pulumi.String("string"),
    				NumLocalSsds:   pulumi.Int(0),
    			},
    			InstanceFlexibilityPolicy: &dataproc.ClusterClusterConfigPreemptibleWorkerConfigInstanceFlexibilityPolicyArgs{
    				InstanceSelectionLists: dataproc.ClusterClusterConfigPreemptibleWorkerConfigInstanceFlexibilityPolicyInstanceSelectionListArray{
    					&dataproc.ClusterClusterConfigPreemptibleWorkerConfigInstanceFlexibilityPolicyInstanceSelectionListArgs{
    						MachineTypes: pulumi.StringArray{
    							pulumi.String("string"),
    						},
    						Rank: pulumi.Int(0),
    					},
    				},
    				InstanceSelectionResults: dataproc.ClusterClusterConfigPreemptibleWorkerConfigInstanceFlexibilityPolicyInstanceSelectionResultArray{
    					&dataproc.ClusterClusterConfigPreemptibleWorkerConfigInstanceFlexibilityPolicyInstanceSelectionResultArgs{
    						MachineType: pulumi.String("string"),
    						VmCount:     pulumi.Int(0),
    					},
    				},
    			},
    			InstanceNames: pulumi.StringArray{
    				pulumi.String("string"),
    			},
    			NumInstances:   pulumi.Int(0),
    			Preemptibility: pulumi.String("string"),
    		},
    		SecurityConfig: &dataproc.ClusterClusterConfigSecurityConfigArgs{
    			KerberosConfig: &dataproc.ClusterClusterConfigSecurityConfigKerberosConfigArgs{
    				KmsKeyUri:                        pulumi.String("string"),
    				RootPrincipalPasswordUri:         pulumi.String("string"),
    				CrossRealmTrustSharedPasswordUri: pulumi.String("string"),
    				CrossRealmTrustAdminServer:       pulumi.String("string"),
    				EnableKerberos:                   pulumi.Bool(false),
    				KdcDbKeyUri:                      pulumi.String("string"),
    				KeyPasswordUri:                   pulumi.String("string"),
    				KeystorePasswordUri:              pulumi.String("string"),
    				KeystoreUri:                      pulumi.String("string"),
    				CrossRealmTrustRealm:             pulumi.String("string"),
    				Realm:                            pulumi.String("string"),
    				CrossRealmTrustKdc:               pulumi.String("string"),
    				TgtLifetimeHours:                 pulumi.Int(0),
    				TruststorePasswordUri:            pulumi.String("string"),
    				TruststoreUri:                    pulumi.String("string"),
    			},
    		},
    		SoftwareConfig: &dataproc.ClusterClusterConfigSoftwareConfigArgs{
    			ImageVersion: pulumi.String("string"),
    			OptionalComponents: pulumi.StringArray{
    				pulumi.String("string"),
    			},
    			OverrideProperties: pulumi.StringMap{
    				"string": pulumi.String("string"),
    			},
    			Properties: pulumi.Map{
    				"string": pulumi.Any("any"),
    			},
    		},
    		StagingBucket: pulumi.String("string"),
    		TempBucket:    pulumi.String("string"),
    		WorkerConfig: &dataproc.ClusterClusterConfigWorkerConfigArgs{
    			Accelerators: dataproc.ClusterClusterConfigWorkerConfigAcceleratorArray{
    				&dataproc.ClusterClusterConfigWorkerConfigAcceleratorArgs{
    					AcceleratorCount: pulumi.Int(0),
    					AcceleratorType:  pulumi.String("string"),
    				},
    			},
    			DiskConfig: &dataproc.ClusterClusterConfigWorkerConfigDiskConfigArgs{
    				BootDiskSizeGb: pulumi.Int(0),
    				BootDiskType:   pulumi.String("string"),
    				NumLocalSsds:   pulumi.Int(0),
    			},
    			ImageUri: pulumi.String("string"),
    			InstanceNames: pulumi.StringArray{
    				pulumi.String("string"),
    			},
    			MachineType:     pulumi.String("string"),
    			MinCpuPlatform:  pulumi.String("string"),
    			MinNumInstances: pulumi.Int(0),
    			NumInstances:    pulumi.Int(0),
    		},
    	},
    	GracefulDecommissionTimeout: pulumi.String("string"),
    	Labels: pulumi.StringMap{
    		"string": pulumi.String("string"),
    	},
    	Name:    pulumi.String("string"),
    	Project: pulumi.String("string"),
    	Region:  pulumi.String("string"),
    	VirtualClusterConfig: &dataproc.ClusterVirtualClusterConfigArgs{
    		AuxiliaryServicesConfig: &dataproc.ClusterVirtualClusterConfigAuxiliaryServicesConfigArgs{
    			MetastoreConfig: &dataproc.ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfigArgs{
    				DataprocMetastoreService: pulumi.String("string"),
    			},
    			SparkHistoryServerConfig: &dataproc.ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfigArgs{
    				DataprocCluster: pulumi.String("string"),
    			},
    		},
    		KubernetesClusterConfig: &dataproc.ClusterVirtualClusterConfigKubernetesClusterConfigArgs{
    			GkeClusterConfig: &dataproc.ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigArgs{
    				GkeClusterTarget: pulumi.String("string"),
    				NodePoolTargets: dataproc.ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetArray{
    					&dataproc.ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetArgs{
    						NodePool: pulumi.String("string"),
    						Roles: pulumi.StringArray{
    							pulumi.String("string"),
    						},
    						NodePoolConfig: &dataproc.ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigArgs{
    							Locations: pulumi.StringArray{
    								pulumi.String("string"),
    							},
    							Autoscaling: &dataproc.ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscalingArgs{
    								MaxNodeCount: pulumi.Int(0),
    								MinNodeCount: pulumi.Int(0),
    							},
    							Config: &dataproc.ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigArgs{
    								LocalSsdCount:  pulumi.Int(0),
    								MachineType:    pulumi.String("string"),
    								MinCpuPlatform: pulumi.String("string"),
    								Preemptible:    pulumi.Bool(false),
    								Spot:           pulumi.Bool(false),
    							},
    						},
    					},
    				},
    			},
    			KubernetesSoftwareConfig: &dataproc.ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfigArgs{
    				ComponentVersion: pulumi.StringMap{
    					"string": pulumi.String("string"),
    				},
    				Properties: pulumi.StringMap{
    					"string": pulumi.String("string"),
    				},
    			},
    			KubernetesNamespace: pulumi.String("string"),
    		},
    		StagingBucket: pulumi.String("string"),
    	},
    })
    
    var exampleclusterResourceResourceFromDataproccluster = new Cluster("exampleclusterResourceResourceFromDataproccluster", ClusterArgs.builder()        
        .clusterConfig(ClusterClusterConfigArgs.builder()
            .autoscalingConfig(ClusterClusterConfigAutoscalingConfigArgs.builder()
                .policyUri("string")
                .build())
            .auxiliaryNodeGroups(ClusterClusterConfigAuxiliaryNodeGroupArgs.builder()
                .nodeGroups(ClusterClusterConfigAuxiliaryNodeGroupNodeGroupArgs.builder()
                    .roles("string")
                    .name("string")
                    .nodeGroupConfig(ClusterClusterConfigAuxiliaryNodeGroupNodeGroupNodeGroupConfigArgs.builder()
                        .accelerators(ClusterClusterConfigAuxiliaryNodeGroupNodeGroupNodeGroupConfigAcceleratorArgs.builder()
                            .acceleratorCount(0)
                            .acceleratorType("string")
                            .build())
                        .diskConfig(ClusterClusterConfigAuxiliaryNodeGroupNodeGroupNodeGroupConfigDiskConfigArgs.builder()
                            .bootDiskSizeGb(0)
                            .bootDiskType("string")
                            .numLocalSsds(0)
                            .build())
                        .instanceNames("string")
                        .machineType("string")
                        .minCpuPlatform("string")
                        .numInstances(0)
                        .build())
                    .build())
                .nodeGroupId("string")
                .build())
            .bucket("string")
            .dataprocMetricConfig(ClusterClusterConfigDataprocMetricConfigArgs.builder()
                .metrics(ClusterClusterConfigDataprocMetricConfigMetricArgs.builder()
                    .metricSource("string")
                    .metricOverrides("string")
                    .build())
                .build())
            .encryptionConfig(ClusterClusterConfigEncryptionConfigArgs.builder()
                .kmsKeyName("string")
                .build())
            .endpointConfig(ClusterClusterConfigEndpointConfigArgs.builder()
                .enableHttpPortAccess(false)
                .httpPorts(Map.of("string", "any"))
                .build())
            .gceClusterConfig(ClusterClusterConfigGceClusterConfigArgs.builder()
                .internalIpOnly(false)
                .metadata(Map.of("string", "string"))
                .network("string")
                .nodeGroupAffinity(ClusterClusterConfigGceClusterConfigNodeGroupAffinityArgs.builder()
                    .nodeGroupUri("string")
                    .build())
                .reservationAffinity(ClusterClusterConfigGceClusterConfigReservationAffinityArgs.builder()
                    .consumeReservationType("string")
                    .key("string")
                    .values("string")
                    .build())
                .serviceAccount("string")
                .serviceAccountScopes("string")
                .shieldedInstanceConfig(ClusterClusterConfigGceClusterConfigShieldedInstanceConfigArgs.builder()
                    .enableIntegrityMonitoring(false)
                    .enableSecureBoot(false)
                    .enableVtpm(false)
                    .build())
                .subnetwork("string")
                .tags("string")
                .zone("string")
                .build())
            .initializationActions(ClusterClusterConfigInitializationActionArgs.builder()
                .script("string")
                .timeoutSec(0)
                .build())
            .lifecycleConfig(ClusterClusterConfigLifecycleConfigArgs.builder()
                .autoDeleteTime("string")
                .idleDeleteTtl("string")
                .idleStartTime("string")
                .build())
            .masterConfig(ClusterClusterConfigMasterConfigArgs.builder()
                .accelerators(ClusterClusterConfigMasterConfigAcceleratorArgs.builder()
                    .acceleratorCount(0)
                    .acceleratorType("string")
                    .build())
                .diskConfig(ClusterClusterConfigMasterConfigDiskConfigArgs.builder()
                    .bootDiskSizeGb(0)
                    .bootDiskType("string")
                    .numLocalSsds(0)
                    .build())
                .imageUri("string")
                .instanceNames("string")
                .machineType("string")
                .minCpuPlatform("string")
                .numInstances(0)
                .build())
            .metastoreConfig(ClusterClusterConfigMetastoreConfigArgs.builder()
                .dataprocMetastoreService("string")
                .build())
            .preemptibleWorkerConfig(ClusterClusterConfigPreemptibleWorkerConfigArgs.builder()
                .diskConfig(ClusterClusterConfigPreemptibleWorkerConfigDiskConfigArgs.builder()
                    .bootDiskSizeGb(0)
                    .bootDiskType("string")
                    .numLocalSsds(0)
                    .build())
                .instanceFlexibilityPolicy(ClusterClusterConfigPreemptibleWorkerConfigInstanceFlexibilityPolicyArgs.builder()
                    .instanceSelectionLists(ClusterClusterConfigPreemptibleWorkerConfigInstanceFlexibilityPolicyInstanceSelectionListArgs.builder()
                        .machineTypes("string")
                        .rank(0)
                        .build())
                    .instanceSelectionResults(ClusterClusterConfigPreemptibleWorkerConfigInstanceFlexibilityPolicyInstanceSelectionResultArgs.builder()
                        .machineType("string")
                        .vmCount(0)
                        .build())
                    .build())
                .instanceNames("string")
                .numInstances(0)
                .preemptibility("string")
                .build())
            .securityConfig(ClusterClusterConfigSecurityConfigArgs.builder()
                .kerberosConfig(ClusterClusterConfigSecurityConfigKerberosConfigArgs.builder()
                    .kmsKeyUri("string")
                    .rootPrincipalPasswordUri("string")
                    .crossRealmTrustSharedPasswordUri("string")
                    .crossRealmTrustAdminServer("string")
                    .enableKerberos(false)
                    .kdcDbKeyUri("string")
                    .keyPasswordUri("string")
                    .keystorePasswordUri("string")
                    .keystoreUri("string")
                    .crossRealmTrustRealm("string")
                    .realm("string")
                    .crossRealmTrustKdc("string")
                    .tgtLifetimeHours(0)
                    .truststorePasswordUri("string")
                    .truststoreUri("string")
                    .build())
                .build())
            .softwareConfig(ClusterClusterConfigSoftwareConfigArgs.builder()
                .imageVersion("string")
                .optionalComponents("string")
                .overrideProperties(Map.of("string", "string"))
                .properties(Map.of("string", "any"))
                .build())
            .stagingBucket("string")
            .tempBucket("string")
            .workerConfig(ClusterClusterConfigWorkerConfigArgs.builder()
                .accelerators(ClusterClusterConfigWorkerConfigAcceleratorArgs.builder()
                    .acceleratorCount(0)
                    .acceleratorType("string")
                    .build())
                .diskConfig(ClusterClusterConfigWorkerConfigDiskConfigArgs.builder()
                    .bootDiskSizeGb(0)
                    .bootDiskType("string")
                    .numLocalSsds(0)
                    .build())
                .imageUri("string")
                .instanceNames("string")
                .machineType("string")
                .minCpuPlatform("string")
                .minNumInstances(0)
                .numInstances(0)
                .build())
            .build())
        .gracefulDecommissionTimeout("string")
        .labels(Map.of("string", "string"))
        .name("string")
        .project("string")
        .region("string")
        .virtualClusterConfig(ClusterVirtualClusterConfigArgs.builder()
            .auxiliaryServicesConfig(ClusterVirtualClusterConfigAuxiliaryServicesConfigArgs.builder()
                .metastoreConfig(ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfigArgs.builder()
                    .dataprocMetastoreService("string")
                    .build())
                .sparkHistoryServerConfig(ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfigArgs.builder()
                    .dataprocCluster("string")
                    .build())
                .build())
            .kubernetesClusterConfig(ClusterVirtualClusterConfigKubernetesClusterConfigArgs.builder()
                .gkeClusterConfig(ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigArgs.builder()
                    .gkeClusterTarget("string")
                    .nodePoolTargets(ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetArgs.builder()
                        .nodePool("string")
                        .roles("string")
                        .nodePoolConfig(ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigArgs.builder()
                            .locations("string")
                            .autoscaling(ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscalingArgs.builder()
                                .maxNodeCount(0)
                                .minNodeCount(0)
                                .build())
                            .config(ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigArgs.builder()
                                .localSsdCount(0)
                                .machineType("string")
                                .minCpuPlatform("string")
                                .preemptible(false)
                                .spot(false)
                                .build())
                            .build())
                        .build())
                    .build())
                .kubernetesSoftwareConfig(ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfigArgs.builder()
                    .componentVersion(Map.of("string", "string"))
                    .properties(Map.of("string", "string"))
                    .build())
                .kubernetesNamespace("string")
                .build())
            .stagingBucket("string")
            .build())
        .build());
    
    examplecluster_resource_resource_from_dataproccluster = gcp.dataproc.Cluster("exampleclusterResourceResourceFromDataproccluster",
        cluster_config=gcp.dataproc.ClusterClusterConfigArgs(
            autoscaling_config=gcp.dataproc.ClusterClusterConfigAutoscalingConfigArgs(
                policy_uri="string",
            ),
            auxiliary_node_groups=[gcp.dataproc.ClusterClusterConfigAuxiliaryNodeGroupArgs(
                node_groups=[gcp.dataproc.ClusterClusterConfigAuxiliaryNodeGroupNodeGroupArgs(
                    roles=["string"],
                    name="string",
                    node_group_config=gcp.dataproc.ClusterClusterConfigAuxiliaryNodeGroupNodeGroupNodeGroupConfigArgs(
                        accelerators=[gcp.dataproc.ClusterClusterConfigAuxiliaryNodeGroupNodeGroupNodeGroupConfigAcceleratorArgs(
                            accelerator_count=0,
                            accelerator_type="string",
                        )],
                        disk_config=gcp.dataproc.ClusterClusterConfigAuxiliaryNodeGroupNodeGroupNodeGroupConfigDiskConfigArgs(
                            boot_disk_size_gb=0,
                            boot_disk_type="string",
                            num_local_ssds=0,
                        ),
                        instance_names=["string"],
                        machine_type="string",
                        min_cpu_platform="string",
                        num_instances=0,
                    ),
                )],
                node_group_id="string",
            )],
            bucket="string",
            dataproc_metric_config=gcp.dataproc.ClusterClusterConfigDataprocMetricConfigArgs(
                metrics=[gcp.dataproc.ClusterClusterConfigDataprocMetricConfigMetricArgs(
                    metric_source="string",
                    metric_overrides=["string"],
                )],
            ),
            encryption_config=gcp.dataproc.ClusterClusterConfigEncryptionConfigArgs(
                kms_key_name="string",
            ),
            endpoint_config=gcp.dataproc.ClusterClusterConfigEndpointConfigArgs(
                enable_http_port_access=False,
                http_ports={
                    "string": "any",
                },
            ),
            gce_cluster_config=gcp.dataproc.ClusterClusterConfigGceClusterConfigArgs(
                internal_ip_only=False,
                metadata={
                    "string": "string",
                },
                network="string",
                node_group_affinity=gcp.dataproc.ClusterClusterConfigGceClusterConfigNodeGroupAffinityArgs(
                    node_group_uri="string",
                ),
                reservation_affinity=gcp.dataproc.ClusterClusterConfigGceClusterConfigReservationAffinityArgs(
                    consume_reservation_type="string",
                    key="string",
                    values=["string"],
                ),
                service_account="string",
                service_account_scopes=["string"],
                shielded_instance_config=gcp.dataproc.ClusterClusterConfigGceClusterConfigShieldedInstanceConfigArgs(
                    enable_integrity_monitoring=False,
                    enable_secure_boot=False,
                    enable_vtpm=False,
                ),
                subnetwork="string",
                tags=["string"],
                zone="string",
            ),
            initialization_actions=[gcp.dataproc.ClusterClusterConfigInitializationActionArgs(
                script="string",
                timeout_sec=0,
            )],
            lifecycle_config=gcp.dataproc.ClusterClusterConfigLifecycleConfigArgs(
                auto_delete_time="string",
                idle_delete_ttl="string",
                idle_start_time="string",
            ),
            master_config=gcp.dataproc.ClusterClusterConfigMasterConfigArgs(
                accelerators=[gcp.dataproc.ClusterClusterConfigMasterConfigAcceleratorArgs(
                    accelerator_count=0,
                    accelerator_type="string",
                )],
                disk_config=gcp.dataproc.ClusterClusterConfigMasterConfigDiskConfigArgs(
                    boot_disk_size_gb=0,
                    boot_disk_type="string",
                    num_local_ssds=0,
                ),
                image_uri="string",
                instance_names=["string"],
                machine_type="string",
                min_cpu_platform="string",
                num_instances=0,
            ),
            metastore_config=gcp.dataproc.ClusterClusterConfigMetastoreConfigArgs(
                dataproc_metastore_service="string",
            ),
            preemptible_worker_config=gcp.dataproc.ClusterClusterConfigPreemptibleWorkerConfigArgs(
                disk_config=gcp.dataproc.ClusterClusterConfigPreemptibleWorkerConfigDiskConfigArgs(
                    boot_disk_size_gb=0,
                    boot_disk_type="string",
                    num_local_ssds=0,
                ),
                instance_flexibility_policy=gcp.dataproc.ClusterClusterConfigPreemptibleWorkerConfigInstanceFlexibilityPolicyArgs(
                    instance_selection_lists=[gcp.dataproc.ClusterClusterConfigPreemptibleWorkerConfigInstanceFlexibilityPolicyInstanceSelectionListArgs(
                        machine_types=["string"],
                        rank=0,
                    )],
                    instance_selection_results=[gcp.dataproc.ClusterClusterConfigPreemptibleWorkerConfigInstanceFlexibilityPolicyInstanceSelectionResultArgs(
                        machine_type="string",
                        vm_count=0,
                    )],
                ),
                instance_names=["string"],
                num_instances=0,
                preemptibility="string",
            ),
            security_config=gcp.dataproc.ClusterClusterConfigSecurityConfigArgs(
                kerberos_config=gcp.dataproc.ClusterClusterConfigSecurityConfigKerberosConfigArgs(
                    kms_key_uri="string",
                    root_principal_password_uri="string",
                    cross_realm_trust_shared_password_uri="string",
                    cross_realm_trust_admin_server="string",
                    enable_kerberos=False,
                    kdc_db_key_uri="string",
                    key_password_uri="string",
                    keystore_password_uri="string",
                    keystore_uri="string",
                    cross_realm_trust_realm="string",
                    realm="string",
                    cross_realm_trust_kdc="string",
                    tgt_lifetime_hours=0,
                    truststore_password_uri="string",
                    truststore_uri="string",
                ),
            ),
            software_config=gcp.dataproc.ClusterClusterConfigSoftwareConfigArgs(
                image_version="string",
                optional_components=["string"],
                override_properties={
                    "string": "string",
                },
                properties={
                    "string": "any",
                },
            ),
            staging_bucket="string",
            temp_bucket="string",
            worker_config=gcp.dataproc.ClusterClusterConfigWorkerConfigArgs(
                accelerators=[gcp.dataproc.ClusterClusterConfigWorkerConfigAcceleratorArgs(
                    accelerator_count=0,
                    accelerator_type="string",
                )],
                disk_config=gcp.dataproc.ClusterClusterConfigWorkerConfigDiskConfigArgs(
                    boot_disk_size_gb=0,
                    boot_disk_type="string",
                    num_local_ssds=0,
                ),
                image_uri="string",
                instance_names=["string"],
                machine_type="string",
                min_cpu_platform="string",
                min_num_instances=0,
                num_instances=0,
            ),
        ),
        graceful_decommission_timeout="string",
        labels={
            "string": "string",
        },
        name="string",
        project="string",
        region="string",
        virtual_cluster_config=gcp.dataproc.ClusterVirtualClusterConfigArgs(
            auxiliary_services_config=gcp.dataproc.ClusterVirtualClusterConfigAuxiliaryServicesConfigArgs(
                metastore_config=gcp.dataproc.ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfigArgs(
                    dataproc_metastore_service="string",
                ),
                spark_history_server_config=gcp.dataproc.ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfigArgs(
                    dataproc_cluster="string",
                ),
            ),
            kubernetes_cluster_config=gcp.dataproc.ClusterVirtualClusterConfigKubernetesClusterConfigArgs(
                gke_cluster_config=gcp.dataproc.ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigArgs(
                    gke_cluster_target="string",
                    node_pool_targets=[gcp.dataproc.ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetArgs(
                        node_pool="string",
                        roles=["string"],
                        node_pool_config=gcp.dataproc.ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigArgs(
                            locations=["string"],
                            autoscaling=gcp.dataproc.ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscalingArgs(
                                max_node_count=0,
                                min_node_count=0,
                            ),
                            config=gcp.dataproc.ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigArgs(
                                local_ssd_count=0,
                                machine_type="string",
                                min_cpu_platform="string",
                                preemptible=False,
                                spot=False,
                            ),
                        ),
                    )],
                ),
                kubernetes_software_config=gcp.dataproc.ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfigArgs(
                    component_version={
                        "string": "string",
                    },
                    properties={
                        "string": "string",
                    },
                ),
                kubernetes_namespace="string",
            ),
            staging_bucket="string",
        ))
    
    const exampleclusterResourceResourceFromDataproccluster = new gcp.dataproc.Cluster("exampleclusterResourceResourceFromDataproccluster", {
        clusterConfig: {
            autoscalingConfig: {
                policyUri: "string",
            },
            auxiliaryNodeGroups: [{
                nodeGroups: [{
                    roles: ["string"],
                    name: "string",
                    nodeGroupConfig: {
                        accelerators: [{
                            acceleratorCount: 0,
                            acceleratorType: "string",
                        }],
                        diskConfig: {
                            bootDiskSizeGb: 0,
                            bootDiskType: "string",
                            numLocalSsds: 0,
                        },
                        instanceNames: ["string"],
                        machineType: "string",
                        minCpuPlatform: "string",
                        numInstances: 0,
                    },
                }],
                nodeGroupId: "string",
            }],
            bucket: "string",
            dataprocMetricConfig: {
                metrics: [{
                    metricSource: "string",
                    metricOverrides: ["string"],
                }],
            },
            encryptionConfig: {
                kmsKeyName: "string",
            },
            endpointConfig: {
                enableHttpPortAccess: false,
                httpPorts: {
                    string: "any",
                },
            },
            gceClusterConfig: {
                internalIpOnly: false,
                metadata: {
                    string: "string",
                },
                network: "string",
                nodeGroupAffinity: {
                    nodeGroupUri: "string",
                },
                reservationAffinity: {
                    consumeReservationType: "string",
                    key: "string",
                    values: ["string"],
                },
                serviceAccount: "string",
                serviceAccountScopes: ["string"],
                shieldedInstanceConfig: {
                    enableIntegrityMonitoring: false,
                    enableSecureBoot: false,
                    enableVtpm: false,
                },
                subnetwork: "string",
                tags: ["string"],
                zone: "string",
            },
            initializationActions: [{
                script: "string",
                timeoutSec: 0,
            }],
            lifecycleConfig: {
                autoDeleteTime: "string",
                idleDeleteTtl: "string",
                idleStartTime: "string",
            },
            masterConfig: {
                accelerators: [{
                    acceleratorCount: 0,
                    acceleratorType: "string",
                }],
                diskConfig: {
                    bootDiskSizeGb: 0,
                    bootDiskType: "string",
                    numLocalSsds: 0,
                },
                imageUri: "string",
                instanceNames: ["string"],
                machineType: "string",
                minCpuPlatform: "string",
                numInstances: 0,
            },
            metastoreConfig: {
                dataprocMetastoreService: "string",
            },
            preemptibleWorkerConfig: {
                diskConfig: {
                    bootDiskSizeGb: 0,
                    bootDiskType: "string",
                    numLocalSsds: 0,
                },
                instanceFlexibilityPolicy: {
                    instanceSelectionLists: [{
                        machineTypes: ["string"],
                        rank: 0,
                    }],
                    instanceSelectionResults: [{
                        machineType: "string",
                        vmCount: 0,
                    }],
                },
                instanceNames: ["string"],
                numInstances: 0,
                preemptibility: "string",
            },
            securityConfig: {
                kerberosConfig: {
                    kmsKeyUri: "string",
                    rootPrincipalPasswordUri: "string",
                    crossRealmTrustSharedPasswordUri: "string",
                    crossRealmTrustAdminServer: "string",
                    enableKerberos: false,
                    kdcDbKeyUri: "string",
                    keyPasswordUri: "string",
                    keystorePasswordUri: "string",
                    keystoreUri: "string",
                    crossRealmTrustRealm: "string",
                    realm: "string",
                    crossRealmTrustKdc: "string",
                    tgtLifetimeHours: 0,
                    truststorePasswordUri: "string",
                    truststoreUri: "string",
                },
            },
            softwareConfig: {
                imageVersion: "string",
                optionalComponents: ["string"],
                overrideProperties: {
                    string: "string",
                },
                properties: {
                    string: "any",
                },
            },
            stagingBucket: "string",
            tempBucket: "string",
            workerConfig: {
                accelerators: [{
                    acceleratorCount: 0,
                    acceleratorType: "string",
                }],
                diskConfig: {
                    bootDiskSizeGb: 0,
                    bootDiskType: "string",
                    numLocalSsds: 0,
                },
                imageUri: "string",
                instanceNames: ["string"],
                machineType: "string",
                minCpuPlatform: "string",
                minNumInstances: 0,
                numInstances: 0,
            },
        },
        gracefulDecommissionTimeout: "string",
        labels: {
            string: "string",
        },
        name: "string",
        project: "string",
        region: "string",
        virtualClusterConfig: {
            auxiliaryServicesConfig: {
                metastoreConfig: {
                    dataprocMetastoreService: "string",
                },
                sparkHistoryServerConfig: {
                    dataprocCluster: "string",
                },
            },
            kubernetesClusterConfig: {
                gkeClusterConfig: {
                    gkeClusterTarget: "string",
                    nodePoolTargets: [{
                        nodePool: "string",
                        roles: ["string"],
                        nodePoolConfig: {
                            locations: ["string"],
                            autoscaling: {
                                maxNodeCount: 0,
                                minNodeCount: 0,
                            },
                            config: {
                                localSsdCount: 0,
                                machineType: "string",
                                minCpuPlatform: "string",
                                preemptible: false,
                                spot: false,
                            },
                        },
                    }],
                },
                kubernetesSoftwareConfig: {
                    componentVersion: {
                        string: "string",
                    },
                    properties: {
                        string: "string",
                    },
                },
                kubernetesNamespace: "string",
            },
            stagingBucket: "string",
        },
    });
    
    type: gcp:dataproc:Cluster
    properties:
        clusterConfig:
            autoscalingConfig:
                policyUri: string
            auxiliaryNodeGroups:
                - nodeGroupId: string
                  nodeGroups:
                    - name: string
                      nodeGroupConfig:
                        accelerators:
                            - acceleratorCount: 0
                              acceleratorType: string
                        diskConfig:
                            bootDiskSizeGb: 0
                            bootDiskType: string
                            numLocalSsds: 0
                        instanceNames:
                            - string
                        machineType: string
                        minCpuPlatform: string
                        numInstances: 0
                      roles:
                        - string
            bucket: string
            dataprocMetricConfig:
                metrics:
                    - metricOverrides:
                        - string
                      metricSource: string
            encryptionConfig:
                kmsKeyName: string
            endpointConfig:
                enableHttpPortAccess: false
                httpPorts:
                    string: any
            gceClusterConfig:
                internalIpOnly: false
                metadata:
                    string: string
                network: string
                nodeGroupAffinity:
                    nodeGroupUri: string
                reservationAffinity:
                    consumeReservationType: string
                    key: string
                    values:
                        - string
                serviceAccount: string
                serviceAccountScopes:
                    - string
                shieldedInstanceConfig:
                    enableIntegrityMonitoring: false
                    enableSecureBoot: false
                    enableVtpm: false
                subnetwork: string
                tags:
                    - string
                zone: string
            initializationActions:
                - script: string
                  timeoutSec: 0
            lifecycleConfig:
                autoDeleteTime: string
                idleDeleteTtl: string
                idleStartTime: string
            masterConfig:
                accelerators:
                    - acceleratorCount: 0
                      acceleratorType: string
                diskConfig:
                    bootDiskSizeGb: 0
                    bootDiskType: string
                    numLocalSsds: 0
                imageUri: string
                instanceNames:
                    - string
                machineType: string
                minCpuPlatform: string
                numInstances: 0
            metastoreConfig:
                dataprocMetastoreService: string
            preemptibleWorkerConfig:
                diskConfig:
                    bootDiskSizeGb: 0
                    bootDiskType: string
                    numLocalSsds: 0
                instanceFlexibilityPolicy:
                    instanceSelectionLists:
                        - machineTypes:
                            - string
                          rank: 0
                    instanceSelectionResults:
                        - machineType: string
                          vmCount: 0
                instanceNames:
                    - string
                numInstances: 0
                preemptibility: string
            securityConfig:
                kerberosConfig:
                    crossRealmTrustAdminServer: string
                    crossRealmTrustKdc: string
                    crossRealmTrustRealm: string
                    crossRealmTrustSharedPasswordUri: string
                    enableKerberos: false
                    kdcDbKeyUri: string
                    keyPasswordUri: string
                    keystorePasswordUri: string
                    keystoreUri: string
                    kmsKeyUri: string
                    realm: string
                    rootPrincipalPasswordUri: string
                    tgtLifetimeHours: 0
                    truststorePasswordUri: string
                    truststoreUri: string
            softwareConfig:
                imageVersion: string
                optionalComponents:
                    - string
                overrideProperties:
                    string: string
                properties:
                    string: any
            stagingBucket: string
            tempBucket: string
            workerConfig:
                accelerators:
                    - acceleratorCount: 0
                      acceleratorType: string
                diskConfig:
                    bootDiskSizeGb: 0
                    bootDiskType: string
                    numLocalSsds: 0
                imageUri: string
                instanceNames:
                    - string
                machineType: string
                minCpuPlatform: string
                minNumInstances: 0
                numInstances: 0
        gracefulDecommissionTimeout: string
        labels:
            string: string
        name: string
        project: string
        region: string
        virtualClusterConfig:
            auxiliaryServicesConfig:
                metastoreConfig:
                    dataprocMetastoreService: string
                sparkHistoryServerConfig:
                    dataprocCluster: string
            kubernetesClusterConfig:
                gkeClusterConfig:
                    gkeClusterTarget: string
                    nodePoolTargets:
                        - nodePool: string
                          nodePoolConfig:
                            autoscaling:
                                maxNodeCount: 0
                                minNodeCount: 0
                            config:
                                localSsdCount: 0
                                machineType: string
                                minCpuPlatform: string
                                preemptible: false
                                spot: false
                            locations:
                                - string
                          roles:
                            - string
                kubernetesNamespace: string
                kubernetesSoftwareConfig:
                    componentVersion:
                        string: string
                    properties:
                        string: string
            stagingBucket: string
    

    Cluster Resource Properties

    To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

    Inputs

    The Cluster resource accepts the following input properties:

    ClusterConfig ClusterClusterConfig
    Allows you to configure various aspects of the cluster. Structure defined below.
    GracefulDecommissionTimeout string
    The timeout duration which allows graceful decommissioning when you change the number of worker nodes directly through a pulumi up
    Labels Dictionary<string, string>
    The list of the labels (key/value pairs) configured on the resource and to be applied to instances in the cluster. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
    Name string
    The name of the cluster, unique within the project and zone.


    Project string
    The ID of the project in which the cluster will exist. If it is not provided, the provider project is used.
    Region string
    The region in which the cluster and associated nodes will be created. Defaults to global.
    VirtualClusterConfig ClusterVirtualClusterConfig
    Allows you to configure a virtual Dataproc on GKE cluster. Structure defined below.
    ClusterConfig ClusterClusterConfigArgs
    Allows you to configure various aspects of the cluster. Structure defined below.
    GracefulDecommissionTimeout string
    The timeout duration which allows graceful decommissioning when you change the number of worker nodes directly through a pulumi up
    Labels map[string]string
    The list of the labels (key/value pairs) configured on the resource and to be applied to instances in the cluster. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
    Name string
    The name of the cluster, unique within the project and zone.


    Project string
    The ID of the project in which the cluster will exist. If it is not provided, the provider project is used.
    Region string
    The region in which the cluster and associated nodes will be created. Defaults to global.
    VirtualClusterConfig ClusterVirtualClusterConfigArgs
    Allows you to configure a virtual Dataproc on GKE cluster. Structure defined below.
    clusterConfig ClusterClusterConfig
    Allows you to configure various aspects of the cluster. Structure defined below.
    gracefulDecommissionTimeout String
    The timeout duration which allows graceful decommissioning when you change the number of worker nodes directly through a pulumi up
    labels Map<String,String>
    The list of the labels (key/value pairs) configured on the resource and to be applied to instances in the cluster. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
    name String
    The name of the cluster, unique within the project and zone.


    project String
    The ID of the project in which the cluster will exist. If it is not provided, the provider project is used.
    region String
    The region in which the cluster and associated nodes will be created. Defaults to global.
    virtualClusterConfig ClusterVirtualClusterConfig
    Allows you to configure a virtual Dataproc on GKE cluster. Structure defined below.
    clusterConfig ClusterClusterConfig
    Allows you to configure various aspects of the cluster. Structure defined below.
    gracefulDecommissionTimeout string
    The timeout duration which allows graceful decommissioning when you change the number of worker nodes directly through a pulumi up
    labels {[key: string]: string}
    The list of the labels (key/value pairs) configured on the resource and to be applied to instances in the cluster. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
    name string
    The name of the cluster, unique within the project and zone.


    project string
    The ID of the project in which the cluster will exist. If it is not provided, the provider project is used.
    region string
    The region in which the cluster and associated nodes will be created. Defaults to global.
    virtualClusterConfig ClusterVirtualClusterConfig
    Allows you to configure a virtual Dataproc on GKE cluster. Structure defined below.
    cluster_config ClusterClusterConfigArgs
    Allows you to configure various aspects of the cluster. Structure defined below.
    graceful_decommission_timeout str
    The timeout duration which allows graceful decommissioning when you change the number of worker nodes directly through a pulumi up
    labels Mapping[str, str]
    The list of the labels (key/value pairs) configured on the resource and to be applied to instances in the cluster. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
    name str
    The name of the cluster, unique within the project and zone.


    project str
    The ID of the project in which the cluster will exist. If it is not provided, the provider project is used.
    region str
    The region in which the cluster and associated nodes will be created. Defaults to global.
    virtual_cluster_config ClusterVirtualClusterConfigArgs
    Allows you to configure a virtual Dataproc on GKE cluster. Structure defined below.
    clusterConfig Property Map
    Allows you to configure various aspects of the cluster. Structure defined below.
    gracefulDecommissionTimeout String
    The timeout duration which allows graceful decommissioning when you change the number of worker nodes directly through a pulumi up
    labels Map<String>
    The list of the labels (key/value pairs) configured on the resource and to be applied to instances in the cluster. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
    name String
    The name of the cluster, unique within the project and zone.


    project String
    The ID of the project in which the cluster will exist. If it is not provided, the provider project is used.
    region String
    The region in which the cluster and associated nodes will be created. Defaults to global.
    virtualClusterConfig Property Map
    Allows you to configure a virtual Dataproc on GKE cluster. Structure defined below.

    Outputs

    All input properties are implicitly available as output properties. Additionally, the Cluster resource produces the following output properties:

    EffectiveLabels Dictionary<string, string>
    The list of labels (key/value pairs) to be applied to instances in the cluster. GCP generates some itself including goog-dataproc-cluster-name which is the name of the cluster.
    Id string
    The provider-assigned unique ID for this managed resource.
    PulumiLabels Dictionary<string, string>
    The combination of labels configured directly on the resource and default labels configured on the provider.
    EffectiveLabels map[string]string
    The list of labels (key/value pairs) to be applied to instances in the cluster. GCP generates some itself including goog-dataproc-cluster-name which is the name of the cluster.
    Id string
    The provider-assigned unique ID for this managed resource.
    PulumiLabels map[string]string
    The combination of labels configured directly on the resource and default labels configured on the provider.
    effectiveLabels Map<String,String>
    The list of labels (key/value pairs) to be applied to instances in the cluster. GCP generates some itself including goog-dataproc-cluster-name which is the name of the cluster.
    id String
    The provider-assigned unique ID for this managed resource.
    pulumiLabels Map<String,String>
    The combination of labels configured directly on the resource and default labels configured on the provider.
    effectiveLabels {[key: string]: string}
    The list of labels (key/value pairs) to be applied to instances in the cluster. GCP generates some itself including goog-dataproc-cluster-name which is the name of the cluster.
    id string
    The provider-assigned unique ID for this managed resource.
    pulumiLabels {[key: string]: string}
    The combination of labels configured directly on the resource and default labels configured on the provider.
    effective_labels Mapping[str, str]
    The list of labels (key/value pairs) to be applied to instances in the cluster. GCP generates some itself including goog-dataproc-cluster-name which is the name of the cluster.
    id str
    The provider-assigned unique ID for this managed resource.
    pulumi_labels Mapping[str, str]
    The combination of labels configured directly on the resource and default labels configured on the provider.
    effectiveLabels Map<String>
    The list of labels (key/value pairs) to be applied to instances in the cluster. GCP generates some itself including goog-dataproc-cluster-name which is the name of the cluster.
    id String
    The provider-assigned unique ID for this managed resource.
    pulumiLabels Map<String>
    The combination of labels configured directly on the resource and default labels configured on the provider.

    Look up Existing Cluster Resource

    Get an existing Cluster resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

    public static get(name: string, id: Input<ID>, state?: ClusterState, opts?: CustomResourceOptions): Cluster
    @staticmethod
    def get(resource_name: str,
            id: str,
            opts: Optional[ResourceOptions] = None,
            cluster_config: Optional[ClusterClusterConfigArgs] = None,
            effective_labels: Optional[Mapping[str, str]] = None,
            graceful_decommission_timeout: Optional[str] = None,
            labels: Optional[Mapping[str, str]] = None,
            name: Optional[str] = None,
            project: Optional[str] = None,
            pulumi_labels: Optional[Mapping[str, str]] = None,
            region: Optional[str] = None,
            virtual_cluster_config: Optional[ClusterVirtualClusterConfigArgs] = None) -> Cluster
    func GetCluster(ctx *Context, name string, id IDInput, state *ClusterState, opts ...ResourceOption) (*Cluster, error)
    public static Cluster Get(string name, Input<string> id, ClusterState? state, CustomResourceOptions? opts = null)
    public static Cluster get(String name, Output<String> id, ClusterState state, CustomResourceOptions options)
    Resource lookup is not supported in YAML
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    resource_name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    The following state arguments are supported:
    ClusterConfig ClusterClusterConfig
    Allows you to configure various aspects of the cluster. Structure defined below.
    EffectiveLabels Dictionary<string, string>
    The list of labels (key/value pairs) to be applied to instances in the cluster. GCP generates some itself including goog-dataproc-cluster-name which is the name of the cluster.
    GracefulDecommissionTimeout string
    The timeout duration which allows graceful decommissioning when you change the number of worker nodes directly through a terraform apply
    Labels Dictionary<string, string>
    The list of the labels (key/value pairs) configured on the resource and to be applied to instances in the cluster. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
    Name string
    The name of the cluster, unique within the project and zone.


    Project string
    The ID of the project in which the cluster will exist. If it is not provided, the provider project is used.
    PulumiLabels Dictionary<string, string>
    The combination of labels configured directly on the resource and default labels configured on the provider.
    Region string
    The region in which the cluster and associated nodes will be created in. Defaults to global.
    VirtualClusterConfig ClusterVirtualClusterConfig
    Allows you to configure a virtual Dataproc on GKE cluster. Structure defined below.
    ClusterConfig ClusterClusterConfigArgs
    Allows you to configure various aspects of the cluster. Structure defined below.
    EffectiveLabels map[string]string
    The list of labels (key/value pairs) to be applied to instances in the cluster. GCP generates some itself including goog-dataproc-cluster-name which is the name of the cluster.
    GracefulDecommissionTimeout string
    The timeout duration which allows graceful decommissioning when you change the number of worker nodes directly through a terraform apply
    Labels map[string]string
    The list of the labels (key/value pairs) configured on the resource and to be applied to instances in the cluster. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
    Name string
    The name of the cluster, unique within the project and zone.


    Project string
    The ID of the project in which the cluster will exist. If it is not provided, the provider project is used.
    PulumiLabels map[string]string
    The combination of labels configured directly on the resource and default labels configured on the provider.
    Region string
    The region in which the cluster and associated nodes will be created in. Defaults to global.
    VirtualClusterConfig ClusterVirtualClusterConfigArgs
    Allows you to configure a virtual Dataproc on GKE cluster. Structure defined below.
    clusterConfig ClusterClusterConfig
    Allows you to configure various aspects of the cluster. Structure defined below.
    effectiveLabels Map<String,String>
    The list of labels (key/value pairs) to be applied to instances in the cluster. GCP generates some itself including goog-dataproc-cluster-name which is the name of the cluster.
    gracefulDecommissionTimeout String
    The timeout duration which allows graceful decommissioning when you change the number of worker nodes directly through a terraform apply
    labels Map<String,String>
    The list of the labels (key/value pairs) configured on the resource and to be applied to instances in the cluster. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
    name String
    The name of the cluster, unique within the project and zone.


    project String
    The ID of the project in which the cluster will exist. If it is not provided, the provider project is used.
    pulumiLabels Map<String,String>
    The combination of labels configured directly on the resource and default labels configured on the provider.
    region String
    The region in which the cluster and associated nodes will be created in. Defaults to global.
    virtualClusterConfig ClusterVirtualClusterConfig
    Allows you to configure a virtual Dataproc on GKE cluster. Structure defined below.
    clusterConfig ClusterClusterConfig
    Allows you to configure various aspects of the cluster. Structure defined below.
    effectiveLabels {[key: string]: string}
    The list of labels (key/value pairs) to be applied to instances in the cluster. GCP generates some itself including goog-dataproc-cluster-name which is the name of the cluster.
    gracefulDecommissionTimeout string
    The timeout duration which allows graceful decommissioning when you change the number of worker nodes directly through a terraform apply
    labels {[key: string]: string}
    The list of the labels (key/value pairs) configured on the resource and to be applied to instances in the cluster. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
    name string
    The name of the cluster, unique within the project and zone.


    project string
    The ID of the project in which the cluster will exist. If it is not provided, the provider project is used.
    pulumiLabels {[key: string]: string}
    The combination of labels configured directly on the resource and default labels configured on the provider.
    region string
    The region in which the cluster and associated nodes will be created in. Defaults to global.
    virtualClusterConfig ClusterVirtualClusterConfig
    Allows you to configure a virtual Dataproc on GKE cluster. Structure defined below.
    cluster_config ClusterClusterConfigArgs
    Allows you to configure various aspects of the cluster. Structure defined below.
    effective_labels Mapping[str, str]
    The list of labels (key/value pairs) to be applied to instances in the cluster. GCP generates some itself including goog-dataproc-cluster-name which is the name of the cluster.
    graceful_decommission_timeout str
    The timeout duration which allows graceful decommissioning when you change the number of worker nodes directly through a terraform apply
    labels Mapping[str, str]
    The list of the labels (key/value pairs) configured on the resource and to be applied to instances in the cluster. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
    name str
    The name of the cluster, unique within the project and zone.


    project str
    The ID of the project in which the cluster will exist. If it is not provided, the provider project is used.
    pulumi_labels Mapping[str, str]
    The combination of labels configured directly on the resource and default labels configured on the provider.
    region str
    The region in which the cluster and associated nodes will be created in. Defaults to global.
    virtual_cluster_config ClusterVirtualClusterConfigArgs
    Allows you to configure a virtual Dataproc on GKE cluster. Structure defined below.
    clusterConfig Property Map
    Allows you to configure various aspects of the cluster. Structure defined below.
    effectiveLabels Map<String>
    The list of labels (key/value pairs) to be applied to instances in the cluster. GCP generates some itself including goog-dataproc-cluster-name which is the name of the cluster.
    gracefulDecommissionTimeout String
    The timeout duration which allows graceful decommissioning when you change the number of worker nodes directly through a terraform apply
    labels Map<String>
    The list of the labels (key/value pairs) configured on the resource and to be applied to instances in the cluster. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
    name String
    The name of the cluster, unique within the project and zone.


    project String
    The ID of the project in which the cluster will exist. If it is not provided, the provider project is used.
    pulumiLabels Map<String>
    The combination of labels configured directly on the resource and default labels configured on the provider.
    region String
    The region in which the cluster and associated nodes will be created in. Defaults to global.
    virtualClusterConfig Property Map
    Allows you to configure a virtual Dataproc on GKE cluster. Structure defined below.

    Supporting Types

    ClusterClusterConfig, ClusterClusterConfigArgs

    AutoscalingConfig ClusterClusterConfigAutoscalingConfig
    The autoscaling policy config associated with the cluster. Note that once set, if autoscaling_config is the only field set in cluster_config, it can only be removed by setting policy_uri = "", rather than removing the whole block. Structure defined below.
    AuxiliaryNodeGroups List<ClusterClusterConfigAuxiliaryNodeGroup>
    A Dataproc NodeGroup resource is a group of Dataproc cluster nodes that execute an assigned role. Structure defined below.
    Bucket string
    The name of the cloud storage bucket ultimately used to house the staging data for the cluster. If staging_bucket is specified, it will contain this value, otherwise it will be the auto generated name.
    DataprocMetricConfig ClusterClusterConfigDataprocMetricConfig
    The Dataproc metrics configuration, which specifies the metric sources to collect from the cluster. Structure defined below.
    EncryptionConfig ClusterClusterConfigEncryptionConfig
    The Customer managed encryption keys settings for the cluster. Structure defined below.
    EndpointConfig ClusterClusterConfigEndpointConfig
    The config settings for port access on the cluster. Structure defined below.
    GceClusterConfig ClusterClusterConfigGceClusterConfig
    Common config settings for resources of Google Compute Engine cluster instances, applicable to all instances in the cluster. Structure defined below.
    InitializationActions List<ClusterClusterConfigInitializationAction>
    Commands to execute on each node after config is completed. You can specify multiple versions of these. Structure defined below.
    LifecycleConfig ClusterClusterConfigLifecycleConfig
    The settings for auto deletion cluster schedule. Structure defined below.
    MasterConfig ClusterClusterConfigMasterConfig
    The Google Compute Engine config settings for the master instances in a cluster. Structure defined below.
    MetastoreConfig ClusterClusterConfigMetastoreConfig
    The config setting for metastore service with the cluster. Structure defined below.


    PreemptibleWorkerConfig ClusterClusterConfigPreemptibleWorkerConfig
    The Google Compute Engine config settings for the additional instances in a cluster. Structure defined below.

    • NOTE : preemptible_worker_config is an alias for the api's secondaryWorkerConfig. The name doesn't necessarily mean it is preemptible and is named as such for legacy/compatibility reasons.
    SecurityConfig ClusterClusterConfigSecurityConfig
    Security related configuration. Structure defined below.
    SoftwareConfig ClusterClusterConfigSoftwareConfig
    The config settings for software inside the cluster. Structure defined below.
    StagingBucket string
    The Cloud Storage staging bucket used to stage files, such as Hadoop jars, between client machines and the cluster. Note: If you don't explicitly specify a staging_bucket then GCP will auto create / assign one for you. However, you are not guaranteed an auto generated bucket which is solely dedicated to your cluster; it may be shared with other clusters in the same region/zone also choosing to use the auto generation option.
    TempBucket string
    The Cloud Storage temp bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. Note: If you don't explicitly specify a temp_bucket then GCP will auto create / assign one for you.
    WorkerConfig ClusterClusterConfigWorkerConfig
    The Google Compute Engine config settings for the worker instances in a cluster. Structure defined below.
    AutoscalingConfig ClusterClusterConfigAutoscalingConfig
    The autoscaling policy config associated with the cluster. Note that once set, if autoscaling_config is the only field set in cluster_config, it can only be removed by setting policy_uri = "", rather than removing the whole block. Structure defined below.
    AuxiliaryNodeGroups []ClusterClusterConfigAuxiliaryNodeGroup
    A Dataproc NodeGroup resource is a group of Dataproc cluster nodes that execute an assigned role. Structure defined below.
    Bucket string
    The name of the cloud storage bucket ultimately used to house the staging data for the cluster. If staging_bucket is specified, it will contain this value, otherwise it will be the auto generated name.
    DataprocMetricConfig ClusterClusterConfigDataprocMetricConfig
    The Dataproc metrics configuration, which specifies the metric sources to collect from the cluster. Structure defined below.
    EncryptionConfig ClusterClusterConfigEncryptionConfig
    The Customer managed encryption keys settings for the cluster. Structure defined below.
    EndpointConfig ClusterClusterConfigEndpointConfig
    The config settings for port access on the cluster. Structure defined below.
    GceClusterConfig ClusterClusterConfigGceClusterConfig
    Common config settings for resources of Google Compute Engine cluster instances, applicable to all instances in the cluster. Structure defined below.
    InitializationActions []ClusterClusterConfigInitializationAction
    Commands to execute on each node after config is completed. You can specify multiple versions of these. Structure defined below.
    LifecycleConfig ClusterClusterConfigLifecycleConfig
    The settings for auto deletion cluster schedule. Structure defined below.
    MasterConfig ClusterClusterConfigMasterConfig
    The Google Compute Engine config settings for the master instances in a cluster. Structure defined below.
    MetastoreConfig ClusterClusterConfigMetastoreConfig
    The config setting for metastore service with the cluster. Structure defined below.


    PreemptibleWorkerConfig ClusterClusterConfigPreemptibleWorkerConfig
    The Google Compute Engine config settings for the additional instances in a cluster. Structure defined below.

    • NOTE : preemptible_worker_config is an alias for the api's secondaryWorkerConfig. The name doesn't necessarily mean it is preemptible and is named as such for legacy/compatibility reasons.
    SecurityConfig ClusterClusterConfigSecurityConfig
    Security related configuration. Structure defined below.
    SoftwareConfig ClusterClusterConfigSoftwareConfig
    The config settings for software inside the cluster. Structure defined below.
    StagingBucket string
    The Cloud Storage staging bucket used to stage files, such as Hadoop jars, between client machines and the cluster. Note: If you don't explicitly specify a staging_bucket then GCP will auto create / assign one for you. However, you are not guaranteed an auto generated bucket which is solely dedicated to your cluster; it may be shared with other clusters in the same region/zone also choosing to use the auto generation option.
    TempBucket string
    The Cloud Storage temp bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. Note: If you don't explicitly specify a temp_bucket then GCP will auto create / assign one for you.
    WorkerConfig ClusterClusterConfigWorkerConfig
    The Google Compute Engine config settings for the worker instances in a cluster. Structure defined below.
    autoscalingConfig ClusterClusterConfigAutoscalingConfig
    The autoscaling policy config associated with the cluster. Note that once set, if autoscaling_config is the only field set in cluster_config, it can only be removed by setting policy_uri = "", rather than removing the whole block. Structure defined below.
    auxiliaryNodeGroups List<ClusterClusterConfigAuxiliaryNodeGroup>
    A Dataproc NodeGroup resource is a group of Dataproc cluster nodes that execute an assigned role. Structure defined below.
    bucket String
    The name of the cloud storage bucket ultimately used to house the staging data for the cluster. If staging_bucket is specified, it will contain this value, otherwise it will be the auto generated name.
    dataprocMetricConfig ClusterClusterConfigDataprocMetricConfig
    The Dataproc metrics configuration, which specifies the metric sources to collect from the cluster. Structure defined below.
    encryptionConfig ClusterClusterConfigEncryptionConfig
    The Customer managed encryption keys settings for the cluster. Structure defined below.
    endpointConfig ClusterClusterConfigEndpointConfig
    The config settings for port access on the cluster. Structure defined below.
    gceClusterConfig ClusterClusterConfigGceClusterConfig
    Common config settings for resources of Google Compute Engine cluster instances, applicable to all instances in the cluster. Structure defined below.
    initializationActions List<ClusterClusterConfigInitializationAction>
    Commands to execute on each node after config is completed. You can specify multiple versions of these. Structure defined below.
    lifecycleConfig ClusterClusterConfigLifecycleConfig
    The settings for auto deletion cluster schedule. Structure defined below.
    masterConfig ClusterClusterConfigMasterConfig
    The Google Compute Engine config settings for the master instances in a cluster. Structure defined below.
    metastoreConfig ClusterClusterConfigMetastoreConfig
    The config setting for metastore service with the cluster. Structure defined below.


    preemptibleWorkerConfig ClusterClusterConfigPreemptibleWorkerConfig
    The Google Compute Engine config settings for the additional instances in a cluster. Structure defined below.

    • NOTE : preemptible_worker_config is an alias for the api's secondaryWorkerConfig. The name doesn't necessarily mean it is preemptible and is named as such for legacy/compatibility reasons.
    securityConfig ClusterClusterConfigSecurityConfig
    Security related configuration. Structure defined below.
    softwareConfig ClusterClusterConfigSoftwareConfig
    The config settings for software inside the cluster. Structure defined below.
    stagingBucket String
    The Cloud Storage staging bucket used to stage files, such as Hadoop jars, between client machines and the cluster. Note: If you don't explicitly specify a staging_bucket then GCP will auto create / assign one for you. However, you are not guaranteed an auto generated bucket which is solely dedicated to your cluster; it may be shared with other clusters in the same region/zone also choosing to use the auto generation option.
    tempBucket String
    The Cloud Storage temp bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. Note: If you don't explicitly specify a temp_bucket then GCP will auto create / assign one for you.
    workerConfig ClusterClusterConfigWorkerConfig
    The Google Compute Engine config settings for the worker instances in a cluster. Structure defined below.
    autoscalingConfig ClusterClusterConfigAutoscalingConfig
    The autoscaling policy config associated with the cluster. Note that once set, if autoscaling_config is the only field set in cluster_config, it can only be removed by setting policy_uri = "", rather than removing the whole block. Structure defined below.
    auxiliaryNodeGroups ClusterClusterConfigAuxiliaryNodeGroup[]
    A Dataproc NodeGroup resource is a group of Dataproc cluster nodes that execute an assigned role. Structure defined below.
    bucket string
    The name of the cloud storage bucket ultimately used to house the staging data for the cluster. If staging_bucket is specified, it will contain this value, otherwise it will be the auto generated name.
    dataprocMetricConfig ClusterClusterConfigDataprocMetricConfig
    The Dataproc metrics configuration, which specifies the metric sources to collect from the cluster. Structure defined below.
    encryptionConfig ClusterClusterConfigEncryptionConfig
    The Customer managed encryption keys settings for the cluster. Structure defined below.
    endpointConfig ClusterClusterConfigEndpointConfig
    The config settings for port access on the cluster. Structure defined below.
    gceClusterConfig ClusterClusterConfigGceClusterConfig
    Common config settings for resources of Google Compute Engine cluster instances, applicable to all instances in the cluster. Structure defined below.
    initializationActions ClusterClusterConfigInitializationAction[]
    Commands to execute on each node after config is completed. You can specify multiple versions of these. Structure defined below.
    lifecycleConfig ClusterClusterConfigLifecycleConfig
    The settings for auto deletion cluster schedule. Structure defined below.
    masterConfig ClusterClusterConfigMasterConfig
    The Google Compute Engine config settings for the master instances in a cluster. Structure defined below.
    metastoreConfig ClusterClusterConfigMetastoreConfig
    The config setting for metastore service with the cluster. Structure defined below.


    preemptibleWorkerConfig ClusterClusterConfigPreemptibleWorkerConfig
    The Google Compute Engine config settings for the additional instances in a cluster. Structure defined below.

    • NOTE : preemptible_worker_config is an alias for the api's secondaryWorkerConfig. The name doesn't necessarily mean it is preemptible and is named as such for legacy/compatibility reasons.
    securityConfig ClusterClusterConfigSecurityConfig
    Security related configuration. Structure defined below.
    softwareConfig ClusterClusterConfigSoftwareConfig
    The config settings for software inside the cluster. Structure defined below.
    stagingBucket string
    The Cloud Storage staging bucket used to stage files, such as Hadoop jars, between client machines and the cluster. Note: If you don't explicitly specify a staging_bucket then GCP will auto create / assign one for you. However, you are not guaranteed an auto generated bucket which is solely dedicated to your cluster; it may be shared with other clusters in the same region/zone also choosing to use the auto generation option.
    tempBucket string
    The Cloud Storage temp bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. Note: If you don't explicitly specify a temp_bucket then GCP will auto create / assign one for you.
    workerConfig ClusterClusterConfigWorkerConfig
    The Google Compute Engine config settings for the worker instances in a cluster. Structure defined below.
    autoscaling_config ClusterClusterConfigAutoscalingConfig
    The autoscaling policy config associated with the cluster. Note that once set, if autoscaling_config is the only field set in cluster_config, it can only be removed by setting policy_uri = "", rather than removing the whole block. Structure defined below.
    auxiliary_node_groups Sequence[ClusterClusterConfigAuxiliaryNodeGroup]
    A Dataproc NodeGroup resource is a group of Dataproc cluster nodes that execute an assigned role. Structure defined below.
    bucket str
    The name of the cloud storage bucket ultimately used to house the staging data for the cluster. If staging_bucket is specified, it will contain this value, otherwise it will be the auto generated name.
    dataproc_metric_config ClusterClusterConfigDataprocMetricConfig
    The Dataproc metrics configuration, which specifies the metric sources to collect from the cluster. Structure defined below.
    encryption_config ClusterClusterConfigEncryptionConfig
    The Customer managed encryption keys settings for the cluster. Structure defined below.
    endpoint_config ClusterClusterConfigEndpointConfig
    The config settings for port access on the cluster. Structure defined below.
    gce_cluster_config ClusterClusterConfigGceClusterConfig
    Common config settings for resources of Google Compute Engine cluster instances, applicable to all instances in the cluster. Structure defined below.
    initialization_actions Sequence[ClusterClusterConfigInitializationAction]
    Commands to execute on each node after config is completed. You can specify multiple versions of these. Structure defined below.
    lifecycle_config ClusterClusterConfigLifecycleConfig
    The settings for auto deletion cluster schedule. Structure defined below.
    master_config ClusterClusterConfigMasterConfig
    The Google Compute Engine config settings for the master instances in a cluster. Structure defined below.
    metastore_config ClusterClusterConfigMetastoreConfig
    The config setting for metastore service with the cluster. Structure defined below.


    preemptible_worker_config ClusterClusterConfigPreemptibleWorkerConfig
    The Google Compute Engine config settings for the additional instances in a cluster. Structure defined below.

    • NOTE : preemptible_worker_config is an alias for the api's secondaryWorkerConfig. The name doesn't necessarily mean it is preemptible and is named as such for legacy/compatibility reasons.
    security_config ClusterClusterConfigSecurityConfig
    Security related configuration. Structure defined below.
    software_config ClusterClusterConfigSoftwareConfig
    The config settings for software inside the cluster. Structure defined below.
    staging_bucket str
    The Cloud Storage staging bucket used to stage files, such as Hadoop jars, between client machines and the cluster. Note: If you don't explicitly specify a staging_bucket then GCP will auto create / assign one for you. However, you are not guaranteed an auto generated bucket which is solely dedicated to your cluster; it may be shared with other clusters in the same region/zone also choosing to use the auto generation option.
    temp_bucket str
    The Cloud Storage temp bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. Note: If you don't explicitly specify a temp_bucket then GCP will auto create / assign one for you.
    worker_config ClusterClusterConfigWorkerConfig
    The Google Compute Engine config settings for the worker instances in a cluster. Structure defined below.
    autoscalingConfig Property Map
    The autoscaling policy config associated with the cluster. Note that once set, if autoscaling_config is the only field set in cluster_config, it can only be removed by setting policy_uri = "", rather than removing the whole block. Structure defined below.
    auxiliaryNodeGroups List<Property Map>
    A Dataproc NodeGroup resource is a group of Dataproc cluster nodes that execute an assigned role. Structure defined below.
    bucket String
    The name of the cloud storage bucket ultimately used to house the staging data for the cluster. If staging_bucket is specified, it will contain this value, otherwise it will be the auto generated name.
    dataprocMetricConfig Property Map
    The Dataproc metrics configuration for the cluster, specifying which metric sources to collect. Structure defined below.
    encryptionConfig Property Map
    The Customer managed encryption keys settings for the cluster. Structure defined below.
    endpointConfig Property Map
    The config settings for port access on the cluster. Structure defined below.
    gceClusterConfig Property Map
    Common config settings for resources of Google Compute Engine cluster instances, applicable to all instances in the cluster. Structure defined below.
    initializationActions List<Property Map>
    Commands to execute on each node after config is completed. You can specify multiple versions of these. Structure defined below.
    lifecycleConfig Property Map
    The settings for auto deletion cluster schedule. Structure defined below.
    masterConfig Property Map
    The Google Compute Engine config settings for the master instances in a cluster. Structure defined below.
    metastoreConfig Property Map
    The config setting for metastore service with the cluster. Structure defined below.


    preemptibleWorkerConfig Property Map
    The Google Compute Engine config settings for the additional instances in a cluster. Structure defined below.

    • NOTE : preemptible_worker_config is an alias for the API's secondaryWorkerConfig. The name doesn't necessarily mean it is preemptible and is named as such for legacy/compatibility reasons.
    securityConfig Property Map
    Security related configuration. Structure defined below.
    softwareConfig Property Map
    The config settings for software inside the cluster. Structure defined below.
    stagingBucket String
    The Cloud Storage staging bucket used to stage files, such as Hadoop jars, between client machines and the cluster. Note: If you don't explicitly specify a staging_bucket then GCP will auto create / assign one for you. However, you are not guaranteed an auto generated bucket which is solely dedicated to your cluster; it may be shared with other clusters in the same region/zone also choosing to use the auto generation option.
    tempBucket String
    The Cloud Storage temp bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. Note: If you don't explicitly specify a temp_bucket then GCP will auto create / assign one for you.
    workerConfig Property Map
    The Google Compute Engine config settings for the worker instances in a cluster. Structure defined below.

    ClusterClusterConfigAutoscalingConfig, ClusterClusterConfigAutoscalingConfigArgs

    PolicyUri string

    The autoscaling policy used by the cluster.

    Only resource names including projectid and location (region) are valid. Examples:

    https://www.googleapis.com/compute/v1/projects/[projectId]/locations/[dataproc_region]/autoscalingPolicies/[policy_id] projects/[projectId]/locations/[dataproc_region]/autoscalingPolicies/[policy_id] Note that the policy must be in the same project and Cloud Dataproc region.


    PolicyUri string

    The autoscaling policy used by the cluster.

    Only resource names including projectid and location (region) are valid. Examples:

    https://www.googleapis.com/compute/v1/projects/[projectId]/locations/[dataproc_region]/autoscalingPolicies/[policy_id] projects/[projectId]/locations/[dataproc_region]/autoscalingPolicies/[policy_id] Note that the policy must be in the same project and Cloud Dataproc region.


    policyUri String

    The autoscaling policy used by the cluster.

    Only resource names including projectid and location (region) are valid. Examples:

    https://www.googleapis.com/compute/v1/projects/[projectId]/locations/[dataproc_region]/autoscalingPolicies/[policy_id] projects/[projectId]/locations/[dataproc_region]/autoscalingPolicies/[policy_id] Note that the policy must be in the same project and Cloud Dataproc region.


    policyUri string

    The autoscaling policy used by the cluster.

    Only resource names including projectid and location (region) are valid. Examples:

    https://www.googleapis.com/compute/v1/projects/[projectId]/locations/[dataproc_region]/autoscalingPolicies/[policy_id] projects/[projectId]/locations/[dataproc_region]/autoscalingPolicies/[policy_id] Note that the policy must be in the same project and Cloud Dataproc region.


    policy_uri str

    The autoscaling policy used by the cluster.

    Only resource names including projectid and location (region) are valid. Examples:

    https://www.googleapis.com/compute/v1/projects/[projectId]/locations/[dataproc_region]/autoscalingPolicies/[policy_id] projects/[projectId]/locations/[dataproc_region]/autoscalingPolicies/[policy_id] Note that the policy must be in the same project and Cloud Dataproc region.


    policyUri String

    The autoscaling policy used by the cluster.

    Only resource names including projectid and location (region) are valid. Examples:

    https://www.googleapis.com/compute/v1/projects/[projectId]/locations/[dataproc_region]/autoscalingPolicies/[policy_id] projects/[projectId]/locations/[dataproc_region]/autoscalingPolicies/[policy_id] Note that the policy must be in the same project and Cloud Dataproc region.


    ClusterClusterConfigAuxiliaryNodeGroup, ClusterClusterConfigAuxiliaryNodeGroupArgs

    NodeGroups List<ClusterClusterConfigAuxiliaryNodeGroupNodeGroup>
    Node group configuration.
    NodeGroupId string
    A node group ID. Generated if not specified. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of from 3 to 33 characters.
    NodeGroups []ClusterClusterConfigAuxiliaryNodeGroupNodeGroup
    Node group configuration.
    NodeGroupId string
    A node group ID. Generated if not specified. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of from 3 to 33 characters.
    nodeGroups List<ClusterClusterConfigAuxiliaryNodeGroupNodeGroup>
    Node group configuration.
    nodeGroupId String
    A node group ID. Generated if not specified. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of from 3 to 33 characters.
    nodeGroups ClusterClusterConfigAuxiliaryNodeGroupNodeGroup[]
    Node group configuration.
    nodeGroupId string
    A node group ID. Generated if not specified. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of from 3 to 33 characters.
    node_groups Sequence[ClusterClusterConfigAuxiliaryNodeGroupNodeGroup]
    Node group configuration.
    node_group_id str
    A node group ID. Generated if not specified. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of from 3 to 33 characters.
    nodeGroups List<Property Map>
    Node group configuration.
    nodeGroupId String
    A node group ID. Generated if not specified. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of from 3 to 33 characters.

    ClusterClusterConfigAuxiliaryNodeGroupNodeGroup, ClusterClusterConfigAuxiliaryNodeGroupNodeGroupArgs

    Roles List<string>
    Node group roles. One of "DRIVER".
    Name string
    The name of the cluster, unique within the project and zone.


    NodeGroupConfig ClusterClusterConfigAuxiliaryNodeGroupNodeGroupNodeGroupConfig
    The node group instance group configuration.
    Roles []string
    Node group roles. One of "DRIVER".
    Name string
    The name of the cluster, unique within the project and zone.


    NodeGroupConfig ClusterClusterConfigAuxiliaryNodeGroupNodeGroupNodeGroupConfig
    The node group instance group configuration.
    roles List<String>
    Node group roles. One of "DRIVER".
    name String
    The name of the cluster, unique within the project and zone.


    nodeGroupConfig ClusterClusterConfigAuxiliaryNodeGroupNodeGroupNodeGroupConfig
    The node group instance group configuration.
    roles string[]
    Node group roles. One of "DRIVER".
    name string
    The name of the cluster, unique within the project and zone.


    nodeGroupConfig ClusterClusterConfigAuxiliaryNodeGroupNodeGroupNodeGroupConfig
    The node group instance group configuration.
    roles Sequence[str]
    Node group roles. One of "DRIVER".
    name str
    The name of the cluster, unique within the project and zone.


    node_group_config ClusterClusterConfigAuxiliaryNodeGroupNodeGroupNodeGroupConfig
    The node group instance group configuration.
    roles List<String>
    Node group roles. One of "DRIVER".
    name String
    The name of the cluster, unique within the project and zone.


    nodeGroupConfig Property Map
    The node group instance group configuration.

    ClusterClusterConfigAuxiliaryNodeGroupNodeGroupNodeGroupConfig, ClusterClusterConfigAuxiliaryNodeGroupNodeGroupNodeGroupConfigArgs

    Accelerators List<ClusterClusterConfigAuxiliaryNodeGroupNodeGroupNodeGroupConfigAccelerator>
    The Compute Engine accelerator (GPU) configuration for these instances. Can be specified multiple times.
    DiskConfig ClusterClusterConfigAuxiliaryNodeGroupNodeGroupNodeGroupConfigDiskConfig
    Disk Config
    InstanceNames List<string>
    List of auxiliary node group instance names which have been assigned to the cluster.
    MachineType string
    The name of a Google Compute Engine machine type to create for the node group. If not specified, GCP will default to a predetermined computed value (currently n1-standard-4).
    MinCpuPlatform string
    The name of a minimum generation of CPU family for the node group. If not specified, GCP will default to a predetermined computed value for each zone. See the guide for details about which CPU families are available (and defaulted) for each zone.
    NumInstances int
    Specifies the number of master nodes to create. Please set a number greater than 0. Node Group must have at least 1 instance.
    Accelerators []ClusterClusterConfigAuxiliaryNodeGroupNodeGroupNodeGroupConfigAccelerator
    The Compute Engine accelerator (GPU) configuration for these instances. Can be specified multiple times.
    DiskConfig ClusterClusterConfigAuxiliaryNodeGroupNodeGroupNodeGroupConfigDiskConfig
    Disk Config
    InstanceNames []string
    List of auxiliary node group instance names which have been assigned to the cluster.
    MachineType string
    The name of a Google Compute Engine machine type to create for the node group. If not specified, GCP will default to a predetermined computed value (currently n1-standard-4).
    MinCpuPlatform string
    The name of a minimum generation of CPU family for the node group. If not specified, GCP will default to a predetermined computed value for each zone. See the guide for details about which CPU families are available (and defaulted) for each zone.
    NumInstances int
    Specifies the number of master nodes to create. Please set a number greater than 0. Node Group must have at least 1 instance.
    accelerators List<ClusterClusterConfigAuxiliaryNodeGroupNodeGroupNodeGroupConfigAccelerator>
    The Compute Engine accelerator (GPU) configuration for these instances. Can be specified multiple times.
    diskConfig ClusterClusterConfigAuxiliaryNodeGroupNodeGroupNodeGroupConfigDiskConfig
    Disk Config
    instanceNames List<String>
    List of auxiliary node group instance names which have been assigned to the cluster.
    machineType String
    The name of a Google Compute Engine machine type to create for the node group. If not specified, GCP will default to a predetermined computed value (currently n1-standard-4).
    minCpuPlatform String
    The name of a minimum generation of CPU family for the node group. If not specified, GCP will default to a predetermined computed value for each zone. See the guide for details about which CPU families are available (and defaulted) for each zone.
    numInstances Integer
    Specifies the number of master nodes to create. Please set a number greater than 0. Node Group must have at least 1 instance.
    accelerators ClusterClusterConfigAuxiliaryNodeGroupNodeGroupNodeGroupConfigAccelerator[]
    The Compute Engine accelerator (GPU) configuration for these instances. Can be specified multiple times.
    diskConfig ClusterClusterConfigAuxiliaryNodeGroupNodeGroupNodeGroupConfigDiskConfig
    Disk Config
    instanceNames string[]
    List of auxiliary node group instance names which have been assigned to the cluster.
    machineType string
    The name of a Google Compute Engine machine type to create for the node group. If not specified, GCP will default to a predetermined computed value (currently n1-standard-4).
    minCpuPlatform string
    The name of a minimum generation of CPU family for the node group. If not specified, GCP will default to a predetermined computed value for each zone. See the guide for details about which CPU families are available (and defaulted) for each zone.
    numInstances number
    Specifies the number of master nodes to create. Please set a number greater than 0. Node Group must have at least 1 instance.
    accelerators Sequence[ClusterClusterConfigAuxiliaryNodeGroupNodeGroupNodeGroupConfigAccelerator]
    The Compute Engine accelerator (GPU) configuration for these instances. Can be specified multiple times.
    disk_config ClusterClusterConfigAuxiliaryNodeGroupNodeGroupNodeGroupConfigDiskConfig
    Disk Config
    instance_names Sequence[str]
    List of auxiliary node group instance names which have been assigned to the cluster.
    machine_type str
    The name of a Google Compute Engine machine type to create for the node group. If not specified, GCP will default to a predetermined computed value (currently n1-standard-4).
    min_cpu_platform str
    The name of a minimum generation of CPU family for the node group. If not specified, GCP will default to a predetermined computed value for each zone. See the guide for details about which CPU families are available (and defaulted) for each zone.
    num_instances int
    Specifies the number of master nodes to create. Please set a number greater than 0. Node Group must have at least 1 instance.
    accelerators List<Property Map>
    The Compute Engine accelerator (GPU) configuration for these instances. Can be specified multiple times.
    diskConfig Property Map
    Disk Config
    instanceNames List<String>
    List of auxiliary node group instance names which have been assigned to the cluster.
    machineType String
    The name of a Google Compute Engine machine type to create for the node group. If not specified, GCP will default to a predetermined computed value (currently n1-standard-4).
    minCpuPlatform String
    The name of a minimum generation of CPU family for the node group. If not specified, GCP will default to a predetermined computed value for each zone. See the guide for details about which CPU families are available (and defaulted) for each zone.
    numInstances Number
    Specifies the number of master nodes to create. Please set a number greater than 0. Node Group must have at least 1 instance.

    ClusterClusterConfigAuxiliaryNodeGroupNodeGroupNodeGroupConfigAccelerator, ClusterClusterConfigAuxiliaryNodeGroupNodeGroupNodeGroupConfigAcceleratorArgs

    AcceleratorCount int
    The number of the accelerator cards of this type exposed to this instance. Often restricted to one of 1, 2, 4, or 8.


    AcceleratorType string
    The short name of the accelerator type to expose to this instance. For example, nvidia-tesla-k80.
    AcceleratorCount int
    The number of the accelerator cards of this type exposed to this instance. Often restricted to one of 1, 2, 4, or 8.


    AcceleratorType string
    The short name of the accelerator type to expose to this instance. For example, nvidia-tesla-k80.
    acceleratorCount Integer
    The number of the accelerator cards of this type exposed to this instance. Often restricted to one of 1, 2, 4, or 8.


    acceleratorType String
    The short name of the accelerator type to expose to this instance. For example, nvidia-tesla-k80.
    acceleratorCount number
    The number of the accelerator cards of this type exposed to this instance. Often restricted to one of 1, 2, 4, or 8.


    acceleratorType string
    The short name of the accelerator type to expose to this instance. For example, nvidia-tesla-k80.
    accelerator_count int
    The number of the accelerator cards of this type exposed to this instance. Often restricted to one of 1, 2, 4, or 8.


    accelerator_type str
    The short name of the accelerator type to expose to this instance. For example, nvidia-tesla-k80.
    acceleratorCount Number
    The number of the accelerator cards of this type exposed to this instance. Often restricted to one of 1, 2, 4, or 8.


    acceleratorType String
    The short name of the accelerator type to expose to this instance. For example, nvidia-tesla-k80.

    ClusterClusterConfigAuxiliaryNodeGroupNodeGroupNodeGroupConfigDiskConfig, ClusterClusterConfigAuxiliaryNodeGroupNodeGroupNodeGroupConfigDiskConfigArgs

    BootDiskSizeGb int
    Size of the primary disk attached to each node, specified in GB. The primary disk contains the boot volume and system libraries, and the smallest allowed disk size is 10GB. GCP will default to a predetermined computed value if not set (currently 500GB). Note: If SSDs are not attached, it also contains the HDFS data blocks and Hadoop working directories.
    BootDiskType string
    The disk type of the primary disk attached to each node. One of "pd-ssd" or "pd-standard". Defaults to "pd-standard".
    NumLocalSsds int
    The amount of local SSD disks that will be attached to each master cluster node. Defaults to 0.
    BootDiskSizeGb int
    Size of the primary disk attached to each node, specified in GB. The primary disk contains the boot volume and system libraries, and the smallest allowed disk size is 10GB. GCP will default to a predetermined computed value if not set (currently 500GB). Note: If SSDs are not attached, it also contains the HDFS data blocks and Hadoop working directories.
    BootDiskType string
    The disk type of the primary disk attached to each node. One of "pd-ssd" or "pd-standard". Defaults to "pd-standard".
    NumLocalSsds int
    The amount of local SSD disks that will be attached to each master cluster node. Defaults to 0.
    bootDiskSizeGb Integer
    Size of the primary disk attached to each node, specified in GB. The primary disk contains the boot volume and system libraries, and the smallest allowed disk size is 10GB. GCP will default to a predetermined computed value if not set (currently 500GB). Note: If SSDs are not attached, it also contains the HDFS data blocks and Hadoop working directories.
    bootDiskType String
    The disk type of the primary disk attached to each node. One of "pd-ssd" or "pd-standard". Defaults to "pd-standard".
    numLocalSsds Integer
    The amount of local SSD disks that will be attached to each master cluster node. Defaults to 0.
    bootDiskSizeGb number
    Size of the primary disk attached to each node, specified in GB. The primary disk contains the boot volume and system libraries, and the smallest allowed disk size is 10GB. GCP will default to a predetermined computed value if not set (currently 500GB). Note: If SSDs are not attached, it also contains the HDFS data blocks and Hadoop working directories.
    bootDiskType string
    The disk type of the primary disk attached to each node. One of "pd-ssd" or "pd-standard". Defaults to "pd-standard".
    numLocalSsds number
    The amount of local SSD disks that will be attached to each master cluster node. Defaults to 0.
    boot_disk_size_gb int
    Size of the primary disk attached to each node, specified in GB. The primary disk contains the boot volume and system libraries, and the smallest allowed disk size is 10GB. GCP will default to a predetermined computed value if not set (currently 500GB). Note: If SSDs are not attached, it also contains the HDFS data blocks and Hadoop working directories.
    boot_disk_type str
    The disk type of the primary disk attached to each node. One of "pd-ssd" or "pd-standard". Defaults to "pd-standard".
    num_local_ssds int
    The amount of local SSD disks that will be attached to each master cluster node. Defaults to 0.
    bootDiskSizeGb Number
    Size of the primary disk attached to each node, specified in GB. The primary disk contains the boot volume and system libraries, and the smallest allowed disk size is 10GB. GCP will default to a predetermined computed value if not set (currently 500GB). Note: If SSDs are not attached, it also contains the HDFS data blocks and Hadoop working directories.
    bootDiskType String
    The disk type of the primary disk attached to each node. One of "pd-ssd" or "pd-standard". Defaults to "pd-standard".
    numLocalSsds Number
    The amount of local SSD disks that will be attached to each master cluster node. Defaults to 0.

    ClusterClusterConfigDataprocMetricConfig, ClusterClusterConfigDataprocMetricConfigArgs

    metrics List<Property Map>
    Metrics sources to enable.

    ClusterClusterConfigDataprocMetricConfigMetric, ClusterClusterConfigDataprocMetricConfigMetricArgs

    MetricSource string
    A source for the collection of Dataproc OSS metrics (see available OSS metrics).
    MetricOverrides List<string>
    One or more [available OSS metrics] (https://cloud.google.com/dataproc/docs/guides/monitoring#available_oss_metrics) to collect for the metric source.


    MetricSource string
    A source for the collection of Dataproc OSS metrics (see available OSS metrics).
    MetricOverrides []string
    One or more [available OSS metrics] (https://cloud.google.com/dataproc/docs/guides/monitoring#available_oss_metrics) to collect for the metric source.


    metricSource String
    A source for the collection of Dataproc OSS metrics (see available OSS metrics).
    metricOverrides List<String>
    One or more [available OSS metrics] (https://cloud.google.com/dataproc/docs/guides/monitoring#available_oss_metrics) to collect for the metric source.


    metricSource string
    A source for the collection of Dataproc OSS metrics (see available OSS metrics).
    metricOverrides string[]
    One or more [available OSS metrics] (https://cloud.google.com/dataproc/docs/guides/monitoring#available_oss_metrics) to collect for the metric source.


    metric_source str
    A source for the collection of Dataproc OSS metrics (see available OSS metrics).
    metric_overrides Sequence[str]
    One or more [available OSS metrics] (https://cloud.google.com/dataproc/docs/guides/monitoring#available_oss_metrics) to collect for the metric source.


    metricSource String
    A source for the collection of Dataproc OSS metrics (see available OSS metrics).
    metricOverrides List<String>
    One or more [available OSS metrics] (https://cloud.google.com/dataproc/docs/guides/monitoring#available_oss_metrics) to collect for the metric source.


    ClusterClusterConfigEncryptionConfig, ClusterClusterConfigEncryptionConfigArgs

    KmsKeyName string
    The Cloud KMS key name to use for PD disk encryption for all instances in the cluster.


    KmsKeyName string
    The Cloud KMS key name to use for PD disk encryption for all instances in the cluster.


    kmsKeyName String
    The Cloud KMS key name to use for PD disk encryption for all instances in the cluster.


    kmsKeyName string
    The Cloud KMS key name to use for PD disk encryption for all instances in the cluster.


    kms_key_name str
    The Cloud KMS key name to use for PD disk encryption for all instances in the cluster.


    kmsKeyName String
    The Cloud KMS key name to use for PD disk encryption for all instances in the cluster.


    ClusterClusterConfigEndpointConfig, ClusterClusterConfigEndpointConfigArgs

    EnableHttpPortAccess bool
    The flag to enable http access to specific ports on the cluster from external sources (aka Component Gateway). Defaults to false.
    HttpPorts Dictionary<string, object>
    The map of port descriptions to URLs. Will only be populated if enable_http_port_access is true.
    EnableHttpPortAccess bool
    The flag to enable http access to specific ports on the cluster from external sources (aka Component Gateway). Defaults to false.
    HttpPorts map[string]interface{}
    The map of port descriptions to URLs. Will only be populated if enable_http_port_access is true.
    enableHttpPortAccess Boolean
    The flag to enable http access to specific ports on the cluster from external sources (aka Component Gateway). Defaults to false.
    httpPorts Map<String,Object>
    The map of port descriptions to URLs. Will only be populated if enable_http_port_access is true.
    enableHttpPortAccess boolean
    The flag to enable http access to specific ports on the cluster from external sources (aka Component Gateway). Defaults to false.
    httpPorts {[key: string]: any}
    The map of port descriptions to URLs. Will only be populated if enable_http_port_access is true.
    enable_http_port_access bool
    The flag to enable http access to specific ports on the cluster from external sources (aka Component Gateway). Defaults to false.
    http_ports Mapping[str, Any]
    The map of port descriptions to URLs. Will only be populated if enable_http_port_access is true.
    enableHttpPortAccess Boolean
    The flag to enable http access to specific ports on the cluster from external sources (aka Component Gateway). Defaults to false.
    httpPorts Map<Any>
    The map of port descriptions to URLs. Will only be populated if enable_http_port_access is true.

    ClusterClusterConfigGceClusterConfig, ClusterClusterConfigGceClusterConfigArgs

    InternalIpOnly bool
    By default, clusters are not restricted to internal IP addresses, and will have ephemeral external IP addresses assigned to each instance. If set to true, all instances in the cluster will only have internal IP addresses. Note: Private Google Access (also known as privateIpGoogleAccess) must be enabled on the subnetwork that the cluster will be launched in.
    Metadata Dictionary<string, string>
    A map of the Compute Engine metadata entries to add to all instances (see Project and instance metadata).
    Network string
    The name or self_link of the Google Compute Engine network the cluster will be part of. Conflicts with subnetwork. If neither is specified, this defaults to the "default" network.
    NodeGroupAffinity ClusterClusterConfigGceClusterConfigNodeGroupAffinity
    Node Group Affinity for sole-tenant clusters.
    ReservationAffinity ClusterClusterConfigGceClusterConfigReservationAffinity
    Reservation Affinity for consuming zonal reservation.
    ServiceAccount string
    The service account to be used by the Node VMs. If not specified, the "default" service account is used.
    ServiceAccountScopes List<string>
    The set of Google API scopes to be made available on all of the node VMs under the service_account specified. Both OAuth2 URLs and gcloud short names are supported. To allow full access to all Cloud APIs, use the cloud-platform scope. See a complete list of scopes here.
    ShieldedInstanceConfig ClusterClusterConfigGceClusterConfigShieldedInstanceConfig
    Shielded Instance Config for clusters using Compute Engine Shielded VMs.


    Subnetwork string
    The name or self_link of the Google Compute Engine subnetwork the cluster will be part of. Conflicts with network.
    Tags List<string>
    The list of instance tags applied to instances in the cluster. Tags are used to identify valid sources or targets for network firewalls.
    Zone string
    The GCP zone where your data is stored and used (i.e. where the master and the worker nodes will be created in). If region is set to 'global' (default) then zone is mandatory, otherwise GCP is able to make use of Auto Zone Placement to determine this automatically for you. Note: This setting additionally determines and restricts which computing resources are available for use with other configs such as cluster_config.master_config.machine_type and cluster_config.worker_config.machine_type.
    InternalIpOnly bool
    By default, clusters are not restricted to internal IP addresses, and will have ephemeral external IP addresses assigned to each instance. If set to true, all instances in the cluster will only have internal IP addresses. Note: Private Google Access (also known as privateIpGoogleAccess) must be enabled on the subnetwork that the cluster will be launched in.
    Metadata map[string]string
    A map of the Compute Engine metadata entries to add to all instances (see Project and instance metadata).
    Network string
    The name or self_link of the Google Compute Engine network the cluster will be part of. Conflicts with subnetwork. If neither is specified, this defaults to the "default" network.
    NodeGroupAffinity ClusterClusterConfigGceClusterConfigNodeGroupAffinity
    Node Group Affinity for sole-tenant clusters.
    ReservationAffinity ClusterClusterConfigGceClusterConfigReservationAffinity
    Reservation Affinity for consuming zonal reservation.
    ServiceAccount string
    The service account to be used by the Node VMs. If not specified, the "default" service account is used.
    ServiceAccountScopes []string
    The set of Google API scopes to be made available on all of the node VMs under the service_account specified. Both OAuth2 URLs and gcloud short names are supported. To allow full access to all Cloud APIs, use the cloud-platform scope. See a complete list of scopes here.
    ShieldedInstanceConfig ClusterClusterConfigGceClusterConfigShieldedInstanceConfig
    Shielded Instance Config for clusters using Compute Engine Shielded VMs.


    Subnetwork string
    The name or self_link of the Google Compute Engine subnetwork the cluster will be part of. Conflicts with network.
    Tags []string
    The list of instance tags applied to instances in the cluster. Tags are used to identify valid sources or targets for network firewalls.
    Zone string
    The GCP zone where your data is stored and used (i.e. where the master and the worker nodes will be created in). If region is set to 'global' (default) then zone is mandatory, otherwise GCP is able to make use of Auto Zone Placement to determine this automatically for you. Note: This setting additionally determines and restricts which computing resources are available for use with other configs such as cluster_config.master_config.machine_type and cluster_config.worker_config.machine_type.
    internalIpOnly Boolean
    By default, clusters are not restricted to internal IP addresses, and will have ephemeral external IP addresses assigned to each instance. If set to true, all instances in the cluster will only have internal IP addresses. Note: Private Google Access (also known as privateIpGoogleAccess) must be enabled on the subnetwork that the cluster will be launched in.
    metadata Map<String,String>
    A map of the Compute Engine metadata entries to add to all instances (see Project and instance metadata).
    network String
    The name or self_link of the Google Compute Engine network the cluster will be part of. Conflicts with subnetwork. If neither is specified, this defaults to the "default" network.
    nodeGroupAffinity ClusterClusterConfigGceClusterConfigNodeGroupAffinity
    Node Group Affinity for sole-tenant clusters.
    reservationAffinity ClusterClusterConfigGceClusterConfigReservationAffinity
    Reservation Affinity for consuming zonal reservation.
    serviceAccount String
    The service account to be used by the Node VMs. If not specified, the "default" service account is used.
    serviceAccountScopes List<String>
    The set of Google API scopes to be made available on all of the node VMs under the service_account specified. Both OAuth2 URLs and gcloud short names are supported. To allow full access to all Cloud APIs, use the cloud-platform scope. See a complete list of scopes here.
    shieldedInstanceConfig ClusterClusterConfigGceClusterConfigShieldedInstanceConfig
    Shielded Instance Config for clusters using Compute Engine Shielded VMs.


    subnetwork String
    The name or self_link of the Google Compute Engine subnetwork the cluster will be part of. Conflicts with network.
    tags List<String>
    The list of instance tags applied to instances in the cluster. Tags are used to identify valid sources or targets for network firewalls.
    zone String
    The GCP zone where your data is stored and used (i.e. where the master and the worker nodes will be created in). If region is set to 'global' (default) then zone is mandatory, otherwise GCP is able to make use of Auto Zone Placement to determine this automatically for you. Note: This setting additionally determines and restricts which computing resources are available for use with other configs such as cluster_config.master_config.machine_type and cluster_config.worker_config.machine_type.
    internalIpOnly boolean
    By default, clusters are not restricted to internal IP addresses, and will have ephemeral external IP addresses assigned to each instance. If set to true, all instances in the cluster will only have internal IP addresses. Note: Private Google Access (also known as privateIpGoogleAccess) must be enabled on the subnetwork that the cluster will be launched in.
    metadata {[key: string]: string}
    A map of the Compute Engine metadata entries to add to all instances (see Project and instance metadata).
    network string
    The name or self_link of the Google Compute Engine network the cluster will be part of. Conflicts with subnetwork. If neither is specified, this defaults to the "default" network.
    nodeGroupAffinity ClusterClusterConfigGceClusterConfigNodeGroupAffinity
    Node Group Affinity for sole-tenant clusters.
    reservationAffinity ClusterClusterConfigGceClusterConfigReservationAffinity
    Reservation Affinity for consuming zonal reservation.
    serviceAccount string
    The service account to be used by the Node VMs. If not specified, the "default" service account is used.
    serviceAccountScopes string[]
    The set of Google API scopes to be made available on all of the node VMs under the service_account specified. Both OAuth2 URLs and gcloud short names are supported. To allow full access to all Cloud APIs, use the cloud-platform scope. See a complete list of scopes here.
    shieldedInstanceConfig ClusterClusterConfigGceClusterConfigShieldedInstanceConfig
    Shielded Instance Config for clusters using Compute Engine Shielded VMs.


    subnetwork string
    The name or self_link of the Google Compute Engine subnetwork the cluster will be part of. Conflicts with network.
    tags string[]
    The list of instance tags applied to instances in the cluster. Tags are used to identify valid sources or targets for network firewalls.
    zone string
    The GCP zone where your data is stored and used (i.e. where the master and the worker nodes will be created in). If region is set to 'global' (default) then zone is mandatory, otherwise GCP is able to make use of Auto Zone Placement to determine this automatically for you. Note: This setting additionally determines and restricts which computing resources are available for use with other configs such as cluster_config.master_config.machine_type and cluster_config.worker_config.machine_type.
    internal_ip_only bool
    By default, clusters are not restricted to internal IP addresses, and will have ephemeral external IP addresses assigned to each instance. If set to true, all instances in the cluster will only have internal IP addresses. Note: Private Google Access (also known as privateIpGoogleAccess) must be enabled on the subnetwork that the cluster will be launched in.
    metadata Mapping[str, str]
    A map of the Compute Engine metadata entries to add to all instances (see Project and instance metadata).
    network str
    The name or self_link of the Google Compute Engine network the cluster will be part of. Conflicts with subnetwork. If neither is specified, this defaults to the "default" network.
    node_group_affinity ClusterClusterConfigGceClusterConfigNodeGroupAffinity
    Node Group Affinity for sole-tenant clusters.
    reservation_affinity ClusterClusterConfigGceClusterConfigReservationAffinity
    Reservation Affinity for consuming zonal reservation.
    service_account str
    The service account to be used by the Node VMs. If not specified, the "default" service account is used.
    service_account_scopes Sequence[str]
    The set of Google API scopes to be made available on all of the node VMs under the service_account specified. Both OAuth2 URLs and gcloud short names are supported. To allow full access to all Cloud APIs, use the cloud-platform scope. See a complete list of scopes here.
    shielded_instance_config ClusterClusterConfigGceClusterConfigShieldedInstanceConfig
    Shielded Instance Config for clusters using Compute Engine Shielded VMs.


    subnetwork str
    The name or self_link of the Google Compute Engine subnetwork the cluster will be part of. Conflicts with network.
    tags Sequence[str]
    The list of instance tags applied to instances in the cluster. Tags are used to identify valid sources or targets for network firewalls.
    zone str
    The GCP zone where your data is stored and used (i.e. where the master and the worker nodes will be created in). If region is set to 'global' (default) then zone is mandatory, otherwise GCP is able to make use of Auto Zone Placement to determine this automatically for you. Note: This setting additionally determines and restricts which computing resources are available for use with other configs such as cluster_config.master_config.machine_type and cluster_config.worker_config.machine_type.
    internalIpOnly Boolean
    By default, clusters are not restricted to internal IP addresses, and will have ephemeral external IP addresses assigned to each instance. If set to true, all instances in the cluster will only have internal IP addresses. Note: Private Google Access (also known as privateIpGoogleAccess) must be enabled on the subnetwork that the cluster will be launched in.
    metadata Map<String>
    A map of the Compute Engine metadata entries to add to all instances (see Project and instance metadata).
    network String
    The name or self_link of the Google Compute Engine network the cluster will be part of. Conflicts with subnetwork. If neither is specified, this defaults to the "default" network.
    nodeGroupAffinity Property Map
    Node Group Affinity for sole-tenant clusters.
    reservationAffinity Property Map
    Reservation Affinity for consuming zonal reservation.
    serviceAccount String
    The service account to be used by the Node VMs. If not specified, the "default" service account is used.
    serviceAccountScopes List<String>
    The set of Google API scopes to be made available on all of the node VMs under the service_account specified. Both OAuth2 URLs and gcloud short names are supported. To allow full access to all Cloud APIs, use the cloud-platform scope. See a complete list of scopes here.
    shieldedInstanceConfig Property Map
    Shielded Instance Config for clusters using Compute Engine Shielded VMs.


    subnetwork String
    The name or self_link of the Google Compute Engine subnetwork the cluster will be part of. Conflicts with network.
    tags List<String>
    The list of instance tags applied to instances in the cluster. Tags are used to identify valid sources or targets for network firewalls.
    zone String
    The GCP zone where your data is stored and used (i.e. where the master and the worker nodes will be created in). If region is set to 'global' (default) then zone is mandatory, otherwise GCP is able to make use of Auto Zone Placement to determine this automatically for you. Note: This setting additionally determines and restricts which computing resources are available for use with other configs such as cluster_config.master_config.machine_type and cluster_config.worker_config.machine_type.

    ClusterClusterConfigGceClusterConfigNodeGroupAffinity, ClusterClusterConfigGceClusterConfigNodeGroupAffinityArgs

    NodeGroupUri string
    The URI of a sole-tenant node group resource that the cluster will be created on.
    NodeGroupUri string
    The URI of a sole-tenant node group resource that the cluster will be created on.
    nodeGroupUri String
    The URI of a sole-tenant node group resource that the cluster will be created on.
    nodeGroupUri string
    The URI of a sole-tenant node group resource that the cluster will be created on.
    node_group_uri str
    The URI of a sole-tenant node group resource that the cluster will be created on.
    nodeGroupUri String
    The URI of a sole-tenant node group resource that the cluster will be created on.

    ClusterClusterConfigGceClusterConfigReservationAffinity, ClusterClusterConfigGceClusterConfigReservationAffinityArgs

    ConsumeReservationType string
    Corresponds to the type of reservation consumption.
    Key string
    Corresponds to the label key of reservation resource.
    Values List<string>
    Corresponds to the label values of reservation resource.
    ConsumeReservationType string
    Corresponds to the type of reservation consumption.
    Key string
    Corresponds to the label key of reservation resource.
    Values []string
    Corresponds to the label values of reservation resource.
    consumeReservationType String
    Corresponds to the type of reservation consumption.
    key String
    Corresponds to the label key of reservation resource.
    values List<String>
    Corresponds to the label values of reservation resource.
    consumeReservationType string
    Corresponds to the type of reservation consumption.
    key string
    Corresponds to the label key of reservation resource.
    values string[]
    Corresponds to the label values of reservation resource.
    consume_reservation_type str
    Corresponds to the type of reservation consumption.
    key str
    Corresponds to the label key of reservation resource.
    values Sequence[str]
    Corresponds to the label values of reservation resource.
    consumeReservationType String
    Corresponds to the type of reservation consumption.
    key String
    Corresponds to the label key of reservation resource.
    values List<String>
    Corresponds to the label values of reservation resource.

    ClusterClusterConfigGceClusterConfigShieldedInstanceConfig, ClusterClusterConfigGceClusterConfigShieldedInstanceConfigArgs

    EnableIntegrityMonitoring bool
    Defines whether instances have integrity monitoring enabled.


    EnableSecureBoot bool
    Defines whether instances have Secure Boot enabled.
    EnableVtpm bool
    Defines whether instances have the vTPM enabled.
    EnableIntegrityMonitoring bool
    Defines whether instances have integrity monitoring enabled.


    EnableSecureBoot bool
    Defines whether instances have Secure Boot enabled.
    EnableVtpm bool
    Defines whether instances have the vTPM enabled.
    enableIntegrityMonitoring Boolean
    Defines whether instances have integrity monitoring enabled.


    enableSecureBoot Boolean
    Defines whether instances have Secure Boot enabled.
    enableVtpm Boolean
    Defines whether instances have the vTPM enabled.
    enableIntegrityMonitoring boolean
    Defines whether instances have integrity monitoring enabled.


    enableSecureBoot boolean
    Defines whether instances have Secure Boot enabled.
    enableVtpm boolean
    Defines whether instances have the vTPM enabled.
    enable_integrity_monitoring bool
    Defines whether instances have integrity monitoring enabled.


    enable_secure_boot bool
    Defines whether instances have Secure Boot enabled.
    enable_vtpm bool
    Defines whether instances have the vTPM enabled.
    enableIntegrityMonitoring Boolean
    Defines whether instances have integrity monitoring enabled.


    enableSecureBoot Boolean
    Defines whether instances have Secure Boot enabled.
    enableVtpm Boolean
    Defines whether instances have the vTPM enabled.

    ClusterClusterConfigInitializationAction, ClusterClusterConfigInitializationActionArgs

    Script string
    The script to be executed during initialization of the cluster. The script must be a GCS file with a gs:// prefix.
    TimeoutSec int
    The maximum duration (in seconds) which the script is allowed to take to execute its action. GCP will default to a predetermined computed value if not set (currently 300).


    Script string
    The script to be executed during initialization of the cluster. The script must be a GCS file with a gs:// prefix.
    TimeoutSec int
    The maximum duration (in seconds) which the script is allowed to take to execute its action. GCP will default to a predetermined computed value if not set (currently 300).


    script String
    The script to be executed during initialization of the cluster. The script must be a GCS file with a gs:// prefix.
    timeoutSec Integer
    The maximum duration (in seconds) which the script is allowed to take to execute its action. GCP will default to a predetermined computed value if not set (currently 300).


    script string
    The script to be executed during initialization of the cluster. The script must be a GCS file with a gs:// prefix.
    timeoutSec number
    The maximum duration (in seconds) which the script is allowed to take to execute its action. GCP will default to a predetermined computed value if not set (currently 300).


    script str
    The script to be executed during initialization of the cluster. The script must be a GCS file with a gs:// prefix.
    timeout_sec int
    The maximum duration (in seconds) which the script is allowed to take to execute its action. GCP will default to a predetermined computed value if not set (currently 300).


    script String
    The script to be executed during initialization of the cluster. The script must be a GCS file with a gs:// prefix.
    timeoutSec Number
    The maximum duration (in seconds) which the script is allowed to take to execute its action. GCP will default to a predetermined computed value if not set (currently 300).


    ClusterClusterConfigLifecycleConfig, ClusterClusterConfigLifecycleConfigArgs

    AutoDeleteTime string
    The time when cluster will be auto-deleted. A timestamp in RFC3339 UTC "Zulu" format, accurate to nanoseconds. Example: "2014-10-02T15:01:23.045123456Z".


    IdleDeleteTtl string
    The duration to keep the cluster alive while idling (no jobs running). After this TTL, the cluster will be deleted. Valid range: [10m, 14d].
    IdleStartTime string
    Time when the cluster became idle (most recent job finished) and became eligible for deletion due to idleness.
    AutoDeleteTime string
    The time when cluster will be auto-deleted. A timestamp in RFC3339 UTC "Zulu" format, accurate to nanoseconds. Example: "2014-10-02T15:01:23.045123456Z".


    IdleDeleteTtl string
    The duration to keep the cluster alive while idling (no jobs running). After this TTL, the cluster will be deleted. Valid range: [10m, 14d].
    IdleStartTime string
    Time when the cluster became idle (most recent job finished) and became eligible for deletion due to idleness.
    autoDeleteTime String
    The time when cluster will be auto-deleted. A timestamp in RFC3339 UTC "Zulu" format, accurate to nanoseconds. Example: "2014-10-02T15:01:23.045123456Z".


    idleDeleteTtl String
    The duration to keep the cluster alive while idling (no jobs running). After this TTL, the cluster will be deleted. Valid range: [10m, 14d].
    idleStartTime String
    Time when the cluster became idle (most recent job finished) and became eligible for deletion due to idleness.
    autoDeleteTime string
    The time when cluster will be auto-deleted. A timestamp in RFC3339 UTC "Zulu" format, accurate to nanoseconds. Example: "2014-10-02T15:01:23.045123456Z".


    idleDeleteTtl string
    The duration to keep the cluster alive while idling (no jobs running). After this TTL, the cluster will be deleted. Valid range: [10m, 14d].
    idleStartTime string
    Time when the cluster became idle (most recent job finished) and became eligible for deletion due to idleness.
    auto_delete_time str
    The time when cluster will be auto-deleted. A timestamp in RFC3339 UTC "Zulu" format, accurate to nanoseconds. Example: "2014-10-02T15:01:23.045123456Z".


    idle_delete_ttl str
    The duration to keep the cluster alive while idling (no jobs running). After this TTL, the cluster will be deleted. Valid range: [10m, 14d].
    idle_start_time str
    Time when the cluster became idle (most recent job finished) and became eligible for deletion due to idleness.
    autoDeleteTime String
    The time when cluster will be auto-deleted. A timestamp in RFC3339 UTC "Zulu" format, accurate to nanoseconds. Example: "2014-10-02T15:01:23.045123456Z".


    idleDeleteTtl String
    The duration to keep the cluster alive while idling (no jobs running). After this TTL, the cluster will be deleted. Valid range: [10m, 14d].
    idleStartTime String
    Time when the cluster became idle (most recent job finished) and became eligible for deletion due to idleness.

    ClusterClusterConfigMasterConfig, ClusterClusterConfigMasterConfigArgs

    Accelerators List<ClusterClusterConfigMasterConfigAccelerator>
    The Compute Engine accelerator (GPU) configuration for these instances. Can be specified multiple times.
    DiskConfig ClusterClusterConfigMasterConfigDiskConfig
    Disk Config
    ImageUri string
    The URI for the image to use for this master. See the guide for more information.
    InstanceNames List<string>
    List of master instance names which have been assigned to the cluster.
    MachineType string
    The name of a Google Compute Engine machine type to create for the master. If not specified, GCP will default to a predetermined computed value (currently n1-standard-4).
    MinCpuPlatform string
    The name of a minimum generation of CPU family for the master. If not specified, GCP will default to a predetermined computed value for each zone. See the guide for details about which CPU families are available (and defaulted) for each zone.
    NumInstances int
    Specifies the number of master nodes to create. If not specified, GCP will default to a predetermined computed value (currently 1).
    Accelerators []ClusterClusterConfigMasterConfigAccelerator
    The Compute Engine accelerator (GPU) configuration for these instances. Can be specified multiple times.
    DiskConfig ClusterClusterConfigMasterConfigDiskConfig
    Disk Config
    ImageUri string
    The URI for the image to use for this master. See the guide for more information.
    InstanceNames []string
    List of master instance names which have been assigned to the cluster.
    MachineType string
    The name of a Google Compute Engine machine type to create for the master. If not specified, GCP will default to a predetermined computed value (currently n1-standard-4).
    MinCpuPlatform string
    The name of a minimum generation of CPU family for the master. If not specified, GCP will default to a predetermined computed value for each zone. See the guide for details about which CPU families are available (and defaulted) for each zone.
    NumInstances int
    Specifies the number of master nodes to create. If not specified, GCP will default to a predetermined computed value (currently 1).
    accelerators List<ClusterClusterConfigMasterConfigAccelerator>
    The Compute Engine accelerator (GPU) configuration for these instances. Can be specified multiple times.
    diskConfig ClusterClusterConfigMasterConfigDiskConfig
    Disk Config
    imageUri String
    The URI for the image to use for this master. See the guide for more information.
    instanceNames List<String>
    List of master instance names which have been assigned to the cluster.
    machineType String
    The name of a Google Compute Engine machine type to create for the master. If not specified, GCP will default to a predetermined computed value (currently n1-standard-4).
    minCpuPlatform String
    The name of a minimum generation of CPU family for the master. If not specified, GCP will default to a predetermined computed value for each zone. See the guide for details about which CPU families are available (and defaulted) for each zone.
    numInstances Integer
    Specifies the number of master nodes to create. If not specified, GCP will default to a predetermined computed value (currently 1).
    accelerators ClusterClusterConfigMasterConfigAccelerator[]
    The Compute Engine accelerator (GPU) configuration for these instances. Can be specified multiple times.
    diskConfig ClusterClusterConfigMasterConfigDiskConfig
    Disk Config
    imageUri string
    The URI for the image to use for this master. See the guide for more information.
    instanceNames string[]
    List of master instance names which have been assigned to the cluster.
    machineType string
    The name of a Google Compute Engine machine type to create for the master. If not specified, GCP will default to a predetermined computed value (currently n1-standard-4).
    minCpuPlatform string
    The name of a minimum generation of CPU family for the master. If not specified, GCP will default to a predetermined computed value for each zone. See the guide for details about which CPU families are available (and defaulted) for each zone.
    numInstances number
    Specifies the number of master nodes to create. If not specified, GCP will default to a predetermined computed value (currently 1).
    accelerators Sequence[ClusterClusterConfigMasterConfigAccelerator]
    The Compute Engine accelerator (GPU) configuration for these instances. Can be specified multiple times.
    disk_config ClusterClusterConfigMasterConfigDiskConfig
    Disk Config
    image_uri str
    The URI for the image to use for this master. See the guide for more information.
    instance_names Sequence[str]
    List of master instance names which have been assigned to the cluster.
    machine_type str
    The name of a Google Compute Engine machine type to create for the master. If not specified, GCP will default to a predetermined computed value (currently n1-standard-4).
    min_cpu_platform str
    The name of a minimum generation of CPU family for the master. If not specified, GCP will default to a predetermined computed value for each zone. See the guide for details about which CPU families are available (and defaulted) for each zone.
    num_instances int
    Specifies the number of master nodes to create. If not specified, GCP will default to a predetermined computed value (currently 1).
    accelerators List<Property Map>
    The Compute Engine accelerator (GPU) configuration for these instances. Can be specified multiple times.
    diskConfig Property Map
    Disk Config
    imageUri String
    The URI for the image to use for this master. See the guide for more information.
    instanceNames List<String>
    List of master instance names which have been assigned to the cluster.
    machineType String
    The name of a Google Compute Engine machine type to create for the master. If not specified, GCP will default to a predetermined computed value (currently n1-standard-4).
    minCpuPlatform String
    The name of a minimum generation of CPU family for the master. If not specified, GCP will default to a predetermined computed value for each zone. See the guide for details about which CPU families are available (and defaulted) for each zone.
    numInstances Number
    Specifies the number of master nodes to create. If not specified, GCP will default to a predetermined computed value (currently 1).

    ClusterClusterConfigMasterConfigAccelerator, ClusterClusterConfigMasterConfigAcceleratorArgs

    AcceleratorCount int
    The number of the accelerator cards of this type exposed to this instance. Often restricted to one of 1, 2, 4, or 8.


    AcceleratorType string
    The short name of the accelerator type to expose to this instance. For example, nvidia-tesla-k80.
    AcceleratorCount int
    The number of the accelerator cards of this type exposed to this instance. Often restricted to one of 1, 2, 4, or 8.


    AcceleratorType string
    The short name of the accelerator type to expose to this instance. For example, nvidia-tesla-k80.
    acceleratorCount Integer
    The number of the accelerator cards of this type exposed to this instance. Often restricted to one of 1, 2, 4, or 8.


    acceleratorType String
    The short name of the accelerator type to expose to this instance. For example, nvidia-tesla-k80.
    acceleratorCount number
    The number of the accelerator cards of this type exposed to this instance. Often restricted to one of 1, 2, 4, or 8.


    acceleratorType string
    The short name of the accelerator type to expose to this instance. For example, nvidia-tesla-k80.
    accelerator_count int
    The number of the accelerator cards of this type exposed to this instance. Often restricted to one of 1, 2, 4, or 8.


    accelerator_type str
    The short name of the accelerator type to expose to this instance. For example, nvidia-tesla-k80.
    acceleratorCount Number
    The number of the accelerator cards of this type exposed to this instance. Often restricted to one of 1, 2, 4, or 8.


    acceleratorType String
    The short name of the accelerator type to expose to this instance. For example, nvidia-tesla-k80.

    ClusterClusterConfigMasterConfigDiskConfig, ClusterClusterConfigMasterConfigDiskConfigArgs

    BootDiskSizeGb int
    Size of the primary disk attached to each node, specified in GB. The primary disk contains the boot volume and system libraries, and the smallest allowed disk size is 10GB. GCP will default to a predetermined computed value if not set (currently 500GB). Note: If SSDs are not attached, it also contains the HDFS data blocks and Hadoop working directories.
    BootDiskType string
    The disk type of the primary disk attached to each node. One of "pd-ssd" or "pd-standard". Defaults to "pd-standard".
    NumLocalSsds int
    The amount of local SSD disks that will be attached to each master cluster node. Defaults to 0.
    BootDiskSizeGb int
    Size of the primary disk attached to each node, specified in GB. The primary disk contains the boot volume and system libraries, and the smallest allowed disk size is 10GB. GCP will default to a predetermined computed value if not set (currently 500GB). Note: If SSDs are not attached, it also contains the HDFS data blocks and Hadoop working directories.
    BootDiskType string
    The disk type of the primary disk attached to each node. One of "pd-ssd" or "pd-standard". Defaults to "pd-standard".
    NumLocalSsds int
    The amount of local SSD disks that will be attached to each master cluster node. Defaults to 0.
    bootDiskSizeGb Integer
    Size of the primary disk attached to each node, specified in GB. The primary disk contains the boot volume and system libraries, and the smallest allowed disk size is 10GB. GCP will default to a predetermined computed value if not set (currently 500GB). Note: If SSDs are not attached, it also contains the HDFS data blocks and Hadoop working directories.
    bootDiskType String
    The disk type of the primary disk attached to each node. One of "pd-ssd" or "pd-standard". Defaults to "pd-standard".
    numLocalSsds Integer
    The amount of local SSD disks that will be attached to each master cluster node. Defaults to 0.
    bootDiskSizeGb number
    Size of the primary disk attached to each node, specified in GB. The primary disk contains the boot volume and system libraries, and the smallest allowed disk size is 10GB. GCP will default to a predetermined computed value if not set (currently 500GB). Note: If SSDs are not attached, it also contains the HDFS data blocks and Hadoop working directories.
    bootDiskType string
    The disk type of the primary disk attached to each node. One of "pd-ssd" or "pd-standard". Defaults to "pd-standard".
    numLocalSsds number
    The amount of local SSD disks that will be attached to each master cluster node. Defaults to 0.
    boot_disk_size_gb int
    Size of the primary disk attached to each node, specified in GB. The primary disk contains the boot volume and system libraries, and the smallest allowed disk size is 10GB. GCP will default to a predetermined computed value if not set (currently 500GB). Note: If SSDs are not attached, it also contains the HDFS data blocks and Hadoop working directories.
    boot_disk_type str
    The disk type of the primary disk attached to each node. One of "pd-ssd" or "pd-standard". Defaults to "pd-standard".
    num_local_ssds int
    The amount of local SSD disks that will be attached to each master cluster node. Defaults to 0.
    bootDiskSizeGb Number
    Size of the primary disk attached to each node, specified in GB. The primary disk contains the boot volume and system libraries, and the smallest allowed disk size is 10GB. GCP will default to a predetermined computed value if not set (currently 500GB). Note: If SSDs are not attached, it also contains the HDFS data blocks and Hadoop working directories.
    bootDiskType String
    The disk type of the primary disk attached to each node. One of "pd-ssd" or "pd-standard". Defaults to "pd-standard".
    numLocalSsds Number
    The amount of local SSD disks that will be attached to each master cluster node. Defaults to 0.

    ClusterClusterConfigMetastoreConfig, ClusterClusterConfigMetastoreConfigArgs

    DataprocMetastoreService string

    Resource name of an existing Dataproc Metastore service.

    Only resource names including project id and location (region) are valid. Examples:

    projects/[projectId]/locations/[dataproc_region]/services/[service-name]

    DataprocMetastoreService string

    Resource name of an existing Dataproc Metastore service.

    Only resource names including project id and location (region) are valid. Examples:

    projects/[projectId]/locations/[dataproc_region]/services/[service-name]

    dataprocMetastoreService String

    Resource name of an existing Dataproc Metastore service.

    Only resource names including project id and location (region) are valid. Examples:

    projects/[projectId]/locations/[dataproc_region]/services/[service-name]

    dataprocMetastoreService string

    Resource name of an existing Dataproc Metastore service.

    Only resource names including project id and location (region) are valid. Examples:

    projects/[projectId]/locations/[dataproc_region]/services/[service-name]

    dataproc_metastore_service str

    Resource name of an existing Dataproc Metastore service.

    Only resource names that include the project ID and location (region) are valid. Examples:

    projects/[projectId]/locations/[dataproc_region]/services/[service-name]

    dataprocMetastoreService String

    Resource name of an existing Dataproc Metastore service.

    Only resource names that include the project ID and location (region) are valid. Examples:

    projects/[projectId]/locations/[dataproc_region]/services/[service-name]

    ClusterClusterConfigPreemptibleWorkerConfig, ClusterClusterConfigPreemptibleWorkerConfigArgs

    DiskConfig ClusterClusterConfigPreemptibleWorkerConfigDiskConfig
    Disk Config
    InstanceFlexibilityPolicy ClusterClusterConfigPreemptibleWorkerConfigInstanceFlexibilityPolicy
    Instance flexibility Policy allowing a mixture of VM shapes and provisioning models.
    InstanceNames List<string>
    List of preemptible instance names which have been assigned to the cluster.
    NumInstances int
    Specifies the number of preemptible nodes to create. Defaults to 0.
    Preemptibility string
    Specifies the preemptibility of the secondary workers. The default value is PREEMPTIBLE. Accepted values are:

    • PREEMPTIBILITY_UNSPECIFIED
    • NON_PREEMPTIBLE
    • PREEMPTIBLE
    DiskConfig ClusterClusterConfigPreemptibleWorkerConfigDiskConfig
    Disk Config
    InstanceFlexibilityPolicy ClusterClusterConfigPreemptibleWorkerConfigInstanceFlexibilityPolicy
    Instance flexibility Policy allowing a mixture of VM shapes and provisioning models.
    InstanceNames []string
    List of preemptible instance names which have been assigned to the cluster.
    NumInstances int
    Specifies the number of preemptible nodes to create. Defaults to 0.
    Preemptibility string
    Specifies the preemptibility of the secondary workers. The default value is PREEMPTIBLE. Accepted values are:

    • PREEMPTIBILITY_UNSPECIFIED
    • NON_PREEMPTIBLE
    • PREEMPTIBLE
    diskConfig ClusterClusterConfigPreemptibleWorkerConfigDiskConfig
    Disk Config
    instanceFlexibilityPolicy ClusterClusterConfigPreemptibleWorkerConfigInstanceFlexibilityPolicy
    Instance flexibility Policy allowing a mixture of VM shapes and provisioning models.
    instanceNames List<String>
    List of preemptible instance names which have been assigned to the cluster.
    numInstances Integer
    Specifies the number of preemptible nodes to create. Defaults to 0.
    preemptibility String
    Specifies the preemptibility of the secondary workers. The default value is PREEMPTIBLE. Accepted values are:

    • PREEMPTIBILITY_UNSPECIFIED
    • NON_PREEMPTIBLE
    • PREEMPTIBLE
    diskConfig ClusterClusterConfigPreemptibleWorkerConfigDiskConfig
    Disk Config
    instanceFlexibilityPolicy ClusterClusterConfigPreemptibleWorkerConfigInstanceFlexibilityPolicy
    Instance flexibility Policy allowing a mixture of VM shapes and provisioning models.
    instanceNames string[]
    List of preemptible instance names which have been assigned to the cluster.
    numInstances number
    Specifies the number of preemptible nodes to create. Defaults to 0.
    preemptibility string
    Specifies the preemptibility of the secondary workers. The default value is PREEMPTIBLE. Accepted values are:

    • PREEMPTIBILITY_UNSPECIFIED
    • NON_PREEMPTIBLE
    • PREEMPTIBLE
    disk_config ClusterClusterConfigPreemptibleWorkerConfigDiskConfig
    Disk Config
    instance_flexibility_policy ClusterClusterConfigPreemptibleWorkerConfigInstanceFlexibilityPolicy
    Instance flexibility Policy allowing a mixture of VM shapes and provisioning models.
    instance_names Sequence[str]
    List of preemptible instance names which have been assigned to the cluster.
    num_instances int
    Specifies the number of preemptible nodes to create. Defaults to 0.
    preemptibility str
    Specifies the preemptibility of the secondary workers. The default value is PREEMPTIBLE. Accepted values are:

    • PREEMPTIBILITY_UNSPECIFIED
    • NON_PREEMPTIBLE
    • PREEMPTIBLE
    diskConfig Property Map
    Disk Config
    instanceFlexibilityPolicy Property Map
    Instance flexibility Policy allowing a mixture of VM shapes and provisioning models.
    instanceNames List<String>
    List of preemptible instance names which have been assigned to the cluster.
    numInstances Number
    Specifies the number of preemptible nodes to create. Defaults to 0.
    preemptibility String
    Specifies the preemptibility of the secondary workers. The default value is PREEMPTIBLE. Accepted values are:

    • PREEMPTIBILITY_UNSPECIFIED
    • NON_PREEMPTIBLE
    • PREEMPTIBLE

    ClusterClusterConfigPreemptibleWorkerConfigDiskConfig, ClusterClusterConfigPreemptibleWorkerConfigDiskConfigArgs

    BootDiskSizeGb int
    Size of the primary disk attached to each node, specified in GB. The primary disk contains the boot volume and system libraries, and the smallest allowed disk size is 10GB. GCP will default to a predetermined computed value if not set (currently 500GB). Note: If SSDs are not attached, it also contains the HDFS data blocks and Hadoop working directories.
    BootDiskType string
    The disk type of the primary disk attached to each node. One of "pd-ssd" or "pd-standard". Defaults to "pd-standard".
    NumLocalSsds int
    The amount of local SSD disks that will be attached to each preemptible worker node. Defaults to 0.
    BootDiskSizeGb int
    Size of the primary disk attached to each node, specified in GB. The primary disk contains the boot volume and system libraries, and the smallest allowed disk size is 10GB. GCP will default to a predetermined computed value if not set (currently 500GB). Note: If SSDs are not attached, it also contains the HDFS data blocks and Hadoop working directories.
    BootDiskType string
    The disk type of the primary disk attached to each node. One of "pd-ssd" or "pd-standard". Defaults to "pd-standard".
    NumLocalSsds int
    The amount of local SSD disks that will be attached to each preemptible worker node. Defaults to 0.
    bootDiskSizeGb Integer
    Size of the primary disk attached to each node, specified in GB. The primary disk contains the boot volume and system libraries, and the smallest allowed disk size is 10GB. GCP will default to a predetermined computed value if not set (currently 500GB). Note: If SSDs are not attached, it also contains the HDFS data blocks and Hadoop working directories.
    bootDiskType String
    The disk type of the primary disk attached to each node. One of "pd-ssd" or "pd-standard". Defaults to "pd-standard".
    numLocalSsds Integer
    The amount of local SSD disks that will be attached to each preemptible worker node. Defaults to 0.
    bootDiskSizeGb number
    Size of the primary disk attached to each node, specified in GB. The primary disk contains the boot volume and system libraries, and the smallest allowed disk size is 10GB. GCP will default to a predetermined computed value if not set (currently 500GB). Note: If SSDs are not attached, it also contains the HDFS data blocks and Hadoop working directories.
    bootDiskType string
    The disk type of the primary disk attached to each node. One of "pd-ssd" or "pd-standard". Defaults to "pd-standard".
    numLocalSsds number
    The amount of local SSD disks that will be attached to each preemptible worker node. Defaults to 0.
    boot_disk_size_gb int
    Size of the primary disk attached to each node, specified in GB. The primary disk contains the boot volume and system libraries, and the smallest allowed disk size is 10GB. GCP will default to a predetermined computed value if not set (currently 500GB). Note: If SSDs are not attached, it also contains the HDFS data blocks and Hadoop working directories.
    boot_disk_type str
    The disk type of the primary disk attached to each node. One of "pd-ssd" or "pd-standard". Defaults to "pd-standard".
    num_local_ssds int
    The amount of local SSD disks that will be attached to each preemptible worker node. Defaults to 0.
    bootDiskSizeGb Number
    Size of the primary disk attached to each node, specified in GB. The primary disk contains the boot volume and system libraries, and the smallest allowed disk size is 10GB. GCP will default to a predetermined computed value if not set (currently 500GB). Note: If SSDs are not attached, it also contains the HDFS data blocks and Hadoop working directories.
    bootDiskType String
    The disk type of the primary disk attached to each node. One of "pd-ssd" or "pd-standard". Defaults to "pd-standard".
    numLocalSsds Number
    The amount of local SSD disks that will be attached to each preemptible worker node. Defaults to 0.

    ClusterClusterConfigPreemptibleWorkerConfigInstanceFlexibilityPolicy, ClusterClusterConfigPreemptibleWorkerConfigInstanceFlexibilityPolicyArgs

    instanceSelectionLists List<Property Map>
    List of instance selection options that the group will use when creating new VMs.
    instanceSelectionResults List<Property Map>
    A list of instance selection results in the group.

    ClusterClusterConfigPreemptibleWorkerConfigInstanceFlexibilityPolicyInstanceSelectionList, ClusterClusterConfigPreemptibleWorkerConfigInstanceFlexibilityPolicyInstanceSelectionListArgs

    MachineTypes List<string>
    Full machine-type names, e.g. "n1-standard-16".
    Rank int
    Preference of this instance selection. A lower number means higher preference. Dataproc will first try to create a VM based on the machine-type with priority rank and fallback to next rank based on availability. Machine types and instance selections with the same priority have the same preference.


    MachineTypes []string
    Full machine-type names, e.g. "n1-standard-16".
    Rank int
    Preference of this instance selection. A lower number means higher preference. Dataproc will first try to create a VM based on the machine-type with priority rank and fallback to next rank based on availability. Machine types and instance selections with the same priority have the same preference.


    machineTypes List<String>
    Full machine-type names, e.g. "n1-standard-16".
    rank Integer
    Preference of this instance selection. A lower number means higher preference. Dataproc will first try to create a VM based on the machine-type with priority rank and fallback to next rank based on availability. Machine types and instance selections with the same priority have the same preference.


    machineTypes string[]
    Full machine-type names, e.g. "n1-standard-16".
    rank number
    Preference of this instance selection. A lower number means higher preference. Dataproc will first try to create a VM based on the machine-type with priority rank and fallback to next rank based on availability. Machine types and instance selections with the same priority have the same preference.


    machine_types Sequence[str]
    Full machine-type names, e.g. "n1-standard-16".
    rank int
    Preference of this instance selection. A lower number means higher preference. Dataproc will first try to create a VM based on the machine-type with priority rank and fallback to next rank based on availability. Machine types and instance selections with the same priority have the same preference.


    machineTypes List<String>
    Full machine-type names, e.g. "n1-standard-16".
    rank Number
    Preference of this instance selection. A lower number means higher preference. Dataproc will first try to create a VM based on the machine-type with priority rank and fallback to next rank based on availability. Machine types and instance selections with the same priority have the same preference.


    ClusterClusterConfigPreemptibleWorkerConfigInstanceFlexibilityPolicyInstanceSelectionResult, ClusterClusterConfigPreemptibleWorkerConfigInstanceFlexibilityPolicyInstanceSelectionResultArgs

    MachineType string
    The name of a Google Compute Engine machine type to create for the node group. If not specified, GCP will default to a predetermined computed value (currently n1-standard-4).
    VmCount int
    Number of VMs provisioned with the machine_type.
    MachineType string
    The name of a Google Compute Engine machine type to create for the node group. If not specified, GCP will default to a predetermined computed value (currently n1-standard-4).
    VmCount int
    Number of VMs provisioned with the machine_type.
    machineType String
    The name of a Google Compute Engine machine type to create for the node group. If not specified, GCP will default to a predetermined computed value (currently n1-standard-4).
    vmCount Integer
    Number of VMs provisioned with the machine_type.
    machineType string
    The name of a Google Compute Engine machine type to create for the node group. If not specified, GCP will default to a predetermined computed value (currently n1-standard-4).
    vmCount number
    Number of VMs provisioned with the machine_type.
    machine_type str
    The name of a Google Compute Engine machine type to create for the node group. If not specified, GCP will default to a predetermined computed value (currently n1-standard-4).
    vm_count int
    Number of VMs provisioned with the machine_type.
    machineType String
    The name of a Google Compute Engine machine type to create for the node group. If not specified, GCP will default to a predetermined computed value (currently n1-standard-4).
    vmCount Number
    Number of VMs provisioned with the machine_type.

    ClusterClusterConfigSecurityConfig, ClusterClusterConfigSecurityConfigArgs

    kerberosConfig Property Map
    Kerberos Configuration

    ClusterClusterConfigSecurityConfigKerberosConfig, ClusterClusterConfigSecurityConfigKerberosConfigArgs

    KmsKeyUri string
    The URI of the KMS key used to encrypt various sensitive files.
    RootPrincipalPasswordUri string
    The Cloud Storage URI of a KMS encrypted file containing the root principal password.
    CrossRealmTrustAdminServer string
    The admin server (IP or hostname) for the remote trusted realm in a cross realm trust relationship.
    CrossRealmTrustKdc string
    The KDC (IP or hostname) for the remote trusted realm in a cross realm trust relationship.
    CrossRealmTrustRealm string
    The remote realm the Dataproc on-cluster KDC will trust, should the user enable cross realm trust.
    CrossRealmTrustSharedPasswordUri string
    The Cloud Storage URI of a KMS encrypted file containing the shared password between the on-cluster Kerberos realm and the remote trusted realm, in a cross realm trust relationship.
    EnableKerberos bool
    Flag to indicate whether to Kerberize the cluster.
    KdcDbKeyUri string
    The Cloud Storage URI of a KMS encrypted file containing the master key of the KDC database.
    KeyPasswordUri string
    The Cloud Storage URI of a KMS encrypted file containing the password to the user provided key. For the self-signed certificate, this password is generated by Dataproc.
    KeystorePasswordUri string
    The Cloud Storage URI of a KMS encrypted file containing the password to the user provided keystore. For the self-signed certificate, the password is generated by Dataproc.
    KeystoreUri string
    The Cloud Storage URI of the keystore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.
    Realm string
    The name of the on-cluster Kerberos realm. If not specified, the uppercased domain of hostnames will be the realm.
    TgtLifetimeHours int
    The lifetime of the ticket granting ticket, in hours.
    TruststorePasswordUri string
    The Cloud Storage URI of a KMS encrypted file containing the password to the user provided truststore. For the self-signed certificate, this password is generated by Dataproc.
    TruststoreUri string
    The Cloud Storage URI of the truststore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.


    KmsKeyUri string
    The URI of the KMS key used to encrypt various sensitive files.
    RootPrincipalPasswordUri string
    The Cloud Storage URI of a KMS encrypted file containing the root principal password.
    CrossRealmTrustAdminServer string
    The admin server (IP or hostname) for the remote trusted realm in a cross realm trust relationship.
    CrossRealmTrustKdc string
    The KDC (IP or hostname) for the remote trusted realm in a cross realm trust relationship.
    CrossRealmTrustRealm string
    The remote realm the Dataproc on-cluster KDC will trust, should the user enable cross realm trust.
    CrossRealmTrustSharedPasswordUri string
    The Cloud Storage URI of a KMS encrypted file containing the shared password between the on-cluster Kerberos realm and the remote trusted realm, in a cross realm trust relationship.
    EnableKerberos bool
    Flag to indicate whether to Kerberize the cluster.
    KdcDbKeyUri string
    The Cloud Storage URI of a KMS encrypted file containing the master key of the KDC database.
    KeyPasswordUri string
    The Cloud Storage URI of a KMS encrypted file containing the password to the user provided key. For the self-signed certificate, this password is generated by Dataproc.
    KeystorePasswordUri string
    The Cloud Storage URI of a KMS encrypted file containing the password to the user provided keystore. For the self-signed certificate, the password is generated by Dataproc.
    KeystoreUri string
    The Cloud Storage URI of the keystore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.
    Realm string
    The name of the on-cluster Kerberos realm. If not specified, the uppercased domain of hostnames will be the realm.
    TgtLifetimeHours int
    The lifetime of the ticket granting ticket, in hours.
    TruststorePasswordUri string
    The Cloud Storage URI of a KMS encrypted file containing the password to the user provided truststore. For the self-signed certificate, this password is generated by Dataproc.
    TruststoreUri string
    The Cloud Storage URI of the truststore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.


    kmsKeyUri String
    The URI of the KMS key used to encrypt various sensitive files.
    rootPrincipalPasswordUri String
    The Cloud Storage URI of a KMS encrypted file containing the root principal password.
    crossRealmTrustAdminServer String
    The admin server (IP or hostname) for the remote trusted realm in a cross realm trust relationship.
    crossRealmTrustKdc String
    The KDC (IP or hostname) for the remote trusted realm in a cross realm trust relationship.
    crossRealmTrustRealm String
    The remote realm the Dataproc on-cluster KDC will trust, should the user enable cross realm trust.
    crossRealmTrustSharedPasswordUri String
    The Cloud Storage URI of a KMS encrypted file containing the shared password between the on-cluster Kerberos realm and the remote trusted realm, in a cross realm trust relationship.
    enableKerberos Boolean
    Flag to indicate whether to Kerberize the cluster.
    kdcDbKeyUri String
    The Cloud Storage URI of a KMS encrypted file containing the master key of the KDC database.
    keyPasswordUri String
    The Cloud Storage URI of a KMS encrypted file containing the password to the user provided key. For the self-signed certificate, this password is generated by Dataproc.
    keystorePasswordUri String
    The Cloud Storage URI of a KMS encrypted file containing the password to the user provided keystore. For the self-signed certificate, the password is generated by Dataproc.
    keystoreUri String
    The Cloud Storage URI of the keystore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.
    realm String
    The name of the on-cluster Kerberos realm. If not specified, the uppercased domain of hostnames will be the realm.
    tgtLifetimeHours Integer
    The lifetime of the ticket granting ticket, in hours.
    truststorePasswordUri String
    The Cloud Storage URI of a KMS encrypted file containing the password to the user provided truststore. For the self-signed certificate, this password is generated by Dataproc.
    truststoreUri String
    The Cloud Storage URI of the truststore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.


    kmsKeyUri string
    The URI of the KMS key used to encrypt various sensitive files.
    rootPrincipalPasswordUri string
    The Cloud Storage URI of a KMS encrypted file containing the root principal password.
    crossRealmTrustAdminServer string
    The admin server (IP or hostname) for the remote trusted realm in a cross realm trust relationship.
    crossRealmTrustKdc string
    The KDC (IP or hostname) for the remote trusted realm in a cross realm trust relationship.
    crossRealmTrustRealm string
    The remote realm the Dataproc on-cluster KDC will trust, should the user enable cross realm trust.
    crossRealmTrustSharedPasswordUri string
    The Cloud Storage URI of a KMS encrypted file containing the shared password between the on-cluster Kerberos realm and the remote trusted realm, in a cross realm trust relationship.
    enableKerberos boolean
    Flag to indicate whether to Kerberize the cluster.
    kdcDbKeyUri string
    The Cloud Storage URI of a KMS encrypted file containing the master key of the KDC database.
    keyPasswordUri string
    The Cloud Storage URI of a KMS encrypted file containing the password to the user provided key. For the self-signed certificate, this password is generated by Dataproc.
    keystorePasswordUri string
    The Cloud Storage URI of a KMS encrypted file containing the password to the user provided keystore. For the self-signed certificate, the password is generated by Dataproc.
    keystoreUri string
    The Cloud Storage URI of the keystore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.
    realm string
    The name of the on-cluster Kerberos realm. If not specified, the uppercased domain of hostnames will be the realm.
    tgtLifetimeHours number
    The lifetime of the ticket granting ticket, in hours.
    truststorePasswordUri string
    The Cloud Storage URI of a KMS encrypted file containing the password to the user provided truststore. For the self-signed certificate, this password is generated by Dataproc.
    truststoreUri string
    The Cloud Storage URI of the truststore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.


    kms_key_uri str
    The URI of the KMS key used to encrypt various sensitive files.
    root_principal_password_uri str
    The Cloud Storage URI of a KMS encrypted file containing the root principal password.
    cross_realm_trust_admin_server str
    The admin server (IP or hostname) for the remote trusted realm in a cross realm trust relationship.
    cross_realm_trust_kdc str
    The KDC (IP or hostname) for the remote trusted realm in a cross realm trust relationship.
    cross_realm_trust_realm str
    The remote realm the Dataproc on-cluster KDC will trust, should the user enable cross realm trust.
    cross_realm_trust_shared_password_uri str
    The Cloud Storage URI of a KMS encrypted file containing the shared password between the on-cluster Kerberos realm and the remote trusted realm, in a cross realm trust relationship.
    enable_kerberos bool
    Flag to indicate whether to Kerberize the cluster.
    kdc_db_key_uri str
    The Cloud Storage URI of a KMS encrypted file containing the master key of the KDC database.
    key_password_uri str
    The Cloud Storage URI of a KMS encrypted file containing the password to the user provided key. For the self-signed certificate, this password is generated by Dataproc.
    keystore_password_uri str
    The Cloud Storage URI of a KMS encrypted file containing the password to the user provided keystore. For the self-signed certificate, the password is generated by Dataproc.
    keystore_uri str
    The Cloud Storage URI of the keystore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.
    realm str
    The name of the on-cluster Kerberos realm. If not specified, the uppercased domain of hostnames will be the realm.
    tgt_lifetime_hours int
    The lifetime of the ticket granting ticket, in hours.
    truststore_password_uri str
    The Cloud Storage URI of a KMS encrypted file containing the password to the user provided truststore. For the self-signed certificate, this password is generated by Dataproc.
    truststore_uri str
    The Cloud Storage URI of the truststore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.


    kmsKeyUri String
    The URI of the KMS key used to encrypt various sensitive files.
    rootPrincipalPasswordUri String
    The Cloud Storage URI of a KMS encrypted file containing the root principal password.
    crossRealmTrustAdminServer String
    The admin server (IP or hostname) for the remote trusted realm in a cross realm trust relationship.
    crossRealmTrustKdc String
    The KDC (IP or hostname) for the remote trusted realm in a cross realm trust relationship.
    crossRealmTrustRealm String
    The remote realm the Dataproc on-cluster KDC will trust, should the user enable cross realm trust.
    crossRealmTrustSharedPasswordUri String
    The Cloud Storage URI of a KMS encrypted file containing the shared password between the on-cluster Kerberos realm and the remote trusted realm, in a cross realm trust relationship.
    enableKerberos Boolean
    Flag to indicate whether to Kerberize the cluster.
    kdcDbKeyUri String
    The Cloud Storage URI of a KMS encrypted file containing the master key of the KDC database.
    keyPasswordUri String
    The Cloud Storage URI of a KMS encrypted file containing the password to the user provided key. For the self-signed certificate, this password is generated by Dataproc.
    keystorePasswordUri String
    The Cloud Storage URI of a KMS encrypted file containing the password to the user provided keystore. For the self-signed certificate, the password is generated by Dataproc.
    keystoreUri String
    The Cloud Storage URI of the keystore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.
    realm String
    The name of the on-cluster Kerberos realm. If not specified, the uppercased domain of hostnames will be the realm.
    tgtLifetimeHours Number
    The lifetime of the ticket granting ticket, in hours.
    truststorePasswordUri String
    The Cloud Storage URI of a KMS encrypted file containing the password to the user provided truststore. For the self-signed certificate, this password is generated by Dataproc.
    truststoreUri String
    The Cloud Storage URI of the truststore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.


    ClusterClusterConfigSoftwareConfig, ClusterClusterConfigSoftwareConfigArgs

    ImageVersion string
    The Cloud Dataproc image version to use for the cluster - this controls the sets of software versions installed onto the nodes when you create clusters. If not specified, defaults to the latest version. For a list of valid versions see Cloud Dataproc versions
    OptionalComponents List<string>
    The set of optional components to activate on the cluster. See Available Optional Components.


    OverrideProperties Dictionary<string, string>
    A list of override and additional properties (key/value pairs) used to modify various aspects of the common configuration files used when creating a cluster. For a list of valid properties please see Cluster properties
    Properties Dictionary<string, object>
    The properties to set on daemon config files. Property keys are specified in prefix:property format, for example spark:spark.kubernetes.container.image.
    ImageVersion string
    The Cloud Dataproc image version to use for the cluster - this controls the sets of software versions installed onto the nodes when you create clusters. If not specified, defaults to the latest version. For a list of valid versions see Cloud Dataproc versions
    OptionalComponents []string
    The set of optional components to activate on the cluster. See Available Optional Components.


    OverrideProperties map[string]string
    A list of override and additional properties (key/value pairs) used to modify various aspects of the common configuration files used when creating a cluster. For a list of valid properties please see Cluster properties
    Properties map[string]interface{}
    The properties to set on daemon config files. Property keys are specified in prefix:property format, for example spark:spark.kubernetes.container.image.
    imageVersion String
    The Cloud Dataproc image version to use for the cluster - this controls the sets of software versions installed onto the nodes when you create clusters. If not specified, defaults to the latest version. For a list of valid versions see Cloud Dataproc versions
    optionalComponents List<String>
    The set of optional components to activate on the cluster. See Available Optional Components.


    overrideProperties Map<String,String>
    A list of override and additional properties (key/value pairs) used to modify various aspects of the common configuration files used when creating a cluster. For a list of valid properties please see Cluster properties
    properties Map<String,Object>
    The properties to set on daemon config files. Property keys are specified in prefix:property format, for example spark:spark.kubernetes.container.image.
    imageVersion string
    The Cloud Dataproc image version to use for the cluster - this controls the sets of software versions installed onto the nodes when you create clusters. If not specified, defaults to the latest version. For a list of valid versions see Cloud Dataproc versions
    optionalComponents string[]
    The set of optional components to activate on the cluster. See Available Optional Components.


    overrideProperties {[key: string]: string}
    A list of override and additional properties (key/value pairs) used to modify various aspects of the common configuration files used when creating a cluster. For a list of valid properties please see Cluster properties
    properties {[key: string]: any}
    The properties to set on daemon config files. Property keys are specified in prefix:property format, for example spark:spark.kubernetes.container.image.
    image_version str
    The Cloud Dataproc image version to use for the cluster - this controls the sets of software versions installed onto the nodes when you create clusters. If not specified, defaults to the latest version. For a list of valid versions see Cloud Dataproc versions
    optional_components Sequence[str]
    The set of optional components to activate on the cluster. See Available Optional Components.


    override_properties Mapping[str, str]
    A list of override and additional properties (key/value pairs) used to modify various aspects of the common configuration files used when creating a cluster. For a list of valid properties please see Cluster properties
    properties Mapping[str, Any]
    The properties to set on daemon config files. Property keys are specified in prefix:property format, for example spark:spark.kubernetes.container.image.
    imageVersion String
    The Cloud Dataproc image version to use for the cluster - this controls the sets of software versions installed onto the nodes when you create clusters. If not specified, defaults to the latest version. For a list of valid versions see Cloud Dataproc versions
    optionalComponents List<String>
    The set of optional components to activate on the cluster. See Available Optional Components.


    overrideProperties Map<String>
    A list of override and additional properties (key/value pairs) used to modify various aspects of the common configuration files used when creating a cluster. For a list of valid properties please see Cluster properties
    properties Map<Any>
    The properties to set on daemon config files. Property keys are specified in prefix:property format, for example spark:spark.kubernetes.container.image.

    ClusterClusterConfigWorkerConfig, ClusterClusterConfigWorkerConfigArgs

    Accelerators List<ClusterClusterConfigWorkerConfigAccelerator>
    The Compute Engine accelerator configuration for these instances. Can be specified multiple times.
    DiskConfig ClusterClusterConfigWorkerConfigDiskConfig
    Disk Config
    ImageUri string
    The URI for the image to use for this worker. See the guide for more information.
    InstanceNames List<string>
    List of master/worker instance names which have been assigned to the cluster.
    MachineType string
    The name of a Google Compute Engine machine type to create for the worker nodes. If not specified, GCP will default to a predetermined computed value (currently n1-standard-4).
    MinCpuPlatform string
    The name of a minimum generation of CPU family for the worker nodes. If not specified, GCP will default to a predetermined computed value for each zone. See the guide for details about which CPU families are available (and defaulted) for each zone.
    MinNumInstances int
    The minimum number of primary worker instances to create. If min_num_instances is set, cluster creation will succeed if the number of primary workers created is at least equal to the min_num_instances number.
    NumInstances int
    Specifies the number of worker nodes to create. If not specified, GCP will default to a predetermined computed value (currently 2). There is currently a beta feature which allows you to run a Single Node Cluster. In order to take advantage of this you need to set "dataproc:dataproc.allow.zero.workers" = "true" in cluster_config.software_config.properties
    Accelerators []ClusterClusterConfigWorkerConfigAccelerator
    The Compute Engine accelerator configuration for these instances. Can be specified multiple times.
    DiskConfig ClusterClusterConfigWorkerConfigDiskConfig
    Disk Config
    ImageUri string
    The URI for the image to use for this worker. See the guide for more information.
    InstanceNames []string
    List of master/worker instance names which have been assigned to the cluster.
    MachineType string
    The name of a Google Compute Engine machine type to create for the worker nodes. If not specified, GCP will default to a predetermined computed value (currently n1-standard-4).
    MinCpuPlatform string
    The name of a minimum generation of CPU family for the worker nodes. If not specified, GCP will default to a predetermined computed value for each zone. See the guide for details about which CPU families are available (and defaulted) for each zone.
    MinNumInstances int
    The minimum number of primary worker instances to create. If min_num_instances is set, cluster creation will succeed if the number of primary workers created is at least equal to the min_num_instances number.
    NumInstances int
    Specifies the number of worker nodes to create. If not specified, GCP will default to a predetermined computed value (currently 2). There is currently a beta feature which allows you to run a Single Node Cluster. In order to take advantage of this you need to set "dataproc:dataproc.allow.zero.workers" = "true" in cluster_config.software_config.properties
    accelerators List<ClusterClusterConfigWorkerConfigAccelerator>
    The Compute Engine accelerator configuration for these instances. Can be specified multiple times.
    diskConfig ClusterClusterConfigWorkerConfigDiskConfig
    Disk Config
    imageUri String
    The URI for the image to use for this worker. See the guide for more information.
    instanceNames List<String>
    List of master/worker instance names which have been assigned to the cluster.
    machineType String
    The name of a Google Compute Engine machine type to create for the worker nodes. If not specified, GCP will default to a predetermined computed value (currently n1-standard-4).
    minCpuPlatform String
    The name of a minimum generation of CPU family for the worker nodes. If not specified, GCP will default to a predetermined computed value for each zone. See the guide for details about which CPU families are available (and defaulted) for each zone.
    minNumInstances Integer
    The minimum number of primary worker instances to create. If min_num_instances is set, cluster creation will succeed if the number of primary workers created is at least equal to the min_num_instances number.
    numInstances Integer
    Specifies the number of worker nodes to create. If not specified, GCP will default to a predetermined computed value (currently 2). There is currently a beta feature which allows you to run a Single Node Cluster. In order to take advantage of this you need to set "dataproc:dataproc.allow.zero.workers" = "true" in cluster_config.software_config.properties
    accelerators ClusterClusterConfigWorkerConfigAccelerator[]
    The Compute Engine accelerator configuration for these instances. Can be specified multiple times.
    diskConfig ClusterClusterConfigWorkerConfigDiskConfig
    Disk Config
    imageUri string
    The URI for the image to use for this worker. See the guide for more information.
    instanceNames string[]
    List of master/worker instance names which have been assigned to the cluster.
    machineType string
    The name of a Google Compute Engine machine type to create for the worker nodes. If not specified, GCP will default to a predetermined computed value (currently n1-standard-4).
    minCpuPlatform string
    The name of a minimum generation of CPU family for the worker nodes. If not specified, GCP will default to a predetermined computed value for each zone. See the guide for details about which CPU families are available (and defaulted) for each zone.
    minNumInstances number
    The minimum number of primary worker instances to create. If min_num_instances is set, cluster creation will succeed if the number of primary workers created is at least equal to the min_num_instances number.
    numInstances number
    Specifies the number of worker nodes to create. If not specified, GCP will default to a predetermined computed value (currently 2). There is currently a beta feature which allows you to run a Single Node Cluster. In order to take advantage of this you need to set "dataproc:dataproc.allow.zero.workers" = "true" in cluster_config.software_config.properties
    accelerators Sequence[ClusterClusterConfigWorkerConfigAccelerator]
    The Compute Engine accelerator configuration for these instances. Can be specified multiple times.
    disk_config ClusterClusterConfigWorkerConfigDiskConfig
    Disk Config
    image_uri str
    The URI for the image to use for this worker. See the guide for more information.
    instance_names Sequence[str]
    List of master/worker instance names which have been assigned to the cluster.
    machine_type str
    The name of a Google Compute Engine machine type to create for the worker nodes. If not specified, GCP will default to a predetermined computed value (currently n1-standard-4).
    min_cpu_platform str
    The name of a minimum generation of CPU family for the worker nodes. If not specified, GCP will default to a predetermined computed value for each zone. See the guide for details about which CPU families are available (and defaulted) for each zone.
    min_num_instances int
    The minimum number of primary worker instances to create. If min_num_instances is set, cluster creation will succeed if the number of primary workers created is at least equal to the min_num_instances number.
    num_instances int
    Specifies the number of worker nodes to create. If not specified, GCP will default to a predetermined computed value (currently 2). There is currently a beta feature which allows you to run a Single Node Cluster. In order to take advantage of this you need to set "dataproc:dataproc.allow.zero.workers" = "true" in cluster_config.software_config.properties
    accelerators List<Property Map>
    The Compute Engine accelerator configuration for these instances. Can be specified multiple times.
    diskConfig Property Map
    Disk Config
    imageUri String
    The URI for the image to use for this worker. See the guide for more information.
    instanceNames List<String>
    List of master/worker instance names which have been assigned to the cluster.
    machineType String
    The name of a Google Compute Engine machine type to create for the worker nodes. If not specified, GCP will default to a predetermined computed value (currently n1-standard-4).
    minCpuPlatform String
    The name of a minimum generation of CPU family for the worker nodes. If not specified, GCP will default to a predetermined computed value for each zone. See the guide for details about which CPU families are available (and defaulted) for each zone.
    minNumInstances Number
    The minimum number of primary worker instances to create. If min_num_instances is set, cluster creation will succeed if the number of primary workers created is at least equal to the min_num_instances number.
    numInstances Number
    Specifies the number of worker nodes to create. If not specified, GCP will default to a predetermined computed value (currently 2). There is currently a beta feature which allows you to run a Single Node Cluster. In order to take advantage of this you need to set "dataproc:dataproc.allow.zero.workers" = "true" in cluster_config.software_config.properties

    ClusterClusterConfigWorkerConfigAccelerator, ClusterClusterConfigWorkerConfigAcceleratorArgs

    AcceleratorCount int
    The number of the accelerator cards of this type exposed to this instance. Often restricted to one of 1, 2, 4, or 8.


    AcceleratorType string
    The short name of the accelerator type to expose to this instance. For example, nvidia-tesla-k80.
    AcceleratorCount int
    The number of the accelerator cards of this type exposed to this instance. Often restricted to one of 1, 2, 4, or 8.


    AcceleratorType string
    The short name of the accelerator type to expose to this instance. For example, nvidia-tesla-k80.
    acceleratorCount Integer
    The number of the accelerator cards of this type exposed to this instance. Often restricted to one of 1, 2, 4, or 8.


    acceleratorType String
    The short name of the accelerator type to expose to this instance. For example, nvidia-tesla-k80.
    acceleratorCount number
    The number of the accelerator cards of this type exposed to this instance. Often restricted to one of 1, 2, 4, or 8.


    acceleratorType string
    The short name of the accelerator type to expose to this instance. For example, nvidia-tesla-k80.
    accelerator_count int
    The number of the accelerator cards of this type exposed to this instance. Often restricted to one of 1, 2, 4, or 8.


    accelerator_type str
    The short name of the accelerator type to expose to this instance. For example, nvidia-tesla-k80.
    acceleratorCount Number
    The number of the accelerator cards of this type exposed to this instance. Often restricted to one of 1, 2, 4, or 8.


    acceleratorType String
    The short name of the accelerator type to expose to this instance. For example, nvidia-tesla-k80.

    ClusterClusterConfigWorkerConfigDiskConfig, ClusterClusterConfigWorkerConfigDiskConfigArgs

    BootDiskSizeGb int
    Size of the primary disk attached to each node, specified in GB. The primary disk contains the boot volume and system libraries, and the smallest allowed disk size is 10GB. GCP will default to a predetermined computed value if not set (currently 500GB). Note: If SSDs are not attached, it also contains the HDFS data blocks and Hadoop working directories.
    BootDiskType string
    The disk type of the primary disk attached to each node. One of "pd-ssd" or "pd-standard". Defaults to "pd-standard".
    NumLocalSsds int
    The number of local SSD disks that will be attached to each worker cluster node. Defaults to 0.
    BootDiskSizeGb int
    Size of the primary disk attached to each node, specified in GB. The primary disk contains the boot volume and system libraries, and the smallest allowed disk size is 10GB. GCP will default to a predetermined computed value if not set (currently 500GB). Note: If SSDs are not attached, it also contains the HDFS data blocks and Hadoop working directories.
    BootDiskType string
    The disk type of the primary disk attached to each node. One of "pd-ssd" or "pd-standard". Defaults to "pd-standard".
    NumLocalSsds int
    The number of local SSD disks that will be attached to each worker cluster node. Defaults to 0.
    bootDiskSizeGb Integer
    Size of the primary disk attached to each node, specified in GB. The primary disk contains the boot volume and system libraries, and the smallest allowed disk size is 10GB. GCP will default to a predetermined computed value if not set (currently 500GB). Note: If SSDs are not attached, it also contains the HDFS data blocks and Hadoop working directories.
    bootDiskType String
    The disk type of the primary disk attached to each node. One of "pd-ssd" or "pd-standard". Defaults to "pd-standard".
    numLocalSsds Integer
    The number of local SSD disks that will be attached to each worker cluster node. Defaults to 0.
    bootDiskSizeGb number
    Size of the primary disk attached to each node, specified in GB. The primary disk contains the boot volume and system libraries, and the smallest allowed disk size is 10GB. GCP will default to a predetermined computed value if not set (currently 500GB). Note: If SSDs are not attached, it also contains the HDFS data blocks and Hadoop working directories.
    bootDiskType string
    The disk type of the primary disk attached to each node. One of "pd-ssd" or "pd-standard". Defaults to "pd-standard".
    numLocalSsds number
    The number of local SSD disks that will be attached to each worker cluster node. Defaults to 0.
    boot_disk_size_gb int
    Size of the primary disk attached to each node, specified in GB. The primary disk contains the boot volume and system libraries, and the smallest allowed disk size is 10GB. GCP will default to a predetermined computed value if not set (currently 500GB). Note: If SSDs are not attached, it also contains the HDFS data blocks and Hadoop working directories.
    boot_disk_type str
    The disk type of the primary disk attached to each node. One of "pd-ssd" or "pd-standard". Defaults to "pd-standard".
    num_local_ssds int
    The number of local SSD disks that will be attached to each worker cluster node. Defaults to 0.
    bootDiskSizeGb Number
    Size of the primary disk attached to each node, specified in GB. The primary disk contains the boot volume and system libraries, and the smallest allowed disk size is 10GB. GCP will default to a predetermined computed value if not set (currently 500GB). Note: If SSDs are not attached, it also contains the HDFS data blocks and Hadoop working directories.
    bootDiskType String
    The disk type of the primary disk attached to each node. One of "pd-ssd" or "pd-standard". Defaults to "pd-standard".
    numLocalSsds Number
    The number of local SSD disks that will be attached to each worker cluster node. Defaults to 0.

    ClusterVirtualClusterConfig, ClusterVirtualClusterConfigArgs

    AuxiliaryServicesConfig ClusterVirtualClusterConfigAuxiliaryServicesConfig
    Configuration of auxiliary services used by this cluster. Structure defined below.
    KubernetesClusterConfig ClusterVirtualClusterConfigKubernetesClusterConfig
    The configuration for running the Dataproc cluster on Kubernetes. Structure defined below.


    StagingBucket string
    The Cloud Storage staging bucket used to stage files, such as Hadoop jars, between client machines and the cluster. Note: If you don't explicitly specify a staging_bucket then GCP will auto create / assign one for you. However, you are not guaranteed an auto generated bucket which is solely dedicated to your cluster; it may be shared with other clusters in the same region/zone also choosing to use the auto generation option.
    AuxiliaryServicesConfig ClusterVirtualClusterConfigAuxiliaryServicesConfig
    Configuration of auxiliary services used by this cluster. Structure defined below.
    KubernetesClusterConfig ClusterVirtualClusterConfigKubernetesClusterConfig
    The configuration for running the Dataproc cluster on Kubernetes. Structure defined below.


    StagingBucket string
    The Cloud Storage staging bucket used to stage files, such as Hadoop jars, between client machines and the cluster. Note: If you don't explicitly specify a staging_bucket then GCP will auto create / assign one for you. However, you are not guaranteed an auto generated bucket which is solely dedicated to your cluster; it may be shared with other clusters in the same region/zone also choosing to use the auto generation option.
    auxiliaryServicesConfig ClusterVirtualClusterConfigAuxiliaryServicesConfig
    Configuration of auxiliary services used by this cluster. Structure defined below.
    kubernetesClusterConfig ClusterVirtualClusterConfigKubernetesClusterConfig
    The configuration for running the Dataproc cluster on Kubernetes. Structure defined below.


    stagingBucket String
    The Cloud Storage staging bucket used to stage files, such as Hadoop jars, between client machines and the cluster. Note: If you don't explicitly specify a staging_bucket then GCP will auto create / assign one for you. However, you are not guaranteed an auto generated bucket which is solely dedicated to your cluster; it may be shared with other clusters in the same region/zone also choosing to use the auto generation option.
    auxiliaryServicesConfig ClusterVirtualClusterConfigAuxiliaryServicesConfig
    Configuration of auxiliary services used by this cluster. Structure defined below.
    kubernetesClusterConfig ClusterVirtualClusterConfigKubernetesClusterConfig
    The configuration for running the Dataproc cluster on Kubernetes. Structure defined below.


    stagingBucket string
    The Cloud Storage staging bucket used to stage files, such as Hadoop jars, between client machines and the cluster. Note: If you don't explicitly specify a staging_bucket then GCP will auto create / assign one for you. However, you are not guaranteed an auto generated bucket which is solely dedicated to your cluster; it may be shared with other clusters in the same region/zone also choosing to use the auto generation option.
    auxiliary_services_config ClusterVirtualClusterConfigAuxiliaryServicesConfig
    Configuration of auxiliary services used by this cluster. Structure defined below.
    kubernetes_cluster_config ClusterVirtualClusterConfigKubernetesClusterConfig
    The configuration for running the Dataproc cluster on Kubernetes. Structure defined below.


    staging_bucket str
    The Cloud Storage staging bucket used to stage files, such as Hadoop jars, between client machines and the cluster. Note: If you don't explicitly specify a staging_bucket then GCP will auto create / assign one for you. However, you are not guaranteed an auto generated bucket which is solely dedicated to your cluster; it may be shared with other clusters in the same region/zone also choosing to use the auto generation option.
    auxiliaryServicesConfig Property Map
    Configuration of auxiliary services used by this cluster. Structure defined below.
    kubernetesClusterConfig Property Map
    The configuration for running the Dataproc cluster on Kubernetes. Structure defined below.


    stagingBucket String
    The Cloud Storage staging bucket used to stage files, such as Hadoop jars, between client machines and the cluster. Note: If you don't explicitly specify a staging_bucket then GCP will auto create / assign one for you. However, you are not guaranteed an auto generated bucket which is solely dedicated to your cluster; it may be shared with other clusters in the same region/zone also choosing to use the auto generation option.

    ClusterVirtualClusterConfigAuxiliaryServicesConfig, ClusterVirtualClusterConfigAuxiliaryServicesConfigArgs

    metastoreConfig Property Map
    The Hive Metastore configuration for this workload.
    sparkHistoryServerConfig Property Map
    The Spark History Server configuration for the workload.

    ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig, ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfigArgs

    DataprocMetastoreService string

    Resource name of an existing Dataproc Metastore service.

    Only resource names including project id and location (region) are valid. Examples:

    projects/[projectId]/locations/[dataproc_region]/services/[service-name]

    DataprocMetastoreService string

    Resource name of an existing Dataproc Metastore service.

    Only resource names including project id and location (region) are valid. Examples:

    projects/[projectId]/locations/[dataproc_region]/services/[service-name]

    dataprocMetastoreService String

    Resource name of an existing Dataproc Metastore service.

    Only resource names including project id and location (region) are valid. Examples:

    projects/[projectId]/locations/[dataproc_region]/services/[service-name]

    dataprocMetastoreService string

    Resource name of an existing Dataproc Metastore service.

    Only resource names including project id and location (region) are valid. Examples:

    projects/[projectId]/locations/[dataproc_region]/services/[service-name]

    dataproc_metastore_service str

    Resource name of an existing Dataproc Metastore service.

    Only resource names including project id and location (region) are valid. Examples:

    projects/[projectId]/locations/[dataproc_region]/services/[service-name]

    dataprocMetastoreService String

    Resource name of an existing Dataproc Metastore service.

    Only resource names including project id and location (region) are valid. Examples:

    projects/[projectId]/locations/[dataproc_region]/services/[service-name]

    ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig, ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfigArgs

    DataprocCluster string
    Resource name of an existing Dataproc Cluster to act as a Spark History Server for the workload.


    DataprocCluster string
    Resource name of an existing Dataproc Cluster to act as a Spark History Server for the workload.


    dataprocCluster String
    Resource name of an existing Dataproc Cluster to act as a Spark History Server for the workload.


    dataprocCluster string
    Resource name of an existing Dataproc Cluster to act as a Spark History Server for the workload.


    dataproc_cluster str
    Resource name of an existing Dataproc Cluster to act as a Spark History Server for the workload.


    dataprocCluster String
    Resource name of an existing Dataproc Cluster to act as a Spark History Server for the workload.


    ClusterVirtualClusterConfigKubernetesClusterConfig, ClusterVirtualClusterConfigKubernetesClusterConfigArgs

    GkeClusterConfig ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig
    The configuration for running the Dataproc cluster on GKE.
    KubernetesSoftwareConfig ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig
    The software configuration for this Dataproc cluster running on Kubernetes.
    KubernetesNamespace string
    A namespace within the Kubernetes cluster to deploy into. If this namespace does not exist, it is created. If it exists, Dataproc verifies that another Dataproc VirtualCluster is not installed into it. If not specified, the name of the Dataproc Cluster is used.
    GkeClusterConfig ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig
    The configuration for running the Dataproc cluster on GKE.
    KubernetesSoftwareConfig ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig
    The software configuration for this Dataproc cluster running on Kubernetes.
    KubernetesNamespace string
    A namespace within the Kubernetes cluster to deploy into. If this namespace does not exist, it is created. If it exists, Dataproc verifies that another Dataproc VirtualCluster is not installed into it. If not specified, the name of the Dataproc Cluster is used.
    gkeClusterConfig ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig
    The configuration for running the Dataproc cluster on GKE.
    kubernetesSoftwareConfig ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig
    The software configuration for this Dataproc cluster running on Kubernetes.
    kubernetesNamespace String
    A namespace within the Kubernetes cluster to deploy into. If this namespace does not exist, it is created. If it exists, Dataproc verifies that another Dataproc VirtualCluster is not installed into it. If not specified, the name of the Dataproc Cluster is used.
    gkeClusterConfig ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig
    The configuration for running the Dataproc cluster on GKE.
    kubernetesSoftwareConfig ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig
    The software configuration for this Dataproc cluster running on Kubernetes.
    kubernetesNamespace string
    A namespace within the Kubernetes cluster to deploy into. If this namespace does not exist, it is created. If it exists, Dataproc verifies that another Dataproc VirtualCluster is not installed into it. If not specified, the name of the Dataproc Cluster is used.
    gke_cluster_config ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig
    The configuration for running the Dataproc cluster on GKE.
    kubernetes_software_config ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig
    The software configuration for this Dataproc cluster running on Kubernetes.
    kubernetes_namespace str
    A namespace within the Kubernetes cluster to deploy into. If this namespace does not exist, it is created. If it exists, Dataproc verifies that another Dataproc VirtualCluster is not installed into it. If not specified, the name of the Dataproc Cluster is used.
    gkeClusterConfig Property Map
    The configuration for running the Dataproc cluster on GKE.
    kubernetesSoftwareConfig Property Map
    The software configuration for this Dataproc cluster running on Kubernetes.
    kubernetesNamespace String
    A namespace within the Kubernetes cluster to deploy into. If this namespace does not exist, it is created. If it exists, Dataproc verifies that another Dataproc VirtualCluster is not installed into it. If not specified, the name of the Dataproc Cluster is used.

    ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig, ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigArgs

    GkeClusterTarget string
    A target GKE cluster to deploy to. It must be in the same project and region as the Dataproc cluster (the GKE cluster can be zonal or regional)
    NodePoolTargets List<ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget>
    GKE node pools where workloads will be scheduled. At least one node pool must be assigned the DEFAULT GkeNodePoolTarget.Role. If a GkeNodePoolTarget is not specified, Dataproc constructs a DEFAULT GkeNodePoolTarget. Each role can be given to only one GkeNodePoolTarget. All node pools must have the same location settings.
    GkeClusterTarget string
    A target GKE cluster to deploy to. It must be in the same project and region as the Dataproc cluster (the GKE cluster can be zonal or regional)
    NodePoolTargets []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget
    GKE node pools where workloads will be scheduled. At least one node pool must be assigned the DEFAULT GkeNodePoolTarget.Role. If a GkeNodePoolTarget is not specified, Dataproc constructs a DEFAULT GkeNodePoolTarget. Each role can be given to only one GkeNodePoolTarget. All node pools must have the same location settings.
    gkeClusterTarget String
    A target GKE cluster to deploy to. It must be in the same project and region as the Dataproc cluster (the GKE cluster can be zonal or regional)
    nodePoolTargets List<ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget>
    GKE node pools where workloads will be scheduled. At least one node pool must be assigned the DEFAULT GkeNodePoolTarget.Role. If a GkeNodePoolTarget is not specified, Dataproc constructs a DEFAULT GkeNodePoolTarget. Each role can be given to only one GkeNodePoolTarget. All node pools must have the same location settings.
    gkeClusterTarget string
    A target GKE cluster to deploy to. It must be in the same project and region as the Dataproc cluster (the GKE cluster can be zonal or regional)
    nodePoolTargets ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget[]
    GKE node pools where workloads will be scheduled. At least one node pool must be assigned the DEFAULT GkeNodePoolTarget.Role. If a GkeNodePoolTarget is not specified, Dataproc constructs a DEFAULT GkeNodePoolTarget. Each role can be given to only one GkeNodePoolTarget. All node pools must have the same location settings.
    gke_cluster_target str
    A target GKE cluster to deploy to. It must be in the same project and region as the Dataproc cluster (the GKE cluster can be zonal or regional)
    node_pool_targets Sequence[ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget]
    GKE node pools where workloads will be scheduled. At least one node pool must be assigned the DEFAULT GkeNodePoolTarget.Role. If a GkeNodePoolTarget is not specified, Dataproc constructs a DEFAULT GkeNodePoolTarget. Each role can be given to only one GkeNodePoolTarget. All node pools must have the same location settings.
    gkeClusterTarget String
    A target GKE cluster to deploy to. It must be in the same project and region as the Dataproc cluster (the GKE cluster can be zonal or regional)
    nodePoolTargets List<Property Map>
    GKE node pools where workloads will be scheduled. At least one node pool must be assigned the DEFAULT GkeNodePoolTarget.Role. If a GkeNodePoolTarget is not specified, Dataproc constructs a DEFAULT GkeNodePoolTarget. Each role can be given to only one GkeNodePoolTarget. All node pools must have the same location settings.

    ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget, ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetArgs

    NodePool string
    The target GKE node pool.
    Roles List<string>
    Node group roles. One of "DRIVER".
    NodePoolConfig ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig
    The configuration for the GKE node pool. If specified, Dataproc attempts to create a node pool with the specified shape. If one with the same name already exists, it is verified against all specified fields. If a field differs, the virtual cluster creation will fail.
    NodePool string
    The target GKE node pool.
    Roles []string
    Node group roles. One of "DRIVER".
    NodePoolConfig ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig
    The configuration for the GKE node pool. If specified, Dataproc attempts to create a node pool with the specified shape. If one with the same name already exists, it is verified against all specified fields. If a field differs, the virtual cluster creation will fail.
    nodePool String
    The target GKE node pool.
    roles List<String>
    Node group roles. One of "DRIVER".
    nodePoolConfig ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig
    The configuration for the GKE node pool. If specified, Dataproc attempts to create a node pool with the specified shape. If one with the same name already exists, it is verified against all specified fields. If a field differs, the virtual cluster creation will fail.
    nodePool string
    The target GKE node pool.
    roles string[]
    Node group roles. One of "DRIVER".
    nodePoolConfig ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig
    The configuration for the GKE node pool. If specified, Dataproc attempts to create a node pool with the specified shape. If one with the same name already exists, it is verified against all specified fields. If a field differs, the virtual cluster creation will fail.
    node_pool str
    The target GKE node pool.
    roles Sequence[str]
    Node group roles. One of "DRIVER".
    node_pool_config ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig
    The configuration for the GKE node pool. If specified, Dataproc attempts to create a node pool with the specified shape. If one with the same name already exists, it is verified against all specified fields. If a field differs, the virtual cluster creation will fail.
    nodePool String
    The target GKE node pool.
    roles List<String>
    Node group roles. One of "DRIVER".
    nodePoolConfig Property Map
    The configuration for the GKE node pool. If specified, Dataproc attempts to create a node pool with the specified shape. If one with the same name already exists, it is verified against all specified fields. If a field differs, the virtual cluster creation will fail.

    ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig, ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigArgs

    Locations List<string>
    The list of Compute Engine zones where node pool nodes associated with a Dataproc on GKE virtual cluster will be located.


    Autoscaling ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling
    The autoscaler configuration for this node pool. The autoscaler is enabled only when a valid configuration is present.
    Config ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig
    The node pool configuration.
    Locations []string
    The list of Compute Engine zones where node pool nodes associated with a Dataproc on GKE virtual cluster will be located.


    Autoscaling ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling
    The autoscaler configuration for this node pool. The autoscaler is enabled only when a valid configuration is present.
    Config ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig
    The node pool configuration.
    locations List<String>
    The list of Compute Engine zones where node pool nodes associated with a Dataproc on GKE virtual cluster will be located.


    autoscaling ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling
    The autoscaler configuration for this node pool. The autoscaler is enabled only when a valid configuration is present.
    config ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig
    The node pool configuration.
    locations string[]
    The list of Compute Engine zones where node pool nodes associated with a Dataproc on GKE virtual cluster will be located.


    autoscaling ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling
    The autoscaler configuration for this node pool. The autoscaler is enabled only when a valid configuration is present.
    config ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig
    The node pool configuration.
    locations Sequence[str]
    The list of Compute Engine zones where node pool nodes associated with a Dataproc on GKE virtual cluster will be located.


    autoscaling ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling
    The autoscaler configuration for this node pool. The autoscaler is enabled only when a valid configuration is present.
    config ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig
    The node pool configuration.
    locations List<String>
    The list of Compute Engine zones where node pool nodes associated with a Dataproc on GKE virtual cluster will be located.


    autoscaling Property Map
    The autoscaler configuration for this node pool. The autoscaler is enabled only when a valid configuration is present.
    config Property Map
    The node pool configuration.

    ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling, ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscalingArgs

    MaxNodeCount int
    The maximum number of nodes in the node pool. Must be >= minNodeCount, and must be > 0.
    MinNodeCount int
    The minimum number of nodes in the node pool. Must be >= 0 and <= maxNodeCount.
    MaxNodeCount int
    The maximum number of nodes in the node pool. Must be >= minNodeCount, and must be > 0.
    MinNodeCount int
    The minimum number of nodes in the node pool. Must be >= 0 and <= maxNodeCount.
    maxNodeCount Integer
    The maximum number of nodes in the node pool. Must be >= minNodeCount, and must be > 0.
    minNodeCount Integer
    The minimum number of nodes in the node pool. Must be >= 0 and <= maxNodeCount.
    maxNodeCount number
    The maximum number of nodes in the node pool. Must be >= minNodeCount, and must be > 0.
    minNodeCount number
    The minimum number of nodes in the node pool. Must be >= 0 and <= maxNodeCount.
    max_node_count int
    The maximum number of nodes in the node pool. Must be >= minNodeCount, and must be > 0.
    min_node_count int
    The minimum number of nodes in the node pool. Must be >= 0 and <= maxNodeCount.
    maxNodeCount Number
    The maximum number of nodes in the node pool. Must be >= minNodeCount, and must be > 0.
    minNodeCount Number
    The minimum number of nodes in the node pool. Must be >= 0 and <= maxNodeCount.

    ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig, ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigArgs

    LocalSsdCount int
    The number of local SSD disks to attach to the node, which is limited by the maximum number of disks allowable per zone.
    MachineType string
    The name of a Google Compute Engine machine type to create for the node group. If not specified, GCP will default to a predetermined computed value (currently n1-standard-4).
    MinCpuPlatform string
    The name of a minimum generation of CPU family for the node group. If not specified, GCP will default to a predetermined computed value for each zone. See the guide for details about which CPU families are available (and defaulted) for each zone.
    Preemptible bool
    Whether the nodes are created as preemptible VM instances. Preemptible nodes cannot be used in a node pool with the CONTROLLER role or in the DEFAULT node pool if the CONTROLLER role is not assigned (the DEFAULT node pool will assume the CONTROLLER role).
    Spot bool
    Spot flag for enabling Spot VM, which is a rebrand of the existing preemptible flag.
    LocalSsdCount int
    The number of local SSD disks to attach to the node, which is limited by the maximum number of disks allowable per zone.
    MachineType string
    The name of a Google Compute Engine machine type to create for the node group. If not specified, GCP will default to a predetermined computed value (currently n1-standard-4).
    MinCpuPlatform string
    The name of a minimum generation of CPU family for the node group. If not specified, GCP will default to a predetermined computed value for each zone. See the guide for details about which CPU families are available (and defaulted) for each zone.
    Preemptible bool
    Whether the nodes are created as preemptible VM instances. Preemptible nodes cannot be used in a node pool with the CONTROLLER role or in the DEFAULT node pool if the CONTROLLER role is not assigned (the DEFAULT node pool will assume the CONTROLLER role).
    Spot bool
    Spot flag for enabling Spot VM, which is a rebrand of the existing preemptible flag.
    localSsdCount Integer
    The number of local SSD disks to attach to the node, which is limited by the maximum number of disks allowable per zone.
    machineType String
    The name of a Google Compute Engine machine type to create for the node group. If not specified, GCP will default to a predetermined computed value (currently n1-standard-4).
    minCpuPlatform String
    The name of a minimum generation of CPU family for the node group. If not specified, GCP will default to a predetermined computed value for each zone. See the guide for details about which CPU families are available (and defaulted) for each zone.
    preemptible Boolean
    Whether the nodes are created as preemptible VM instances. Preemptible nodes cannot be used in a node pool with the CONTROLLER role or in the DEFAULT node pool if the CONTROLLER role is not assigned (the DEFAULT node pool will assume the CONTROLLER role).
    spot Boolean
    Spot flag for enabling Spot VM, which is a rebrand of the existing preemptible flag.
    localSsdCount number
    The number of local SSD disks to attach to the node, which is limited by the maximum number of disks allowable per zone.
    machineType string
    The name of a Google Compute Engine machine type to create for the node group. If not specified, GCP will default to a predetermined computed value (currently n1-standard-4).
    minCpuPlatform string
    The name of a minimum generation of CPU family for the node group. If not specified, GCP will default to a predetermined computed value for each zone. See the guide for details about which CPU families are available (and defaulted) for each zone.
    preemptible boolean
    Whether the nodes are created as preemptible VM instances. Preemptible nodes cannot be used in a node pool with the CONTROLLER role or in the DEFAULT node pool if the CONTROLLER role is not assigned (the DEFAULT node pool will assume the CONTROLLER role).
    spot boolean
    Spot flag for enabling Spot VM, which is a rebrand of the existing preemptible flag.
    local_ssd_count int
    The number of local SSD disks to attach to the node, which is limited by the maximum number of disks allowable per zone.
    machine_type str
    The name of a Google Compute Engine machine type to create for the node group. If not specified, GCP will default to a predetermined computed value (currently n1-standard-4).
    min_cpu_platform str
    The name of a minimum generation of CPU family for the node group. If not specified, GCP will default to a predetermined computed value for each zone. See the guide for details about which CPU families are available (and defaulted) for each zone.
    preemptible bool
    Whether the nodes are created as preemptible VM instances. Preemptible nodes cannot be used in a node pool with the CONTROLLER role or in the DEFAULT node pool if the CONTROLLER role is not assigned (the DEFAULT node pool will assume the CONTROLLER role).
    spot bool
    Spot flag for enabling Spot VM, which is a rebrand of the existing preemptible flag.
    localSsdCount Number
    The number of local SSD disks to attach to the node, which is limited by the maximum number of disks allowable per zone.
    machineType String
    The name of a Google Compute Engine machine type to create for the node group. If not specified, GCP will default to a predetermined computed value (currently n1-standard-4).
    minCpuPlatform String
    The name of a minimum generation of CPU family for the node group. If not specified, GCP will default to a predetermined computed value for each zone. See the guide for details about which CPU families are available (and defaulted) for each zone.
    preemptible Boolean
    Whether the nodes are created as preemptible VM instances. Preemptible nodes cannot be used in a node pool with the CONTROLLER role or in the DEFAULT node pool if the CONTROLLER role is not assigned (the DEFAULT node pool will assume the CONTROLLER role).
    spot Boolean
    Spot flag for enabling Spot VM, which is a rebrand of the existing preemptible flag.

    ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig, ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfigArgs

    ComponentVersion Dictionary<string, string>
    The components that should be installed in this Dataproc cluster. The key must be a string from the
    KubernetesComponent enumeration. The value is the version of the software to be installed. At least one entry must be specified.

    • NOTE: component_version[SPARK] must be set; otherwise the creation of the cluster will fail.
    Properties Dictionary<string, string>
    The properties to set on daemon config files. Property keys are specified in prefix:property format, for example spark:spark.kubernetes.container.image.
    ComponentVersion map[string]string
    The components that should be installed in this Dataproc cluster. The key must be a string from the
    KubernetesComponent enumeration. The value is the version of the software to be installed. At least one entry must be specified.

    • NOTE: component_version[SPARK] must be set; otherwise the creation of the cluster will fail.
    Properties map[string]string
    The properties to set on daemon config files. Property keys are specified in prefix:property format, for example spark:spark.kubernetes.container.image.
    componentVersion Map<String,String>
    The components that should be installed in this Dataproc cluster. The key must be a string from the
    KubernetesComponent enumeration. The value is the version of the software to be installed. At least one entry must be specified.

    • NOTE: component_version[SPARK] must be set; otherwise the creation of the cluster will fail.
    properties Map<String,String>
    The properties to set on daemon config files. Property keys are specified in prefix:property format, for example spark:spark.kubernetes.container.image.
    componentVersion {[key: string]: string}
    The components that should be installed in this Dataproc cluster. The key must be a string from the
    KubernetesComponent enumeration. The value is the version of the software to be installed. At least one entry must be specified.

    • NOTE: component_version[SPARK] must be set; otherwise the creation of the cluster will fail.
    properties {[key: string]: string}
    The properties to set on daemon config files. Property keys are specified in prefix:property format, for example spark:spark.kubernetes.container.image.
    component_version Mapping[str, str]
    The components that should be installed in this Dataproc cluster. The key must be a string from the
    KubernetesComponent enumeration. The value is the version of the software to be installed. At least one entry must be specified.

    • NOTE: component_version[SPARK] must be set; otherwise the creation of the cluster will fail.
    properties Mapping[str, str]
    The properties to set on daemon config files. Property keys are specified in prefix:property format, for example spark:spark.kubernetes.container.image.
    componentVersion Map<String>
    The components that should be installed in this Dataproc cluster. The key must be a string from the
    KubernetesComponent enumeration. The value is the version of the software to be installed. At least one entry must be specified.

    • NOTE: component_version[SPARK] must be set; otherwise the creation of the cluster will fail.
    properties Map<String>
    The properties to set on daemon config files. Property keys are specified in prefix:property format, for example spark:spark.kubernetes.container.image.

    Import

    This resource does not support import.

    To learn more about importing existing cloud resources, see Importing resources.

    Package Details

    Repository
    Google Cloud (GCP) Classic pulumi/pulumi-gcp
    License
    Apache-2.0
    Notes
    This Pulumi package is based on the google-beta Terraform Provider.
    gcp logo
    Google Cloud Classic v7.19.0 published on Thursday, Apr 18, 2024 by Pulumi