gcp.dataproc.WorkflowTemplate

Google Cloud Classic v7.20.0 published on Wednesday, Apr 24, 2024 by Pulumi
    A Workflow Template is a reusable workflow configuration. It defines a graph of jobs with information on where to run those jobs.

    Example Usage

    TypeScript

    import * as pulumi from "@pulumi/pulumi";
    import * as gcp from "@pulumi/gcp";
    
    const template = new gcp.dataproc.WorkflowTemplate("template", {
        name: "template-example",
        location: "us-central1",
        placement: {
            managedCluster: {
                clusterName: "my-cluster",
                config: {
                    gceClusterConfig: {
                        zone: "us-central1-a",
                        tags: [
                            "foo",
                            "bar",
                        ],
                    },
                    masterConfig: {
                        numInstances: 1,
                        machineType: "n1-standard-1",
                        diskConfig: {
                            bootDiskType: "pd-ssd",
                            bootDiskSizeGb: 15,
                        },
                    },
                    workerConfig: {
                        numInstances: 3,
                        machineType: "n1-standard-2",
                        diskConfig: {
                            bootDiskSizeGb: 10,
                            numLocalSsds: 2,
                        },
                    },
                    secondaryWorkerConfig: {
                        numInstances: 2,
                    },
                    softwareConfig: {
                        imageVersion: "2.0.35-debian10",
                    },
                },
            },
        },
        jobs: [
            {
                stepId: "someJob",
                sparkJob: {
                    mainClass: "SomeClass",
                },
            },
            {
                stepId: "otherJob",
                prerequisiteStepIds: ["someJob"],
                prestoJob: {
                    queryFileUri: "someuri",
                },
            },
        ],
    });
    
    Python

    import pulumi
    import pulumi_gcp as gcp
    
    template = gcp.dataproc.WorkflowTemplate("template",
        name="template-example",
        location="us-central1",
        placement=gcp.dataproc.WorkflowTemplatePlacementArgs(
            managed_cluster=gcp.dataproc.WorkflowTemplatePlacementManagedClusterArgs(
                cluster_name="my-cluster",
                config=gcp.dataproc.WorkflowTemplatePlacementManagedClusterConfigArgs(
                    gce_cluster_config=gcp.dataproc.WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigArgs(
                        zone="us-central1-a",
                        tags=[
                            "foo",
                            "bar",
                        ],
                    ),
                    master_config=gcp.dataproc.WorkflowTemplatePlacementManagedClusterConfigMasterConfigArgs(
                        num_instances=1,
                        machine_type="n1-standard-1",
                        disk_config=gcp.dataproc.WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigArgs(
                            boot_disk_type="pd-ssd",
                            boot_disk_size_gb=15,
                        ),
                    ),
                    worker_config=gcp.dataproc.WorkflowTemplatePlacementManagedClusterConfigWorkerConfigArgs(
                        num_instances=3,
                        machine_type="n1-standard-2",
                        disk_config=gcp.dataproc.WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigArgs(
                            boot_disk_size_gb=10,
                            num_local_ssds=2,
                        ),
                    ),
                    secondary_worker_config=gcp.dataproc.WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigArgs(
                        num_instances=2,
                    ),
                    software_config=gcp.dataproc.WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigArgs(
                        image_version="2.0.35-debian10",
                    ),
                ),
            ),
        ),
        jobs=[
            gcp.dataproc.WorkflowTemplateJobArgs(
                step_id="someJob",
                spark_job=gcp.dataproc.WorkflowTemplateJobSparkJobArgs(
                    main_class="SomeClass",
                ),
            ),
            gcp.dataproc.WorkflowTemplateJobArgs(
                step_id="otherJob",
                prerequisite_step_ids=["someJob"],
                presto_job=gcp.dataproc.WorkflowTemplateJobPrestoJobArgs(
                    query_file_uri="someuri",
                ),
            ),
        ])
    
    Go

    package main
    
    import (
    	"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/dataproc"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := dataproc.NewWorkflowTemplate(ctx, "template", &dataproc.WorkflowTemplateArgs{
    			Name:     pulumi.String("template-example"),
    			Location: pulumi.String("us-central1"),
    			Placement: &dataproc.WorkflowTemplatePlacementArgs{
    				ManagedCluster: &dataproc.WorkflowTemplatePlacementManagedClusterArgs{
    					ClusterName: pulumi.String("my-cluster"),
    					Config: &dataproc.WorkflowTemplatePlacementManagedClusterConfigArgs{
    						GceClusterConfig: &dataproc.WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigArgs{
    							Zone: pulumi.String("us-central1-a"),
    							Tags: pulumi.StringArray{
    								pulumi.String("foo"),
    								pulumi.String("bar"),
    							},
    						},
    						MasterConfig: &dataproc.WorkflowTemplatePlacementManagedClusterConfigMasterConfigArgs{
    							NumInstances: pulumi.Int(1),
    							MachineType:  pulumi.String("n1-standard-1"),
    							DiskConfig: &dataproc.WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigArgs{
    								BootDiskType:   pulumi.String("pd-ssd"),
    								BootDiskSizeGb: pulumi.Int(15),
    							},
    						},
    						WorkerConfig: &dataproc.WorkflowTemplatePlacementManagedClusterConfigWorkerConfigArgs{
    							NumInstances: pulumi.Int(3),
    							MachineType:  pulumi.String("n1-standard-2"),
    							DiskConfig: &dataproc.WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigArgs{
    								BootDiskSizeGb: pulumi.Int(10),
    								NumLocalSsds:   pulumi.Int(2),
    							},
    						},
    						SecondaryWorkerConfig: &dataproc.WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigArgs{
    							NumInstances: pulumi.Int(2),
    						},
    						SoftwareConfig: &dataproc.WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigArgs{
    							ImageVersion: pulumi.String("2.0.35-debian10"),
    						},
    					},
    				},
    			},
    			Jobs: dataproc.WorkflowTemplateJobArray{
    				&dataproc.WorkflowTemplateJobArgs{
    					StepId: pulumi.String("someJob"),
    					SparkJob: &dataproc.WorkflowTemplateJobSparkJobArgs{
    						MainClass: pulumi.String("SomeClass"),
    					},
    				},
    				&dataproc.WorkflowTemplateJobArgs{
    					StepId: pulumi.String("otherJob"),
    					PrerequisiteStepIds: pulumi.StringArray{
    						pulumi.String("someJob"),
    					},
    					PrestoJob: &dataproc.WorkflowTemplateJobPrestoJobArgs{
    						QueryFileUri: pulumi.String("someuri"),
    					},
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    C#

    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Gcp = Pulumi.Gcp;
    
    return await Deployment.RunAsync(() => 
    {
        var template = new Gcp.Dataproc.WorkflowTemplate("template", new()
        {
            Name = "template-example",
            Location = "us-central1",
            Placement = new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementArgs
            {
                ManagedCluster = new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementManagedClusterArgs
                {
                    ClusterName = "my-cluster",
                    Config = new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementManagedClusterConfigArgs
                    {
                        GceClusterConfig = new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigArgs
                        {
                            Zone = "us-central1-a",
                            Tags = new[]
                            {
                                "foo",
                                "bar",
                            },
                        },
                        MasterConfig = new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementManagedClusterConfigMasterConfigArgs
                        {
                            NumInstances = 1,
                            MachineType = "n1-standard-1",
                            DiskConfig = new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigArgs
                            {
                                BootDiskType = "pd-ssd",
                                BootDiskSizeGb = 15,
                            },
                        },
                        WorkerConfig = new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementManagedClusterConfigWorkerConfigArgs
                        {
                            NumInstances = 3,
                            MachineType = "n1-standard-2",
                            DiskConfig = new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigArgs
                            {
                                BootDiskSizeGb = 10,
                                NumLocalSsds = 2,
                            },
                        },
                        SecondaryWorkerConfig = new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigArgs
                        {
                            NumInstances = 2,
                        },
                        SoftwareConfig = new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigArgs
                        {
                            ImageVersion = "2.0.35-debian10",
                        },
                    },
                },
            },
            Jobs = new[]
            {
                new Gcp.Dataproc.Inputs.WorkflowTemplateJobArgs
                {
                    StepId = "someJob",
                    SparkJob = new Gcp.Dataproc.Inputs.WorkflowTemplateJobSparkJobArgs
                    {
                        MainClass = "SomeClass",
                    },
                },
                new Gcp.Dataproc.Inputs.WorkflowTemplateJobArgs
                {
                    StepId = "otherJob",
                    PrerequisiteStepIds = new[]
                    {
                        "someJob",
                    },
                    PrestoJob = new Gcp.Dataproc.Inputs.WorkflowTemplateJobPrestoJobArgs
                    {
                        QueryFileUri = "someuri",
                    },
                },
            },
        });
    
    });
    
    Java

    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.gcp.dataproc.WorkflowTemplate;
    import com.pulumi.gcp.dataproc.WorkflowTemplateArgs;
    import com.pulumi.gcp.dataproc.inputs.WorkflowTemplatePlacementArgs;
    import com.pulumi.gcp.dataproc.inputs.WorkflowTemplatePlacementManagedClusterArgs;
    import com.pulumi.gcp.dataproc.inputs.WorkflowTemplatePlacementManagedClusterConfigArgs;
    import com.pulumi.gcp.dataproc.inputs.WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigArgs;
    import com.pulumi.gcp.dataproc.inputs.WorkflowTemplatePlacementManagedClusterConfigMasterConfigArgs;
    import com.pulumi.gcp.dataproc.inputs.WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigArgs;
    import com.pulumi.gcp.dataproc.inputs.WorkflowTemplatePlacementManagedClusterConfigWorkerConfigArgs;
    import com.pulumi.gcp.dataproc.inputs.WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigArgs;
    import com.pulumi.gcp.dataproc.inputs.WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigArgs;
    import com.pulumi.gcp.dataproc.inputs.WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigArgs;
    import com.pulumi.gcp.dataproc.inputs.WorkflowTemplateJobArgs;
    import com.pulumi.gcp.dataproc.inputs.WorkflowTemplateJobSparkJobArgs;
    import com.pulumi.gcp.dataproc.inputs.WorkflowTemplateJobPrestoJobArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var template = new WorkflowTemplate("template", WorkflowTemplateArgs.builder()        
                .name("template-example")
                .location("us-central1")
                .placement(WorkflowTemplatePlacementArgs.builder()
                    .managedCluster(WorkflowTemplatePlacementManagedClusterArgs.builder()
                        .clusterName("my-cluster")
                        .config(WorkflowTemplatePlacementManagedClusterConfigArgs.builder()
                            .gceClusterConfig(WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigArgs.builder()
                                .zone("us-central1-a")
                                .tags(                            
                                    "foo",
                                    "bar")
                                .build())
                            .masterConfig(WorkflowTemplatePlacementManagedClusterConfigMasterConfigArgs.builder()
                                .numInstances(1)
                                .machineType("n1-standard-1")
                                .diskConfig(WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigArgs.builder()
                                    .bootDiskType("pd-ssd")
                                    .bootDiskSizeGb(15)
                                    .build())
                                .build())
                            .workerConfig(WorkflowTemplatePlacementManagedClusterConfigWorkerConfigArgs.builder()
                                .numInstances(3)
                                .machineType("n1-standard-2")
                                .diskConfig(WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigArgs.builder()
                                    .bootDiskSizeGb(10)
                                    .numLocalSsds(2)
                                    .build())
                                .build())
                            .secondaryWorkerConfig(WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigArgs.builder()
                                .numInstances(2)
                                .build())
                            .softwareConfig(WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigArgs.builder()
                                .imageVersion("2.0.35-debian10")
                                .build())
                            .build())
                        .build())
                    .build())
                .jobs(            
                    WorkflowTemplateJobArgs.builder()
                        .stepId("someJob")
                        .sparkJob(WorkflowTemplateJobSparkJobArgs.builder()
                            .mainClass("SomeClass")
                            .build())
                        .build(),
                    WorkflowTemplateJobArgs.builder()
                        .stepId("otherJob")
                        .prerequisiteStepIds("someJob")
                        .prestoJob(WorkflowTemplateJobPrestoJobArgs.builder()
                            .queryFileUri("someuri")
                            .build())
                        .build())
                .build());
    
        }
    }
    
    YAML

    resources:
      template:
        type: gcp:dataproc:WorkflowTemplate
        properties:
          name: template-example
          location: us-central1
          placement:
            managedCluster:
              clusterName: my-cluster
              config:
                gceClusterConfig:
                  zone: us-central1-a
                  tags:
                    - foo
                    - bar
                masterConfig:
                  numInstances: 1
                  machineType: n1-standard-1
                  diskConfig:
                    bootDiskType: pd-ssd
                    bootDiskSizeGb: 15
                workerConfig:
                  numInstances: 3
                  machineType: n1-standard-2
                  diskConfig:
                    bootDiskSizeGb: 10
                    numLocalSsds: 2
                secondaryWorkerConfig:
                  numInstances: 2
                softwareConfig:
                  imageVersion: 2.0.35-debian10
          jobs:
            - stepId: someJob
              sparkJob:
                mainClass: SomeClass
            - stepId: otherJob
              prerequisiteStepIds:
                - someJob
              prestoJob:
                queryFileUri: someuri
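
    The inputs documented below also include `dagTimeout` and `parameters`, which the example above does not exercise. The following is a minimal TypeScript sketch, assuming the Dataproc parameter field-path syntax `jobs['stepId'].sparkJob.mainClass` (not shown on this page), that bounds the whole workflow to 30 minutes and parameterizes the Spark main class:

    import * as pulumi from "@pulumi/pulumi";
    import * as gcp from "@pulumi/gcp";

    // Sketch only: a parameterized template with an overall DAG timeout.
    const parameterized = new gcp.dataproc.WorkflowTemplate("parameterized", {
        name: "parameterized-template",
        location: "us-central1",
        dagTimeout: "1800s", // abort the whole workflow after 30 minutes
        placement: {
            managedCluster: {
                clusterName: "my-cluster",
                config: {
                    gceClusterConfig: {
                        zone: "us-central1-a",
                    },
                    masterConfig: {
                        numInstances: 1,
                        machineType: "n1-standard-1",
                    },
                    workerConfig: {
                        numInstances: 2,
                        machineType: "n1-standard-2",
                    },
                },
            },
        },
        jobs: [{
            stepId: "someJob",
            sparkJob: {
                mainClass: "SomeClass",
            },
        }],
        parameters: [{
            name: "MAIN_CLASS",
            description: "Fully qualified Spark main class to run",
            // Assumed field-path syntax; adjust to your own step IDs and fields.
            fields: ["jobs['someJob'].sparkJob.mainClass"],
        }],
    });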
    

    Create WorkflowTemplate Resource

    Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.

    Constructor syntax

    TypeScript

    new WorkflowTemplate(name: string, args: WorkflowTemplateArgs, opts?: CustomResourceOptions);

    Python

    @overload
    def WorkflowTemplate(resource_name: str,
                         args: WorkflowTemplateArgs,
                         opts: Optional[ResourceOptions] = None)

    @overload
    def WorkflowTemplate(resource_name: str,
                         opts: Optional[ResourceOptions] = None,
                         jobs: Optional[Sequence[WorkflowTemplateJobArgs]] = None,
                         location: Optional[str] = None,
                         placement: Optional[WorkflowTemplatePlacementArgs] = None,
                         dag_timeout: Optional[str] = None,
                         labels: Optional[Mapping[str, str]] = None,
                         name: Optional[str] = None,
                         parameters: Optional[Sequence[WorkflowTemplateParameterArgs]] = None,
                         project: Optional[str] = None,
                         version: Optional[int] = None)

    Go

    func NewWorkflowTemplate(ctx *Context, name string, args WorkflowTemplateArgs, opts ...ResourceOption) (*WorkflowTemplate, error)

    C#

    public WorkflowTemplate(string name, WorkflowTemplateArgs args, CustomResourceOptions? opts = null)

    Java

    public WorkflowTemplate(String name, WorkflowTemplateArgs args)
    public WorkflowTemplate(String name, WorkflowTemplateArgs args, CustomResourceOptions options)

    YAML

    type: gcp:dataproc:WorkflowTemplate
    properties: # The arguments to resource properties.
    options: # Bag of options to control resource's behavior.
    
    

    Parameters

    TypeScript

    name string
    The unique name of the resource.
    args WorkflowTemplateArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.

    Python

    resource_name str
    The unique name of the resource.
    args WorkflowTemplateArgs
    The arguments to resource properties.
    opts ResourceOptions
    Bag of options to control resource's behavior.

    Go

    ctx Context
    Context object for the current deployment.
    name string
    The unique name of the resource.
    args WorkflowTemplateArgs
    The arguments to resource properties.
    opts ResourceOption
    Bag of options to control resource's behavior.

    C#

    name string
    The unique name of the resource.
    args WorkflowTemplateArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.

    Java

    name String
    The unique name of the resource.
    args WorkflowTemplateArgs
    The arguments to resource properties.
    options CustomResourceOptions
    Bag of options to control resource's behavior.
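
    Beyond the arguments above, the `opts` (or `options`) parameter accepts the standard Pulumi resource options such as `protect`, `dependsOn`, or `ignoreChanges`. Below is a minimal TypeScript sketch; the cluster label key and value used by the `clusterSelector` are placeholders, not values taken from this page:

    import * as pulumi from "@pulumi/pulumi";
    import * as gcp from "@pulumi/gcp";

    // Sketch only: run the workflow's jobs on an existing, labeled cluster and
    // attach resource options as the third constructor argument.
    const selectorTemplate = new gcp.dataproc.WorkflowTemplate("selector-template", {
        name: "selector-template",
        location: "us-central1",
        placement: {
            clusterSelector: {
                clusterLabels: {
                    "my-label": "my-value", // placeholder label on an existing cluster
                },
            },
        },
        jobs: [{
            stepId: "onlyJob",
            sparkJob: {
                mainClass: "SomeClass",
            },
        }],
    }, {
        protect: true,             // refuse to delete this template on destroy
        ignoreChanges: ["labels"], // ignore out-of-band label changes
    });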

    Example

    The following reference example uses placeholder values for all input properties.

    C#

    var workflowTemplateResource = new Gcp.Dataproc.WorkflowTemplate("workflowTemplateResource", new()
    {
        Jobs = new[]
        {
            new Gcp.Dataproc.Inputs.WorkflowTemplateJobArgs
            {
                StepId = "string",
                HadoopJob = new Gcp.Dataproc.Inputs.WorkflowTemplateJobHadoopJobArgs
                {
                    ArchiveUris = new[]
                    {
                        "string",
                    },
                    Args = new[]
                    {
                        "string",
                    },
                    FileUris = new[]
                    {
                        "string",
                    },
                    JarFileUris = new[]
                    {
                        "string",
                    },
                    LoggingConfig = new Gcp.Dataproc.Inputs.WorkflowTemplateJobHadoopJobLoggingConfigArgs
                    {
                        DriverLogLevels = 
                        {
                            { "string", "string" },
                        },
                    },
                    MainClass = "string",
                    MainJarFileUri = "string",
                    Properties = 
                    {
                        { "string", "string" },
                    },
                },
                HiveJob = new Gcp.Dataproc.Inputs.WorkflowTemplateJobHiveJobArgs
                {
                    ContinueOnFailure = false,
                    JarFileUris = new[]
                    {
                        "string",
                    },
                    Properties = 
                    {
                        { "string", "string" },
                    },
                    QueryFileUri = "string",
                    QueryList = new Gcp.Dataproc.Inputs.WorkflowTemplateJobHiveJobQueryListArgs
                    {
                        Queries = new[]
                        {
                            "string",
                        },
                    },
                    ScriptVariables = 
                    {
                        { "string", "string" },
                    },
                },
                Labels = 
                {
                    { "string", "string" },
                },
                PigJob = new Gcp.Dataproc.Inputs.WorkflowTemplateJobPigJobArgs
                {
                    ContinueOnFailure = false,
                    JarFileUris = new[]
                    {
                        "string",
                    },
                    LoggingConfig = new Gcp.Dataproc.Inputs.WorkflowTemplateJobPigJobLoggingConfigArgs
                    {
                        DriverLogLevels = 
                        {
                            { "string", "string" },
                        },
                    },
                    Properties = 
                    {
                        { "string", "string" },
                    },
                    QueryFileUri = "string",
                    QueryList = new Gcp.Dataproc.Inputs.WorkflowTemplateJobPigJobQueryListArgs
                    {
                        Queries = new[]
                        {
                            "string",
                        },
                    },
                    ScriptVariables = 
                    {
                        { "string", "string" },
                    },
                },
                PrerequisiteStepIds = new[]
                {
                    "string",
                },
                PrestoJob = new Gcp.Dataproc.Inputs.WorkflowTemplateJobPrestoJobArgs
                {
                    ClientTags = new[]
                    {
                        "string",
                    },
                    ContinueOnFailure = false,
                    LoggingConfig = new Gcp.Dataproc.Inputs.WorkflowTemplateJobPrestoJobLoggingConfigArgs
                    {
                        DriverLogLevels = 
                        {
                            { "string", "string" },
                        },
                    },
                    OutputFormat = "string",
                    Properties = 
                    {
                        { "string", "string" },
                    },
                    QueryFileUri = "string",
                    QueryList = new Gcp.Dataproc.Inputs.WorkflowTemplateJobPrestoJobQueryListArgs
                    {
                        Queries = new[]
                        {
                            "string",
                        },
                    },
                },
                PysparkJob = new Gcp.Dataproc.Inputs.WorkflowTemplateJobPysparkJobArgs
                {
                    MainPythonFileUri = "string",
                    ArchiveUris = new[]
                    {
                        "string",
                    },
                    Args = new[]
                    {
                        "string",
                    },
                    FileUris = new[]
                    {
                        "string",
                    },
                    JarFileUris = new[]
                    {
                        "string",
                    },
                    LoggingConfig = new Gcp.Dataproc.Inputs.WorkflowTemplateJobPysparkJobLoggingConfigArgs
                    {
                        DriverLogLevels = 
                        {
                            { "string", "string" },
                        },
                    },
                    Properties = 
                    {
                        { "string", "string" },
                    },
                    PythonFileUris = new[]
                    {
                        "string",
                    },
                },
                Scheduling = new Gcp.Dataproc.Inputs.WorkflowTemplateJobSchedulingArgs
                {
                    MaxFailuresPerHour = 0,
                    MaxFailuresTotal = 0,
                },
                SparkJob = new Gcp.Dataproc.Inputs.WorkflowTemplateJobSparkJobArgs
                {
                    ArchiveUris = new[]
                    {
                        "string",
                    },
                    Args = new[]
                    {
                        "string",
                    },
                    FileUris = new[]
                    {
                        "string",
                    },
                    JarFileUris = new[]
                    {
                        "string",
                    },
                    LoggingConfig = new Gcp.Dataproc.Inputs.WorkflowTemplateJobSparkJobLoggingConfigArgs
                    {
                        DriverLogLevels = 
                        {
                            { "string", "string" },
                        },
                    },
                    MainClass = "string",
                    MainJarFileUri = "string",
                    Properties = 
                    {
                        { "string", "string" },
                    },
                },
                SparkRJob = new Gcp.Dataproc.Inputs.WorkflowTemplateJobSparkRJobArgs
                {
                    MainRFileUri = "string",
                    ArchiveUris = new[]
                    {
                        "string",
                    },
                    Args = new[]
                    {
                        "string",
                    },
                    FileUris = new[]
                    {
                        "string",
                    },
                    LoggingConfig = new Gcp.Dataproc.Inputs.WorkflowTemplateJobSparkRJobLoggingConfigArgs
                    {
                        DriverLogLevels = 
                        {
                            { "string", "string" },
                        },
                    },
                    Properties = 
                    {
                        { "string", "string" },
                    },
                },
                SparkSqlJob = new Gcp.Dataproc.Inputs.WorkflowTemplateJobSparkSqlJobArgs
                {
                    JarFileUris = new[]
                    {
                        "string",
                    },
                    LoggingConfig = new Gcp.Dataproc.Inputs.WorkflowTemplateJobSparkSqlJobLoggingConfigArgs
                    {
                        DriverLogLevels = 
                        {
                            { "string", "string" },
                        },
                    },
                    Properties = 
                    {
                        { "string", "string" },
                    },
                    QueryFileUri = "string",
                    QueryList = new Gcp.Dataproc.Inputs.WorkflowTemplateJobSparkSqlJobQueryListArgs
                    {
                        Queries = new[]
                        {
                            "string",
                        },
                    },
                    ScriptVariables = 
                    {
                        { "string", "string" },
                    },
                },
            },
        },
        Location = "string",
        Placement = new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementArgs
        {
            ClusterSelector = new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementClusterSelectorArgs
            {
                ClusterLabels = 
                {
                    { "string", "string" },
                },
                Zone = "string",
            },
            ManagedCluster = new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementManagedClusterArgs
            {
                ClusterName = "string",
                Config = new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementManagedClusterConfigArgs
                {
                    AutoscalingConfig = new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigArgs
                    {
                        Policy = "string",
                    },
                    EncryptionConfig = new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementManagedClusterConfigEncryptionConfigArgs
                    {
                        GcePdKmsKeyName = "string",
                    },
                    EndpointConfig = new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementManagedClusterConfigEndpointConfigArgs
                    {
                        EnableHttpPortAccess = false,
                        HttpPorts = 
                        {
                            { "string", "string" },
                        },
                    },
                    GceClusterConfig = new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigArgs
                    {
                        InternalIpOnly = false,
                        Metadata = 
                        {
                            { "string", "string" },
                        },
                        Network = "string",
                        NodeGroupAffinity = new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityArgs
                        {
                            NodeGroup = "string",
                        },
                        PrivateIpv6GoogleAccess = "string",
                        ReservationAffinity = new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityArgs
                        {
                            ConsumeReservationType = "string",
                            Key = "string",
                            Values = new[]
                            {
                                "string",
                            },
                        },
                        ServiceAccount = "string",
                        ServiceAccountScopes = new[]
                        {
                            "string",
                        },
                        ShieldedInstanceConfig = new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfigArgs
                        {
                            EnableIntegrityMonitoring = false,
                            EnableSecureBoot = false,
                            EnableVtpm = false,
                        },
                        Subnetwork = "string",
                        Tags = new[]
                        {
                            "string",
                        },
                        Zone = "string",
                    },
                    GkeClusterConfig = new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigArgs
                    {
                        NamespacedGkeDeploymentTarget = new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetArgs
                        {
                            ClusterNamespace = "string",
                            TargetGkeCluster = "string",
                        },
                    },
                    InitializationActions = new[]
                    {
                        new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementManagedClusterConfigInitializationActionArgs
                        {
                            ExecutableFile = "string",
                            ExecutionTimeout = "string",
                        },
                    },
                    LifecycleConfig = new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigArgs
                    {
                        AutoDeleteTime = "string",
                        AutoDeleteTtl = "string",
                        IdleDeleteTtl = "string",
                        IdleStartTime = "string",
                    },
                    MasterConfig = new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementManagedClusterConfigMasterConfigArgs
                    {
                        Accelerators = new[]
                        {
                            new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorArgs
                            {
                                AcceleratorCount = 0,
                                AcceleratorType = "string",
                            },
                        },
                        DiskConfig = new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigArgs
                        {
                            BootDiskSizeGb = 0,
                            BootDiskType = "string",
                            NumLocalSsds = 0,
                        },
                        Image = "string",
                        InstanceNames = new[]
                        {
                            "string",
                        },
                        IsPreemptible = false,
                        MachineType = "string",
                        ManagedGroupConfigs = new[]
                        {
                            new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigArgs
                            {
                                InstanceGroupManagerName = "string",
                                InstanceTemplateName = "string",
                            },
                        },
                        MinCpuPlatform = "string",
                        NumInstances = 0,
                        Preemptibility = "string",
                    },
                    MetastoreConfig = new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementManagedClusterConfigMetastoreConfigArgs
                    {
                        DataprocMetastoreService = "string",
                    },
                    SecondaryWorkerConfig = new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigArgs
                    {
                        Accelerators = new[]
                        {
                            new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorArgs
                            {
                                AcceleratorCount = 0,
                                AcceleratorType = "string",
                            },
                        },
                        DiskConfig = new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigArgs
                        {
                            BootDiskSizeGb = 0,
                            BootDiskType = "string",
                            NumLocalSsds = 0,
                        },
                        Image = "string",
                        InstanceNames = new[]
                        {
                            "string",
                        },
                        IsPreemptible = false,
                        MachineType = "string",
                        ManagedGroupConfigs = new[]
                        {
                            new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigArgs
                            {
                                InstanceGroupManagerName = "string",
                                InstanceTemplateName = "string",
                            },
                        },
                        MinCpuPlatform = "string",
                        NumInstances = 0,
                        Preemptibility = "string",
                    },
                    SecurityConfig = new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementManagedClusterConfigSecurityConfigArgs
                    {
                        KerberosConfig = new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigArgs
                        {
                            CrossRealmTrustAdminServer = "string",
                            CrossRealmTrustKdc = "string",
                            CrossRealmTrustRealm = "string",
                            CrossRealmTrustSharedPassword = "string",
                            EnableKerberos = false,
                            KdcDbKey = "string",
                            KeyPassword = "string",
                            Keystore = "string",
                            KeystorePassword = "string",
                            KmsKey = "string",
                            Realm = "string",
                            RootPrincipalPassword = "string",
                            TgtLifetimeHours = 0,
                            Truststore = "string",
                            TruststorePassword = "string",
                        },
                    },
                    SoftwareConfig = new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigArgs
                    {
                        ImageVersion = "string",
                        OptionalComponents = new[]
                        {
                            "string",
                        },
                        Properties = 
                        {
                            { "string", "string" },
                        },
                    },
                    StagingBucket = "string",
                    TempBucket = "string",
                    WorkerConfig = new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementManagedClusterConfigWorkerConfigArgs
                    {
                        Accelerators = new[]
                        {
                            new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorArgs
                            {
                                AcceleratorCount = 0,
                                AcceleratorType = "string",
                            },
                        },
                        DiskConfig = new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigArgs
                        {
                            BootDiskSizeGb = 0,
                            BootDiskType = "string",
                            NumLocalSsds = 0,
                        },
                        Image = "string",
                        InstanceNames = new[]
                        {
                            "string",
                        },
                        IsPreemptible = false,
                        MachineType = "string",
                        ManagedGroupConfigs = new[]
                        {
                            new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigArgs
                            {
                                InstanceGroupManagerName = "string",
                                InstanceTemplateName = "string",
                            },
                        },
                        MinCpuPlatform = "string",
                        NumInstances = 0,
                        Preemptibility = "string",
                    },
                },
                Labels = 
                {
                    { "string", "string" },
                },
            },
        },
        DagTimeout = "string",
        Labels = 
        {
            { "string", "string" },
        },
        Name = "string",
        Parameters = new[]
        {
            new Gcp.Dataproc.Inputs.WorkflowTemplateParameterArgs
            {
                Fields = new[]
                {
                    "string",
                },
                Name = "string",
                Description = "string",
                Validation = new Gcp.Dataproc.Inputs.WorkflowTemplateParameterValidationArgs
                {
                    Regex = new Gcp.Dataproc.Inputs.WorkflowTemplateParameterValidationRegexArgs
                    {
                        Regexes = new[]
                        {
                            "string",
                        },
                    },
                    Values = new Gcp.Dataproc.Inputs.WorkflowTemplateParameterValidationValuesArgs
                    {
                        Values = new[]
                        {
                            "string",
                        },
                    },
                },
            },
        },
        Project = "string",
    });
    
    Go

    example, err := dataproc.NewWorkflowTemplate(ctx, "workflowTemplateResource", &dataproc.WorkflowTemplateArgs{
    	Jobs: dataproc.WorkflowTemplateJobArray{
    		&dataproc.WorkflowTemplateJobArgs{
    			StepId: pulumi.String("string"),
    			HadoopJob: &dataproc.WorkflowTemplateJobHadoopJobArgs{
    				ArchiveUris: pulumi.StringArray{
    					pulumi.String("string"),
    				},
    				Args: pulumi.StringArray{
    					pulumi.String("string"),
    				},
    				FileUris: pulumi.StringArray{
    					pulumi.String("string"),
    				},
    				JarFileUris: pulumi.StringArray{
    					pulumi.String("string"),
    				},
    				LoggingConfig: &dataproc.WorkflowTemplateJobHadoopJobLoggingConfigArgs{
    					DriverLogLevels: pulumi.StringMap{
    						"string": pulumi.String("string"),
    					},
    				},
    				MainClass:      pulumi.String("string"),
    				MainJarFileUri: pulumi.String("string"),
    				Properties: pulumi.StringMap{
    					"string": pulumi.String("string"),
    				},
    			},
    			HiveJob: &dataproc.WorkflowTemplateJobHiveJobArgs{
    				ContinueOnFailure: pulumi.Bool(false),
    				JarFileUris: pulumi.StringArray{
    					pulumi.String("string"),
    				},
    				Properties: pulumi.StringMap{
    					"string": pulumi.String("string"),
    				},
    				QueryFileUri: pulumi.String("string"),
    				QueryList: &dataproc.WorkflowTemplateJobHiveJobQueryListArgs{
    					Queries: pulumi.StringArray{
    						pulumi.String("string"),
    					},
    				},
    				ScriptVariables: pulumi.StringMap{
    					"string": pulumi.String("string"),
    				},
    			},
    			Labels: pulumi.StringMap{
    				"string": pulumi.String("string"),
    			},
    			PigJob: &dataproc.WorkflowTemplateJobPigJobArgs{
    				ContinueOnFailure: pulumi.Bool(false),
    				JarFileUris: pulumi.StringArray{
    					pulumi.String("string"),
    				},
    				LoggingConfig: &dataproc.WorkflowTemplateJobPigJobLoggingConfigArgs{
    					DriverLogLevels: pulumi.StringMap{
    						"string": pulumi.String("string"),
    					},
    				},
    				Properties: pulumi.StringMap{
    					"string": pulumi.String("string"),
    				},
    				QueryFileUri: pulumi.String("string"),
    				QueryList: &dataproc.WorkflowTemplateJobPigJobQueryListArgs{
    					Queries: pulumi.StringArray{
    						pulumi.String("string"),
    					},
    				},
    				ScriptVariables: pulumi.StringMap{
    					"string": pulumi.String("string"),
    				},
    			},
    			PrerequisiteStepIds: pulumi.StringArray{
    				pulumi.String("string"),
    			},
    			PrestoJob: &dataproc.WorkflowTemplateJobPrestoJobArgs{
    				ClientTags: pulumi.StringArray{
    					pulumi.String("string"),
    				},
    				ContinueOnFailure: pulumi.Bool(false),
    				LoggingConfig: &dataproc.WorkflowTemplateJobPrestoJobLoggingConfigArgs{
    					DriverLogLevels: pulumi.StringMap{
    						"string": pulumi.String("string"),
    					},
    				},
    				OutputFormat: pulumi.String("string"),
    				Properties: pulumi.StringMap{
    					"string": pulumi.String("string"),
    				},
    				QueryFileUri: pulumi.String("string"),
    				QueryList: &dataproc.WorkflowTemplateJobPrestoJobQueryListArgs{
    					Queries: pulumi.StringArray{
    						pulumi.String("string"),
    					},
    				},
    			},
    			PysparkJob: &dataproc.WorkflowTemplateJobPysparkJobArgs{
    				MainPythonFileUri: pulumi.String("string"),
    				ArchiveUris: pulumi.StringArray{
    					pulumi.String("string"),
    				},
    				Args: pulumi.StringArray{
    					pulumi.String("string"),
    				},
    				FileUris: pulumi.StringArray{
    					pulumi.String("string"),
    				},
    				JarFileUris: pulumi.StringArray{
    					pulumi.String("string"),
    				},
    				LoggingConfig: &dataproc.WorkflowTemplateJobPysparkJobLoggingConfigArgs{
    					DriverLogLevels: pulumi.StringMap{
    						"string": pulumi.String("string"),
    					},
    				},
    				Properties: pulumi.StringMap{
    					"string": pulumi.String("string"),
    				},
    				PythonFileUris: pulumi.StringArray{
    					pulumi.String("string"),
    				},
    			},
    			Scheduling: &dataproc.WorkflowTemplateJobSchedulingArgs{
    				MaxFailuresPerHour: pulumi.Int(0),
    				MaxFailuresTotal:   pulumi.Int(0),
    			},
    			SparkJob: &dataproc.WorkflowTemplateJobSparkJobArgs{
    				ArchiveUris: pulumi.StringArray{
    					pulumi.String("string"),
    				},
    				Args: pulumi.StringArray{
    					pulumi.String("string"),
    				},
    				FileUris: pulumi.StringArray{
    					pulumi.String("string"),
    				},
    				JarFileUris: pulumi.StringArray{
    					pulumi.String("string"),
    				},
    				LoggingConfig: &dataproc.WorkflowTemplateJobSparkJobLoggingConfigArgs{
    					DriverLogLevels: pulumi.StringMap{
    						"string": pulumi.String("string"),
    					},
    				},
    				MainClass:      pulumi.String("string"),
    				MainJarFileUri: pulumi.String("string"),
    				Properties: pulumi.StringMap{
    					"string": pulumi.String("string"),
    				},
    			},
    			SparkRJob: &dataproc.WorkflowTemplateJobSparkRJobArgs{
    				MainRFileUri: pulumi.String("string"),
    				ArchiveUris: pulumi.StringArray{
    					pulumi.String("string"),
    				},
    				Args: pulumi.StringArray{
    					pulumi.String("string"),
    				},
    				FileUris: pulumi.StringArray{
    					pulumi.String("string"),
    				},
    				LoggingConfig: &dataproc.WorkflowTemplateJobSparkRJobLoggingConfigArgs{
    					DriverLogLevels: pulumi.StringMap{
    						"string": pulumi.String("string"),
    					},
    				},
    				Properties: pulumi.StringMap{
    					"string": pulumi.String("string"),
    				},
    			},
    			SparkSqlJob: &dataproc.WorkflowTemplateJobSparkSqlJobArgs{
    				JarFileUris: pulumi.StringArray{
    					pulumi.String("string"),
    				},
    				LoggingConfig: &dataproc.WorkflowTemplateJobSparkSqlJobLoggingConfigArgs{
    					DriverLogLevels: pulumi.StringMap{
    						"string": pulumi.String("string"),
    					},
    				},
    				Properties: pulumi.StringMap{
    					"string": pulumi.String("string"),
    				},
    				QueryFileUri: pulumi.String("string"),
    				QueryList: &dataproc.WorkflowTemplateJobSparkSqlJobQueryListArgs{
    					Queries: pulumi.StringArray{
    						pulumi.String("string"),
    					},
    				},
    				ScriptVariables: pulumi.StringMap{
    					"string": pulumi.String("string"),
    				},
    			},
    		},
    	},
    	Location: pulumi.String("string"),
    	Placement: &dataproc.WorkflowTemplatePlacementArgs{
    		ClusterSelector: &dataproc.WorkflowTemplatePlacementClusterSelectorArgs{
    			ClusterLabels: pulumi.StringMap{
    				"string": pulumi.String("string"),
    			},
    			Zone: pulumi.String("string"),
    		},
    		ManagedCluster: &dataproc.WorkflowTemplatePlacementManagedClusterArgs{
    			ClusterName: pulumi.String("string"),
    			Config: &dataproc.WorkflowTemplatePlacementManagedClusterConfigArgs{
    				AutoscalingConfig: &dataproc.WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigArgs{
    					Policy: pulumi.String("string"),
    				},
    				EncryptionConfig: &dataproc.WorkflowTemplatePlacementManagedClusterConfigEncryptionConfigArgs{
    					GcePdKmsKeyName: pulumi.String("string"),
    				},
    				EndpointConfig: &dataproc.WorkflowTemplatePlacementManagedClusterConfigEndpointConfigArgs{
    					EnableHttpPortAccess: pulumi.Bool(false),
    					HttpPorts: pulumi.StringMap{
    						"string": pulumi.String("string"),
    					},
    				},
    				GceClusterConfig: &dataproc.WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigArgs{
    					InternalIpOnly: pulumi.Bool(false),
    					Metadata: pulumi.StringMap{
    						"string": pulumi.String("string"),
    					},
    					Network: pulumi.String("string"),
    					NodeGroupAffinity: &dataproc.WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityArgs{
    						NodeGroup: pulumi.String("string"),
    					},
    					PrivateIpv6GoogleAccess: pulumi.String("string"),
    					ReservationAffinity: &dataproc.WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityArgs{
    						ConsumeReservationType: pulumi.String("string"),
    						Key:                    pulumi.String("string"),
    						Values: pulumi.StringArray{
    							pulumi.String("string"),
    						},
    					},
    					ServiceAccount: pulumi.String("string"),
    					ServiceAccountScopes: pulumi.StringArray{
    						pulumi.String("string"),
    					},
    					ShieldedInstanceConfig: &dataproc.WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfigArgs{
    						EnableIntegrityMonitoring: pulumi.Bool(false),
    						EnableSecureBoot:          pulumi.Bool(false),
    						EnableVtpm:                pulumi.Bool(false),
    					},
    					Subnetwork: pulumi.String("string"),
    					Tags: pulumi.StringArray{
    						pulumi.String("string"),
    					},
    					Zone: pulumi.String("string"),
    				},
    				GkeClusterConfig: &dataproc.WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigArgs{
    					NamespacedGkeDeploymentTarget: &dataproc.WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetArgs{
    						ClusterNamespace: pulumi.String("string"),
    						TargetGkeCluster: pulumi.String("string"),
    					},
    				},
    				InitializationActions: dataproc.WorkflowTemplatePlacementManagedClusterConfigInitializationActionArray{
    					&dataproc.WorkflowTemplatePlacementManagedClusterConfigInitializationActionArgs{
    						ExecutableFile:   pulumi.String("string"),
    						ExecutionTimeout: pulumi.String("string"),
    					},
    				},
    				LifecycleConfig: &dataproc.WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigArgs{
    					AutoDeleteTime: pulumi.String("string"),
    					AutoDeleteTtl:  pulumi.String("string"),
    					IdleDeleteTtl:  pulumi.String("string"),
    					IdleStartTime:  pulumi.String("string"),
    				},
    				MasterConfig: &dataproc.WorkflowTemplatePlacementManagedClusterConfigMasterConfigArgs{
    					Accelerators: dataproc.WorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorArray{
    						&dataproc.WorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorArgs{
    							AcceleratorCount: pulumi.Int(0),
    							AcceleratorType:  pulumi.String("string"),
    						},
    					},
    					DiskConfig: &dataproc.WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigArgs{
    						BootDiskSizeGb: pulumi.Int(0),
    						BootDiskType:   pulumi.String("string"),
    						NumLocalSsds:   pulumi.Int(0),
    					},
    					Image: pulumi.String("string"),
    					InstanceNames: pulumi.StringArray{
    						pulumi.String("string"),
    					},
    					IsPreemptible: pulumi.Bool(false),
    					MachineType:   pulumi.String("string"),
    					ManagedGroupConfigs: dataproc.WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigArray{
    						&dataproc.WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigArgs{
    							InstanceGroupManagerName: pulumi.String("string"),
    							InstanceTemplateName:     pulumi.String("string"),
    						},
    					},
    					MinCpuPlatform: pulumi.String("string"),
    					NumInstances:   pulumi.Int(0),
    					Preemptibility: pulumi.String("string"),
    				},
    				MetastoreConfig: &dataproc.WorkflowTemplatePlacementManagedClusterConfigMetastoreConfigArgs{
    					DataprocMetastoreService: pulumi.String("string"),
    				},
    				SecondaryWorkerConfig: &dataproc.WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigArgs{
    					Accelerators: dataproc.WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorArray{
    						&dataproc.WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorArgs{
    							AcceleratorCount: pulumi.Int(0),
    							AcceleratorType:  pulumi.String("string"),
    						},
    					},
    					DiskConfig: &dataproc.WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigArgs{
    						BootDiskSizeGb: pulumi.Int(0),
    						BootDiskType:   pulumi.String("string"),
    						NumLocalSsds:   pulumi.Int(0),
    					},
    					Image: pulumi.String("string"),
    					InstanceNames: pulumi.StringArray{
    						pulumi.String("string"),
    					},
    					IsPreemptible: pulumi.Bool(false),
    					MachineType:   pulumi.String("string"),
    					ManagedGroupConfigs: dataproc.WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigArray{
    						&dataproc.WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigArgs{
    							InstanceGroupManagerName: pulumi.String("string"),
    							InstanceTemplateName:     pulumi.String("string"),
    						},
    					},
    					MinCpuPlatform: pulumi.String("string"),
    					NumInstances:   pulumi.Int(0),
    					Preemptibility: pulumi.String("string"),
    				},
    				SecurityConfig: &dataproc.WorkflowTemplatePlacementManagedClusterConfigSecurityConfigArgs{
    					KerberosConfig: &dataproc.WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigArgs{
    						CrossRealmTrustAdminServer:    pulumi.String("string"),
    						CrossRealmTrustKdc:            pulumi.String("string"),
    						CrossRealmTrustRealm:          pulumi.String("string"),
    						CrossRealmTrustSharedPassword: pulumi.String("string"),
    						EnableKerberos:                pulumi.Bool(false),
    						KdcDbKey:                      pulumi.String("string"),
    						KeyPassword:                   pulumi.String("string"),
    						Keystore:                      pulumi.String("string"),
    						KeystorePassword:              pulumi.String("string"),
    						KmsKey:                        pulumi.String("string"),
    						Realm:                         pulumi.String("string"),
    						RootPrincipalPassword:         pulumi.String("string"),
    						TgtLifetimeHours:              pulumi.Int(0),
    						Truststore:                    pulumi.String("string"),
    						TruststorePassword:            pulumi.String("string"),
    					},
    				},
    				SoftwareConfig: &dataproc.WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigArgs{
    					ImageVersion: pulumi.String("string"),
    					OptionalComponents: pulumi.StringArray{
    						pulumi.String("string"),
    					},
    					Properties: pulumi.StringMap{
    						"string": pulumi.String("string"),
    					},
    				},
    				StagingBucket: pulumi.String("string"),
    				TempBucket:    pulumi.String("string"),
    				WorkerConfig: &dataproc.WorkflowTemplatePlacementManagedClusterConfigWorkerConfigArgs{
    					Accelerators: dataproc.WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorArray{
    						&dataproc.WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorArgs{
    							AcceleratorCount: pulumi.Int(0),
    							AcceleratorType:  pulumi.String("string"),
    						},
    					},
    					DiskConfig: &dataproc.WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigArgs{
    						BootDiskSizeGb: pulumi.Int(0),
    						BootDiskType:   pulumi.String("string"),
    						NumLocalSsds:   pulumi.Int(0),
    					},
    					Image: pulumi.String("string"),
    					InstanceNames: pulumi.StringArray{
    						pulumi.String("string"),
    					},
    					IsPreemptible: pulumi.Bool(false),
    					MachineType:   pulumi.String("string"),
    					ManagedGroupConfigs: dataproc.WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigArray{
    						&dataproc.WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigArgs{
    							InstanceGroupManagerName: pulumi.String("string"),
    							InstanceTemplateName:     pulumi.String("string"),
    						},
    					},
    					MinCpuPlatform: pulumi.String("string"),
    					NumInstances:   pulumi.Int(0),
    					Preemptibility: pulumi.String("string"),
    				},
    			},
    			Labels: pulumi.StringMap{
    				"string": pulumi.String("string"),
    			},
    		},
    	},
    	DagTimeout: pulumi.String("string"),
    	Labels: pulumi.StringMap{
    		"string": pulumi.String("string"),
    	},
    	Name: pulumi.String("string"),
    	Parameters: dataproc.WorkflowTemplateParameterArray{
    		&dataproc.WorkflowTemplateParameterArgs{
    			Fields: pulumi.StringArray{
    				pulumi.String("string"),
    			},
    			Name:        pulumi.String("string"),
    			Description: pulumi.String("string"),
    			Validation: &dataproc.WorkflowTemplateParameterValidationArgs{
    				Regex: &dataproc.WorkflowTemplateParameterValidationRegexArgs{
    					Regexes: pulumi.StringArray{
    						pulumi.String("string"),
    					},
    				},
    				Values: &dataproc.WorkflowTemplateParameterValidationValuesArgs{
    					Values: pulumi.StringArray{
    						pulumi.String("string"),
    					},
    				},
    			},
    		},
    	},
    	Project: pulumi.String("string"),
    })
    
    var workflowTemplateResource = new WorkflowTemplate("workflowTemplateResource", WorkflowTemplateArgs.builder()        
        .jobs(WorkflowTemplateJobArgs.builder()
            .stepId("string")
            .hadoopJob(WorkflowTemplateJobHadoopJobArgs.builder()
                .archiveUris("string")
                .args("string")
                .fileUris("string")
                .jarFileUris("string")
                .loggingConfig(WorkflowTemplateJobHadoopJobLoggingConfigArgs.builder()
                    .driverLogLevels(Map.of("string", "string"))
                    .build())
                .mainClass("string")
                .mainJarFileUri("string")
                .properties(Map.of("string", "string"))
                .build())
            .hiveJob(WorkflowTemplateJobHiveJobArgs.builder()
                .continueOnFailure(false)
                .jarFileUris("string")
                .properties(Map.of("string", "string"))
                .queryFileUri("string")
                .queryList(WorkflowTemplateJobHiveJobQueryListArgs.builder()
                    .queries("string")
                    .build())
                .scriptVariables(Map.of("string", "string"))
                .build())
            .labels(Map.of("string", "string"))
            .pigJob(WorkflowTemplateJobPigJobArgs.builder()
                .continueOnFailure(false)
                .jarFileUris("string")
                .loggingConfig(WorkflowTemplateJobPigJobLoggingConfigArgs.builder()
                    .driverLogLevels(Map.of("string", "string"))
                    .build())
                .properties(Map.of("string", "string"))
                .queryFileUri("string")
                .queryList(WorkflowTemplateJobPigJobQueryListArgs.builder()
                    .queries("string")
                    .build())
                .scriptVariables(Map.of("string", "string"))
                .build())
            .prerequisiteStepIds("string")
            .prestoJob(WorkflowTemplateJobPrestoJobArgs.builder()
                .clientTags("string")
                .continueOnFailure(false)
                .loggingConfig(WorkflowTemplateJobPrestoJobLoggingConfigArgs.builder()
                    .driverLogLevels(Map.of("string", "string"))
                    .build())
                .outputFormat("string")
                .properties(Map.of("string", "string"))
                .queryFileUri("string")
                .queryList(WorkflowTemplateJobPrestoJobQueryListArgs.builder()
                    .queries("string")
                    .build())
                .build())
            .pysparkJob(WorkflowTemplateJobPysparkJobArgs.builder()
                .mainPythonFileUri("string")
                .archiveUris("string")
                .args("string")
                .fileUris("string")
                .jarFileUris("string")
                .loggingConfig(WorkflowTemplateJobPysparkJobLoggingConfigArgs.builder()
                    .driverLogLevels(Map.of("string", "string"))
                    .build())
                .properties(Map.of("string", "string"))
                .pythonFileUris("string")
                .build())
            .scheduling(WorkflowTemplateJobSchedulingArgs.builder()
                .maxFailuresPerHour(0)
                .maxFailuresTotal(0)
                .build())
            .sparkJob(WorkflowTemplateJobSparkJobArgs.builder()
                .archiveUris("string")
                .args("string")
                .fileUris("string")
                .jarFileUris("string")
                .loggingConfig(WorkflowTemplateJobSparkJobLoggingConfigArgs.builder()
                    .driverLogLevels(Map.of("string", "string"))
                    .build())
                .mainClass("string")
                .mainJarFileUri("string")
                .properties(Map.of("string", "string"))
                .build())
            .sparkRJob(WorkflowTemplateJobSparkRJobArgs.builder()
                .mainRFileUri("string")
                .archiveUris("string")
                .args("string")
                .fileUris("string")
                .loggingConfig(WorkflowTemplateJobSparkRJobLoggingConfigArgs.builder()
                    .driverLogLevels(Map.of("string", "string"))
                    .build())
                .properties(Map.of("string", "string"))
                .build())
            .sparkSqlJob(WorkflowTemplateJobSparkSqlJobArgs.builder()
                .jarFileUris("string")
                .loggingConfig(WorkflowTemplateJobSparkSqlJobLoggingConfigArgs.builder()
                    .driverLogLevels(Map.of("string", "string"))
                    .build())
                .properties(Map.of("string", "string"))
                .queryFileUri("string")
                .queryList(WorkflowTemplateJobSparkSqlJobQueryListArgs.builder()
                    .queries("string")
                    .build())
                .scriptVariables(Map.of("string", "string"))
                .build())
            .build())
        .location("string")
        .placement(WorkflowTemplatePlacementArgs.builder()
            .clusterSelector(WorkflowTemplatePlacementClusterSelectorArgs.builder()
                .clusterLabels(Map.of("string", "string"))
                .zone("string")
                .build())
            .managedCluster(WorkflowTemplatePlacementManagedClusterArgs.builder()
                .clusterName("string")
                .config(WorkflowTemplatePlacementManagedClusterConfigArgs.builder()
                    .autoscalingConfig(WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigArgs.builder()
                        .policy("string")
                        .build())
                    .encryptionConfig(WorkflowTemplatePlacementManagedClusterConfigEncryptionConfigArgs.builder()
                        .gcePdKmsKeyName("string")
                        .build())
                    .endpointConfig(WorkflowTemplatePlacementManagedClusterConfigEndpointConfigArgs.builder()
                        .enableHttpPortAccess(false)
                        .httpPorts(Map.of("string", "string"))
                        .build())
                    .gceClusterConfig(WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigArgs.builder()
                        .internalIpOnly(false)
                        .metadata(Map.of("string", "string"))
                        .network("string")
                        .nodeGroupAffinity(WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityArgs.builder()
                            .nodeGroup("string")
                            .build())
                        .privateIpv6GoogleAccess("string")
                        .reservationAffinity(WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityArgs.builder()
                            .consumeReservationType("string")
                            .key("string")
                            .values("string")
                            .build())
                        .serviceAccount("string")
                        .serviceAccountScopes("string")
                        .shieldedInstanceConfig(WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfigArgs.builder()
                            .enableIntegrityMonitoring(false)
                            .enableSecureBoot(false)
                            .enableVtpm(false)
                            .build())
                        .subnetwork("string")
                        .tags("string")
                        .zone("string")
                        .build())
                    .gkeClusterConfig(WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigArgs.builder()
                        .namespacedGkeDeploymentTarget(WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetArgs.builder()
                            .clusterNamespace("string")
                            .targetGkeCluster("string")
                            .build())
                        .build())
                    .initializationActions(WorkflowTemplatePlacementManagedClusterConfigInitializationActionArgs.builder()
                        .executableFile("string")
                        .executionTimeout("string")
                        .build())
                    .lifecycleConfig(WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigArgs.builder()
                        .autoDeleteTime("string")
                        .autoDeleteTtl("string")
                        .idleDeleteTtl("string")
                        .idleStartTime("string")
                        .build())
                    .masterConfig(WorkflowTemplatePlacementManagedClusterConfigMasterConfigArgs.builder()
                        .accelerators(WorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorArgs.builder()
                            .acceleratorCount(0)
                            .acceleratorType("string")
                            .build())
                        .diskConfig(WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigArgs.builder()
                            .bootDiskSizeGb(0)
                            .bootDiskType("string")
                            .numLocalSsds(0)
                            .build())
                        .image("string")
                        .instanceNames("string")
                        .isPreemptible(false)
                        .machineType("string")
                        .managedGroupConfigs(WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigArgs.builder()
                            .instanceGroupManagerName("string")
                            .instanceTemplateName("string")
                            .build())
                        .minCpuPlatform("string")
                        .numInstances(0)
                        .preemptibility("string")
                        .build())
                    .metastoreConfig(WorkflowTemplatePlacementManagedClusterConfigMetastoreConfigArgs.builder()
                        .dataprocMetastoreService("string")
                        .build())
                    .secondaryWorkerConfig(WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigArgs.builder()
                        .accelerators(WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorArgs.builder()
                            .acceleratorCount(0)
                            .acceleratorType("string")
                            .build())
                        .diskConfig(WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigArgs.builder()
                            .bootDiskSizeGb(0)
                            .bootDiskType("string")
                            .numLocalSsds(0)
                            .build())
                        .image("string")
                        .instanceNames("string")
                        .isPreemptible(false)
                        .machineType("string")
                        .managedGroupConfigs(WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigArgs.builder()
                            .instanceGroupManagerName("string")
                            .instanceTemplateName("string")
                            .build())
                        .minCpuPlatform("string")
                        .numInstances(0)
                        .preemptibility("string")
                        .build())
                    .securityConfig(WorkflowTemplatePlacementManagedClusterConfigSecurityConfigArgs.builder()
                        .kerberosConfig(WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigArgs.builder()
                            .crossRealmTrustAdminServer("string")
                            .crossRealmTrustKdc("string")
                            .crossRealmTrustRealm("string")
                            .crossRealmTrustSharedPassword("string")
                            .enableKerberos(false)
                            .kdcDbKey("string")
                            .keyPassword("string")
                            .keystore("string")
                            .keystorePassword("string")
                            .kmsKey("string")
                            .realm("string")
                            .rootPrincipalPassword("string")
                            .tgtLifetimeHours(0)
                            .truststore("string")
                            .truststorePassword("string")
                            .build())
                        .build())
                    .softwareConfig(WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigArgs.builder()
                        .imageVersion("string")
                        .optionalComponents("string")
                        .properties(Map.of("string", "string"))
                        .build())
                    .stagingBucket("string")
                    .tempBucket("string")
                    .workerConfig(WorkflowTemplatePlacementManagedClusterConfigWorkerConfigArgs.builder()
                        .accelerators(WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorArgs.builder()
                            .acceleratorCount(0)
                            .acceleratorType("string")
                            .build())
                        .diskConfig(WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigArgs.builder()
                            .bootDiskSizeGb(0)
                            .bootDiskType("string")
                            .numLocalSsds(0)
                            .build())
                        .image("string")
                        .instanceNames("string")
                        .isPreemptible(false)
                        .machineType("string")
                        .managedGroupConfigs(WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigArgs.builder()
                            .instanceGroupManagerName("string")
                            .instanceTemplateName("string")
                            .build())
                        .minCpuPlatform("string")
                        .numInstances(0)
                        .preemptibility("string")
                        .build())
                    .build())
                .labels(Map.of("string", "string"))
                .build())
            .build())
        .dagTimeout("string")
        .labels(Map.of("string", "string"))
        .name("string")
        .parameters(WorkflowTemplateParameterArgs.builder()
            .fields("string")
            .name("string")
            .description("string")
            .validation(WorkflowTemplateParameterValidationArgs.builder()
                .regex(WorkflowTemplateParameterValidationRegexArgs.builder()
                    .regexes("string")
                    .build())
                .values(WorkflowTemplateParameterValidationValuesArgs.builder()
                    .values("string")
                    .build())
                .build())
            .build())
        .project("string")
        .build());
    
    workflow_template_resource = gcp.dataproc.WorkflowTemplate("workflowTemplateResource",
        jobs=[gcp.dataproc.WorkflowTemplateJobArgs(
            step_id="string",
            hadoop_job=gcp.dataproc.WorkflowTemplateJobHadoopJobArgs(
                archive_uris=["string"],
                args=["string"],
                file_uris=["string"],
                jar_file_uris=["string"],
                logging_config=gcp.dataproc.WorkflowTemplateJobHadoopJobLoggingConfigArgs(
                    driver_log_levels={
                        "string": "string",
                    },
                ),
                main_class="string",
                main_jar_file_uri="string",
                properties={
                    "string": "string",
                },
            ),
            hive_job=gcp.dataproc.WorkflowTemplateJobHiveJobArgs(
                continue_on_failure=False,
                jar_file_uris=["string"],
                properties={
                    "string": "string",
                },
                query_file_uri="string",
                query_list=gcp.dataproc.WorkflowTemplateJobHiveJobQueryListArgs(
                    queries=["string"],
                ),
                script_variables={
                    "string": "string",
                },
            ),
            labels={
                "string": "string",
            },
            pig_job=gcp.dataproc.WorkflowTemplateJobPigJobArgs(
                continue_on_failure=False,
                jar_file_uris=["string"],
                logging_config=gcp.dataproc.WorkflowTemplateJobPigJobLoggingConfigArgs(
                    driver_log_levels={
                        "string": "string",
                    },
                ),
                properties={
                    "string": "string",
                },
                query_file_uri="string",
                query_list=gcp.dataproc.WorkflowTemplateJobPigJobQueryListArgs(
                    queries=["string"],
                ),
                script_variables={
                    "string": "string",
                },
            ),
            prerequisite_step_ids=["string"],
            presto_job=gcp.dataproc.WorkflowTemplateJobPrestoJobArgs(
                client_tags=["string"],
                continue_on_failure=False,
                logging_config=gcp.dataproc.WorkflowTemplateJobPrestoJobLoggingConfigArgs(
                    driver_log_levels={
                        "string": "string",
                    },
                ),
                output_format="string",
                properties={
                    "string": "string",
                },
                query_file_uri="string",
                query_list=gcp.dataproc.WorkflowTemplateJobPrestoJobQueryListArgs(
                    queries=["string"],
                ),
            ),
            pyspark_job=gcp.dataproc.WorkflowTemplateJobPysparkJobArgs(
                main_python_file_uri="string",
                archive_uris=["string"],
                args=["string"],
                file_uris=["string"],
                jar_file_uris=["string"],
                logging_config=gcp.dataproc.WorkflowTemplateJobPysparkJobLoggingConfigArgs(
                    driver_log_levels={
                        "string": "string",
                    },
                ),
                properties={
                    "string": "string",
                },
                python_file_uris=["string"],
            ),
            scheduling=gcp.dataproc.WorkflowTemplateJobSchedulingArgs(
                max_failures_per_hour=0,
                max_failures_total=0,
            ),
            spark_job=gcp.dataproc.WorkflowTemplateJobSparkJobArgs(
                archive_uris=["string"],
                args=["string"],
                file_uris=["string"],
                jar_file_uris=["string"],
                logging_config=gcp.dataproc.WorkflowTemplateJobSparkJobLoggingConfigArgs(
                    driver_log_levels={
                        "string": "string",
                    },
                ),
                main_class="string",
                main_jar_file_uri="string",
                properties={
                    "string": "string",
                },
            ),
            spark_r_job=gcp.dataproc.WorkflowTemplateJobSparkRJobArgs(
                main_r_file_uri="string",
                archive_uris=["string"],
                args=["string"],
                file_uris=["string"],
                logging_config=gcp.dataproc.WorkflowTemplateJobSparkRJobLoggingConfigArgs(
                    driver_log_levels={
                        "string": "string",
                    },
                ),
                properties={
                    "string": "string",
                },
            ),
            spark_sql_job=gcp.dataproc.WorkflowTemplateJobSparkSqlJobArgs(
                jar_file_uris=["string"],
                logging_config=gcp.dataproc.WorkflowTemplateJobSparkSqlJobLoggingConfigArgs(
                    driver_log_levels={
                        "string": "string",
                    },
                ),
                properties={
                    "string": "string",
                },
                query_file_uri="string",
                query_list=gcp.dataproc.WorkflowTemplateJobSparkSqlJobQueryListArgs(
                    queries=["string"],
                ),
                script_variables={
                    "string": "string",
                },
            ),
        )],
        location="string",
        placement=gcp.dataproc.WorkflowTemplatePlacementArgs(
            cluster_selector=gcp.dataproc.WorkflowTemplatePlacementClusterSelectorArgs(
                cluster_labels={
                    "string": "string",
                },
                zone="string",
            ),
            managed_cluster=gcp.dataproc.WorkflowTemplatePlacementManagedClusterArgs(
                cluster_name="string",
                config=gcp.dataproc.WorkflowTemplatePlacementManagedClusterConfigArgs(
                    autoscaling_config=gcp.dataproc.WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigArgs(
                        policy="string",
                    ),
                    encryption_config=gcp.dataproc.WorkflowTemplatePlacementManagedClusterConfigEncryptionConfigArgs(
                        gce_pd_kms_key_name="string",
                    ),
                    endpoint_config=gcp.dataproc.WorkflowTemplatePlacementManagedClusterConfigEndpointConfigArgs(
                        enable_http_port_access=False,
                        http_ports={
                            "string": "string",
                        },
                    ),
                    gce_cluster_config=gcp.dataproc.WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigArgs(
                        internal_ip_only=False,
                        metadata={
                            "string": "string",
                        },
                        network="string",
                        node_group_affinity=gcp.dataproc.WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityArgs(
                            node_group="string",
                        ),
                        private_ipv6_google_access="string",
                        reservation_affinity=gcp.dataproc.WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityArgs(
                            consume_reservation_type="string",
                            key="string",
                            values=["string"],
                        ),
                        service_account="string",
                        service_account_scopes=["string"],
                        shielded_instance_config=gcp.dataproc.WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfigArgs(
                            enable_integrity_monitoring=False,
                            enable_secure_boot=False,
                            enable_vtpm=False,
                        ),
                        subnetwork="string",
                        tags=["string"],
                        zone="string",
                    ),
                    gke_cluster_config=gcp.dataproc.WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigArgs(
                        namespaced_gke_deployment_target=gcp.dataproc.WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetArgs(
                            cluster_namespace="string",
                            target_gke_cluster="string",
                        ),
                    ),
                    initialization_actions=[gcp.dataproc.WorkflowTemplatePlacementManagedClusterConfigInitializationActionArgs(
                        executable_file="string",
                        execution_timeout="string",
                    )],
                    lifecycle_config=gcp.dataproc.WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigArgs(
                        auto_delete_time="string",
                        auto_delete_ttl="string",
                        idle_delete_ttl="string",
                        idle_start_time="string",
                    ),
                    master_config=gcp.dataproc.WorkflowTemplatePlacementManagedClusterConfigMasterConfigArgs(
                        accelerators=[gcp.dataproc.WorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorArgs(
                            accelerator_count=0,
                            accelerator_type="string",
                        )],
                        disk_config=gcp.dataproc.WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigArgs(
                            boot_disk_size_gb=0,
                            boot_disk_type="string",
                            num_local_ssds=0,
                        ),
                        image="string",
                        instance_names=["string"],
                        is_preemptible=False,
                        machine_type="string",
                        managed_group_configs=[gcp.dataproc.WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigArgs(
                            instance_group_manager_name="string",
                            instance_template_name="string",
                        )],
                        min_cpu_platform="string",
                        num_instances=0,
                        preemptibility="string",
                    ),
                    metastore_config=gcp.dataproc.WorkflowTemplatePlacementManagedClusterConfigMetastoreConfigArgs(
                        dataproc_metastore_service="string",
                    ),
                    secondary_worker_config=gcp.dataproc.WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigArgs(
                        accelerators=[gcp.dataproc.WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorArgs(
                            accelerator_count=0,
                            accelerator_type="string",
                        )],
                        disk_config=gcp.dataproc.WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigArgs(
                            boot_disk_size_gb=0,
                            boot_disk_type="string",
                            num_local_ssds=0,
                        ),
                        image="string",
                        instance_names=["string"],
                        is_preemptible=False,
                        machine_type="string",
                        managed_group_configs=[gcp.dataproc.WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigArgs(
                            instance_group_manager_name="string",
                            instance_template_name="string",
                        )],
                        min_cpu_platform="string",
                        num_instances=0,
                        preemptibility="string",
                    ),
                    security_config=gcp.dataproc.WorkflowTemplatePlacementManagedClusterConfigSecurityConfigArgs(
                        kerberos_config=gcp.dataproc.WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigArgs(
                            cross_realm_trust_admin_server="string",
                            cross_realm_trust_kdc="string",
                            cross_realm_trust_realm="string",
                            cross_realm_trust_shared_password="string",
                            enable_kerberos=False,
                            kdc_db_key="string",
                            key_password="string",
                            keystore="string",
                            keystore_password="string",
                            kms_key="string",
                            realm="string",
                            root_principal_password="string",
                            tgt_lifetime_hours=0,
                            truststore="string",
                            truststore_password="string",
                        ),
                    ),
                    software_config=gcp.dataproc.WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigArgs(
                        image_version="string",
                        optional_components=["string"],
                        properties={
                            "string": "string",
                        },
                    ),
                    staging_bucket="string",
                    temp_bucket="string",
                    worker_config=gcp.dataproc.WorkflowTemplatePlacementManagedClusterConfigWorkerConfigArgs(
                        accelerators=[gcp.dataproc.WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorArgs(
                            accelerator_count=0,
                            accelerator_type="string",
                        )],
                        disk_config=gcp.dataproc.WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigArgs(
                            boot_disk_size_gb=0,
                            boot_disk_type="string",
                            num_local_ssds=0,
                        ),
                        image="string",
                        instance_names=["string"],
                        is_preemptible=False,
                        machine_type="string",
                        managed_group_configs=[gcp.dataproc.WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigArgs(
                            instance_group_manager_name="string",
                            instance_template_name="string",
                        )],
                        min_cpu_platform="string",
                        num_instances=0,
                        preemptibility="string",
                    ),
                ),
                labels={
                    "string": "string",
                },
            ),
        ),
        dag_timeout="string",
        labels={
            "string": "string",
        },
        name="string",
        parameters=[gcp.dataproc.WorkflowTemplateParameterArgs(
            fields=["string"],
            name="string",
            description="string",
            validation=gcp.dataproc.WorkflowTemplateParameterValidationArgs(
                regex=gcp.dataproc.WorkflowTemplateParameterValidationRegexArgs(
                    regexes=["string"],
                ),
                values=gcp.dataproc.WorkflowTemplateParameterValidationValuesArgs(
                    values=["string"],
                ),
            ),
        )],
        project="string")
    
    const workflowTemplateResource = new gcp.dataproc.WorkflowTemplate("workflowTemplateResource", {
        jobs: [{
            stepId: "string",
            hadoopJob: {
                archiveUris: ["string"],
                args: ["string"],
                fileUris: ["string"],
                jarFileUris: ["string"],
                loggingConfig: {
                    driverLogLevels: {
                        string: "string",
                    },
                },
                mainClass: "string",
                mainJarFileUri: "string",
                properties: {
                    string: "string",
                },
            },
            hiveJob: {
                continueOnFailure: false,
                jarFileUris: ["string"],
                properties: {
                    string: "string",
                },
                queryFileUri: "string",
                queryList: {
                    queries: ["string"],
                },
                scriptVariables: {
                    string: "string",
                },
            },
            labels: {
                string: "string",
            },
            pigJob: {
                continueOnFailure: false,
                jarFileUris: ["string"],
                loggingConfig: {
                    driverLogLevels: {
                        string: "string",
                    },
                },
                properties: {
                    string: "string",
                },
                queryFileUri: "string",
                queryList: {
                    queries: ["string"],
                },
                scriptVariables: {
                    string: "string",
                },
            },
            prerequisiteStepIds: ["string"],
            prestoJob: {
                clientTags: ["string"],
                continueOnFailure: false,
                loggingConfig: {
                    driverLogLevels: {
                        string: "string",
                    },
                },
                outputFormat: "string",
                properties: {
                    string: "string",
                },
                queryFileUri: "string",
                queryList: {
                    queries: ["string"],
                },
            },
            pysparkJob: {
                mainPythonFileUri: "string",
                archiveUris: ["string"],
                args: ["string"],
                fileUris: ["string"],
                jarFileUris: ["string"],
                loggingConfig: {
                    driverLogLevels: {
                        string: "string",
                    },
                },
                properties: {
                    string: "string",
                },
                pythonFileUris: ["string"],
            },
            scheduling: {
                maxFailuresPerHour: 0,
                maxFailuresTotal: 0,
            },
            sparkJob: {
                archiveUris: ["string"],
                args: ["string"],
                fileUris: ["string"],
                jarFileUris: ["string"],
                loggingConfig: {
                    driverLogLevels: {
                        string: "string",
                    },
                },
                mainClass: "string",
                mainJarFileUri: "string",
                properties: {
                    string: "string",
                },
            },
            sparkRJob: {
                mainRFileUri: "string",
                archiveUris: ["string"],
                args: ["string"],
                fileUris: ["string"],
                loggingConfig: {
                    driverLogLevels: {
                        string: "string",
                    },
                },
                properties: {
                    string: "string",
                },
            },
            sparkSqlJob: {
                jarFileUris: ["string"],
                loggingConfig: {
                    driverLogLevels: {
                        string: "string",
                    },
                },
                properties: {
                    string: "string",
                },
                queryFileUri: "string",
                queryList: {
                    queries: ["string"],
                },
                scriptVariables: {
                    string: "string",
                },
            },
        }],
        location: "string",
        placement: {
            clusterSelector: {
                clusterLabels: {
                    string: "string",
                },
                zone: "string",
            },
            managedCluster: {
                clusterName: "string",
                config: {
                    autoscalingConfig: {
                        policy: "string",
                    },
                    encryptionConfig: {
                        gcePdKmsKeyName: "string",
                    },
                    endpointConfig: {
                        enableHttpPortAccess: false,
                        httpPorts: {
                            string: "string",
                        },
                    },
                    gceClusterConfig: {
                        internalIpOnly: false,
                        metadata: {
                            string: "string",
                        },
                        network: "string",
                        nodeGroupAffinity: {
                            nodeGroup: "string",
                        },
                        privateIpv6GoogleAccess: "string",
                        reservationAffinity: {
                            consumeReservationType: "string",
                            key: "string",
                            values: ["string"],
                        },
                        serviceAccount: "string",
                        serviceAccountScopes: ["string"],
                        shieldedInstanceConfig: {
                            enableIntegrityMonitoring: false,
                            enableSecureBoot: false,
                            enableVtpm: false,
                        },
                        subnetwork: "string",
                        tags: ["string"],
                        zone: "string",
                    },
                    gkeClusterConfig: {
                        namespacedGkeDeploymentTarget: {
                            clusterNamespace: "string",
                            targetGkeCluster: "string",
                        },
                    },
                    initializationActions: [{
                        executableFile: "string",
                        executionTimeout: "string",
                    }],
                    lifecycleConfig: {
                        autoDeleteTime: "string",
                        autoDeleteTtl: "string",
                        idleDeleteTtl: "string",
                        idleStartTime: "string",
                    },
                    masterConfig: {
                        accelerators: [{
                            acceleratorCount: 0,
                            acceleratorType: "string",
                        }],
                        diskConfig: {
                            bootDiskSizeGb: 0,
                            bootDiskType: "string",
                            numLocalSsds: 0,
                        },
                        image: "string",
                        instanceNames: ["string"],
                        isPreemptible: false,
                        machineType: "string",
                        managedGroupConfigs: [{
                            instanceGroupManagerName: "string",
                            instanceTemplateName: "string",
                        }],
                        minCpuPlatform: "string",
                        numInstances: 0,
                        preemptibility: "string",
                    },
                    metastoreConfig: {
                        dataprocMetastoreService: "string",
                    },
                    secondaryWorkerConfig: {
                        accelerators: [{
                            acceleratorCount: 0,
                            acceleratorType: "string",
                        }],
                        diskConfig: {
                            bootDiskSizeGb: 0,
                            bootDiskType: "string",
                            numLocalSsds: 0,
                        },
                        image: "string",
                        instanceNames: ["string"],
                        isPreemptible: false,
                        machineType: "string",
                        managedGroupConfigs: [{
                            instanceGroupManagerName: "string",
                            instanceTemplateName: "string",
                        }],
                        minCpuPlatform: "string",
                        numInstances: 0,
                        preemptibility: "string",
                    },
                    securityConfig: {
                        kerberosConfig: {
                            crossRealmTrustAdminServer: "string",
                            crossRealmTrustKdc: "string",
                            crossRealmTrustRealm: "string",
                            crossRealmTrustSharedPassword: "string",
                            enableKerberos: false,
                            kdcDbKey: "string",
                            keyPassword: "string",
                            keystore: "string",
                            keystorePassword: "string",
                            kmsKey: "string",
                            realm: "string",
                            rootPrincipalPassword: "string",
                            tgtLifetimeHours: 0,
                            truststore: "string",
                            truststorePassword: "string",
                        },
                    },
                    softwareConfig: {
                        imageVersion: "string",
                        optionalComponents: ["string"],
                        properties: {
                            string: "string",
                        },
                    },
                    stagingBucket: "string",
                    tempBucket: "string",
                    workerConfig: {
                        accelerators: [{
                            acceleratorCount: 0,
                            acceleratorType: "string",
                        }],
                        diskConfig: {
                            bootDiskSizeGb: 0,
                            bootDiskType: "string",
                            numLocalSsds: 0,
                        },
                        image: "string",
                        instanceNames: ["string"],
                        isPreemptible: false,
                        machineType: "string",
                        managedGroupConfigs: [{
                            instanceGroupManagerName: "string",
                            instanceTemplateName: "string",
                        }],
                        minCpuPlatform: "string",
                        numInstances: 0,
                        preemptibility: "string",
                    },
                },
                labels: {
                    string: "string",
                },
            },
        },
        dagTimeout: "string",
        labels: {
            string: "string",
        },
        name: "string",
        parameters: [{
            fields: ["string"],
            name: "string",
            description: "string",
            validation: {
                regex: {
                    regexes: ["string"],
                },
                values: {
                    values: ["string"],
                },
            },
        }],
        project: "string",
    });
    
    type: gcp:dataproc:WorkflowTemplate
    properties:
        dagTimeout: string
        jobs:
            - hadoopJob:
                archiveUris:
                    - string
                args:
                    - string
                fileUris:
                    - string
                jarFileUris:
                    - string
                loggingConfig:
                    driverLogLevels:
                        string: string
                mainClass: string
                mainJarFileUri: string
                properties:
                    string: string
              hiveJob:
                continueOnFailure: false
                jarFileUris:
                    - string
                properties:
                    string: string
                queryFileUri: string
                queryList:
                    queries:
                        - string
                scriptVariables:
                    string: string
              labels:
                string: string
              pigJob:
                continueOnFailure: false
                jarFileUris:
                    - string
                loggingConfig:
                    driverLogLevels:
                        string: string
                properties:
                    string: string
                queryFileUri: string
                queryList:
                    queries:
                        - string
                scriptVariables:
                    string: string
              prerequisiteStepIds:
                - string
              prestoJob:
                clientTags:
                    - string
                continueOnFailure: false
                loggingConfig:
                    driverLogLevels:
                        string: string
                outputFormat: string
                properties:
                    string: string
                queryFileUri: string
                queryList:
                    queries:
                        - string
              pysparkJob:
                archiveUris:
                    - string
                args:
                    - string
                fileUris:
                    - string
                jarFileUris:
                    - string
                loggingConfig:
                    driverLogLevels:
                        string: string
                mainPythonFileUri: string
                properties:
                    string: string
                pythonFileUris:
                    - string
              scheduling:
                maxFailuresPerHour: 0
                maxFailuresTotal: 0
              sparkJob:
                archiveUris:
                    - string
                args:
                    - string
                fileUris:
                    - string
                jarFileUris:
                    - string
                loggingConfig:
                    driverLogLevels:
                        string: string
                mainClass: string
                mainJarFileUri: string
                properties:
                    string: string
              sparkRJob:
                archiveUris:
                    - string
                args:
                    - string
                fileUris:
                    - string
                loggingConfig:
                    driverLogLevels:
                        string: string
                mainRFileUri: string
                properties:
                    string: string
              sparkSqlJob:
                jarFileUris:
                    - string
                loggingConfig:
                    driverLogLevels:
                        string: string
                properties:
                    string: string
                queryFileUri: string
                queryList:
                    queries:
                        - string
                scriptVariables:
                    string: string
              stepId: string
        labels:
            string: string
        location: string
        name: string
        parameters:
            - description: string
              fields:
                - string
              name: string
              validation:
                regex:
                    regexes:
                        - string
                values:
                    values:
                        - string
        placement:
            clusterSelector:
                clusterLabels:
                    string: string
                zone: string
            managedCluster:
                clusterName: string
                config:
                    autoscalingConfig:
                        policy: string
                    encryptionConfig:
                        gcePdKmsKeyName: string
                    endpointConfig:
                        enableHttpPortAccess: false
                        httpPorts:
                            string: string
                    gceClusterConfig:
                        internalIpOnly: false
                        metadata:
                            string: string
                        network: string
                        nodeGroupAffinity:
                            nodeGroup: string
                        privateIpv6GoogleAccess: string
                        reservationAffinity:
                            consumeReservationType: string
                            key: string
                            values:
                                - string
                        serviceAccount: string
                        serviceAccountScopes:
                            - string
                        shieldedInstanceConfig:
                            enableIntegrityMonitoring: false
                            enableSecureBoot: false
                            enableVtpm: false
                        subnetwork: string
                        tags:
                            - string
                        zone: string
                    gkeClusterConfig:
                        namespacedGkeDeploymentTarget:
                            clusterNamespace: string
                            targetGkeCluster: string
                    initializationActions:
                        - executableFile: string
                          executionTimeout: string
                    lifecycleConfig:
                        autoDeleteTime: string
                        autoDeleteTtl: string
                        idleDeleteTtl: string
                        idleStartTime: string
                    masterConfig:
                        accelerators:
                            - acceleratorCount: 0
                              acceleratorType: string
                        diskConfig:
                            bootDiskSizeGb: 0
                            bootDiskType: string
                            numLocalSsds: 0
                        image: string
                        instanceNames:
                            - string
                        isPreemptible: false
                        machineType: string
                        managedGroupConfigs:
                            - instanceGroupManagerName: string
                              instanceTemplateName: string
                        minCpuPlatform: string
                        numInstances: 0
                        preemptibility: string
                    metastoreConfig:
                        dataprocMetastoreService: string
                    secondaryWorkerConfig:
                        accelerators:
                            - acceleratorCount: 0
                              acceleratorType: string
                        diskConfig:
                            bootDiskSizeGb: 0
                            bootDiskType: string
                            numLocalSsds: 0
                        image: string
                        instanceNames:
                            - string
                        isPreemptible: false
                        machineType: string
                        managedGroupConfigs:
                            - instanceGroupManagerName: string
                              instanceTemplateName: string
                        minCpuPlatform: string
                        numInstances: 0
                        preemptibility: string
                    securityConfig:
                        kerberosConfig:
                            crossRealmTrustAdminServer: string
                            crossRealmTrustKdc: string
                            crossRealmTrustRealm: string
                            crossRealmTrustSharedPassword: string
                            enableKerberos: false
                            kdcDbKey: string
                            keyPassword: string
                            keystore: string
                            keystorePassword: string
                            kmsKey: string
                            realm: string
                            rootPrincipalPassword: string
                            tgtLifetimeHours: 0
                            truststore: string
                            truststorePassword: string
                    softwareConfig:
                        imageVersion: string
                        optionalComponents:
                            - string
                        properties:
                            string: string
                    stagingBucket: string
                    tempBucket: string
                    workerConfig:
                        accelerators:
                            - acceleratorCount: 0
                              acceleratorType: string
                        diskConfig:
                            bootDiskSizeGb: 0
                            bootDiskType: string
                            numLocalSsds: 0
                        image: string
                        instanceNames:
                            - string
                        isPreemptible: false
                        machineType: string
                        managedGroupConfigs:
                            - instanceGroupManagerName: string
                              instanceTemplateName: string
                        minCpuPlatform: string
                        numInstances: 0
                        preemptibility: string
                labels:
                    string: string
        project: string
    

    WorkflowTemplate Resource Properties

    To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

    Inputs

    The WorkflowTemplate resource accepts the following input properties (a brief usage sketch follows the list):

    Jobs List<WorkflowTemplateJob>
    Required. The Directed Acyclic Graph of Jobs to submit.
    Location string
    The location for the resource
    Placement WorkflowTemplatePlacement
    Required. WorkflowTemplate scheduling information.
    DagTimeout string
    Optional. Timeout duration for the DAG of jobs, expressed in seconds (see JSON representation of duration). The timeout duration must be from 10 minutes ("600s") to 24 hours ("86400s"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a managed cluster, the cluster is deleted.
    Labels Dictionary<string, string>
    Optional. The labels to associate with this template. These labels will be propagated to all jobs and clusters created by the workflow instance. Label keys must contain 1 to 63 characters, and must conform to RFC 1035. Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035. No more than 32 labels can be associated with a template. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.
    Name string
    Output only. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. * For projects.regions.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} * For projects.locations.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}
    Parameters List<WorkflowTemplateParameter>
    Optional. Template parameters whose values are substituted into the template. Values for parameters must be provided when the template is instantiated.
    Project string
    The project for the resource
    Version int
    Output only. The current version of this workflow template.

    Deprecated: version is not useful as a configurable field, and will be removed in the future.

    Jobs []WorkflowTemplateJobArgs
    Required. The Directed Acyclic Graph of Jobs to submit.
    Location string
    The location for the resource
    Placement WorkflowTemplatePlacementArgs
    Required. WorkflowTemplate scheduling information.
    DagTimeout string
    Optional. Timeout duration for the DAG of jobs, expressed in seconds (see JSON representation of duration). The timeout duration must be from 10 minutes ("600s") to 24 hours ("86400s"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a managed cluster, the cluster is deleted.
    Labels map[string]string
    Optional. The labels to associate with this template. These labels will be propagated to all jobs and clusters created by the workflow instance. Label keys must contain 1 to 63 characters, and must conform to RFC 1035. Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035. No more than 32 labels can be associated with a template. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.
    Name string
    Output only. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. * For projects.regions.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} * For projects.locations.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}
    Parameters []WorkflowTemplateParameterArgs
    Optional. Template parameters whose values are substituted into the template. Values for parameters must be provided when the template is instantiated.
    Project string
    The project for the resource
    Version int
    Output only. The current version of this workflow template.

    Deprecated: version is not useful as a configurable field, and will be removed in the future.

    jobs List<WorkflowTemplateJob>
    Required. The Directed Acyclic Graph of Jobs to submit.
    location String
    The location for the resource
    placement WorkflowTemplatePlacement
    Required. WorkflowTemplate scheduling information.
    dagTimeout String
    Optional. Timeout duration for the DAG of jobs, expressed in seconds (see JSON representation of duration). The timeout duration must be from 10 minutes ("600s") to 24 hours ("86400s"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a managed cluster, the cluster is deleted.
    labels Map<String,String>
    Optional. The labels to associate with this template. These labels will be propagated to all jobs and clusters created by the workflow instance. Label keys must contain 1 to 63 characters, and must conform to RFC 1035. Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035. No more than 32 labels can be associated with a template. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.
    name String
    Output only. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. * For projects.regions.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} * For projects.locations.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}
    parameters List<WorkflowTemplateParameter>
    Optional. Template parameters whose values are substituted into the template. Values for parameters must be provided when the template is instantiated.
    project String
    The project for the resource
    version Integer
    Output only. The current version of this workflow template.

    Deprecated: version is not useful as a configurable field, and will be removed in the future.

    jobs WorkflowTemplateJob[]
    Required. The Directed Acyclic Graph of Jobs to submit.
    location string
    The location for the resource
    placement WorkflowTemplatePlacement
    Required. WorkflowTemplate scheduling information.
    dagTimeout string
    Optional. Timeout duration for the DAG of jobs, expressed in seconds (see JSON representation of duration). The timeout duration must be from 10 minutes ("600s") to 24 hours ("86400s"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a managed cluster, the cluster is deleted.
    labels {[key: string]: string}
    Optional. The labels to associate with this template. These labels will be propagated to all jobs and clusters created by the workflow instance. Label keys must contain 1 to 63 characters, and must conform to RFC 1035. Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035. No more than 32 labels can be associated with a template. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.
    name string
    Output only. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. * For projects.regions.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} * For projects.locations.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}
    parameters WorkflowTemplateParameter[]
    Optional. Template parameters whose values are substituted into the template. Values for parameters must be provided when the template is instantiated.
    project string
    The project for the resource
    version number
    Output only. The current version of this workflow template.

    Deprecated: version is not useful as a configurable field, and will be removed in the future.

    jobs Sequence[WorkflowTemplateJobArgs]
    Required. The Directed Acyclic Graph of Jobs to submit.
    location str
    The location for the resource
    placement WorkflowTemplatePlacementArgs
    Required. WorkflowTemplate scheduling information.
    dag_timeout str
    Optional. Timeout duration for the DAG of jobs, expressed in seconds (see JSON representation of duration). The timeout duration must be from 10 minutes ("600s") to 24 hours ("86400s"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a managed cluster, the cluster is deleted.
    labels Mapping[str, str]
    Optional. The labels to associate with this template. These labels will be propagated to all jobs and clusters created by the workflow instance. Label keys must contain 1 to 63 characters, and must conform to RFC 1035. Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035. No more than 32 labels can be associated with a template. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.
    name str
    Output only. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. * For projects.regions.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} * For projects.locations.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}
    parameters Sequence[WorkflowTemplateParameterArgs]
    Optional. Template parameters whose values are substituted into the template. Values for parameters must be provided when the template is instantiated.
    project str
    The project for the resource
    version int
    Output only. The current version of this workflow template.

    Deprecated: version is not useful as a configurable field, and will be removed in the future.

    jobs List<Property Map>
    Required. The Directed Acyclic Graph of Jobs to submit.
    location String
    The location for the resource
    placement Property Map
    Required. WorkflowTemplate scheduling information.
    dagTimeout String
    Optional. Timeout duration for the DAG of jobs, expressed in seconds (see JSON representation of duration). The timeout duration must be from 10 minutes ("600s") to 24 hours ("86400s"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a managed cluster, the cluster is deleted.
    labels Map<String>
    Optional. The labels to associate with this template. These labels will be propagated to all jobs and clusters created by the workflow instance. Label keys must contain 1 to 63 characters, and must conform to RFC 1035. Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035. No more than 32 labels can be associated with a template. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.
    name String
    Output only. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. * For projects.regions.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} * For projects.locations.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}
    parameters List<Property Map>
    Optional. Template parameters whose values are substituted into the template. Values for parameters must be provided when the template is instantiated.
    project String
    The project for the resource
    version Number
    Output only. The current version of this workflow template.

    Deprecated: version is not useful as a configurable field, and will be removed in the future.
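
    As a quick illustration of the optional inputs above, the sketch below sets dagTimeout, labels, and a template parameter, and targets an existing cluster with a cluster selector instead of a managed cluster. All names, labels, URIs, and the parameter field path are hypothetical; the cluster-selector and parameter shapes are drawn from the broader WorkflowTemplate schema and are meant as an assumption-laden sketch, not a canonical configuration.

    import * as pulumi from "@pulumi/pulumi";
    import * as gcp from "@pulumi/gcp";
    
    // Hypothetical template that runs on an existing cluster selected by label.
    const parameterized = new gcp.dataproc.WorkflowTemplate("parameterized", {
        name: "parameterized-template",
        location: "us-central1",
        dagTimeout: "1800s",                       // allowed range is "600s" to "86400s"
        labels: { team: "data-eng" },              // propagated to jobs and clusters
        parameters: [{
            name: "QUERY_URI",                     // value supplied when the template is instantiated
            // Field path syntax follows Dataproc template parameterization (illustrative).
            fields: ["jobs['query-step'].prestoJob.queryFileUri"],
        }],
        placement: {
            clusterSelector: {
                clusterLabels: { env: "staging" }, // any existing cluster carrying this label
            },
        },
        jobs: [{
            stepId: "query-step",
            prestoJob: { queryFileUri: "gs://my-bucket/queries/report.sql" },
        }],
    });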

    Outputs

    All input properties are implicitly available as output properties. Additionally, the WorkflowTemplate resource produces the following output properties (a brief sketch showing how to export them follows the list):

    CreateTime string
    Output only. The time template was created.
    EffectiveLabels Dictionary<string, object>
    All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients, and services.
    Id string
    The provider-assigned unique ID for this managed resource.
    PulumiLabels Dictionary<string, object>
    The combination of labels configured directly on the resource and default labels configured on the provider.
    UpdateTime string
    Output only. The time template was last updated.
    CreateTime string
    Output only. The time template was created.
    EffectiveLabels map[string]interface{}
    All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients, and services.
    Id string
    The provider-assigned unique ID for this managed resource.
    PulumiLabels map[string]interface{}
    The combination of labels configured directly on the resource and default labels configured on the provider.
    UpdateTime string
    Output only. The time template was last updated.
    createTime String
    Output only. The time template was created.
    effectiveLabels Map<String,Object>
    All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients, and services.
    id String
    The provider-assigned unique ID for this managed resource.
    pulumiLabels Map<String,Object>
    The combination of labels configured directly on the resource and default labels configured on the provider.
    updateTime String
    Output only. The time template was last updated.
    createTime string
    Output only. The time template was created.
    effectiveLabels {[key: string]: any}
    All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients, and services.
    id string
    The provider-assigned unique ID for this managed resource.
    pulumiLabels {[key: string]: any}
    The combination of labels configured directly on the resource and default labels configured on the provider.
    updateTime string
    Output only. The time template was last updated.
    create_time str
    Output only. The time template was created.
    effective_labels Mapping[str, Any]
    All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients, and services.
    id str
    The provider-assigned unique ID for this managed resource.
    pulumi_labels Mapping[str, Any]
    The combination of labels configured directly on the resource and default labels configured on the provider.
    update_time str
    Output only. The time template was last updated.
    createTime String
    Output only. The time template was created.
    effectiveLabels Map<Any>
    All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients, and services.
    id String
    The provider-assigned unique ID for this managed resource.
    pulumiLabels Map<Any>
    The combination of labels configured directly on the resource and default labels configured on the provider.
    updateTime String
    Output only. The time template was last updated.
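
    To read these output-only properties, reference them on a WorkflowTemplate instance in your program. A minimal TypeScript sketch, assuming a resource bound to a variable named template declared elsewhere (the variable name is hypothetical):

    import * as gcp from "@pulumi/gcp";
    
    // Assumes a gcp.dataproc.WorkflowTemplate declared elsewhere in the program.
    declare const template: gcp.dataproc.WorkflowTemplate;
    
    // Output-only properties resolve once the template has been created.
    export const templateCreateTime = template.createTime;
    export const templateVersion = template.version;              // deprecated as an input, still readable
    export const templateEffectiveLabels = template.effectiveLabels;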

    Look up Existing WorkflowTemplate Resource

    Get an existing WorkflowTemplate resource’s state with the given name, ID, and optional extra properties used to qualify the lookup. A brief lookup example follows the signatures and parameter descriptions below.

    public static get(name: string, id: Input<ID>, state?: WorkflowTemplateState, opts?: CustomResourceOptions): WorkflowTemplate
    @staticmethod
    def get(resource_name: str,
            id: str,
            opts: Optional[ResourceOptions] = None,
            create_time: Optional[str] = None,
            dag_timeout: Optional[str] = None,
            effective_labels: Optional[Mapping[str, Any]] = None,
            jobs: Optional[Sequence[WorkflowTemplateJobArgs]] = None,
            labels: Optional[Mapping[str, str]] = None,
            location: Optional[str] = None,
            name: Optional[str] = None,
            parameters: Optional[Sequence[WorkflowTemplateParameterArgs]] = None,
            placement: Optional[WorkflowTemplatePlacementArgs] = None,
            project: Optional[str] = None,
            pulumi_labels: Optional[Mapping[str, Any]] = None,
            update_time: Optional[str] = None,
            version: Optional[int] = None) -> WorkflowTemplate
    func GetWorkflowTemplate(ctx *Context, name string, id IDInput, state *WorkflowTemplateState, opts ...ResourceOption) (*WorkflowTemplate, error)
    public static WorkflowTemplate Get(string name, Input<string> id, WorkflowTemplateState? state, CustomResourceOptions? opts = null)
    public static WorkflowTemplate get(String name, Output<String> id, WorkflowTemplateState state, CustomResourceOptions options)
    Resource lookup is not supported in YAML
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    resource_name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
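
    For example, a TypeScript lookup by the resource's provider ID might look like the following. The project, location, and template id are hypothetical; the ID follows the projects/{project_id}/locations/{location}/workflowTemplates/{template_id} format described for the name property above.

    import * as gcp from "@pulumi/gcp";
    
    // Look up an existing template by its full resource name (hypothetical values).
    const existing = gcp.dataproc.WorkflowTemplate.get(
        "existing-template",
        "projects/my-project/locations/us-central1/workflowTemplates/template-example",
    );
    
    // Properties of the looked-up resource are available as outputs.
    export const existingDagTimeout = existing.dagTimeout;
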
    The following state arguments are supported:
    CreateTime string
    Output only. The time template was created.
    DagTimeout string
    Optional. Timeout duration for the DAG of jobs, expressed in seconds (see JSON representation of duration). The timeout duration must be from 10 minutes ("600s") to 24 hours ("86400s"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a managed cluster, the cluster is deleted.
    EffectiveLabels Dictionary<string, object>
    All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients, and services.
    Jobs List<WorkflowTemplateJob>
    Required. The Directed Acyclic Graph of Jobs to submit.
    Labels Dictionary<string, string>
    Optional. The labels to associate with this template. These labels will be propagated to all jobs and clusters created by the workflow instance. Label keys must contain 1 to 63 characters, and must conform to RFC 1035. Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035. No more than 32 labels can be associated with a template. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.
    Location string
    The location for the resource
    Name string
    Output only. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. * For projects.regions.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} * For projects.locations.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}
    Parameters List<WorkflowTemplateParameter>
    Optional. Template parameters whose values are substituted into the template. Values for parameters must be provided when the template is instantiated.
    Placement WorkflowTemplatePlacement
    Required. WorkflowTemplate scheduling information.
    Project string
    The project for the resource
    PulumiLabels Dictionary<string, object>
    The combination of labels configured directly on the resource and default labels configured on the provider.
    UpdateTime string
    Output only. The time template was last updated.
    Version int
    Output only. The current version of this workflow template.

    Deprecated: version is not useful as a configurable field, and will be removed in the future.

    CreateTime string
    Output only. The time template was created.
    DagTimeout string
    Optional. Timeout duration for the DAG of jobs, expressed in seconds (see JSON representation of duration). The timeout duration must be from 10 minutes ("600s") to 24 hours ("86400s"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a managed cluster, the cluster is deleted.
    EffectiveLabels map[string]interface{}
    All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients, and services.
    Jobs []WorkflowTemplateJobArgs
    Required. The Directed Acyclic Graph of Jobs to submit.
    Labels map[string]string
    Optional. The labels to associate with this template. These labels will be propagated to all jobs and clusters created by the workflow instance. Label keys must contain 1 to 63 characters, and must conform to RFC 1035. Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035. No more than 32 labels can be associated with a template. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.
    Location string
    The location for the resource
    Name string
    Output only. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. * For projects.regions.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} * For projects.locations.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}
    Parameters []WorkflowTemplateParameterArgs
    Optional. Template parameters whose values are substituted into the template. Values for parameters must be provided when the template is instantiated.
    Placement WorkflowTemplatePlacementArgs
    Required. WorkflowTemplate scheduling information.
    Project string
    The project for the resource
    PulumiLabels map[string]interface{}
    The combination of labels configured directly on the resource and default labels configured on the provider.
    UpdateTime string
    Output only. The time template was last updated.
    Version int
    Output only. The current version of this workflow template.

    Deprecated: version is not useful as a configurable field, and will be removed in the future.

    createTime String
    Output only. The time template was created.
    dagTimeout String
    Optional. Timeout duration for the DAG of jobs, expressed in seconds (see JSON representation of duration). The timeout duration must be from 10 minutes ("600s") to 24 hours ("86400s"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a managed cluster, the cluster is deleted.
    effectiveLabels Map<String,Object>
    All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients, and services.
    jobs List<WorkflowTemplateJob>
    Required. The Directed Acyclic Graph of Jobs to submit.
    labels Map<String,String>
    Optional. The labels to associate with this template. These labels will be propagated to all jobs and clusters created by the workflow instance. Label keys must contain 1 to 63 characters, and must conform to RFC 1035. Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035. No more than 32 labels can be associated with a template. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.
    location String
    The location for the resource
    name String
    Output only. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. * For projects.regions.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} * For projects.locations.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}
    parameters List<WorkflowTemplateParameter>
    Optional. Template parameters whose values are substituted into the template. Values for parameters must be provided when the template is instantiated.
    placement WorkflowTemplatePlacement
    Required. WorkflowTemplate scheduling information.
    project String
    The project for the resource
    pulumiLabels Map<String,Object>
    The combination of labels configured directly on the resource and default labels configured on the provider.
    updateTime String
    Output only. The time template was last updated.
    version Integer
    Output only. The current version of this workflow template.

    Deprecated: version is not useful as a configurable field, and will be removed in the future.

    createTime string
    Output only. The time template was created.
    dagTimeout string
    Optional. Timeout duration for the DAG of jobs, expressed in seconds (see JSON representation of duration). The timeout duration must be from 10 minutes ("600s") to 24 hours ("86400s"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a managed cluster, the cluster is deleted.
    effectiveLabels {[key: string]: any}
    All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients, and services.
    jobs WorkflowTemplateJob[]
    Required. The Directed Acyclic Graph of Jobs to submit.
    labels {[key: string]: string}
    Optional. The labels to associate with this template. These labels will be propagated to all jobs and clusters created by the workflow instance. Label keys must contain 1 to 63 characters, and must conform to RFC 1035. Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035. No more than 32 labels can be associated with a template. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.
    location string
    The location for the resource
    name string
    Output only. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. * For projects.regions.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} * For projects.locations.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}
    parameters WorkflowTemplateParameter[]
    Optional. Template parameters whose values are substituted into the template. Values for parameters must be provided when the template is instantiated.
    placement WorkflowTemplatePlacement
    Required. WorkflowTemplate scheduling information.
    project string
    The project for the resource
    pulumiLabels {[key: string]: any}
    The combination of labels configured directly on the resource and default labels configured on the provider.
    updateTime string
    Output only. The time template was last updated.
    version number
    Output only. The current version of this workflow template.

    Deprecated: version is not useful as a configurable field, and will be removed in the future.

    create_time str
    Output only. The time template was created.
    dag_timeout str
    Optional. Timeout duration for the DAG of jobs, expressed in seconds (see JSON representation of duration). The timeout duration must be from 10 minutes ("600s") to 24 hours ("86400s"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a managed cluster, the cluster is deleted.
    effective_labels Mapping[str, Any]
    All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients, and services.
    jobs Sequence[WorkflowTemplateJobArgs]
    Required. The Directed Acyclic Graph of Jobs to submit.
    labels Mapping[str, str]
    Optional. The labels to associate with this template. These labels will be propagated to all jobs and clusters created by the workflow instance. Label keys must contain 1 to 63 characters, and must conform to RFC 1035. Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035. No more than 32 labels can be associated with a template. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.
    location str
    The location for the resource
    name str
    Output only. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. * For projects.regions.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} * For projects.locations.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}
    parameters Sequence[WorkflowTemplateParameterArgs]
    Optional. Template parameters whose values are substituted into the template. Values for parameters must be provided when the template is instantiated.
    placement WorkflowTemplatePlacementArgs
    Required. WorkflowTemplate scheduling information.
    project str
    The project for the resource
    pulumi_labels Mapping[str, Any]
    The combination of labels configured directly on the resource and default labels configured on the provider.
    update_time str
    Output only. The time template was last updated.
    version int
    Output only. The current version of this workflow template.

    Deprecated: version is not useful as a configurable field, and will be removed in the future.

    createTime String
    Output only. The time template was created.
    dagTimeout String
    Optional. Timeout duration for the DAG of jobs, expressed in seconds (see JSON representation of duration). The timeout duration must be from 10 minutes ("600s") to 24 hours ("86400s"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a managed cluster, the cluster is deleted.
    effectiveLabels Map<Any>
    All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients, and services.
    jobs List<Property Map>
    Required. The Directed Acyclic Graph of Jobs to submit.
    labels Map<String>
    Optional. The labels to associate with this template. These labels will be propagated to all jobs and clusters created by the workflow instance. Label keys must contain 1 to 63 characters, and must conform to RFC 1035. Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035. No more than 32 labels can be associated with a template. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.
    location String
    The location for the resource
    name String
    Output only. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. * For projects.regions.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} * For projects.locations.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}
    parameters List<Property Map>
    Optional. Template parameters whose values are substituted into the template. Values for parameters must be provided when the template is instantiated.
    placement Property Map
    Required. WorkflowTemplate scheduling information.
    project String
    The project for the resource
    pulumiLabels Map<Any>
    The combination of labels configured directly on the resource and default labels configured on the provider.
    updateTime String
    Output only. The time template was last updated.
    version Number
    Output only. The current version of this workflow template.

    Deprecated: version is not useful as a configurable field, and will be removed in the future.

    Supporting Types

    WorkflowTemplateJob, WorkflowTemplateJobArgs

    StepId string
    Required. The step id. The id must be unique among all jobs within the template. The step id is used as a prefix for the job id, as the job goog-dataproc-workflow-step-id label, and in the prerequisiteStepIds field from other steps. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with an underscore or hyphen. Must consist of between 3 and 50 characters.
    HadoopJob WorkflowTemplateJobHadoopJob
    Job is a Hadoop job.
    HiveJob WorkflowTemplateJobHiveJob
    Job is a Hive job.
    Labels Dictionary<string, string>
    The labels to associate with this job. Label keys must be between 1 and 63 characters long, and must conform to the following regular expression: [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}. Label values must be between 1 and 63 characters long, and must conform to the following regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63}. No more than 32 labels can be associated with a given job.
    PigJob WorkflowTemplateJobPigJob
    Job is a Pig job.
    PrerequisiteStepIds List<string>
    The optional list of prerequisite job step_ids. If not specified, the job will start at the beginning of the workflow.
    PrestoJob WorkflowTemplateJobPrestoJob
    Job is a Presto job.
    PysparkJob WorkflowTemplateJobPysparkJob
    Job is a PySpark job.
    Scheduling WorkflowTemplateJobScheduling
    Job scheduling configuration.
    SparkJob WorkflowTemplateJobSparkJob
    Job is a Spark job.
    SparkRJob WorkflowTemplateJobSparkRJob
    Job is a SparkR job.
    SparkSqlJob WorkflowTemplateJobSparkSqlJob
    Job is a SparkSql job.
    StepId string
    Required. The step id. The id must be unique among all jobs within the template. The step id is used as a prefix for the job id, as the job goog-dataproc-workflow-step-id label, and in the prerequisiteStepIds field from other steps. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with an underscore or hyphen. Must consist of between 3 and 50 characters.
    HadoopJob WorkflowTemplateJobHadoopJob
    Job is a Hadoop job.
    HiveJob WorkflowTemplateJobHiveJob
    Job is a Hive job.
    Labels map[string]string
    The labels to associate with this job. Label keys must be between 1 and 63 characters long, and must conform to the following regular expression: [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}. Label values must be between 1 and 63 characters long, and must conform to the following regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63}. No more than 32 labels can be associated with a given job.
    PigJob WorkflowTemplateJobPigJob
    Job is a Pig job.
    PrerequisiteStepIds []string
    The optional list of prerequisite job step_ids. If not specified, the job will start at the beginning of the workflow.
    PrestoJob WorkflowTemplateJobPrestoJob
    Job is a Presto job.
    PysparkJob WorkflowTemplateJobPysparkJob
    Job is a PySpark job.
    Scheduling WorkflowTemplateJobScheduling
    Job scheduling configuration.
    SparkJob WorkflowTemplateJobSparkJob
    Job is a Spark job.
    SparkRJob WorkflowTemplateJobSparkRJob
    Job is a SparkR job.
    SparkSqlJob WorkflowTemplateJobSparkSqlJob
    Job is a SparkSql job.
    stepId String
    Required. The step id. The id must be unique among all jobs within the template. The step id is used as a prefix for the job id, as the job goog-dataproc-workflow-step-id label, and in the prerequisiteStepIds field from other steps. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with an underscore or hyphen. Must consist of between 3 and 50 characters.
    hadoopJob WorkflowTemplateJobHadoopJob
    Job is a Hadoop job.
    hiveJob WorkflowTemplateJobHiveJob
    Job is a Hive job.
    labels Map<String,String>
    The labels to associate with this job. Label keys must be between 1 and 63 characters long, and must conform to the following regular expression: [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}. Label values must be between 1 and 63 characters long, and must conform to the following regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63}. No more than 32 labels can be associated with a given job.
    pigJob WorkflowTemplateJobPigJob
    Job is a Pig job.
    prerequisiteStepIds List<String>
    The optional list of prerequisite job step_ids. If not specified, the job will start at the beginning of the workflow.
    prestoJob WorkflowTemplateJobPrestoJob
    Job is a Presto job.
    pysparkJob WorkflowTemplateJobPysparkJob
    Job is a PySpark job.
    scheduling WorkflowTemplateJobScheduling
    Job scheduling configuration.
    sparkJob WorkflowTemplateJobSparkJob
    Job is a Spark job.
    sparkRJob WorkflowTemplateJobSparkRJob
    Job is a SparkR job.
    sparkSqlJob WorkflowTemplateJobSparkSqlJob
    Job is a SparkSql job.
    stepId string
    Required. The step id. The id must be unique among all jobs within the template. The step id is used as a prefix for the job id, as the job goog-dataproc-workflow-step-id label, and in the prerequisiteStepIds field from other steps. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with an underscore or hyphen. Must consist of between 3 and 50 characters.
    hadoopJob WorkflowTemplateJobHadoopJob
    Job is a Hadoop job.
    hiveJob WorkflowTemplateJobHiveJob
    Job is a Hive job.
    labels {[key: string]: string}
    The labels to associate with this job. Label keys must be between 1 and 63 characters long, and must conform to the following regular expression: [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}. Label values must be between 1 and 63 characters long, and must conform to the following regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63}. No more than 32 labels can be associated with a given job.
    pigJob WorkflowTemplateJobPigJob
    Job is a Pig job.
    prerequisiteStepIds string[]
    The optional list of prerequisite job step_ids. If not specified, the job will start at the beginning of the workflow.
    prestoJob WorkflowTemplateJobPrestoJob
    Job is a Presto job.
    pysparkJob WorkflowTemplateJobPysparkJob
    Job is a PySpark job.
    scheduling WorkflowTemplateJobScheduling
    Job scheduling configuration.
    sparkJob WorkflowTemplateJobSparkJob
    Job is a Spark job.
    sparkRJob WorkflowTemplateJobSparkRJob
    Job is a SparkR job.
    sparkSqlJob WorkflowTemplateJobSparkSqlJob
    Job is a SparkSql job.
    step_id str
    Required. The step id. The id must be unique among all jobs within the template. The step id is used as a prefix for the job id, as the job goog-dataproc-workflow-step-id label, and in the prerequisiteStepIds field from other steps. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with an underscore or hyphen. Must consist of between 3 and 50 characters.
    hadoop_job WorkflowTemplateJobHadoopJob
    Job is a Hadoop job.
    hive_job WorkflowTemplateJobHiveJob
    Job is a Hive job.
    labels Mapping[str, str]
    The labels to associate with this job. Label keys must be between 1 and 63 characters long, and must conform to the following regular expression: [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}. Label values must be between 1 and 63 characters long, and must conform to the following regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63}. No more than 32 labels can be associated with a given job.
    pig_job WorkflowTemplateJobPigJob
    Job is a Pig job.
    prerequisite_step_ids Sequence[str]
    The optional list of prerequisite job step_ids. If not specified, the job will start at the beginning of the workflow.
    presto_job WorkflowTemplateJobPrestoJob
    Job is a Presto job.
    pyspark_job WorkflowTemplateJobPysparkJob
    Job is a PySpark job.
    scheduling WorkflowTemplateJobScheduling
    Job scheduling configuration.
    spark_job WorkflowTemplateJobSparkJob
    Job is a Spark job.
    spark_r_job WorkflowTemplateJobSparkRJob
    Job is a SparkR job.
    spark_sql_job WorkflowTemplateJobSparkSqlJob
    Job is a SparkSql job.
    stepId String
    Required. The step id. The id must be unique among all jobs within the template. The step id is used as a prefix for the job id, as the job goog-dataproc-workflow-step-id label, and in the prerequisiteStepIds field from other steps. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with an underscore or hyphen. Must consist of between 3 and 50 characters.
    hadoopJob Property Map
    Job is a Hadoop job.
    hiveJob Property Map
    Job is a Hive job.
    labels Map<String>
    The labels to associate with this job. Label keys must be between 1 and 63 characters long, and must conform to the following regular expression: [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}. Label values must be between 1 and 63 characters long, and must conform to the following regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63}. No more than 32 labels can be associated with a given job.
    pigJob Property Map
    Job is a Pig job.
    prerequisiteStepIds List<String>
    The optional list of prerequisite job step_ids. If not specified, the job will start at the beginning of the workflow.
    prestoJob Property Map
    Job is a Presto job.
    pysparkJob Property Map
    Job is a PySpark job.
    scheduling Property Map
    Job scheduling configuration.
    sparkJob Property Map
    Job is a Spark job.
    sparkRJob Property Map
    Job is a SparkR job.
    sparkSqlJob Property Map
    Job is a SparkSql job.
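
    As an illustration of a single jobs entry, the sketch below defines a PySpark step that waits on a prerequisite step and carries job-level labels. The step ids, label, and bucket paths are hypothetical, and the prerequisite step is assumed to exist elsewhere in the same template.

    import * as gcp from "@pulumi/gcp";
    
    // One element of the `jobs` input; "ingest-step" is assumed to be defined earlier in the template.
    const transformStep: gcp.types.input.dataproc.WorkflowTemplateJob = {
        stepId: "transform-step",
        prerequisiteStepIds: ["ingest-step"],
        labels: { pipeline: "nightly" },        // job-level labels, distinct from template labels
        pysparkJob: {
            mainPythonFileUri: "gs://my-bucket/jobs/transform.py",
            args: ["--date", "2024-01-01"],
        },
    };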

    WorkflowTemplateJobHadoopJob, WorkflowTemplateJobHadoopJobArgs

    ArchiveUris List<string>
    HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip.
    Args List<string>
    The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
    FileUris List<string>
    HCFS (Hadoop Compatible Filesystem) URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.
    JarFileUris List<string>
    Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks.
    LoggingConfig WorkflowTemplateJobHadoopJobLoggingConfig
    The runtime log config for job execution.
    MainClass string
    The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in jar_file_uris.
    MainJarFileUri string
    The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'
    Properties Dictionary<string, string>
    A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code.
    ArchiveUris []string
    HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip.
    Args []string
    The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
    FileUris []string
    HCFS (Hadoop Compatible Filesystem) URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.
    JarFileUris []string
    Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks.
    LoggingConfig WorkflowTemplateJobHadoopJobLoggingConfig
    The runtime log config for job execution.
    MainClass string
    The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in jar_file_uris.
    MainJarFileUri string
    The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'
    Properties map[string]string
    A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code.
    archiveUris List<String>
    HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip.
    args List<String>
    The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
    fileUris List<String>
    HCFS (Hadoop Compatible Filesystem) URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.
    jarFileUris List<String>
    Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks.
    loggingConfig WorkflowTemplateJobHadoopJobLoggingConfig
    The runtime log config for job execution.
    mainClass String
    The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in jar_file_uris.
    mainJarFileUri String
    The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'
    properties Map<String,String>
    A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code.
    archiveUris string[]
    HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip.
    args string[]
    The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
    fileUris string[]
    HCFS (Hadoop Compatible Filesystem) URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.
    jarFileUris string[]
    Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks.
    loggingConfig WorkflowTemplateJobHadoopJobLoggingConfig
    The runtime log config for job execution.
    mainClass string
    The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in jar_file_uris.
    mainJarFileUri string
    The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'
    properties {[key: string]: string}
    A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code.
    archive_uris Sequence[str]
    HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip.
    args Sequence[str]
    The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
    file_uris Sequence[str]
    HCFS (Hadoop Compatible Filesystem) URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.
    jar_file_uris Sequence[str]
    Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks.
    logging_config WorkflowTemplateJobHadoopJobLoggingConfig
    The runtime log config for job execution.
    main_class str
    The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in jar_file_uris.
    main_jar_file_uri str
    The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'
    properties Mapping[str, str]
    A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code.
    archiveUris List<String>
    HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip.
    args List<String>
    The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
    fileUris List<String>
    HCFS (Hadoop Compatible Filesystem) URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.
    jarFileUris List<String>
    Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks.
    loggingConfig Property Map
    The runtime log config for job execution.
    mainClass String
    The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in jar_file_uris.
    mainJarFileUri String
    The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'
    properties Map<String>
    A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code.
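
    For example, a hadoopJob step using the fields above might be sketched as follows; the jar URI, arguments, and property value are hypothetical.

    import * as gcp from "@pulumi/gcp";
    
    // A Hadoop step pointing at a jar already present on the cluster image.
    const wordcountStep: gcp.types.input.dataproc.WorkflowTemplateJob = {
        stepId: "wordcount",
        hadoopJob: {
            mainJarFileUri: "file:///usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar",
            args: ["wordcount", "gs://my-bucket/input/", "gs://my-bucket/output/"],
            properties: { "mapreduce.job.reduces": "4" },  // merged with Dataproc-managed Hadoop properties
        },
    };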

    WorkflowTemplateJobHadoopJobLoggingConfig, WorkflowTemplateJobHadoopJobLoggingConfigArgs

    DriverLogLevels Dictionary<string, string>
    The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
    DriverLogLevels map[string]string
    The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
    driverLogLevels Map<String,String>
    The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
    driverLogLevels {[key: string]: string}
    The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
    driver_log_levels Mapping[str, str]
    The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
    driverLogLevels Map<String>
    The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
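
    A standalone loggingConfig value with per-package driver log levels might look like this; the package names and levels are illustrative.

    import * as gcp from "@pulumi/gcp";
    
    // Raises driver verbosity for Hadoop packages while keeping the root logger at INFO.
    const hadoopLogging: gcp.types.input.dataproc.WorkflowTemplateJobHadoopJobLoggingConfig = {
        driverLogLevels: {
            root: "INFO",
            "org.apache.hadoop": "DEBUG",
        },
    };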

    WorkflowTemplateJobHiveJob, WorkflowTemplateJobHiveJobArgs

    ContinueOnFailure bool
    Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.
    JarFileUris List<string>
    HCFS URIs of jar files to add to the CLASSPATH of the Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes and UDFs.
    Properties Dictionary<string, string>
    A mapping of property names and values, used to configure Hive. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/hive/conf/hive-site.xml, and classes in user code.
    QueryFileUri string
    The HCFS URI of the script that contains Hive queries.
    QueryList WorkflowTemplateJobHiveJobQueryList
    A list of queries.
    ScriptVariables Dictionary<string, string>
    Mapping of query variable names to values (equivalent to the Hive command: SET name="value";).
    ContinueOnFailure bool
    Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.
    JarFileUris []string
    HCFS URIs of jar files to add to the CLASSPATH of the Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes and UDFs.
    Properties map[string]string
    A mapping of property names and values, used to configure Hive. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/hive/conf/hive-site.xml, and classes in user code.
    QueryFileUri string
    The HCFS URI of the script that contains Hive queries.
    QueryList WorkflowTemplateJobHiveJobQueryList
    A list of queries.
    ScriptVariables map[string]string
    Mapping of query variable names to values (equivalent to the Hive command: SET name="value";).
    continueOnFailure Boolean
    Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.
    jarFileUris List<String>
    HCFS URIs of jar files to add to the CLASSPATH of the Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes and UDFs.
    properties Map<String,String>
    A mapping of property names and values, used to configure Hive. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/hive/conf/hive-site.xml, and classes in user code.
    queryFileUri String
    The HCFS URI of the script that contains Hive queries.
    queryList WorkflowTemplateJobHiveJobQueryList
    A list of queries.
    scriptVariables Map<String,String>
    Mapping of query variable names to values (equivalent to the Hive command: SET name="value";).
    continueOnFailure boolean
    Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.
    jarFileUris string[]
    HCFS URIs of jar files to add to the CLASSPATH of the Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes and UDFs.
    properties {[key: string]: string}
    A mapping of property names and values, used to configure Hive. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/hive/conf/hive-site.xml, and classes in user code.
    queryFileUri string
    The HCFS URI of the script that contains Hive queries.
    queryList WorkflowTemplateJobHiveJobQueryList
    A list of queries.
    scriptVariables {[key: string]: string}
    Mapping of query variable names to values (equivalent to the Hive command: SET name="value";).
    continue_on_failure bool
    Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.
    jar_file_uris Sequence[str]
    HCFS URIs of jar files to add to the CLASSPATH of the Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes and UDFs.
    properties Mapping[str, str]
    A mapping of property names and values, used to configure Hive. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/hive/conf/hive-site.xml, and classes in user code.
    query_file_uri str
    The HCFS URI of the script that contains Hive queries.
    query_list WorkflowTemplateJobHiveJobQueryList
    A list of queries.
    script_variables Mapping[str, str]
    Mapping of query variable names to values (equivalent to the Hive command: SET name="value";).
    continueOnFailure Boolean
    Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.
    jarFileUris List<String>
    HCFS URIs of jar files to add to the CLASSPATH of the Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes and UDFs.
    properties Map<String>
    A mapping of property names and values, used to configure Hive. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/hive/conf/hive-site.xml, and classes in user code.
    queryFileUri String
    The HCFS URI of the script that contains Hive queries.
    queryList Property Map
    A list of queries.
    scriptVariables Map<String>
    Mapping of query variable names to values (equivalent to the Hive command: SET name="value";).
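
    For illustration, a Hive job config might be sketched as follows; the script URI and variable are placeholders, and the type name assumes the provider's generated TypeScript inputs.

    import * as gcp from "@pulumi/gcp";

    // Hypothetical Hive job: run a script from GCS and substitute one variable.
    const hiveJob: gcp.types.input.dataproc.WorkflowTemplateJobHiveJob = {
        queryFileUri: "gs://my-bucket/hive/etl.q",
        // Equivalent to running `SET run_date="2024-05-01";` before the script.
        scriptVariables: { run_date: "2024-05-01" },
        continueOnFailure: true,
    };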

    WorkflowTemplateJobHiveJobQueryList, WorkflowTemplateJobHiveJobQueryListArgs

    Queries List<string>
    Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": [ "query1", "query2", "query3;query4" ] } }
    Queries []string
    Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": [ "query1", "query2", "query3;query4" ] } }
    queries List<String>
    Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": [ "query1", "query2", "query3;query4" ] } }
    queries string[]
    Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": [ "query1", "query2", "query3;query4" ] } }
    queries Sequence[str]
    Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": [ "query1", "query2", "query3;query4" ] } }
    queries List<String>
    Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": [ "query1", "query2", "query3;query4" ] } }
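
    Alternatively, queries can be supplied inline through a query list rather than a queryFileUri; a minimal sketch (the table name is a placeholder):

    import * as gcp from "@pulumi/gcp";

    // Inline Hive queries; trailing semicolons are not required.
    const queryList: gcp.types.input.dataproc.WorkflowTemplateJobHiveJobQueryList = {
        queries: [
            "SHOW DATABASES",
            "SELECT COUNT(*) FROM logs.events",
        ],
    };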

    WorkflowTemplateJobPigJob, WorkflowTemplateJobPigJobArgs

    ContinueOnFailure bool
    Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.
    JarFileUris List<string>
    HCFS URIs of jar files to add to the CLASSPATH of the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
    LoggingConfig WorkflowTemplateJobPigJobLoggingConfig
    The runtime log config for job execution.
    Properties Dictionary<string, string>
    A mapping of property names to values, used to configure Pig. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/pig/conf/pig.properties, and classes in user code.
    QueryFileUri string
    The HCFS URI of the script that contains the Pig queries.
    QueryList WorkflowTemplateJobPigJobQueryList
    A list of queries.
    ScriptVariables Dictionary<string, string>
    Mapping of query variable names to values (equivalent to the Pig command: name=[value]).
    ContinueOnFailure bool
    Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.
    JarFileUris []string
    HCFS URIs of jar files to add to the CLASSPATH of the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
    LoggingConfig WorkflowTemplateJobPigJobLoggingConfig
    The runtime log config for job execution.
    Properties map[string]string
    A mapping of property names to values, used to configure Pig. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/pig/conf/pig.properties, and classes in user code.
    QueryFileUri string
    The HCFS URI of the script that contains the Pig queries.
    QueryList WorkflowTemplateJobPigJobQueryList
    A list of queries.
    ScriptVariables map[string]string
    Mapping of query variable names to values (equivalent to the Pig command: name=[value]).
    continueOnFailure Boolean
    Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.
    jarFileUris List<String>
    HCFS URIs of jar files to add to the CLASSPATH of the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
    loggingConfig WorkflowTemplateJobPigJobLoggingConfig
    The runtime log config for job execution.
    properties Map<String,String>
    A mapping of property names to values, used to configure Pig. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/pig/conf/pig.properties, and classes in user code.
    queryFileUri String
    The HCFS URI of the script that contains the Pig queries.
    queryList WorkflowTemplateJobPigJobQueryList
    A list of queries.
    scriptVariables Map<String,String>
    Mapping of query variable names to values (equivalent to the Pig command: name=[value]).
    continueOnFailure boolean
    Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.
    jarFileUris string[]
    HCFS URIs of jar files to add to the CLASSPATH of the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
    loggingConfig WorkflowTemplateJobPigJobLoggingConfig
    The runtime log config for job execution.
    properties {[key: string]: string}
    A mapping of property names to values, used to configure Pig. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/pig/conf/pig.properties, and classes in user code.
    queryFileUri string
    The HCFS URI of the script that contains the Pig queries.
    queryList WorkflowTemplateJobPigJobQueryList
    A list of queries.
    scriptVariables {[key: string]: string}
    Mapping of query variable names to values (equivalent to the Pig command: name=[value]).
    continue_on_failure bool
    Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.
    jar_file_uris Sequence[str]
    HCFS URIs of jar files to add to the CLASSPATH of the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
    logging_config WorkflowTemplateJobPigJobLoggingConfig
    The runtime log config for job execution.
    properties Mapping[str, str]
    A mapping of property names to values, used to configure Pig. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/pig/conf/pig.properties, and classes in user code.
    query_file_uri str
    The HCFS URI of the script that contains the Pig queries.
    query_list WorkflowTemplateJobPigJobQueryList
    A list of queries.
    script_variables Mapping[str, str]
    Mapping of query variable names to values (equivalent to the Pig command: name=[value]).
    continueOnFailure Boolean
    Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.
    jarFileUris List<String>
    HCFS URIs of jar files to add to the CLASSPATH of the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
    loggingConfig Property Map
    The runtime log config for job execution.
    properties Map<String>
    A mapping of property names to values, used to configure Pig. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/pig/conf/pig.properties, and classes in user code.
    queryFileUri String
    The HCFS URI of the script that contains the Pig queries.
    queryList Property Map
    A list of queries.
    scriptVariables Map<String>
    Mapping of query variable names to values (equivalent to the Pig command: name=[value]).
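
    A Pig job config could be sketched as below; the script and UDF jar URIs are placeholders, and scriptVariables behaves like name=[value] on the Pig command line.

    import * as gcp from "@pulumi/gcp";

    // Hypothetical Pig job with one script variable and a UDF jar.
    const pigJob: gcp.types.input.dataproc.WorkflowTemplateJobPigJob = {
        queryFileUri: "gs://my-bucket/pig/transform.pig",
        scriptVariables: { INPUT: "gs://my-bucket/raw/" },
        jarFileUris: ["gs://my-bucket/udfs/my-udfs.jar"],
    };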

    WorkflowTemplateJobPigJobLoggingConfig, WorkflowTemplateJobPigJobLoggingConfigArgs

    DriverLogLevels Dictionary<string, string>
    The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
    DriverLogLevels map[string]string
    The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
    driverLogLevels Map<String,String>
    The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
    driverLogLevels {[key: string]: string}
    The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
    driver_log_levels Mapping[str, str]
    The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
    driverLogLevels Map<String>
    The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'

    WorkflowTemplateJobPigJobQueryList, WorkflowTemplateJobPigJobQueryListArgs

    Queries List<string>
    Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": [ "query1", "query2", "query3;query4" ] } }
    Queries []string
    Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": [ "query1", "query2", "query3;query4" ] } }
    queries List<String>
    Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": [ "query1", "query2", "query3;query4" ] } }
    queries string[]
    Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": [ "query1", "query2", "query3;query4" ] } }
    queries Sequence[str]
    Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": [ "query1", "query2", "query3;query4" ] } }
    queries List<String>
    Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": [ "query1", "query2", "query3;query4" ] } }

    WorkflowTemplateJobPrestoJob, WorkflowTemplateJobPrestoJobArgs

    ClientTags List<string>
    Presto client tags to attach to this query
    ContinueOnFailure bool
    Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.
    LoggingConfig WorkflowTemplateJobPrestoJobLoggingConfig
    The runtime log config for job execution.
    OutputFormat string
    The format in which query output will be displayed. See the Presto documentation for supported output formats
    Properties Dictionary<string, string>
    A mapping of property names to values. Used to set Presto session properties (https://prestodb.io/docs/current/sql/set-session.html). Equivalent to using the --session flag in the Presto CLI.
    QueryFileUri string
    The HCFS URI of the script that contains SQL queries.
    QueryList WorkflowTemplateJobPrestoJobQueryList
    A list of queries.
    ClientTags []string
    Presto client tags to attach to this query
    ContinueOnFailure bool
    Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.
    LoggingConfig WorkflowTemplateJobPrestoJobLoggingConfig
    The runtime log config for job execution.
    OutputFormat string
    The format in which query output will be displayed. See the Presto documentation for supported output formats
    Properties map[string]string
    A mapping of property names to values. Used to set Presto session properties (https://prestodb.io/docs/current/sql/set-session.html). Equivalent to using the --session flag in the Presto CLI.
    QueryFileUri string
    The HCFS URI of the script that contains SQL queries.
    QueryList WorkflowTemplateJobPrestoJobQueryList
    A list of queries.
    clientTags List<String>
    Presto client tags to attach to this query
    continueOnFailure Boolean
    Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.
    loggingConfig WorkflowTemplateJobPrestoJobLoggingConfig
    The runtime log config for job execution.
    outputFormat String
    The format in which query output will be displayed. See the Presto documentation for supported output formats
    properties Map<String,String>
    A mapping of property names to values. Used to set Presto session properties (https://prestodb.io/docs/current/sql/set-session.html). Equivalent to using the --session flag in the Presto CLI.
    queryFileUri String
    The HCFS URI of the script that contains SQL queries.
    queryList WorkflowTemplateJobPrestoJobQueryList
    A list of queries.
    clientTags string[]
    Presto client tags to attach to this query
    continueOnFailure boolean
    Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.
    loggingConfig WorkflowTemplateJobPrestoJobLoggingConfig
    The runtime log config for job execution.
    outputFormat string
    The format in which query output will be displayed. See the Presto documentation for supported output formats
    properties {[key: string]: string}
    A mapping of property names to values. Used to set Presto session properties (https://prestodb.io/docs/current/sql/set-session.html). Equivalent to using the --session flag in the Presto CLI.
    queryFileUri string
    The HCFS URI of the script that contains SQL queries.
    queryList WorkflowTemplateJobPrestoJobQueryList
    A list of queries.
    client_tags Sequence[str]
    Presto client tags to attach to this query
    continue_on_failure bool
    Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.
    logging_config WorkflowTemplateJobPrestoJobLoggingConfig
    The runtime log config for job execution.
    output_format str
    The format in which query output will be displayed. See the Presto documentation for supported output formats
    properties Mapping[str, str]
    A mapping of property names to values. Used to set Presto session properties (https://prestodb.io/docs/current/sql/set-session.html). Equivalent to using the --session flag in the Presto CLI.
    query_file_uri str
    The HCFS URI of the script that contains SQL queries.
    query_list WorkflowTemplateJobPrestoJobQueryList
    A list of queries.
    clientTags List<String>
    Presto client tags to attach to this query
    continueOnFailure Boolean
    Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.
    loggingConfig Property Map
    The runtime log config for job execution.
    outputFormat String
    The format in which query output will be displayed. See the Presto documentation for supported output formats
    properties Map<String>
    A mapping of property names to values. Used to set Presto session properties (https://prestodb.io/docs/current/sql/set-session.html). Equivalent to using the --session flag in the Presto CLI.
    queryFileUri String
    The HCFS URI of the script that contains SQL queries.
    queryList Property Map
    A list of queries.
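
    For illustration, a Presto job config might look like this sketch; the query URI, client tags, output format, and session property are placeholders rather than values from this reference.

    import * as gcp from "@pulumi/gcp";

    // Hypothetical Presto job; `properties` sets session properties (like --session in the CLI).
    const prestoJob: gcp.types.input.dataproc.WorkflowTemplateJobPrestoJob = {
        queryFileUri: "gs://my-bucket/presto/report.sql",
        clientTags: ["nightly", "reporting"],
        outputFormat: "CSV",
        properties: { "query_max_run_time": "30m" },
    };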

    WorkflowTemplateJobPrestoJobLoggingConfig, WorkflowTemplateJobPrestoJobLoggingConfigArgs

    DriverLogLevels Dictionary<string, string>
    The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
    DriverLogLevels map[string]string
    The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
    driverLogLevels Map<String,String>
    The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
    driverLogLevels {[key: string]: string}
    The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
    driver_log_levels Mapping[str, str]
    The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
    driverLogLevels Map<String>
    The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'

    WorkflowTemplateJobPrestoJobQueryList, WorkflowTemplateJobPrestoJobQueryListArgs

    Queries List<string>
    Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": [ "query1", "query2", "query3;query4" ] } }
    Queries []string
    Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": [ "query1", "query2", "query3;query4" ] } }
    queries List<String>
    Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": [ "query1", "query2", "query3;query4" ] } }
    queries string[]
    Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": [ "query1", "query2", "query3;query4" ] } }
    queries Sequence[str]
    Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": [ "query1", "query2", "query3;query4" ] } }
    queries List<String>
    Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": [ "query1", "query2", "query3;query4" ] } }

    WorkflowTemplateJobPysparkJob, WorkflowTemplateJobPysparkJobArgs

    MainPythonFileUri string
    Required. The HCFS URI of the main Python file to use as the driver. Must be a .py file.
    ArchiveUris List<string>
    HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
    Args List<string>
    The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
    FileUris List<string>
    HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.
    JarFileUris List<string>
    HCFS URIs of jar files to add to the CLASSPATHs of the Python driver and tasks.
    LoggingConfig WorkflowTemplateJobPysparkJobLoggingConfig
    The runtime log config for job execution.
    Properties Dictionary<string, string>
    A mapping of property names to values, used to configure PySpark. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.
    PythonFileUris List<string>
    HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip.
    MainPythonFileUri string
    Required. The HCFS URI of the main Python file to use as the driver. Must be a .py file.
    ArchiveUris []string
    HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
    Args []string
    The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
    FileUris []string
    HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.
    JarFileUris []string
    HCFS URIs of jar files to add to the CLASSPATHs of the Python driver and tasks.
    LoggingConfig WorkflowTemplateJobPysparkJobLoggingConfig
    The runtime log config for job execution.
    Properties map[string]string
    A mapping of property names to values, used to configure PySpark. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.
    PythonFileUris []string
    HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip.
    mainPythonFileUri String
    Required. The HCFS URI of the main Python file to use as the driver. Must be a .py file.
    archiveUris List<String>
    HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
    args List<String>
    The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
    fileUris List<String>
    HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.
    jarFileUris List<String>
    HCFS URIs of jar files to add to the CLASSPATHs of the Python driver and tasks.
    loggingConfig WorkflowTemplateJobPysparkJobLoggingConfig
    The runtime log config for job execution.
    properties Map<String,String>
    A mapping of property names to values, used to configure PySpark. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.
    pythonFileUris List<String>
    HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip.
    mainPythonFileUri string
    Required. The HCFS URI of the main Python file to use as the driver. Must be a .py file.
    archiveUris string[]
    HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
    args string[]
    The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
    fileUris string[]
    HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.
    jarFileUris string[]
    HCFS URIs of jar files to add to the CLASSPATHs of the Python driver and tasks.
    loggingConfig WorkflowTemplateJobPysparkJobLoggingConfig
    The runtime log config for job execution.
    properties {[key: string]: string}
    A mapping of property names to values, used to configure PySpark. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.
    pythonFileUris string[]
    HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip.
    main_python_file_uri str
    Required. The HCFS URI of the main Python file to use as the driver. Must be a .py file.
    archive_uris Sequence[str]
    HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
    args Sequence[str]
    The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
    file_uris Sequence[str]
    HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.
    jar_file_uris Sequence[str]
    HCFS URIs of jar files to add to the CLASSPATHs of the Python driver and tasks.
    logging_config WorkflowTemplateJobPysparkJobLoggingConfig
    The runtime log config for job execution.
    properties Mapping[str, str]
    A mapping of property names to values, used to configure PySpark. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.
    python_file_uris Sequence[str]
    HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip.
    mainPythonFileUri String
    Required. The HCFS URI of the main Python file to use as the driver. Must be a .py file.
    archiveUris List<String>
    HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
    args List<String>
    The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
    fileUris List<String>
    HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.
    jarFileUris List<String>
    HCFS URIs of jar files to add to the CLASSPATHs of the Python driver and tasks.
    loggingConfig Property Map
    The runtime log config for job execution.
    properties Map<String>
    A mapping of property names to values, used to configure PySpark. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.
    pythonFileUris List<String>
    HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip.
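
    A PySpark job config might be sketched as below; the main script, dependency archive, and Spark property are placeholders.

    import * as gcp from "@pulumi/gcp";

    // Hypothetical PySpark job: main script plus a zip of helper modules.
    const pysparkJob: gcp.types.input.dataproc.WorkflowTemplateJobPysparkJob = {
        mainPythonFileUri: "gs://my-bucket/pyspark/main.py",
        pythonFileUris: ["gs://my-bucket/pyspark/deps.zip"],
        args: ["--date", "2024-05-01"],
        properties: { "spark.executor.memory": "4g" },
    };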

    WorkflowTemplateJobPysparkJobLoggingConfig, WorkflowTemplateJobPysparkJobLoggingConfigArgs

    DriverLogLevels Dictionary<string, string>
    The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
    DriverLogLevels map[string]string
    The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
    driverLogLevels Map<String,String>
    The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
    driverLogLevels {[key: string]: string}
    The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
    driver_log_levels Mapping[str, str]
    The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
    driverLogLevels Map<String>
    The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'

    WorkflowTemplateJobScheduling, WorkflowTemplateJobSchedulingArgs

    MaxFailuresPerHour int
    Maximum number of times per hour a driver may be restarted as a result of driver exiting with non-zero code before job is reported failed. A job may be reported as thrashing if driver exits with non-zero code 4 times within 10 minute window. Maximum value is 10.
    MaxFailuresTotal int
    Maximum number of times in total a driver may be restarted as a result of driver exiting with non-zero code before job is reported failed. Maximum value is 240
    MaxFailuresPerHour int
    Maximum number of times per hour a driver may be restarted as a result of driver exiting with non-zero code before job is reported failed. A job may be reported as thrashing if driver exits with non-zero code 4 times within 10 minute window. Maximum value is 10.
    MaxFailuresTotal int
    Maximum number of times in total a driver may be restarted as a result of driver exiting with non-zero code before job is reported failed. Maximum value is 240
    maxFailuresPerHour Integer
    Maximum number of times per hour a driver may be restarted as a result of driver exiting with non-zero code before job is reported failed. A job may be reported as thrashing if driver exits with non-zero code 4 times within 10 minute window. Maximum value is 10.
    maxFailuresTotal Integer
    Maximum number of times in total a driver may be restarted as a result of driver exiting with non-zero code before job is reported failed. Maximum value is 240
    maxFailuresPerHour number
    Maximum number of times per hour a driver may be restarted as a result of driver exiting with non-zero code before job is reported failed. A job may be reported as thrashing if driver exits with non-zero code 4 times within 10 minute window. Maximum value is 10.
    maxFailuresTotal number
    Maximum number of times in total a driver may be restarted as a result of driver exiting with non-zero code before job is reported failed. Maximum value is 240
    max_failures_per_hour int
    Maximum number of times per hour a driver may be restarted as a result of driver exiting with non-zero code before job is reported failed. A job may be reported as thrashing if driver exits with non-zero code 4 times within 10 minute window. Maximum value is 10.
    max_failures_total int
    Maximum number of times in total a driver may be restarted as a result of driver exiting with non-zero code before job is reported failed. Maximum value is 240
    maxFailuresPerHour Number
    Maximum number of times per hour a driver may be restarted as a result of driver exiting with non-zero code before job is reported failed. A job may be reported as thrashing if driver exits with non-zero code 4 times within 10 minute window. Maximum value is 10.
    maxFailuresTotal Number
    Maximum number of times in total a driver may be restarted as a result of driver exiting with non-zero code before job is reported failed. Maximum value is 240
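
    As a sketch, a scheduling block that tolerates two driver restarts per hour and five overall would look like:

    import * as gcp from "@pulumi/gcp";

    // Restart the driver at most twice per hour and five times in total before failing the job.
    const scheduling: gcp.types.input.dataproc.WorkflowTemplateJobScheduling = {
        maxFailuresPerHour: 2,
        maxFailuresTotal: 5,
    };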

    WorkflowTemplateJobSparkJob, WorkflowTemplateJobSparkJobArgs

    ArchiveUris List<string>
    HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
    Args List<string>
    The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
    FileUris List<string>
    HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.
    JarFileUris List<string>
    HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks.
    LoggingConfig WorkflowTemplateJobSparkJobLoggingConfig
    The runtime log config for job execution.
    MainClass string
    The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in jar_file_uris.
    MainJarFileUri string
    The HCFS URI of the jar file that contains the main class.
    Properties Dictionary<string, string>
    A mapping of property names to values, used to configure Spark. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.
    ArchiveUris []string
    HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
    Args []string
    The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
    FileUris []string
    HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.
    JarFileUris []string
    HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks.
    LoggingConfig WorkflowTemplateJobSparkJobLoggingConfig
    The runtime log config for job execution.
    MainClass string
    The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in jar_file_uris.
    MainJarFileUri string
    The HCFS URI of the jar file that contains the main class.
    Properties map[string]string
    A mapping of property names to values, used to configure Spark. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.
    archiveUris List<String>
    HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
    args List<String>
    The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
    fileUris List<String>
    HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.
    jarFileUris List<String>
    HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks.
    loggingConfig WorkflowTemplateJobSparkJobLoggingConfig
    The runtime log config for job execution.
    mainClass String
    The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in jar_file_uris.
    mainJarFileUri String
    The HCFS URI of the jar file that contains the main class.
    properties Map<String,String>
    A mapping of property names to values, used to configure Spark. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.
    archiveUris string[]
    HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
    args string[]
    The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
    fileUris string[]
    HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.
    jarFileUris string[]
    HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks.
    loggingConfig WorkflowTemplateJobSparkJobLoggingConfig
    The runtime log config for job execution.
    mainClass string
    The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in jar_file_uris.
    mainJarFileUri string
    The HCFS URI of the jar file that contains the main class.
    properties {[key: string]: string}
    A mapping of property names to values, used to configure Spark. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.
    archive_uris Sequence[str]
    HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
    args Sequence[str]
    The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
    file_uris Sequence[str]
    HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.
    jar_file_uris Sequence[str]
    HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks.
    logging_config WorkflowTemplateJobSparkJobLoggingConfig
    The runtime log config for job execution.
    main_class str
    The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in jar_file_uris.
    main_jar_file_uri str
    The HCFS URI of the jar file that contains the main class.
    properties Mapping[str, str]
    A mapping of property names to values, used to configure Spark. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.
    archiveUris List<String>
    HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
    args List<String>
    The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
    fileUris List<String>
    HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.
    jarFileUris List<String>
    HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks.
    loggingConfig Property Map
    The runtime log config for job execution.
    mainClass String
    The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in jar_file_uris.
    mainJarFileUri String
    The HCFS URI of the jar file that contains the main class.
    properties Map<String>
    A mapping of property names to values, used to configure Spark. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.
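
    For illustration, a Spark job config that supplies an explicit jar instead of relying on the default CLASSPATH might be sketched as follows (URIs and values are placeholders):

    import * as gcp from "@pulumi/gcp";

    // Hypothetical Spark job driven by a jar stored in GCS.
    const sparkJob: gcp.types.input.dataproc.WorkflowTemplateJobSparkJob = {
        mainJarFileUri: "gs://my-bucket/jars/analytics.jar",
        args: ["--mode", "batch"],
        properties: { "spark.sql.shuffle.partitions": "64" },
    };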

    WorkflowTemplateJobSparkJobLoggingConfig, WorkflowTemplateJobSparkJobLoggingConfigArgs

    DriverLogLevels Dictionary<string, string>
    The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
    DriverLogLevels map[string]string
    The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
    driverLogLevels Map<String,String>
    The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
    driverLogLevels {[key: string]: string}
    The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
    driver_log_levels Mapping[str, str]
    The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
    driverLogLevels Map<String>
    The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'

    WorkflowTemplateJobSparkRJob, WorkflowTemplateJobSparkRJobArgs

    MainRFileUri string
    Required. The HCFS URI of the main R file to use as the driver. Must be a .R file.
    ArchiveUris List<string>
    HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
    Args List<string>
    The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
    FileUris List<string>
    HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.
    LoggingConfig WorkflowTemplateJobSparkRJobLoggingConfig
    The runtime log config for job execution.
    Properties Dictionary<string, string>
    A mapping of property names to values, used to configure SparkR. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.
    MainRFileUri string
    Required. The HCFS URI of the main R file to use as the driver. Must be a .R file.
    ArchiveUris []string
    HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
    Args []string
    The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
    FileUris []string
    HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.
    LoggingConfig WorkflowTemplateJobSparkRJobLoggingConfig
    The runtime log config for job execution.
    Properties map[string]string
    A mapping of property names to values, used to configure SparkR. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.
    mainRFileUri String
    Required. The HCFS URI of the main R file to use as the driver. Must be a .R file.
    archiveUris List<String>
    HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
    args List<String>
    The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
    fileUris List<String>
    HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.
    loggingConfig WorkflowTemplateJobSparkRJobLoggingConfig
    The runtime log config for job execution.
    properties Map<String,String>
    A mapping of property names to values, used to configure SparkR. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.
    mainRFileUri string
    Required. The HCFS URI of the main R file to use as the driver. Must be a .R file.
    archiveUris string[]
    HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
    args string[]
    The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
    fileUris string[]
    HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.
    loggingConfig WorkflowTemplateJobSparkRJobLoggingConfig
    The runtime log config for job execution.
    properties {[key: string]: string}
    A mapping of property names to values, used to configure SparkR. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.
    main_r_file_uri str
    Required. The HCFS URI of the main R file to use as the driver. Must be a .R file.
    archive_uris Sequence[str]
    HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
    args Sequence[str]
    The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
    file_uris Sequence[str]
    HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.
    logging_config WorkflowTemplateJobSparkRJobLoggingConfig
    The runtime log config for job execution.
    properties Mapping[str, str]
    A mapping of property names to values, used to configure SparkR. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.
    mainRFileUri String
    Required. The HCFS URI of the main R file to use as the driver. Must be a .R file.
    archiveUris List<String>
    HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
    args List<String>
    The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
    fileUris List<String>
    HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.
    loggingConfig Property Map
    The runtime log config for job execution.
    properties Map<String>
    A mapping of property names to values, used to configure SparkR. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.
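
    A SparkR job config could be sketched as below; the .R script and data URI are placeholders.

    import * as gcp from "@pulumi/gcp";

    // Hypothetical SparkR job: an R driver script with one argument.
    const sparkRJob: gcp.types.input.dataproc.WorkflowTemplateJobSparkRJob = {
        mainRFileUri: "gs://my-bucket/r/model.R",
        args: ["gs://my-bucket/data/train.csv"],
        properties: { "spark.driver.memory": "2g" },
    };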

    WorkflowTemplateJobSparkRJobLoggingConfig, WorkflowTemplateJobSparkRJobLoggingConfigArgs

    DriverLogLevels Dictionary<string, string>
    The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
    DriverLogLevels map[string]string
    The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
    driverLogLevels Map<String,String>
    The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
    driverLogLevels {[key: string]: string}
    The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
    driver_log_levels Mapping[str, str]
    The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
    driverLogLevels Map<String>
    The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'

    WorkflowTemplateJobSparkSqlJob, WorkflowTemplateJobSparkSqlJobArgs

    JarFileUris List<string>
    HCFS URIs of jar files to be added to the Spark CLASSPATH.
    LoggingConfig WorkflowTemplateJobSparkSqlJobLoggingConfig
    The runtime log config for job execution.
    Properties Dictionary<string, string>
    A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Dataproc API may be overwritten.
    QueryFileUri string
    The HCFS URI of the script that contains SQL queries.
    QueryList WorkflowTemplateJobSparkSqlJobQueryList
    A list of queries.
    ScriptVariables Dictionary<string, string>
    Mapping of query variable names to values (equivalent to the Spark SQL command: SET name="value";).
    JarFileUris []string
    HCFS URIs of jar files to be added to the Spark CLASSPATH.
    LoggingConfig WorkflowTemplateJobSparkSqlJobLoggingConfig
    The runtime log config for job execution.
    Properties map[string]string
    A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Dataproc API may be overwritten.
    QueryFileUri string
    The HCFS URI of the script that contains SQL queries.
    QueryList WorkflowTemplateJobSparkSqlJobQueryList
    A list of queries.
    ScriptVariables map[string]string
    Mapping of query variable names to values (equivalent to the Spark SQL command: SET name="value";).
    jarFileUris List<String>
    HCFS URIs of jar files to be added to the Spark CLASSPATH.
    loggingConfig WorkflowTemplateJobSparkSqlJobLoggingConfig
    The runtime log config for job execution.
    properties Map<String,String>
    A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Dataproc API may be overwritten.
    queryFileUri String
    The HCFS URI of the script that contains SQL queries.
    queryList WorkflowTemplateJobSparkSqlJobQueryList
    A list of queries.
    scriptVariables Map<String,String>
    Mapping of query variable names to values (equivalent to the Spark SQL command: SET name="value";).
    jarFileUris string[]
    HCFS URIs of jar files to be added to the Spark CLASSPATH.
    loggingConfig WorkflowTemplateJobSparkSqlJobLoggingConfig
    The runtime log config for job execution.
    properties {[key: string]: string}
    A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Dataproc API may be overwritten.
    queryFileUri string
    The HCFS URI of the script that contains SQL queries.
    queryList WorkflowTemplateJobSparkSqlJobQueryList
    A list of queries.
    scriptVariables {[key: string]: string}
    Mapping of query variable names to values (equivalent to the Spark SQL command: SET name="value";).
    jar_file_uris Sequence[str]
    HCFS URIs of jar files to be added to the Spark CLASSPATH.
    logging_config WorkflowTemplateJobSparkSqlJobLoggingConfig
    The runtime log config for job execution.
    properties Mapping[str, str]
    A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Dataproc API may be overwritten.
    query_file_uri str
    The HCFS URI of the script that contains SQL queries.
    query_list WorkflowTemplateJobSparkSqlJobQueryList
    A list of queries.
    script_variables Mapping[str, str]
    Mapping of query variable names to values (equivalent to the Spark SQL command: SET name="value";).
    jarFileUris List<String>
    HCFS URIs of jar files to be added to the Spark CLASSPATH.
    loggingConfig Property Map
    The runtime log config for job execution.
    properties Map<String>
    A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Dataproc API may be overwritten.
    queryFileUri String
    The HCFS URI of the script that contains SQL queries.
    queryList Property Map
    A list of queries.
    scriptVariables Map<String>
    Mapping of query variable names to values (equivalent to the Spark SQL command: SET name="value";).
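
    A minimal TypeScript sketch of a Spark SQL step for the template's jobs list; the step id, bucket path, variable, and property values are hypothetical, and gcp.types.input.dataproc.WorkflowTemplateJob refers to the Node SDK's generated input type:

    import * as gcp from "@pulumi/gcp";

    // Hypothetical Spark SQL step; add it to the template's `jobs` array.
    const sparkSqlStep: gcp.types.input.dataproc.WorkflowTemplateJob = {
        stepId: "dailyReport",
        sparkSqlJob: {
            queryFileUri: "gs://my-bucket/sql/daily_report.sql",   // assumed HCFS URI
            scriptVariables: { run_date: "2024-04-24" },           // equivalent to: SET run_date="2024-04-24";
            properties: { "spark.sql.shuffle.partitions": "48" },  // merged into Spark SQL's SparkConf
        },
    };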

    WorkflowTemplateJobSparkSqlJobLoggingConfig, WorkflowTemplateJobSparkSqlJobLoggingConfigArgs

    DriverLogLevels Dictionary<string, string>
    The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
    DriverLogLevels map[string]string
    The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
    driverLogLevels Map<String,String>
    The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
    driverLogLevels {[key: string]: string}
    The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
    driver_log_levels Mapping[str, str]
    The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
    driverLogLevels Map<String>
    The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
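
    For illustration, driver log levels map package names (or "root") to a level; the values below mirror the examples above:

    // Per-package driver log levels for a job's loggingConfig.
    const loggingConfig = {
        driverLogLevels: {
            root: "INFO",            // rootLogger
            "org.apache": "DEBUG",
            "com.google": "FATAL",
        },
    };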

    WorkflowTemplateJobSparkSqlJobQueryList, WorkflowTemplateJobSparkSqlJobQueryListArgs

    Queries List<string>
    Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": } }
    Queries []string
    Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": } }
    queries List<String>
    Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": } }
    queries string[]
    Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": } }
    queries Sequence[str]
    Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": } }
    queries List<String>
    Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": } }
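
    Queries can also be supplied inline through queryList instead of a queryFileUri; the statements below are placeholders:

    // Inline query list for a sparkSqlJob.
    const sparkSqlJobInline = {
        queryList: {
            queries: [
                "CREATE DATABASE IF NOT EXISTS reports",
                "SHOW TABLES IN reports",
            ],
        },
    };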

    WorkflowTemplateParameter, WorkflowTemplateParameterArgs

    Fields List<string>
    Required. Paths to all fields that the parameter replaces. A field is allowed to appear in at most one parameter's list of field paths. A field path is similar in syntax to a .sparkJob.args
    Name string
    Required. Parameter name. The parameter name is used as the key, and paired with the parameter value, which are passed to the template when the template is instantiated. The name must contain only capital letters (A-Z), numbers (0-9), and underscores (_), and must not start with a number. The maximum length is 40 characters.
    Description string
    Brief description of the parameter. Must not exceed 1024 characters.
    Validation WorkflowTemplateParameterValidation
    Validation rules to be applied to this parameter's value.
    Fields []string
    Required. Paths to all fields that the parameter replaces. A field is allowed to appear in at most one parameter's list of field paths. A field path is similar in syntax to a .sparkJob.args
    Name string
    Required. Parameter name. The parameter name is used as the key, and paired with the parameter value, which are passed to the template when the template is instantiated. The name must contain only capital letters (A-Z), numbers (0-9), and underscores (_), and must not start with a number. The maximum length is 40 characters.
    Description string
    Brief description of the parameter. Must not exceed 1024 characters.
    Validation WorkflowTemplateParameterValidation
    Validation rules to be applied to this parameter's value.
    fields List<String>
    Required. Paths to all fields that the parameter replaces. A field is allowed to appear in at most one parameter's list of field paths. A field path is similar in syntax to a .sparkJob.args
    name String
    Required. Parameter name. The parameter name is used as the key, and paired with the parameter value, which are passed to the template when the template is instantiated. The name must contain only capital letters (A-Z), numbers (0-9), and underscores (_), and must not start with a number. The maximum length is 40 characters.
    description String
    Brief description of the parameter. Must not exceed 1024 characters.
    validation WorkflowTemplateParameterValidation
    Validation rules to be applied to this parameter's value.
    fields string[]
    Required. Paths to all fields that the parameter replaces. A field is allowed to appear in at most one parameter's list of field paths. A field path is similar in syntax to a .sparkJob.args
    name string
    Required. Parameter name. The parameter name is used as the key, and paired with the parameter value, which are passed to the template when the template is instantiated. The name must contain only capital letters (A-Z), numbers (0-9), and underscores (_), and must not start with a number. The maximum length is 40 characters.
    description string
    Brief description of the parameter. Must not exceed 1024 characters.
    validation WorkflowTemplateParameterValidation
    Validation rules to be applied to this parameter's value.
    fields Sequence[str]
    Required. Paths to all fields that the parameter replaces. A field is allowed to appear in at most one parameter's list of field paths. A field path is similar in syntax to a .sparkJob.args
    name str
    Required. Parameter name. The parameter name is used as the key, and paired with the parameter value, which are passed to the template when the template is instantiated. The name must contain only capital letters (A-Z), numbers (0-9), and underscores (_), and must not start with a number. The maximum length is 40 characters.
    description str
    Brief description of the parameter. Must not exceed 1024 characters.
    validation WorkflowTemplateParameterValidation
    Validation rules to be applied to this parameter's value.
    fields List<String>
    Required. Paths to all fields that the parameter replaces. A field is allowed to appear in at most one parameter's list of field paths. A field path is similar in syntax to a .sparkJob.args
    name String
    Required. Parameter name. The parameter name is used as the key, and paired with the parameter value, which are passed to the template when the template is instantiated. The name must contain only capital letters (A-Z), numbers (0-9), and underscores (_), and must not start with a number. The maximum length is 40 characters.
    description String
    Brief description of the parameter. Must not exceed 1024 characters.
    validation Property Map
    Validation rules to be applied to this parameter's value.
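
    A hedged sketch of a template parameter; the step id and field path are hypothetical and assume the bracketed jobs['step-id'] field-path syntax:

    import * as gcp from "@pulumi/gcp";

    // Hypothetical parameter that substitutes a step's query file at instantiation time.
    const queryFileParam: gcp.types.input.dataproc.WorkflowTemplateParameter = {
        name: "QUERY_FILE_URI",                                      // A-Z, 0-9 and _ only, max 40 chars
        description: "HCFS URI of the SQL script to run.",
        fields: ["jobs['dailyReport'].sparkSqlJob.queryFileUri"],    // assumed field-path syntax
    };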

    WorkflowTemplateParameterValidation, WorkflowTemplateParameterValidationArgs

    Regex WorkflowTemplateParameterValidationRegex
    Validation based on regular expressions.
    Values WorkflowTemplateParameterValidationValues
    Validation based on a list of allowed values.
    Regex WorkflowTemplateParameterValidationRegex
    Validation based on regular expressions.
    Values WorkflowTemplateParameterValidationValues
    Validation based on a list of allowed values.
    regex WorkflowTemplateParameterValidationRegex
    Validation based on regular expressions.
    values WorkflowTemplateParameterValidationValues
    Validation based on a list of allowed values.
    regex WorkflowTemplateParameterValidationRegex
    Validation based on regular expressions.
    values WorkflowTemplateParameterValidationValues
    Validation based on a list of allowed values.
    regex WorkflowTemplateParameterValidationRegex
    Validation based on regular expressions.
    values WorkflowTemplateParameterValidationValues
    Validation based on a list of allowed values.
    regex Property Map
    Validation based on regular expressions.
    values Property Map
    Validation based on a list of allowed values.
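
    regex and values are alternative validation modes; a regex-based sketch (the pattern itself is hypothetical):

    // Hypothetical RE2 validation: the parameter value must match a pattern in full.
    const uriValidation = {
        regex: {
            regexes: ["gs://[a-z0-9-]+/.*\\.sql"],
        },
    };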

    WorkflowTemplateParameterValidationRegex, WorkflowTemplateParameterValidationRegexArgs

    Regexes List<string>
    Required. RE2 regular expressions used to validate the parameter's value. The value must match the regex in its entirety (substring matches are not sufficient).
    Regexes []string
    Required. RE2 regular expressions used to validate the parameter's value. The value must match the regex in its entirety (substring matches are not sufficient).
    regexes List<String>
    Required. RE2 regular expressions used to validate the parameter's value. The value must match the regex in its entirety (substring matches are not sufficient).
    regexes string[]
    Required. RE2 regular expressions used to validate the parameter's value. The value must match the regex in its entirety (substring matches are not sufficient).
    regexes Sequence[str]
    Required. RE2 regular expressions used to validate the parameter's value. The value must match the regex in its entirety (substring matches are not sufficient).
    regexes List<String>
    Required. RE2 regular expressions used to validate the parameter's value. The value must match the regex in its entirety (substring matches are not sufficient).

    WorkflowTemplateParameterValidationValues, WorkflowTemplateParameterValidationValuesArgs

    Values List<string>
    Required. List of allowed values for the parameter.
    Values []string
    Required. List of allowed values for the parameter.
    values List<String>
    Required. List of allowed values for the parameter.
    values string[]
    Required. List of allowed values for the parameter.
    values Sequence[str]
    Required. List of allowed values for the parameter.
    values List<String>
    Required. List of allowed values for the parameter.
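
    Value-list validation restricts the parameter to an explicit set; the zones below are only illustrative:

    // Hypothetical allowed-value validation for a zone parameter.
    const zoneValidation = {
        values: {
            values: ["us-central1-a", "us-central1-b", "us-central1-f"],
        },
    };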

    WorkflowTemplatePlacement, WorkflowTemplatePlacementArgs

    ClusterSelector WorkflowTemplatePlacementClusterSelector
    A selector that chooses target cluster for jobs based on metadata. The selector is evaluated at the time each job is submitted.
    ManagedCluster WorkflowTemplatePlacementManagedCluster
    A cluster that is managed by the workflow.
    ClusterSelector WorkflowTemplatePlacementClusterSelector
    A selector that chooses target cluster for jobs based on metadata. The selector is evaluated at the time each job is submitted.
    ManagedCluster WorkflowTemplatePlacementManagedCluster
    A cluster that is managed by the workflow.
    clusterSelector WorkflowTemplatePlacementClusterSelector
    A selector that chooses target cluster for jobs based on metadata. The selector is evaluated at the time each job is submitted.
    managedCluster WorkflowTemplatePlacementManagedCluster
    A cluster that is managed by the workflow.
    clusterSelector WorkflowTemplatePlacementClusterSelector
    A selector that chooses target cluster for jobs based on metadata. The selector is evaluated at the time each job is submitted.
    managedCluster WorkflowTemplatePlacementManagedCluster
    A cluster that is managed by the workflow.
    cluster_selector WorkflowTemplatePlacementClusterSelector
    A selector that chooses target cluster for jobs based on metadata. The selector is evaluated at the time each job is submitted.
    managed_cluster WorkflowTemplatePlacementManagedCluster
    A cluster that is managed by the workflow.
    clusterSelector Property Map
    A selector that chooses target cluster for jobs based on metadata. The selector is evaluated at the time each job is submitted.
    managedCluster Property Map
    A cluster that is managed by the workflow.

    WorkflowTemplatePlacementClusterSelector, WorkflowTemplatePlacementClusterSelectorArgs

    ClusterLabels Dictionary<string, string>
    Required. The cluster labels. Cluster must have all labels to match.
    Zone string
    The zone where the workflow process executes. This parameter does not affect the selection of the cluster. If unspecified, the zone of the first cluster matching the selector is used.
    ClusterLabels map[string]string
    Required. The cluster labels. Cluster must have all labels to match.
    Zone string
    The zone where the workflow process executes. This parameter does not affect the selection of the cluster. If unspecified, the zone of the first cluster matching the selector is used.
    clusterLabels Map<String,String>
    Required. The cluster labels. Cluster must have all labels to match.
    zone String
    The zone where the workflow process executes. This parameter does not affect the selection of the cluster. If unspecified, the zone of the first cluster matching the selector is used.
    clusterLabels {[key: string]: string}
    Required. The cluster labels. Cluster must have all labels to match.
    zone string
    The zone where the workflow process executes. This parameter does not affect the selection of the cluster. If unspecified, the zone of the first cluster matching the selector is used.
    cluster_labels Mapping[str, str]
    Required. The cluster labels. Cluster must have all labels to match.
    zone str
    The zone where the workflow process executes. This parameter does not affect the selection of the cluster. If unspecified, the zone of the first cluster matching the selector is used.
    clusterLabels Map<String>
    Required. The cluster labels. Cluster must have all labels to match.
    zone String
    The zone where the workflow process executes. This parameter does not affect the selection of the cluster. If unspecified, the zone of the first cluster matching the selector is used.
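
    As a sketch, a template can target an existing cluster by labels instead of creating a managed cluster; the label key and value are hypothetical:

    import * as gcp from "@pulumi/gcp";

    // Placement that selects an existing cluster by its labels.
    const placement: gcp.types.input.dataproc.WorkflowTemplatePlacement = {
        clusterSelector: {
            clusterLabels: { env: "staging" },   // hypothetical label; the cluster must carry all listed labels
            // zone is optional; if unset, the zone of the first matching cluster is used
        },
    };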

    WorkflowTemplatePlacementManagedCluster, WorkflowTemplatePlacementManagedClusterArgs

    ClusterName string
    Required. The cluster name prefix. A unique cluster name will be formed by appending a random suffix. The name must contain only lower-case letters (a-z), numbers (0-9), and hyphens (-). Must begin with a letter. Cannot begin or end with hyphen. Must consist of between 2 and 35 characters.
    Config WorkflowTemplatePlacementManagedClusterConfig
    Required. The cluster configuration.
    Labels Dictionary<string, string>
    The labels to associate with this cluster. Label keys must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: {0,63} No more than 32 labels can be associated with a given cluster.
    ClusterName string
    Required. The cluster name prefix. A unique cluster name will be formed by appending a random suffix. The name must contain only lower-case letters (a-z), numbers (0-9), and hyphens (-). Must begin with a letter. Cannot begin or end with hyphen. Must consist of between 2 and 35 characters.
    Config WorkflowTemplatePlacementManagedClusterConfig
    Required. The cluster configuration.
    Labels map[string]string
    The labels to associate with this cluster. Label keys must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: {0,63} No more than 32 labels can be associated with a given cluster.
    clusterName String
    Required. The cluster name prefix. A unique cluster name will be formed by appending a random suffix. The name must contain only lower-case letters (a-z), numbers (0-9), and hyphens (-). Must begin with a letter. Cannot begin or end with hyphen. Must consist of between 2 and 35 characters.
    config WorkflowTemplatePlacementManagedClusterConfig
    Required. The cluster configuration.
    labels Map<String,String>
    The labels to associate with this cluster. Label keys must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: {0,63} No more than 32 labels can be associated with a given cluster.
    clusterName string
    Required. The cluster name prefix. A unique cluster name will be formed by appending a random suffix. The name must contain only lower-case letters (a-z), numbers (0-9), and hyphens (-). Must begin with a letter. Cannot begin or end with hyphen. Must consist of between 2 and 35 characters.
    config WorkflowTemplatePlacementManagedClusterConfig
    Required. The cluster configuration.
    labels {[key: string]: string}
    The labels to associate with this cluster. Label keys must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: {0,63} No more than 32 labels can be associated with a given cluster.
    cluster_name str
    Required. The cluster name prefix. A unique cluster name will be formed by appending a random suffix. The name must contain only lower-case letters (a-z), numbers (0-9), and hyphens (-). Must begin with a letter. Cannot begin or end with hyphen. Must consist of between 2 and 35 characters.
    config WorkflowTemplatePlacementManagedClusterConfig
    Required. The cluster configuration.
    labels Mapping[str, str]
    The labels to associate with this cluster. Label keys must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: {0,63} No more than 32 labels can be associated with a given cluster.
    clusterName String
    Required. The cluster name prefix. A unique cluster name will be formed by appending a random suffix. The name must contain only lower-case letters (a-z), numbers (0-9), and hyphens (-). Must begin with a letter. Cannot begin or end with hyphen. Must consist of between 2 and 35 characters.
    config Property Map
    Required. The cluster configuration.
    labels Map<String>
    The labels to associate with this cluster. Label keys must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: {0,63} No more than 32 labels can be associated with a given cluster.

    WorkflowTemplatePlacementManagedClusterConfig, WorkflowTemplatePlacementManagedClusterConfigArgs

    AutoscalingConfig WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig
    Autoscaling config for the policy associated with the cluster. Cluster does not autoscale if this field is unset.
    EncryptionConfig WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig
    Encryption settings for the cluster.
    EndpointConfig WorkflowTemplatePlacementManagedClusterConfigEndpointConfig
    Port/endpoint configuration for this cluster
    GceClusterConfig WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig
    The shared Compute Engine config settings for all instances in a cluster.
    GkeClusterConfig WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig
    The Kubernetes Engine config for Dataproc clusters deployed to Kubernetes. Setting this is considered mutually exclusive with Compute Engine-based options such as gce_cluster_config, master_config, worker_config, secondary_worker_config, and autoscaling_config.
    InitializationActions List<WorkflowTemplatePlacementManagedClusterConfigInitializationAction>
    Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. You can test a node's role metadata to run an executable on a master or worker node, as shown below using curl (you can also use wget): ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role); if [[ "${ROLE}" == 'Master' ]]; then ... master specific actions ... else ... worker specific actions ... fi
    LifecycleConfig WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig
    Lifecycle setting for the cluster.
    MasterConfig WorkflowTemplatePlacementManagedClusterConfigMasterConfig
    The Compute Engine config settings for the cluster's master instance.
    MetastoreConfig WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig
    Metastore configuration.
    SecondaryWorkerConfig WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig
    The Compute Engine config settings for additional worker instances in a cluster.
    SecurityConfig WorkflowTemplatePlacementManagedClusterConfigSecurityConfig
    Security settings for the cluster.
    SoftwareConfig WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig
    The config settings for software inside the cluster.
    StagingBucket string
    A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)).
    TempBucket string
    A Cloud Storage bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. If you do not specify a temp bucket, Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's temp bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket. The default bucket has a TTL of 90 days, but you can use any TTL (or none) if you specify a bucket.
    WorkerConfig WorkflowTemplatePlacementManagedClusterConfigWorkerConfig
    The Compute Engine config settings for the cluster's worker instances.


    AutoscalingConfig WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig
    Autoscaling config for the policy associated with the cluster. Cluster does not autoscale if this field is unset.
    EncryptionConfig WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig
    Encryption settings for the cluster.
    EndpointConfig WorkflowTemplatePlacementManagedClusterConfigEndpointConfig
    Port/endpoint configuration for this cluster
    GceClusterConfig WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig
    The shared Compute Engine config settings for all instances in a cluster.
    GkeClusterConfig WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig
    The Kubernetes Engine config for Dataproc clusters deployed to Kubernetes. Setting this is considered mutually exclusive with Compute Engine-based options such as gce_cluster_config, master_config, worker_config, secondary_worker_config, and autoscaling_config.
    InitializationActions []WorkflowTemplatePlacementManagedClusterConfigInitializationAction
    Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. You can test a node's role metadata to run an executable on a master or worker node, as shown below using curl (you can also use wget): ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role); if [[ "${ROLE}" == 'Master' ]]; then ... master specific actions ... else ... worker specific actions ... fi
    LifecycleConfig WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig
    Lifecycle setting for the cluster.
    MasterConfig WorkflowTemplatePlacementManagedClusterConfigMasterConfig
    The Compute Engine config settings for the cluster's master instance.
    MetastoreConfig WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig
    Metastore configuration.
    SecondaryWorkerConfig WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig
    The Compute Engine config settings for additional worker instances in a cluster.
    SecurityConfig WorkflowTemplatePlacementManagedClusterConfigSecurityConfig
    Security settings for the cluster.
    SoftwareConfig WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig
    The config settings for software inside the cluster.
    StagingBucket string
    A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)).
    TempBucket string
    A Cloud Storage bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. If you do not specify a temp bucket, Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's temp bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket. The default bucket has a TTL of 90 days, but you can use any TTL (or none) if you specify a bucket.
    WorkerConfig WorkflowTemplatePlacementManagedClusterConfigWorkerConfig
    The Compute Engine config settings for the cluster's worker instances.


    autoscalingConfig WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig
    Autoscaling config for the policy associated with the cluster. Cluster does not autoscale if this field is unset.
    encryptionConfig WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig
    Encryption settings for the cluster.
    endpointConfig WorkflowTemplatePlacementManagedClusterConfigEndpointConfig
    Port/endpoint configuration for this cluster
    gceClusterConfig WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig
    The shared Compute Engine config settings for all instances in a cluster.
    gkeClusterConfig WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig
    The Kubernetes Engine config for Dataproc clusters deployed to Kubernetes. Setting this is considered mutually exclusive with Compute Engine-based options such as gce_cluster_config, master_config, worker_config, secondary_worker_config, and autoscaling_config.
    initializationActions List<WorkflowTemplatePlacementManagedClusterConfigInitializationAction>
    Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. You can test a node's role metadata to run an executable on a master or worker node, as shown below using curl (you can also use wget): ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role); if [[ "${ROLE}" == 'Master' ]]; then ... master specific actions ... else ... worker specific actions ... fi
    lifecycleConfig WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig
    Lifecycle setting for the cluster.
    masterConfig WorkflowTemplatePlacementManagedClusterConfigMasterConfig
    The Compute Engine config settings for the cluster's master instance.
    metastoreConfig WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig
    Metastore configuration.
    secondaryWorkerConfig WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig
    The Compute Engine config settings for additional worker instances in a cluster.
    securityConfig WorkflowTemplatePlacementManagedClusterConfigSecurityConfig
    Security settings for the cluster.
    softwareConfig WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig
    The config settings for software inside the cluster.
    stagingBucket String
    A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)).
    tempBucket String
    A Cloud Storage bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. If you do not specify a temp bucket, Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's temp bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket. The default bucket has a TTL of 90 days, but you can use any TTL (or none) if you specify a bucket.
    workerConfig WorkflowTemplatePlacementManagedClusterConfigWorkerConfig
    The Compute Engine config settings for the cluster's worker instances.


    autoscalingConfig WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig
    Autoscaling config for the policy associated with the cluster. Cluster does not autoscale if this field is unset.
    encryptionConfig WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig
    Encryption settings for the cluster.
    endpointConfig WorkflowTemplatePlacementManagedClusterConfigEndpointConfig
    Port/endpoint configuration for this cluster
    gceClusterConfig WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig
    The shared Compute Engine config settings for all instances in a cluster.
    gkeClusterConfig WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig
    The Kubernetes Engine config for Dataproc clusters deployed to Kubernetes. Setting this is considered mutually exclusive with Compute Engine-based options such as gce_cluster_config, master_config, worker_config, secondary_worker_config, and autoscaling_config.
    initializationActions WorkflowTemplatePlacementManagedClusterConfigInitializationAction[]
    Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. You can test a node's role metadata to run an executable on a master or worker node, as shown below using curl (you can also use wget): ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role); if [[ "${ROLE}" == 'Master' ]]; then ... master specific actions ... else ... worker specific actions ... fi
    lifecycleConfig WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig
    Lifecycle setting for the cluster.
    masterConfig WorkflowTemplatePlacementManagedClusterConfigMasterConfig
    The Compute Engine config settings for the cluster's master instance.
    metastoreConfig WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig
    Metastore configuration.
    secondaryWorkerConfig WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig
    The Compute Engine config settings for additional worker instances in a cluster.
    securityConfig WorkflowTemplatePlacementManagedClusterConfigSecurityConfig
    Security settings for the cluster.
    softwareConfig WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig
    The config settings for software inside the cluster.
    stagingBucket string
    A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)).
    tempBucket string
    A Cloud Storage bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. If you do not specify a temp bucket, Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's temp bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket. The default bucket has a TTL of 90 days, but you can use any TTL (or none) if you specify a bucket.
    workerConfig WorkflowTemplatePlacementManagedClusterConfigWorkerConfig
    The Compute Engine config settings for the cluster's worker instances.


    autoscaling_config WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig
    Autoscaling config for the policy associated with the cluster. Cluster does not autoscale if this field is unset.
    encryption_config WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig
    Encryption settings for the cluster.
    endpoint_config WorkflowTemplatePlacementManagedClusterConfigEndpointConfig
    Port/endpoint configuration for this cluster
    gce_cluster_config WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig
    The shared Compute Engine config settings for all instances in a cluster.
    gke_cluster_config WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig
    The Kubernetes Engine config for Dataproc clusters deployed to Kubernetes. Setting this is considered mutually exclusive with Compute Engine-based options such as gce_cluster_config, master_config, worker_config, secondary_worker_config, and autoscaling_config.
    initialization_actions Sequence[WorkflowTemplatePlacementManagedClusterConfigInitializationAction]
    Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. You can test a node's role metadata to run an executable on a master or worker node, as shown below using curl (you can also use wget): ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role); if [[ "${ROLE}" == 'Master' ]]; then ... master specific actions ... else ... worker specific actions ... fi
    lifecycle_config WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig
    Lifecycle setting for the cluster.
    master_config WorkflowTemplatePlacementManagedClusterConfigMasterConfig
    The Compute Engine config settings for the cluster's master instance.
    metastore_config WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig
    Metastore configuration.
    secondary_worker_config WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig
    The Compute Engine config settings for additional worker instances in a cluster.
    security_config WorkflowTemplatePlacementManagedClusterConfigSecurityConfig
    Security settings for the cluster.
    software_config WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig
    The config settings for software inside the cluster.
    staging_bucket str
    A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)).
    temp_bucket str
    A Cloud Storage bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. If you do not specify a temp bucket, Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's temp bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket. The default bucket has a TTL of 90 days, but you can use any TTL (or none) if you specify a bucket.
    worker_config WorkflowTemplatePlacementManagedClusterConfigWorkerConfig
    The Compute Engine config settings for the cluster's worker instances.


    autoscalingConfig Property Map
    Autoscaling config for the policy associated with the cluster. Cluster does not autoscale if this field is unset.
    encryptionConfig Property Map
    Encryption settings for the cluster.
    endpointConfig Property Map
    Port/endpoint configuration for this cluster
    gceClusterConfig Property Map
    The shared Compute Engine config settings for all instances in a cluster.
    gkeClusterConfig Property Map
    The Kubernetes Engine config for Dataproc clusters deployed to Kubernetes. Setting this is considered mutually exclusive with Compute Engine-based options such as gce_cluster_config, master_config, worker_config, secondary_worker_config, and autoscaling_config.
    initializationActions List<Property Map>
    Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. You can test a node's role metadata to run an executable on a master or worker node, as shown below using curl (you can also use wget): ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role); if [[ "${ROLE}" == 'Master' ]]; then ... master specific actions ... else ... worker specific actions ... fi
    lifecycleConfig Property Map
    Lifecycle setting for the cluster.
    masterConfig Property Map
    The Compute Engine config settings for the cluster's master instance.
    metastoreConfig Property Map
    Metastore configuration.
    secondaryWorkerConfig Property Map
    The Compute Engine config settings for additional worker instances in a cluster.
    securityConfig Property Map
    Security settings for the cluster.
    softwareConfig Property Map
    The config settings for software inside the cluster.
    stagingBucket String
    A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)).
    tempBucket String
    A Cloud Storage bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. If you do not specify a temp bucket, Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's temp bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket. The default bucket has a TTL of 90 days, but you can use any TTL (or none) if you specify a bucket.
    workerConfig Property Map
    The Compute Engine config settings for the cluster's worker instances.
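
    Beyond the instance-group settings shown in the usage example, a managed cluster config can pin its Cloud Storage buckets; the bucket names below are assumptions:

    // Hypothetical bucket settings inside placement.managedCluster.config.
    const bucketConfig = {
        stagingBucket: "my-dataproc-staging-bucket",   // assumed existing bucket for job dependencies and driver output
        tempBucket: "my-dataproc-temp-bucket",         // assumed existing bucket for ephemeral cluster and job data
    };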


    WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig, WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigArgs

    Policy string
    The autoscaling policy used by the cluster. Only resource names including projectid and location (region) are valid. Examples: * https://www.googleapis.com/compute/v1/projects/ Note that the policy must be in the same project and Dataproc region.
    Policy string
    The autoscaling policy used by the cluster. Only resource names including projectid and location (region) are valid. Examples: * https://www.googleapis.com/compute/v1/projects/ Note that the policy must be in the same project and Dataproc region.
    policy String
    The autoscaling policy used by the cluster. Only resource names including projectid and location (region) are valid. Examples: * https://www.googleapis.com/compute/v1/projects/ Note that the policy must be in the same project and Dataproc region.
    policy string
    The autoscaling policy used by the cluster. Only resource names including projectid and location (region) are valid. Examples: * https://www.googleapis.com/compute/v1/projects/ Note that the policy must be in the same project and Dataproc region.
    policy str
    The autoscaling policy used by the cluster. Only resource names including projectid and location (region) are valid. Examples: * https://www.googleapis.com/compute/v1/projects/ Note that the policy must be in the same project and Dataproc region.
    policy String
    The autoscaling policy used by the cluster. Only resource names including projectid and location (region) are valid. Examples: * https://www.googleapis.com/compute/v1/projects/ Note that the policy must be in the same project and Dataproc region.
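
    A hedged sketch of an autoscaling policy reference; the project, region, and policy id are placeholders, and the policy must live in the same project and Dataproc region as the cluster:

    // Hypothetical autoscaling policy resource name.
    const autoscalingConfig = {
        policy: "projects/my-project/locations/us-central1/autoscalingPolicies/my-policy",
    };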

    WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig, WorkflowTemplatePlacementManagedClusterConfigEncryptionConfigArgs

    GcePdKmsKeyName string
    The Cloud KMS key name to use for PD disk encryption for all instances in the cluster.
    GcePdKmsKeyName string
    The Cloud KMS key name to use for PD disk encryption for all instances in the cluster.
    gcePdKmsKeyName String
    The Cloud KMS key name to use for PD disk encryption for all instances in the cluster.
    gcePdKmsKeyName string
    The Cloud KMS key name to use for PD disk encryption for all instances in the cluster.
    gce_pd_kms_key_name str
    The Cloud KMS key name to use for PD disk encryption for all instances in the cluster.
    gcePdKmsKeyName String
    The Cloud KMS key name to use for PD disk encryption for all instances in the cluster.
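
    gcePdKmsKeyName takes a full Cloud KMS crypto key resource name; the project, key ring, and key below are placeholders:

    // Hypothetical customer-managed key for persistent disk encryption.
    const encryptionConfig = {
        gcePdKmsKeyName: "projects/my-project/locations/us-central1/keyRings/my-ring/cryptoKeys/my-key",
    };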

    WorkflowTemplatePlacementManagedClusterConfigEndpointConfig, WorkflowTemplatePlacementManagedClusterConfigEndpointConfigArgs

    EnableHttpPortAccess bool
    If true, enable http access to specific ports on the cluster from external sources. Defaults to false.
    HttpPorts Dictionary<string, string>
    Output only. The map of port descriptions to URLs. Will only be populated if enable_http_port_access is true.
    EnableHttpPortAccess bool
    If true, enable http access to specific ports on the cluster from external sources. Defaults to false.
    HttpPorts map[string]string
    Output only. The map of port descriptions to URLs. Will only be populated if enable_http_port_access is true.
    enableHttpPortAccess Boolean
    If true, enable http access to specific ports on the cluster from external sources. Defaults to false.
    httpPorts Map<String,String>
    Output only. The map of port descriptions to URLs. Will only be populated if enable_http_port_access is true.
    enableHttpPortAccess boolean
    If true, enable http access to specific ports on the cluster from external sources. Defaults to false.
    httpPorts {[key: string]: string}
    Output only. The map of port descriptions to URLs. Will only be populated if enable_http_port_access is true.
    enable_http_port_access bool
    If true, enable http access to specific ports on the cluster from external sources. Defaults to false.
    http_ports Mapping[str, str]
    Output only. The map of port descriptions to URLs. Will only be populated if enable_http_port_access is true.
    enableHttpPortAccess Boolean
    If true, enable http access to specific ports on the cluster from external sources. Defaults to false.
    httpPorts Map<String>
    Output only. The map of port descriptions to URLs. Will only be populated if enable_http_port_access is true.
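
    Enabling external HTTP access to cluster ports is a single flag; httpPorts is populated by the service and cannot be set:

    // Opt in to HTTP access to specific cluster ports; httpPorts is output only.
    const endpointConfig = {
        enableHttpPortAccess: true,
    };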

    WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig, WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigArgs

    InternalIpOnly bool
    If true, all instances in the cluster will only have internal IP addresses. By default, clusters are not restricted to internal IP addresses, and will have ephemeral external IP addresses assigned to each instance. This internal_ip_only restriction can only be enabled for subnetwork enabled networks, and all off-cluster dependencies must be configured to be accessible without external IP addresses.
    Metadata Dictionary<string, string>
    The Compute Engine metadata entries to add to all instances (see (https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
    Network string
    The Compute Engine network to be used for machine communications. Cannot be specified with subnetwork_uri. If neither network_uri nor subnetwork_uri is specified, the "default" network of the project is used, if it exists. Cannot be a "Custom Subnet Network" (see https://cloud.google.com/compute/docs/subnetworks for more information). A full URL, partial URI, or short name are valid. Examples: * https://www.googleapis.com/compute/v1/projects//regions/global/default * default
    NodeGroupAffinity WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity
    Node Group Affinity for sole-tenant clusters.
    PrivateIpv6GoogleAccess string
    The type of IPv6 access for a cluster. Possible values: PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED, INHERIT_FROM_SUBNETWORK, OUTBOUND, BIDIRECTIONAL
    ReservationAffinity WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity
    Reservation Affinity for consuming Zonal reservation.
    ServiceAccount string
    The service account used by cluster instances to access Google Cloud services. If not specified, the Compute Engine default service account (https://cloud.google.com/compute/docs/access/service-accounts#default_service_account) is used.
    ServiceAccountScopes List<string>
    The URIs of service account scopes to be included in Compute Engine instances. The following base set of scopes is always included: * https://www.googleapis.com/auth/cloud.useraccounts.readonly * https://www.googleapis.com/auth/devstorage.read_write * https://www.googleapis.com/auth/logging.write If no scopes are specified, the following defaults are also provided: * https://www.googleapis.com/auth/bigquery * https://www.googleapis.com/auth/bigtable.admin.table * https://www.googleapis.com/auth/bigtable.data * https://www.googleapis.com/auth/devstorage.full_control
    ShieldedInstanceConfig WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig
    Shielded Instance Config for clusters using Compute Engine Shielded VMs. Structure defined below.
    Subnetwork string
    The Compute Engine subnetwork to be used for machine communications. Cannot be specified with network_uri. A full URL, partial URI, or short name are valid. Examples: * https://www.googleapis.com/compute/v1/projects//regions/us-east1/subnetworks/sub0 * sub0
    Tags List<string>
    The Compute Engine tags to add to all instances (see (https://cloud.google.com/compute/docs/label-or-tag-resources#tags)).
    Zone string
    The zone where the Compute Engine cluster will be located. On a create request, it is required in the "global" region. If omitted in a non-global Dataproc region, the service will pick a zone in the corresponding Compute Engine region. On a get request, zone will always be present. A full URL, partial URI, or short name are valid. Examples: * https://www.googleapis.com/compute/v1/projects/ * us-central1-f
    InternalIpOnly bool
    If true, all instances in the cluster will only have internal IP addresses. By default, clusters are not restricted to internal IP addresses, and will have ephemeral external IP addresses assigned to each instance. This internal_ip_only restriction can only be enabled for subnetwork enabled networks, and all off-cluster dependencies must be configured to be accessible without external IP addresses.
    Metadata map[string]string
    The Compute Engine metadata entries to add to all instances (see (https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
    Network string
    The Compute Engine network to be used for machine communications. Cannot be specified with subnetwork_uri. If neither network_uri nor subnetwork_uri is specified, the "default" network of the project is used, if it exists. Cannot be a "Custom Subnet Network" (see https://cloud.google.com/compute/docs/subnetworks for more information). A full URL, partial URI, or short name are valid. Examples: * https://www.googleapis.com/compute/v1/projects//regions/global/default * default
    NodeGroupAffinity WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity
    Node Group Affinity for sole-tenant clusters.
    PrivateIpv6GoogleAccess string
    The type of IPv6 access for a cluster. Possible values: PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED, INHERIT_FROM_SUBNETWORK, OUTBOUND, BIDIRECTIONAL
    ReservationAffinity WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity
    Reservation Affinity for consuming Zonal reservation.
    ServiceAccount string
    The service account used by cluster instances to access Google Cloud services. If not specified, the Compute Engine default service account (https://cloud.google.com/compute/docs/access/service-accounts#default_service_account) is used.
    ServiceAccountScopes []string
    The URIs of service account scopes to be included in Compute Engine instances. The following base set of scopes is always included: * https://www.googleapis.com/auth/cloud.useraccounts.readonly * https://www.googleapis.com/auth/devstorage.read_write * https://www.googleapis.com/auth/logging.write If no scopes are specified, the following defaults are also provided: * https://www.googleapis.com/auth/bigquery * https://www.googleapis.com/auth/bigtable.admin.table * https://www.googleapis.com/auth/bigtable.data * https://www.googleapis.com/auth/devstorage.full_control
    ShieldedInstanceConfig WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig
    Shielded Instance Config for clusters using Compute Engine Shielded VMs. Structure defined below.
    Subnetwork string
    The Compute Engine subnetwork to be used for machine communications. Cannot be specified with network_uri. A full URL, partial URI, or short name are valid. Examples: * https://www.googleapis.com/compute/v1/projects//regions/us-east1/subnetworks/sub0 * sub0
    Tags []string
    The Compute Engine tags to add to all instances (see (https://cloud.google.com/compute/docs/label-or-tag-resources#tags)).
    Zone string
    The zone where the Compute Engine cluster will be located. On a create request, it is required in the "global" region. If omitted in a non-global Dataproc region, the service will pick a zone in the corresponding Compute Engine region. On a get request, zone will always be present. A full URL, partial URI, or short name are valid. Examples: * https://www.googleapis.com/compute/v1/projects/ * us-central1-f
    internalIpOnly Boolean
    If true, all instances in the cluster will only have internal IP addresses. By default, clusters are not restricted to internal IP addresses, and will have ephemeral external IP addresses assigned to each instance. This internal_ip_only restriction can only be enabled for subnetwork enabled networks, and all off-cluster dependencies must be configured to be accessible without external IP addresses.
    metadata Map<String,String>
    The Compute Engine metadata entries to add to all instances (see (https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
    network String
    The Compute Engine network to be used for machine communications. Cannot be specified with subnetwork_uri. If neither network_uri nor subnetwork_uri is specified, the "default" network of the project is used, if it exists. Cannot be a "Custom Subnet Network" (see https://cloud.google.com/compute/docs/subnetworks for more information). A full URL, partial URI, or short name are valid. Examples: * https://www.googleapis.com/compute/v1/projects//regions/global/default * default
    nodeGroupAffinity WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity
    Node Group Affinity for sole-tenant clusters.
    privateIpv6GoogleAccess String
    The type of IPv6 access for a cluster. Possible values: PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED, INHERIT_FROM_SUBNETWORK, OUTBOUND, BIDIRECTIONAL
    reservationAffinity WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity
    Reservation Affinity for consuming Zonal reservation.
    serviceAccount String
    The service account used by cluster instances to access Google Cloud services. If not specified, the Compute Engine default service account (https://cloud.google.com/compute/docs/access/service-accounts#default_service_account) is used.
    serviceAccountScopes List<String>
    The URIs of service account scopes to be included in Compute Engine instances. The following base set of scopes is always included: * https://www.googleapis.com/auth/cloud.useraccounts.readonly * https://www.googleapis.com/auth/devstorage.read_write * https://www.googleapis.com/auth/logging.write If no scopes are specified, the following defaults are also provided: * https://www.googleapis.com/auth/bigquery * https://www.googleapis.com/auth/bigtable.admin.table * https://www.googleapis.com/auth/bigtable.data * https://www.googleapis.com/auth/devstorage.full_control
    shieldedInstanceConfig WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig
    Shielded Instance Config for clusters using Compute Engine Shielded VMs. Structure defined below.
    subnetwork String
    The Compute Engine subnetwork to be used for machine communications. Cannot be specified with network_uri. A full URL, partial URI, or short name are valid. Examples: * https://www.googleapis.com/compute/v1/projects//regions/us-east1/subnetworks/sub0 * sub0
    tags List<String>
    The Compute Engine tags to add to all instances (see (https://cloud.google.com/compute/docs/label-or-tag-resources#tags)).
    zone String
    The zone where the Compute Engine cluster will be located. On a create request, it is required in the "global" region. If omitted in a non-global Dataproc region, the service will pick a zone in the corresponding Compute Engine region. On a get request, zone will always be present. A full URL, partial URI, or short name are valid. Examples: * https://www.googleapis.com/compute/v1/projects/ * us-central1-f
    internalIpOnly boolean
    If true, all instances in the cluster will only have internal IP addresses. By default, clusters are not restricted to internal IP addresses, and will have ephemeral external IP addresses assigned to each instance. This internal_ip_only restriction can only be enabled for subnetwork enabled networks, and all off-cluster dependencies must be configured to be accessible without external IP addresses.
    metadata {[key: string]: string}
    The Compute Engine metadata entries to add to all instances (see (https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
    network string
    The Compute Engine network to be used for machine communications. Cannot be specified with subnetwork_uri. If neither network_uri nor subnetwork_uri is specified, the "default" network of the project is used, if it exists. Cannot be a "Custom Subnet Network" (see https://cloud.google.com/compute/docs/subnetworks for more information). A full URL, partial URI, or short name are valid. Examples: * https://www.googleapis.com/compute/v1/projects//regions/global/default * default
    nodeGroupAffinity WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity
    Node Group Affinity for sole-tenant clusters.
    privateIpv6GoogleAccess string
    The type of IPv6 access for a cluster. Possible values: PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED, INHERIT_FROM_SUBNETWORK, OUTBOUND, BIDIRECTIONAL
    reservationAffinity WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity
    Reservation Affinity for consuming Zonal reservation.
    serviceAccount string
    The service account used by Dataproc cluster VM instances to access Google Cloud Platform services. If not specified, the Compute Engine default service account (https://cloud.google.com/compute/docs/access/service-accounts#default_service_account) is used.
    serviceAccountScopes string[]
    The URIs of service account scopes to be included in Compute Engine instances. The following base set of scopes is always included: * https://www.googleapis.com/auth/cloud.useraccounts.readonly * https://www.googleapis.com/auth/devstorage.read_write * https://www.googleapis.com/auth/logging.write If no scopes are specified, the following defaults are also provided: * https://www.googleapis.com/auth/bigquery * https://www.googleapis.com/auth/bigtable.admin.table * https://www.googleapis.com/auth/bigtable.data * https://www.googleapis.com/auth/devstorage.full_control
    shieldedInstanceConfig WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig
    Shielded Instance Config for clusters using Compute Engine Shielded VMs. Structure defined below.
    subnetwork string
    The Compute Engine subnetwork to be used for machine communications. Cannot be specified with network_uri. A full URL, partial URI, or short name are valid. Examples: * https://www.googleapis.com/compute/v1/projects//regions/us-east1/subnetworks/sub0 * sub0
    tags string[]
    The Compute Engine tags to add to all instances (see (https://cloud.google.com/compute/docs/label-or-tag-resources#tags)).
    zone string
    The zone where the Compute Engine cluster will be located. On a create request, it is required in the "global" region. If omitted in a non-global Dataproc region, the service will pick a zone in the corresponding Compute Engine region. On a get request, zone will always be present. A full URL, partial URI, or short name are valid. Examples: * https://www.googleapis.com/compute/v1/projects/ * us-central1-f
    internal_ip_only bool
    If true, all instances in the cluster will only have internal IP addresses. By default, clusters are not restricted to internal IP addresses, and will have ephemeral external IP addresses assigned to each instance. This internal_ip_only restriction can only be enabled for subnetwork enabled networks, and all off-cluster dependencies must be configured to be accessible without external IP addresses.
    metadata Mapping[str, str]
    The Compute Engine metadata entries to add to all instances (see (https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
    network str
    The Compute Engine network to be used for machine communications. Cannot be specified with subnetwork_uri. If neither network_uri nor subnetwork_uri is specified, the "default" network of the project is used, if it exists. Cannot be a "Custom Subnet Network" (see Using Subnetworks for more information). A full URL, partial URI, or short name are valid. Examples: * /regions/global/default * default
    node_group_affinity WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity
    Node Group Affinity for sole-tenant clusters.
    private_ipv6_google_access str
    The type of IPv6 access for a cluster. Possible values: PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED, INHERIT_FROM_SUBNETWORK, OUTBOUND, BIDIRECTIONAL
    reservation_affinity WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity
    Reservation Affinity for consuming Zonal reservation.
    service_account str
    The service account used by Dataproc cluster VM instances to access Google Cloud Platform services. If not specified, the Compute Engine default service account (https://cloud.google.com/compute/docs/access/service-accounts#default_service_account) is used.
    service_account_scopes Sequence[str]
    The URIs of service account scopes to be included in Compute Engine instances. The following base set of scopes is always included: * https://www.googleapis.com/auth/cloud.useraccounts.readonly * https://www.googleapis.com/auth/devstorage.read_write * https://www.googleapis.com/auth/logging.write If no scopes are specified, the following defaults are also provided: * https://www.googleapis.com/auth/bigquery * https://www.googleapis.com/auth/bigtable.admin.table * https://www.googleapis.com/auth/bigtable.data * https://www.googleapis.com/auth/devstorage.full_control
    shielded_instance_config WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig
    Shielded Instance Config for clusters using Compute Engine Shielded VMs. Structure defined below.
    subnetwork str
    The Compute Engine subnetwork to be used for machine communications. Cannot be specified with network_uri. A full URL, partial URI, or short name are valid. Examples: * https://www.googleapis.com/compute/v1/projects//regions/us-east1/subnetworks/sub0 * sub0
    tags Sequence[str]
    The Compute Engine tags to add to all instances (see (https://cloud.google.com/compute/docs/label-or-tag-resources#tags)).
    zone str
    The zone where the Compute Engine cluster will be located. On a create request, it is required in the "global" region. If omitted in a non-global Dataproc region, the service will pick a zone in the corresponding Compute Engine region. On a get request, zone will always be present. A full URL, partial URI, or short name are valid. Examples: * https://www.googleapis.com/compute/v1/projects/ * us-central1-f
    internalIpOnly Boolean
    If true, all instances in the cluster will only have internal IP addresses. By default, clusters are not restricted to internal IP addresses, and will have ephemeral external IP addresses assigned to each instance. This internal_ip_only restriction can only be enabled for subnetwork enabled networks, and all off-cluster dependencies must be configured to be accessible without external IP addresses.
    metadata Map<String>
    The Compute Engine metadata entries to add to all instances (see (https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
    network String
    The Compute Engine network to be used for machine communications. Cannot be specified with subnetwork_uri. If neither network_uri nor subnetwork_uri is specified, the "default" network of the project is used, if it exists. Cannot be a "Custom Subnet Network" (see Using Subnetworks for more information). A full URL, partial URI, or short name are valid. Examples: * /regions/global/default * default
    nodeGroupAffinity Property Map
    Node Group Affinity for sole-tenant clusters.
    privateIpv6GoogleAccess String
    The type of IPv6 access for a cluster. Possible values: PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED, INHERIT_FROM_SUBNETWORK, OUTBOUND, BIDIRECTIONAL
    reservationAffinity Property Map
    Reservation Affinity for consuming Zonal reservation.
    serviceAccount String
    The service account used by Dataproc cluster VM instances to access Google Cloud Platform services. If not specified, the Compute Engine default service account (https://cloud.google.com/compute/docs/access/service-accounts#default_service_account) is used.
    serviceAccountScopes List<String>
    The URIs of service account scopes to be included in Compute Engine instances. The following base set of scopes is always included: * https://www.googleapis.com/auth/cloud.useraccounts.readonly * https://www.googleapis.com/auth/devstorage.read_write * https://www.googleapis.com/auth/logging.write If no scopes are specified, the following defaults are also provided: * https://www.googleapis.com/auth/bigquery * https://www.googleapis.com/auth/bigtable.admin.table * https://www.googleapis.com/auth/bigtable.data * https://www.googleapis.com/auth/devstorage.full_control
    shieldedInstanceConfig Property Map
    Shielded Instance Config for clusters using Compute Engine Shielded VMs. Structure defined below.
    subnetwork String
    The Compute Engine subnetwork to be used for machine communications. Cannot be specified with network_uri. A full URL, partial URI, or short name are valid. Examples: * https://www.googleapis.com/compute/v1/projects//regions/us-east1/subnetworks/sub0 * sub0
    tags List<String>
    The Compute Engine tags to add to all instances (see (https://cloud.google.com/compute/docs/label-or-tag-resources#tags)).
    zone String
    The zone where the Compute Engine cluster will be located. On a create request, it is required in the "global" region. If omitted in a non-global Dataproc region, the service will pick a zone in the corresponding Compute Engine region. On a get request, zone will always be present. A full URL, partial URI, or short name are valid. Examples: * https://www.googleapis.com/compute/v1/projects/ * us-central1-f
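
    As an illustrative sketch, the fields above plug into placement.managedCluster.config.gceClusterConfig. The subnetwork, service account, and tag values below are placeholders, not values taken from this page:

    import * as gcp from "@pulumi/gcp";

    // A managed cluster restricted to internal IPs on a custom subnetwork,
    // running as a dedicated (placeholder) service account.
    const privateTemplate = new gcp.dataproc.WorkflowTemplate("private-template", {
        name: "private-template-example",
        location: "us-central1",
        placement: {
            managedCluster: {
                clusterName: "private-cluster",
                config: {
                    gceClusterConfig: {
                        zone: "us-central1-a",
                        subnetwork: "sub0",              // cannot be combined with `network`
                        internalIpOnly: true,            // instances get no external IPs
                        serviceAccount: "dataproc-sa@my-project.iam.gserviceaccount.com",
                        serviceAccountScopes: ["https://www.googleapis.com/auth/cloud-platform"],
                        tags: ["dataproc"],
                        privateIpv6GoogleAccess: "INHERIT_FROM_SUBNETWORK",
                    },
                },
            },
        },
        jobs: [{
            stepId: "someJob",
            sparkJob: { mainClass: "SomeClass" },
        }],
    });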

    WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity, WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityArgs

    NodeGroup string
    Required. The URI of a sole-tenant node group resource that the cluster will be created on. A full URL, partial URI, or node group name are valid. Examples: * /zones/us-central1-a/nodeGroups/node-group-1 * node-group-1
    NodeGroup string
    Required. The URI of a sole-tenant node group resource that the cluster will be created on. A full URL, partial URI, or node group name are valid. Examples: * /zones/us-central1-a/nodeGroups/node-group-1 * node-group-1
    nodeGroup String
    Required. The URI of a sole-tenant node group resource that the cluster will be created on. A full URL, partial URI, or node group name are valid. Examples: * /zones/us-central1-a/nodeGroups/node-group-1 * node-group-1
    nodeGroup string
    Required. The URI of a sole-tenant node group resource that the cluster will be created on. A full URL, partial URI, or node group name are valid. Examples: * /zones/us-central1-a/nodeGroups/node-group-1 * node-group-1
    node_group str
    Required. The URI of a sole-tenant node group resource that the cluster will be created on. A full URL, partial URI, or node group name are valid. Examples: * /zones/us-central1-a/nodeGroups/node-group-1 * node-group-1
    nodeGroup String
    Required. The URI of a sole-tenant node group resource that the cluster will be created on. A full URL, partial URI, or node group name are valid. Examples: * /zones/us-central1-a/nodeGroups/node-group-1 * node-group-1
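
    A minimal sketch of pinning the managed cluster's VMs to a sole-tenant node group; the short node group name is a placeholder. In the Node.js SDK the input type is available under gcp.types.input.dataproc:

    import * as gcp from "@pulumi/gcp";

    // Place all cluster VMs on an existing sole-tenant node group.
    const nodeGroupAffinity: gcp.types.input.dataproc.WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity = {
        nodeGroup: "node-group-1",  // placeholder; a full or partial URI also works
    };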

    WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity, WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityArgs

    ConsumeReservationType string
    Type of reservation to consume Possible values: TYPE_UNSPECIFIED, NO_RESERVATION, ANY_RESERVATION, SPECIFIC_RESERVATION
    Key string
    Corresponds to the label key of reservation resource.
    Values List<string>
    Corresponds to the label values of reservation resource.
    ConsumeReservationType string
    Type of reservation to consume Possible values: TYPE_UNSPECIFIED, NO_RESERVATION, ANY_RESERVATION, SPECIFIC_RESERVATION
    Key string
    Corresponds to the label key of reservation resource.
    Values []string
    Corresponds to the label values of reservation resource.
    consumeReservationType String
    Type of reservation to consume Possible values: TYPE_UNSPECIFIED, NO_RESERVATION, ANY_RESERVATION, SPECIFIC_RESERVATION
    key String
    Corresponds to the label key of reservation resource.
    values List<String>
    Corresponds to the label values of reservation resource.
    consumeReservationType string
    Type of reservation to consume Possible values: TYPE_UNSPECIFIED, NO_RESERVATION, ANY_RESERVATION, SPECIFIC_RESERVATION
    key string
    Corresponds to the label key of reservation resource.
    values string[]
    Corresponds to the label values of reservation resource.
    consume_reservation_type str
    Type of reservation to consume Possible values: TYPE_UNSPECIFIED, NO_RESERVATION, ANY_RESERVATION, SPECIFIC_RESERVATION
    key str
    Corresponds to the label key of reservation resource.
    values Sequence[str]
    Corresponds to the label values of reservation resource.
    consumeReservationType String
    Type of reservation to consume Possible values: TYPE_UNSPECIFIED, NO_RESERVATION, ANY_RESERVATION, SPECIFIC_RESERVATION
    key String
    Corresponds to the label key of reservation resource.
    values List<String>
    Corresponds to the label values of reservation resource.
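
    A sketch of consuming a specific zonal reservation; the reservation label key and value below follow the Compute Engine convention for named reservations and are assumptions, not values taken from this page:

    import * as gcp from "@pulumi/gcp";

    // Consume capacity only from the named (placeholder) reservation.
    const reservationAffinity: gcp.types.input.dataproc.WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity = {
        consumeReservationType: "SPECIFIC_RESERVATION",
        key: "compute.googleapis.com/reservation-name",  // assumed label key for named reservations
        values: ["my-reservation"],
    };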

    WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig, WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfigArgs

    EnableIntegrityMonitoring bool
    Defines whether instances have Integrity Monitoring enabled.
    EnableSecureBoot bool
    Defines whether instances have Secure Boot enabled.
    EnableVtpm bool
    Defines whether instances have the vTPM enabled.
    EnableIntegrityMonitoring bool
    Defines whether instances have Integrity Monitoring enabled.
    EnableSecureBoot bool
    Defines whether instances have Secure Boot enabled.
    EnableVtpm bool
    Defines whether instances have the vTPM enabled.
    enableIntegrityMonitoring Boolean
    Defines whether instances have Integrity Monitoring enabled.
    enableSecureBoot Boolean
    Defines whether instances have Secure Boot enabled.
    enableVtpm Boolean
    Defines whether instances have the vTPM enabled.
    enableIntegrityMonitoring boolean
    Defines whether instances have Integrity Monitoring enabled.
    enableSecureBoot boolean
    Defines whether instances have Secure Boot enabled.
    enableVtpm boolean
    Defines whether instances have the vTPM enabled.
    enable_integrity_monitoring bool
    Defines whether instances have Integrity Monitoring enabled.
    enable_secure_boot bool
    Defines whether instances have Secure Boot enabled.
    enable_vtpm bool
    Defines whether instances have the vTPM enabled.
    enableIntegrityMonitoring Boolean
    Defines whether instances have Integrity Monitoring enabled.
    enableSecureBoot Boolean
    Defines whether instances have Secure Boot enabled.
    enableVtpm Boolean
    Defines whether instances have the vTPM enabled.
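
    A minimal sketch enabling all three Shielded VM features on cluster instances:

    import * as gcp from "@pulumi/gcp";

    // Turn on Secure Boot, vTPM, and integrity monitoring for cluster VMs.
    const shieldedInstanceConfig: gcp.types.input.dataproc.WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig = {
        enableSecureBoot: true,
        enableVtpm: true,
        enableIntegrityMonitoring: true,
    };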

    WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig, WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigArgs

    namespacedGkeDeploymentTarget Property Map
    A target for the deployment.

    WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget, WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetArgs

    ClusterNamespace string
    A namespace within the GKE cluster to deploy into.
    TargetGkeCluster string
    The target GKE cluster to deploy to. Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}'
    ClusterNamespace string
    A namespace within the GKE cluster to deploy into.
    TargetGkeCluster string
    The target GKE cluster to deploy to. Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}'
    clusterNamespace String
    A namespace within the GKE cluster to deploy into.
    targetGkeCluster String
    The target GKE cluster to deploy to. Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}'
    clusterNamespace string
    A namespace within the GKE cluster to deploy into.
    targetGkeCluster string
    The target GKE cluster to deploy to. Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}'
    cluster_namespace str
    A namespace within the GKE cluster to deploy into.
    target_gke_cluster str
    The target GKE cluster to deploy to. Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}'
    clusterNamespace String
    A namespace within the GKE cluster to deploy into.
    targetGkeCluster String
    The target GKE cluster to deploy to. Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}'
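
    A sketch of targeting a namespace in an existing GKE cluster; the project, location, cluster, and namespace names are placeholders:

    import * as gcp from "@pulumi/gcp";

    // Deploy the workflow's workloads into a namespace of an existing GKE cluster.
    const gkeClusterConfig: gcp.types.input.dataproc.WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig = {
        namespacedGkeDeploymentTarget: {
            targetGkeCluster: "projects/my-project/locations/us-central1/clusters/my-gke-cluster",
            clusterNamespace: "dataproc-workloads",
        },
    };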

    WorkflowTemplatePlacementManagedClusterConfigInitializationAction, WorkflowTemplatePlacementManagedClusterConfigInitializationActionArgs

    ExecutableFile string
    Required. Cloud Storage URI of executable file.
    ExecutionTimeout string
    Amount of time executable has to complete. Default is 10 minutes (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period.
    ExecutableFile string
    Required. Cloud Storage URI of executable file.
    ExecutionTimeout string
    Amount of time executable has to complete. Default is 10 minutes (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period.
    executableFile String
    Required. Cloud Storage URI of executable file.
    executionTimeout String
    Amount of time executable has to complete. Default is 10 minutes (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period.
    executableFile string
    Required. Cloud Storage URI of executable file.
    executionTimeout string
    Amount of time executable has to complete. Default is 10 minutes (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period.
    executable_file str
    Required. Cloud Storage URI of executable file.
    execution_timeout str
    Amount of time executable has to complete. Default is 10 minutes (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period.
    executableFile String
    Required. Cloud Storage URI of executable file.
    executionTimeout String
    Amount of time executable has to complete. Default is 10 minutes (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period.
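
    A sketch of a single initialization action; the Cloud Storage URI points to a placeholder script, and the timeout uses the protobuf JSON duration format:

    import * as gcp from "@pulumi/gcp";

    // Run a startup script on every node; cluster creation fails if the
    // script has not completed within 10 minutes.
    const initializationActions: gcp.types.input.dataproc.WorkflowTemplatePlacementManagedClusterConfigInitializationAction[] = [{
        executableFile: "gs://my-bucket/scripts/install-deps.sh",
        executionTimeout: "600s",
    }];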

    WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig, WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigArgs

    AutoDeleteTime string
    The time when cluster will be auto-deleted (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)).
    AutoDeleteTtl string
    The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)).
    IdleDeleteTtl string
    The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json).
    IdleStartTime string
    Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)).
    AutoDeleteTime string
    The time when cluster will be auto-deleted (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)).
    AutoDeleteTtl string
    The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)).
    IdleDeleteTtl string
    The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json).
    IdleStartTime string
    Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)).
    autoDeleteTime String
    The time when cluster will be auto-deleted (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)).
    autoDeleteTtl String
    The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)).
    idleDeleteTtl String
    The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json).
    idleStartTime String
    Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)).
    autoDeleteTime string
    The time when cluster will be auto-deleted (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)).
    autoDeleteTtl string
    The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)).
    idleDeleteTtl string
    The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json).
    idleStartTime string
    Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)).
    auto_delete_time str
    The time when cluster will be auto-deleted (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)).
    auto_delete_ttl str
    The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)).
    idle_delete_ttl str
    The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json).
    idle_start_time str
    Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)).
    autoDeleteTime String
    The time when cluster will be auto-deleted (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)).
    autoDeleteTtl String
    The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)).
    idleDeleteTtl String
    The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json).
    idleStartTime String
    Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)).
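
    A sketch of an auto-deleting cluster; durations use the protobuf JSON format ("<seconds>s"), and the specific values are illustrative:

    import * as gcp from "@pulumi/gcp";

    // Delete the managed cluster after 30 idle minutes, or 4 hours after
    // creation, whichever comes first.
    const lifecycleConfig: gcp.types.input.dataproc.WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig = {
        idleDeleteTtl: "1800s",
        autoDeleteTtl: "14400s",
    };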

    WorkflowTemplatePlacementManagedClusterConfigMasterConfig, WorkflowTemplatePlacementManagedClusterConfigMasterConfigArgs

    Accelerators List<WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerator>
    The Compute Engine accelerator configuration for these instances.
    DiskConfig WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig
    Disk option config settings.
    Image string
    The Compute Engine image resource used for cluster instances. The URI can represent an image or image family. Image examples: * https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/[image-id] * projects/[project_id]/global/images/[image-id] * image-id Image family examples. Dataproc will use the most recent image from the family: * https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/family/[custom-image-family-name] * projects/[project_id]/global/images/family/[custom-image-family-name] If the URI is unspecified, it will be inferred from SoftwareConfig.image_version or the system default.
    InstanceNames List<string>
    Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group.
    IsPreemptible bool
    Output only. Specifies that this instance group contains preemptible instances.
    MachineType string
    The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name are valid. Examples: * https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2 * projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2 * n1-standard-2 Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, n1-standard-2.
    ManagedGroupConfigs List<WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig>
    Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups.
    MinCpuPlatform string
    Specifies the minimum cpu platform for the Instance Group. See (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu).
    NumInstances int
    The number of VM instances in the instance group. For master instance groups, must be set to 1.
    Preemptibility string
    Specifies the preemptibility of the instance group. The default value for master and worker groups is NON_PREEMPTIBLE. This default cannot be changed. The default value for secondary instances is PREEMPTIBLE. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE
    Accelerators []WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerator
    The Compute Engine accelerator configuration for these instances.
    DiskConfig WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig
    Disk option config settings.
    Image string
    The Compute Engine image resource used for cluster instances. The URI can represent an image or image family. Image examples: * https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/[image-id] * projects/[project_id]/global/images/[image-id] * image-id Image family examples. Dataproc will use the most recent image from the family: * https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/family/[custom-image-family-name] * projects/[project_id]/global/images/family/[custom-image-family-name] If the URI is unspecified, it will be inferred from SoftwareConfig.image_version or the system default.
    InstanceNames []string
    Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group.
    IsPreemptible bool
    Output only. Specifies that this instance group contains preemptible instances.
    MachineType string
    The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name are valid. Examples: * https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2 * projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2 * n1-standard-2 Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, n1-standard-2.
    ManagedGroupConfigs []WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig
    Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups.
    MinCpuPlatform string
    Specifies the minimum cpu platform for the Instance Group. See (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu).
    NumInstances int
    The number of VM instances in the instance group. For master instance groups, must be set to 1.
    Preemptibility string
    Specifies the preemptibility of the instance group. The default value for master and worker groups is NON_PREEMPTIBLE. This default cannot be changed. The default value for secondary instances is PREEMPTIBLE. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE
    accelerators List<WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerator>
    The Compute Engine accelerator configuration for these instances.
    diskConfig WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig
    Disk option config settings.
    image String
    The Compute Engine image resource used for cluster instances. The URI can represent an image or image family. Image examples: * https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/[image-id] * projects/[project_id]/global/images/[image-id] * image-id Image family examples. Dataproc will use the most recent image from the family: * https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/family/[custom-image-family-name] * projects/[project_id]/global/images/family/[custom-image-family-name] If the URI is unspecified, it will be inferred from SoftwareConfig.image_version or the system default.
    instanceNames List<String>
    Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group.
    isPreemptible Boolean
    Output only. Specifies that this instance group contains preemptible instances.
    machineType String
    The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name are valid. Examples: * https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2 * projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2 * n1-standard-2 Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, n1-standard-2.
    managedGroupConfigs List<WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig>
    Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups.
    minCpuPlatform String
    Specifies the minimum cpu platform for the Instance Group. See (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu).
    numInstances Integer
    The number of VM instances in the instance group. For master instance groups, must be set to 1.
    preemptibility String
    Specifies the preemptibility of the instance group. The default value for master and worker groups is NON_PREEMPTIBLE. This default cannot be changed. The default value for secondary instances is PREEMPTIBLE. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE
    accelerators WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerator[]
    The Compute Engine accelerator configuration for these instances.
    diskConfig WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig
    Disk option config settings.
    image string
    The Compute Engine image resource used for cluster instances. The URI can represent an image or image family. Image examples: * https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/[image-id] * projects/[project_id]/global/images/[image-id] * image-id Image family examples. Dataproc will use the most recent image from the family: * https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/family/[custom-image-family-name] * projects/[project_id]/global/images/family/[custom-image-family-name] If the URI is unspecified, it will be inferred from SoftwareConfig.image_version or the system default.
    instanceNames string[]
    Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group.
    isPreemptible boolean
    Output only. Specifies that this instance group contains preemptible instances.
    machineType string
    The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name are valid. Examples: * https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2 * projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2 * n1-standard-2 Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, n1-standard-2.
    managedGroupConfigs WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig[]
    Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups.
    minCpuPlatform string
    Specifies the minimum cpu platform for the Instance Group. See (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu).
    numInstances number
    The number of VM instances in the instance group. For master instance groups, must be set to 1.
    preemptibility string
    Specifies the preemptibility of the instance group. The default value for master and worker groups is NON_PREEMPTIBLE. This default cannot be changed. The default value for secondary instances is PREEMPTIBLE. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE
    accelerators Sequence[WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerator]
    The Compute Engine accelerator configuration for these instances.
    disk_config WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig
    Disk option config settings.
    image str
    The Compute Engine image resource used for cluster instances. The URI can represent an image or image family. Image examples: * https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/[image-id] * projects/[project_id]/global/images/[image-id] * image-id Image family examples. Dataproc will use the most recent image from the family: * https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/family/[custom-image-family-name] * projects/[project_id]/global/images/family/[custom-image-family-name] If the URI is unspecified, it will be inferred from SoftwareConfig.image_version or the system default.
    instance_names Sequence[str]
    Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group.
    is_preemptible bool
    Output only. Specifies that this instance group contains preemptible instances.
    machine_type str
    The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name are valid. Examples: * https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2 * projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2 * n1-standard-2 Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, n1-standard-2.
    managed_group_configs Sequence[WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig]
    Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups.
    min_cpu_platform str
    Specifies the minimum cpu platform for the Instance Group. See (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu).
    num_instances int
    The number of VM instances in the instance group. For master instance groups, must be set to 1.
    preemptibility str
    Specifies the preemptibility of the instance group. The default value for master and worker groups is NON_PREEMPTIBLE. This default cannot be changed. The default value for secondary instances is PREEMPTIBLE. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE
    accelerators List<Property Map>
    The Compute Engine accelerator configuration for these instances.
    diskConfig Property Map
    Disk option config settings.
    image String
    The Compute Engine image resource used for cluster instances. The URI can represent an image or image family. Image examples: * https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/[image-id] * projects/[project_id]/global/images/[image-id] * image-id Image family examples. Dataproc will use the most recent image from the family: * https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/family/[custom-image-family-name] * projects/[project_id]/global/images/family/[custom-image-family-name] If the URI is unspecified, it will be inferred from SoftwareConfig.image_version or the system default.
    instanceNames List<String>
    Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group.
    isPreemptible Boolean
    Output only. Specifies that this instance group contains preemptible instances.
    machineType String
    The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name are valid. Examples: * https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2 * projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2 * n1-standard-2 Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, n1-standard-2.
    managedGroupConfigs List<Property Map>
    Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups.
    minCpuPlatform String
    Specifies the minimum cpu platform for the Instance Group. See (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu).
    numInstances Number
    The number of VM instances in the instance group. For master instance groups, must be set to 1.
    preemptibility String
    Specifies the preemptibility of the instance group. The default value for master and worker groups is NON_PREEMPTIBLE. This default cannot be changed. The default value for secondary instances is PREEMPTIBLE. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE
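
    A sketch of a single-master configuration; the machine type, CPU platform, and disk values are illustrative choices, not defaults:

    import * as gcp from "@pulumi/gcp";

    // One master on an n1-standard-4 with an SSD boot disk.
    const masterConfig: gcp.types.input.dataproc.WorkflowTemplatePlacementManagedClusterConfigMasterConfig = {
        numInstances: 1,               // master instance groups must be set to 1
        machineType: "n1-standard-4",  // short name, as required with auto zone placement
        minCpuPlatform: "Intel Skylake",
        diskConfig: {
            bootDiskType: "pd-ssd",
            bootDiskSizeGb: 100,
        },
    };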

    WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerator, WorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorArgs

    AcceleratorCount int
    The number of the accelerator cards of this type exposed to this instance.
    AcceleratorType string
    Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-k80.
    AcceleratorCount int
    The number of the accelerator cards of this type exposed to this instance.
    AcceleratorType string
    Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-k80.
    acceleratorCount Integer
    The number of the accelerator cards of this type exposed to this instance.
    acceleratorType String
    Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-k80.
    acceleratorCount number
    The number of the accelerator cards of this type exposed to this instance.
    acceleratorType string
    Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-k80.
    accelerator_count int
    The number of the accelerator cards of this type exposed to this instance.
    accelerator_type str
    Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-k80.
    acceleratorCount Number
    The number of the accelerator cards of this type exposed to this instance.
    acceleratorType String
    Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-k80.
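
    A sketch attaching a single GPU to each master instance; the accelerator type uses the short-name form mentioned above:

    import * as gcp from "@pulumi/gcp";

    // One NVIDIA Tesla K80 per master instance.
    const accelerators: gcp.types.input.dataproc.WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerator[] = [{
        acceleratorType: "nvidia-tesla-k80",
        acceleratorCount: 1,
    }];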

    WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig, WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigArgs

    BootDiskSizeGb int
    Size in GB of the boot disk (default is 500GB).
    BootDiskType string
    Type of the boot disk (default is "pd-standard"). Valid values: "pd-ssd" (Persistent Disk Solid State Drive) or "pd-standard" (Persistent Disk Hard Disk Drive).
    NumLocalSsds int
    Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.
    BootDiskSizeGb int
    Size in GB of the boot disk (default is 500GB).
    BootDiskType string
    Type of the boot disk (default is "pd-standard"). Valid values: "pd-ssd" (Persistent Disk Solid State Drive) or "pd-standard" (Persistent Disk Hard Disk Drive).
    NumLocalSsds int
    Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.
    bootDiskSizeGb Integer
    Size in GB of the boot disk (default is 500GB).
    bootDiskType String
    Type of the boot disk (default is "pd-standard"). Valid values: "pd-ssd" (Persistent Disk Solid State Drive) or "pd-standard" (Persistent Disk Hard Disk Drive).
    numLocalSsds Integer
    Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.
    bootDiskSizeGb number
    Size in GB of the boot disk (default is 500GB).
    bootDiskType string
    Type of the boot disk (default is "pd-standard"). Valid values: "pd-ssd" (Persistent Disk Solid State Drive) or "pd-standard" (Persistent Disk Hard Disk Drive).
    numLocalSsds number
    Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.
    boot_disk_size_gb int
    Size in GB of the boot disk (default is 500GB).
    boot_disk_type str
    Type of the boot disk (default is "pd-standard"). Valid values: "pd-ssd" (Persistent Disk Solid State Drive) or "pd-standard" (Persistent Disk Hard Disk Drive).
    num_local_ssds int
    Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.
    bootDiskSizeGb Number
    Size in GB of the boot disk (default is 500GB).
    bootDiskType String
    Type of the boot disk (default is "pd-standard"). Valid values: "pd-ssd" (Persistent Disk Solid State Drive) or "pd-standard" (Persistent Disk Hard Disk Drive).
    numLocalSsds Number
    Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.
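
    A sketch of a disk configuration with local SSDs for HDFS and scratch data; the sizes are illustrative:

    import * as gcp from "@pulumi/gcp";

    // SSD boot disk plus two local SSDs; bulk runtime data is spread
    // across the local SSDs rather than the boot disk.
    const diskConfig: gcp.types.input.dataproc.WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig = {
        bootDiskType: "pd-ssd",
        bootDiskSizeGb: 100,
        numLocalSsds: 2,
    };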

    WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig, WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigArgs

    InstanceGroupManagerName string
    Output only. The name of the Instance Group Manager for this group.
    InstanceTemplateName string
    Output only. The name of the Instance Template used for the Managed Instance Group.
    InstanceGroupManagerName string
    Output only. The name of the Instance Group Manager for this group.
    InstanceTemplateName string
    Output only. The name of the Instance Template used for the Managed Instance Group.
    instanceGroupManagerName String
    Output only. The name of the Instance Group Manager for this group.
    instanceTemplateName String
    Output only. The name of the Instance Template used for the Managed Instance Group.
    instanceGroupManagerName string
    Output only. The name of the Instance Group Manager for this group.
    instanceTemplateName string
    Output only. The name of the Instance Template used for the Managed Instance Group.
    instance_group_manager_name str
    Output only. The name of the Instance Group Manager for this group.
    instance_template_name str
    Output only. The name of the Instance Template used for the Managed Instance Group.
    instanceGroupManagerName String
    Output only. The name of the Instance Group Manager for this group.
    instanceTemplateName String
    Output only. The name of the Instance Template used for the Managed Instance Group.

    WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig, WorkflowTemplatePlacementManagedClusterConfigMetastoreConfigArgs

    DataprocMetastoreService string
    Required. Resource name of an existing Dataproc Metastore service. Example: * projects/
    DataprocMetastoreService string
    Required. Resource name of an existing Dataproc Metastore service. Example: * projects/
    dataprocMetastoreService String
    Required. Resource name of an existing Dataproc Metastore service. Example: * projects/
    dataprocMetastoreService string
    Required. Resource name of an existing Dataproc Metastore service. Example: * projects/
    dataproc_metastore_service str
    Required. Resource name of an existing Dataproc Metastore service. Example: * projects/
    dataprocMetastoreService String
    Required. Resource name of an existing Dataproc Metastore service. Example: * projects/
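
    A sketch pointing the managed cluster at an existing Dataproc Metastore service; the project, location, and service names are placeholders:

    import * as gcp from "@pulumi/gcp";

    // Attach an existing Dataproc Metastore service to the managed cluster.
    const metastoreConfig: gcp.types.input.dataproc.WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig = {
        dataprocMetastoreService: "projects/my-project/locations/us-central1/services/my-metastore",
    };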

    WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig, WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigArgs

    Accelerators List<WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerator>
    Optional. The Compute Engine accelerator configuration for these instances.
    DiskConfig WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig
    Optional. Disk option config settings.
    Image string
    Optional. The Compute Engine image resource used for cluster instances. The URI can represent an image or image family. Image examples: * https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/[image-id] * projects/[project_id]/global/images/[image-id] * image-id Image family examples. Dataproc will use the most recent image from the family: * https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/family/[custom-image-family-name] * projects/[project_id]/global/images/family/[custom-image-family-name] If the URI is unspecified, it will be inferred from SoftwareConfig.image_version or the system default.
    InstanceNames List<string>
    Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group.
    IsPreemptible bool
    Output only. Specifies that this instance group contains preemptible instances.
    MachineType string
    Optional. The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name are valid. Examples: * https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2 * projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2 * n1-standard-2 Auto Zone Exception: If you are using the Dataproc Auto Zone Placement feature, you must use the short name of the machine type resource, for example, n1-standard-2.
    ManagedGroupConfigs List<WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig>
    Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups.
    MinCpuPlatform string
    Optional. Specifies the minimum cpu platform for the Instance Group. See Dataproc > Minimum CPU Platform.
    NumInstances int
    Optional. The number of VM instances in the instance group. For HA cluster master_config groups, must be set to 3. For standard cluster master_config groups, must be set to 1.
    Preemptibility string
    Optional. Specifies the preemptibility of the instance group. The default value for master and worker groups is NON_PREEMPTIBLE. This default cannot be changed. The default value for secondary instances is PREEMPTIBLE. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE
    Accelerators []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerator
    Optional. The Compute Engine accelerator configuration for these instances.
    DiskConfig WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig
    Optional. Disk option config settings.
    Image string
    Optional. The Compute Engine image resource used for cluster instances. The URI can represent an image or image family. Image examples: * https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/[image-id] * projects/[project_id]/global/images/[image-id] * image-id Image family examples. Dataproc will use the most recent image from the family: * https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/family/[custom-image-family-name] * projects/[project_id]/global/images/family/[custom-image-family-name] If the URI is unspecified, it will be inferred from SoftwareConfig.image_version or the system default.
    InstanceNames []string
    Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group.
    IsPreemptible bool
    Output only. Specifies that this instance group contains preemptible instances.
    MachineType string
    Optional. The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name are valid. Examples: * https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2 * projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2 * n1-standard-2 Auto Zone Exception: If you are using the Dataproc Auto Zone Placement feature, you must use the short name of the machine type resource, for example, n1-standard-2.
    ManagedGroupConfigs []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig
    Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups.
    MinCpuPlatform string
    Optional. Specifies the minimum cpu platform for the Instance Group. See Dataproc > Minimum CPU Platform.
    NumInstances int
    Optional. The number of VM instances in the instance group. For HA cluster master_config groups, must be set to 3. For standard cluster master_config groups, must be set to 1.
    Preemptibility string
    Optional. Specifies the preemptibility of the instance group. The default value for master and worker groups is NON_PREEMPTIBLE. This default cannot be changed. The default value for secondary instances is PREEMPTIBLE. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE
    accelerators List<WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerator>
    Optional. The Compute Engine accelerator configuration for these instances.
    diskConfig WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig
    Optional. Disk option config settings.
    image String
    Optional. The Compute Engine image resource used for cluster instances. The URI can represent an image or image family. Image examples: * https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/[image-id] * projects/[project_id]/global/images/[image-id] * image-id Image family examples. Dataproc will use the most recent image from the family: * https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/family/[custom-image-family-name] * projects/[project_id]/global/images/family/[custom-image-family-name] If the URI is unspecified, it will be inferred from SoftwareConfig.image_version or the system default.
    instanceNames List<String>
    Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group.
    isPreemptible Boolean
    Output only. Specifies that this instance group contains preemptible instances.
    machineType String
    Optional. The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name are valid. Examples: * https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2 * projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2 * n1-standard-2 Auto Zone Exception: If you are using the Dataproc Auto Zone Placement feature, you must use the short name of the machine type resource, for example, n1-standard-2.
    managedGroupConfigs List<WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig>
    Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups.
    minCpuPlatform String
    Optional. Specifies the minimum cpu platform for the Instance Group. See Dataproc > Minimum CPU Platform.
    numInstances Integer
    Optional. The number of VM instances in the instance group. For HA cluster master_config groups, must be set to 3. For standard cluster master_config groups, must be set to 1.
    preemptibility String
    Optional. Specifies the preemptibility of the instance group. The default value for master and worker groups is NON_PREEMPTIBLE. This default cannot be changed. The default value for secondary instances is PREEMPTIBLE. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE
    accelerators WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerator[]
    Optional. The Compute Engine accelerator configuration for these instances.
    diskConfig WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig
    Optional. Disk option config settings.
    image string
    Optional. The Compute Engine image resource used for cluster instances. The URI can represent an image or image family. Image examples: * https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/[image-id] * projects/[project_id]/global/images/[image-id] * image-id Image family examples. Dataproc will use the most recent image from the family: * https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/family/[custom-image-family-name] * projects/[project_id]/global/images/family/[custom-image-family-name] If the URI is unspecified, it will be inferred from SoftwareConfig.image_version or the system default.
    instanceNames string[]
    Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group.
    isPreemptible boolean
    Output only. Specifies that this instance group contains preemptible instances.
    machineType string
    Optional. The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name are valid. Examples: * https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2 * projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2 * n1-standard-2 Auto Zone Exception: If you are using the Dataproc Auto Zone Placement feature, you must use the short name of the machine type resource, for example, n1-standard-2.
    managedGroupConfigs WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig[]
    Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups.
    minCpuPlatform string
    Optional. Specifies the minimum cpu platform for the Instance Group. See Dataproc > Minimum CPU Platform.
    numInstances number
    Optional. The number of VM instances in the instance group. For HA cluster master_config groups, must be set to 3. For standard cluster master_config groups, must be set to 1.
    preemptibility string
    Optional. Specifies the preemptibility of the instance group. The default value for master and worker groups is NON_PREEMPTIBLE. This default cannot be changed. The default value for secondary instances is PREEMPTIBLE. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE
    accelerators Sequence[WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerator]
    Optional. The Compute Engine accelerator configuration for these instances.
    disk_config WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig
    Optional. Disk option config settings.
    image str
    Optional. The Compute Engine image resource used for cluster instances. The URI can represent an image or image family. Image examples: * https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/[image-id] * projects/[project_id]/global/images/[image-id] * image-id Image family examples. Dataproc will use the most recent image from the family: * https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/family/[custom-image-family-name] * projects/[project_id]/global/images/family/[custom-image-family-name] If the URI is unspecified, it will be inferred from SoftwareConfig.image_version or the system default.
    instance_names Sequence[str]
    Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group.
    is_preemptible bool
    Output only. Specifies that this instance group contains preemptible instances.
    machine_type str
    Optional. The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name are valid. Examples: * https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2 * projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2 * n1-standard-2 Auto Zone Exception: If you are using the Dataproc Auto Zone Placement feature, you must use the short name of the machine type resource, for example, n1-standard-2.
    managed_group_configs Sequence[WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig]
    Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups.
    min_cpu_platform str
    Optional. Specifies the minimum cpu platform for the Instance Group. See Dataproc > Minimum CPU Platform.
    num_instances int
    Optional. The number of VM instances in the instance group. For HA cluster master_config groups, must be set to 3. For standard cluster master_config groups, must be set to 1.
    preemptibility str
    Optional. Specifies the preemptibility of the instance group. The default value for master and worker groups is NON_PREEMPTIBLE. This default cannot be changed. The default value for secondary instances is PREEMPTIBLE. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE
    accelerators List<Property Map>
    Optional. The Compute Engine accelerator configuration for these instances.
    diskConfig Property Map
    Optional. Disk option config settings.
    image String
    Optional. The Compute Engine image resource used for cluster instances. The URI can represent an image or image family. Image examples: * https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/[image-id] * projects/[project_id]/global/images/[image-id] * image-id Image family examples. Dataproc will use the most recent image from the family: * https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/family/[custom-image-family-name] * projects/[project_id]/global/images/family/[custom-image-family-name] If the URI is unspecified, it will be inferred from SoftwareConfig.image_version or the system default.
    instanceNames List<String>
    Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group.
    isPreemptible Boolean
    Output only. Specifies that this instance group contains preemptible instances.
    machineType String
    Optional. The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name are valid. Examples: * https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2 * projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2 * n1-standard-2 Auto Zone Exception: If you are using the Dataproc Auto Zone Placement feature, you must use the short name of the machine type resource, for example, n1-standard-2.
    managedGroupConfigs List<Property Map>
    Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups.
    minCpuPlatform String
    Optional. Specifies the minimum cpu platform for the Instance Group. See Dataproc > Minimum CPU Platform.
    numInstances Number
    Optional. The number of VM instances in the instance group. For HA cluster master_config groups, must be set to 3. For standard cluster master_config groups, must be set to 1.
    preemptibility String
    Optional. Specifies the preemptibility of the instance group. The default value for master and worker groups is NON_PREEMPTIBLE. This default cannot be changed. The default value for secondary instances is PREEMPTIBLE. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE
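
    The fields above make up the secondaryWorkerConfig block of a managed cluster. Below is a minimal TypeScript sketch of that block in isolation, assuming the SDK's generated input types are exposed under gcp.types.input.dataproc (as in other Pulumi providers); the instance count and machine type are illustrative:

    import * as gcp from "@pulumi/gcp";

    // Four preemptible secondary workers. PREEMPTIBLE is already the default for
    // secondary instances; it is spelled out here only for clarity.
    const secondaryWorkerConfig: gcp.types.input.dataproc.WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig = {
        numInstances: 4,
        preemptibility: "PREEMPTIBLE",
        machineType: "n1-standard-2",
    };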

    WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerator, WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorArgs

    AcceleratorCount int
    The number of the accelerator cards of this type exposed to this instance.
    AcceleratorType string
    Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-k80.
    AcceleratorCount int
    The number of the accelerator cards of this type exposed to this instance.
    AcceleratorType string
    Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-k80.
    acceleratorCount Integer
    The number of the accelerator cards of this type exposed to this instance.
    acceleratorType String
    Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-k80.
    acceleratorCount number
    The number of the accelerator cards of this type exposed to this instance.
    acceleratorType string
    Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-k80.
    accelerator_count int
    The number of the accelerator cards of this type exposed to this instance.
    accelerator_type str
    Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-k80.
    acceleratorCount Number
    The number of the accelerator cards of this type exposed to this instance.
    acceleratorType String
    Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-k80.
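
    As a sketch, an accelerator entry for the secondary worker group might look like the following (the GPU type is illustrative; with Auto Zone Placement only the short name form is accepted):

    import * as gcp from "@pulumi/gcp";

    // One nvidia-tesla-k80 card exposed to each secondary worker instance.
    const secondaryWorkerAccelerators: gcp.types.input.dataproc.WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerator[] = [{
        acceleratorCount: 1,
        acceleratorType: "nvidia-tesla-k80",
    }];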

    WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig, WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigArgs

    BootDiskSizeGb int
    Size in GB of the boot disk (default is 500GB).
    BootDiskType string
    Type of the boot disk (default is "pd-standard"). Valid values: "pd-ssd" (Persistent Disk Solid State Drive) or "pd-standard" (Persistent Disk Hard Disk Drive).
    NumLocalSsds int
    Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.
    BootDiskSizeGb int
    Size in GB of the boot disk (default is 500GB).
    BootDiskType string
    Type of the boot disk (default is "pd-standard"). Valid values: "pd-ssd" (Persistent Disk Solid State Drive) or "pd-standard" (Persistent Disk Hard Disk Drive).
    NumLocalSsds int
    Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.
    bootDiskSizeGb Integer
    Size in GB of the boot disk (default is 500GB).
    bootDiskType String
    Type of the boot disk (default is "pd-standard"). Valid values: "pd-ssd" (Persistent Disk Solid State Drive) or "pd-standard" (Persistent Disk Hard Disk Drive).
    numLocalSsds Integer
    Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.
    bootDiskSizeGb number
    Size in GB of the boot disk (default is 500GB).
    bootDiskType string
    Type of the boot disk (default is "pd-standard"). Valid values: "pd-ssd" (Persistent Disk Solid State Drive) or "pd-standard" (Persistent Disk Hard Disk Drive).
    numLocalSsds number
    Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.
    boot_disk_size_gb int
    Size in GB of the boot disk (default is 500GB).
    boot_disk_type str
    Type of the boot disk (default is "pd-standard"). Valid values: "pd-ssd" (Persistent Disk Solid State Drive) or "pd-standard" (Persistent Disk Hard Disk Drive).
    num_local_ssds int
    Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.
    bootDiskSizeGb Number
    Size in GB of the boot disk (default is 500GB).
    bootDiskType String
    Type of the boot disk (default is "pd-standard"). Valid values: "pd-ssd" (Persistent Disk Solid State Drive) or "pd-standard" (Persistent Disk Hard Disk Drive).
    numLocalSsds Number
    Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.
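
    For example, a disk configuration that uses an SSD boot disk plus two local SSDs (so HDFS and scratch data are spread across the local SSDs rather than the boot disk) could be sketched as follows; the sizes are placeholders:

    import * as gcp from "@pulumi/gcp";

    // 100 GB pd-ssd boot disk; runtime bulk data goes to the two local SSDs.
    const secondaryWorkerDiskConfig: gcp.types.input.dataproc.WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig = {
        bootDiskType: "pd-ssd",
        bootDiskSizeGb: 100,
        numLocalSsds: 2,
    };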

    WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig, WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigArgs

    InstanceGroupManagerName string
    Output only. The name of the Instance Group Manager for this group.
    InstanceTemplateName string
    Output only. The name of the Instance Template used for the Managed Instance Group.
    InstanceGroupManagerName string
    Output only. The name of the Instance Group Manager for this group.
    InstanceTemplateName string
    Output only. The name of the Instance Template used for the Managed Instance Group.
    instanceGroupManagerName String
    Output only. The name of the Instance Group Manager for this group.
    instanceTemplateName String
    Output only. The name of the Instance Template used for the Managed Instance Group.
    instanceGroupManagerName string
    Output only. The name of the Instance Group Manager for this group.
    instanceTemplateName string
    Output only. The name of the Instance Template used for the Managed Instance Group.
    instance_group_manager_name str
    Output only. The name of the Instance Group Manager for this group.
    instance_template_name str
    Output only. The name of the Instance Template used for the Managed Instance Group.
    instanceGroupManagerName String
    Output only. The name of the Instance Group Manager for this group.
    instanceTemplateName String
    Output only. The name of the Instance Template used for the Managed Instance Group.

    WorkflowTemplatePlacementManagedClusterConfigSecurityConfig, WorkflowTemplatePlacementManagedClusterConfigSecurityConfigArgs

    KerberosConfig WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig
    Kerberos related configuration.
    KerberosConfig WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig
    Kerberos related configuration.
    kerberosConfig WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig
    Kerberos related configuration.
    kerberosConfig WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig
    Kerberos related configuration.
    kerberos_config WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig
    Kerberos related configuration.
    kerberosConfig Property Map
    Kerberos related configuration.

    WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig, WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigArgs

    CrossRealmTrustAdminServer string
    The admin server (IP or hostname) for the remote trusted realm in a cross realm trust relationship.
    CrossRealmTrustKdc string
    The KDC (IP or hostname) for the remote trusted realm in a cross realm trust relationship.
    CrossRealmTrustRealm string
    The remote realm the Dataproc on-cluster KDC will trust, should the user enable cross realm trust.
    CrossRealmTrustSharedPassword string
    The Cloud Storage URI of a KMS encrypted file containing the shared password between the on-cluster Kerberos realm and the remote trusted realm, in a cross realm trust relationship.
    EnableKerberos bool
    Flag to indicate whether to Kerberize the cluster (default: false). Set this field to true to enable Kerberos on a cluster.
    KdcDbKey string
    The Cloud Storage URI of a KMS encrypted file containing the master key of the KDC database.
    KeyPassword string
    The Cloud Storage URI of a KMS encrypted file containing the password to the user provided key. For the self-signed certificate, this password is generated by Dataproc.
    Keystore string
    The Cloud Storage URI of the keystore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.
    KeystorePassword string
    The Cloud Storage URI of a KMS encrypted file containing the password to the user provided keystore. For the self-signed certificate, this password is generated by Dataproc.
    KmsKey string
    The URI of the KMS key used to encrypt various sensitive files.
    Realm string
    The name of the on-cluster Kerberos realm. If not specified, the uppercased domain of hostnames will be the realm.
    RootPrincipalPassword string
    The Cloud Storage URI of a KMS encrypted file containing the root principal password.
    TgtLifetimeHours int
    The lifetime of the ticket granting ticket, in hours. If not specified, or user specifies 0, then default value 10 will be used.
    Truststore string
    The Cloud Storage URI of the truststore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.
    TruststorePassword string
    The Cloud Storage URI of a KMS encrypted file containing the password to the user provided truststore. For the self-signed certificate, this password is generated by Dataproc.
    CrossRealmTrustAdminServer string
    The admin server (IP or hostname) for the remote trusted realm in a cross realm trust relationship.
    CrossRealmTrustKdc string
    The KDC (IP or hostname) for the remote trusted realm in a cross realm trust relationship.
    CrossRealmTrustRealm string
    The remote realm the Dataproc on-cluster KDC will trust, should the user enable cross realm trust.
    CrossRealmTrustSharedPassword string
    The Cloud Storage URI of a KMS encrypted file containing the shared password between the on-cluster Kerberos realm and the remote trusted realm, in a cross realm trust relationship.
    EnableKerberos bool
    Flag to indicate whether to Kerberize the cluster (default: false). Set this field to true to enable Kerberos on a cluster.
    KdcDbKey string
    The Cloud Storage URI of a KMS encrypted file containing the master key of the KDC database.
    KeyPassword string
    The Cloud Storage URI of a KMS encrypted file containing the password to the user provided key. For the self-signed certificate, this password is generated by Dataproc.
    Keystore string
    The Cloud Storage URI of the keystore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.
    KeystorePassword string
    The Cloud Storage URI of a KMS encrypted file containing the password to the user provided keystore. For the self-signed certificate, this password is generated by Dataproc.
    KmsKey string
    The URI of the KMS key used to encrypt various sensitive files.
    Realm string
    The name of the on-cluster Kerberos realm. If not specified, the uppercased domain of hostnames will be the realm.
    RootPrincipalPassword string
    The Cloud Storage URI of a KMS encrypted file containing the root principal password.
    TgtLifetimeHours int
    The lifetime of the ticket granting ticket, in hours. If not specified, or user specifies 0, then default value 10 will be used.
    Truststore string
    The Cloud Storage URI of the truststore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.
    TruststorePassword string
    The Cloud Storage URI of a KMS encrypted file containing the password to the user provided truststore. For the self-signed certificate, this password is generated by Dataproc.
    crossRealmTrustAdminServer String
    The admin server (IP or hostname) for the remote trusted realm in a cross realm trust relationship.
    crossRealmTrustKdc String
    The KDC (IP or hostname) for the remote trusted realm in a cross realm trust relationship.
    crossRealmTrustRealm String
    The remote realm the Dataproc on-cluster KDC will trust, should the user enable cross realm trust.
    crossRealmTrustSharedPassword String
    The Cloud Storage URI of a KMS encrypted file containing the shared password between the on-cluster Kerberos realm and the remote trusted realm, in a cross realm trust relationship.
    enableKerberos Boolean
    Flag to indicate whether to Kerberize the cluster (default: false). Set this field to true to enable Kerberos on a cluster.
    kdcDbKey String
    The Cloud Storage URI of a KMS encrypted file containing the master key of the KDC database.
    keyPassword String
    The Cloud Storage URI of a KMS encrypted file containing the password to the user provided key. For the self-signed certificate, this password is generated by Dataproc.
    keystore String
    The Cloud Storage URI of the keystore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.
    keystorePassword String
    The Cloud Storage URI of a KMS encrypted file containing the password to the user provided keystore. For the self-signed certificate, this password is generated by Dataproc.
    kmsKey String
    The URI of the KMS key used to encrypt various sensitive files.
    realm String
    The name of the on-cluster Kerberos realm. If not specified, the uppercased domain of hostnames will be the realm.
    rootPrincipalPassword String
    The Cloud Storage URI of a KMS encrypted file containing the root principal password.
    tgtLifetimeHours Integer
    The lifetime of the ticket granting ticket, in hours. If not specified, or user specifies 0, then default value 10 will be used.
    truststore String
    The Cloud Storage URI of the truststore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.
    truststorePassword String
    The Cloud Storage URI of a KMS encrypted file containing the password to the user provided truststore. For the self-signed certificate, this password is generated by Dataproc.
    crossRealmTrustAdminServer string
    The admin server (IP or hostname) for the remote trusted realm in a cross realm trust relationship.
    crossRealmTrustKdc string
    The KDC (IP or hostname) for the remote trusted realm in a cross realm trust relationship.
    crossRealmTrustRealm string
    The remote realm the Dataproc on-cluster KDC will trust, should the user enable cross realm trust.
    crossRealmTrustSharedPassword string
    The Cloud Storage URI of a KMS encrypted file containing the shared password between the on-cluster Kerberos realm and the remote trusted realm, in a cross realm trust relationship.
    enableKerberos boolean
    Flag to indicate whether to Kerberize the cluster (default: false). Set this field to true to enable Kerberos on a cluster.
    kdcDbKey string
    The Cloud Storage URI of a KMS encrypted file containing the master key of the KDC database.
    keyPassword string
    The Cloud Storage URI of a KMS encrypted file containing the password to the user provided key. For the self-signed certificate, this password is generated by Dataproc.
    keystore string
    The Cloud Storage URI of the keystore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.
    keystorePassword string
    The Cloud Storage URI of a KMS encrypted file containing the password to the user provided keystore. For the self-signed certificate, this password is generated by Dataproc.
    kmsKey string
    The URI of the KMS key used to encrypt various sensitive files.
    realm string
    The name of the on-cluster Kerberos realm. If not specified, the uppercased domain of hostnames will be the realm.
    rootPrincipalPassword string
    The Cloud Storage URI of a KMS encrypted file containing the root principal password.
    tgtLifetimeHours number
    The lifetime of the ticket granting ticket, in hours. If not specified, or user specifies 0, then default value 10 will be used.
    truststore string
    The Cloud Storage URI of the truststore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.
    truststorePassword string
    The Cloud Storage URI of a KMS encrypted file containing the password to the user provided truststore. For the self-signed certificate, this password is generated by Dataproc.
    cross_realm_trust_admin_server str
    The admin server (IP or hostname) for the remote trusted realm in a cross realm trust relationship.
    cross_realm_trust_kdc str
    The KDC (IP or hostname) for the remote trusted realm in a cross realm trust relationship.
    cross_realm_trust_realm str
    The remote realm the Dataproc on-cluster KDC will trust, should the user enable cross realm trust.
    cross_realm_trust_shared_password str
    The Cloud Storage URI of a KMS encrypted file containing the shared password between the on-cluster Kerberos realm and the remote trusted realm, in a cross realm trust relationship.
    enable_kerberos bool
    Flag to indicate whether to Kerberize the cluster (default: false). Set this field to true to enable Kerberos on a cluster.
    kdc_db_key str
    The Cloud Storage URI of a KMS encrypted file containing the master key of the KDC database.
    key_password str
    The Cloud Storage URI of a KMS encrypted file containing the password to the user provided key. For the self-signed certificate, this password is generated by Dataproc.
    keystore str
    The Cloud Storage URI of the keystore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.
    keystore_password str
    The Cloud Storage URI of a KMS encrypted file containing the password to the user provided keystore. For the self-signed certificate, this password is generated by Dataproc.
    kms_key str
    The URI of the KMS key used to encrypt various sensitive files.
    realm str
    The name of the on-cluster Kerberos realm. If not specified, the uppercased domain of hostnames will be the realm.
    root_principal_password str
    The Cloud Storage URI of a KMS encrypted file containing the root principal password.
    tgt_lifetime_hours int
    The lifetime of the ticket granting ticket, in hours. If not specified, or user specifies 0, then default value 10 will be used.
    truststore str
    The Cloud Storage URI of the truststore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.
    truststore_password str
    The Cloud Storage URI of a KMS encrypted file containing the password to the user provided truststore. For the self-signed certificate, this password is generated by Dataproc.
    crossRealmTrustAdminServer String
    The admin server (IP or hostname) for the remote trusted realm in a cross realm trust relationship.
    crossRealmTrustKdc String
    The KDC (IP or hostname) for the remote trusted realm in a cross realm trust relationship.
    crossRealmTrustRealm String
    The remote realm the Dataproc on-cluster KDC will trust, should the user enable cross realm trust.
    crossRealmTrustSharedPassword String
    The Cloud Storage URI of a KMS encrypted file containing the shared password between the on-cluster Kerberos realm and the remote trusted realm, in a cross realm trust relationship.
    enableKerberos Boolean
    Flag to indicate whether to Kerberize the cluster (default: false). Set this field to true to enable Kerberos on a cluster.
    kdcDbKey String
    The Cloud Storage URI of a KMS encrypted file containing the master key of the KDC database.
    keyPassword String
    The Cloud Storage URI of a KMS encrypted file containing the password to the user provided key. For the self-signed certificate, this password is generated by Dataproc.
    keystore String
    The Cloud Storage URI of the keystore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.
    keystorePassword String
    The Cloud Storage URI of a KMS encrypted file containing the password to the user provided keystore. For the self-signed certificate, this password is generated by Dataproc.
    kmsKey String
    The URI of the KMS key used to encrypt various sensitive files.
    realm String
    The name of the on-cluster Kerberos realm. If not specified, the uppercased domain of hostnames will be the realm.
    rootPrincipalPassword String
    The Cloud Storage URI of a KMS encrypted file containing the root principal password.
    tgtLifetimeHours Number
    The lifetime of the ticket granting ticket, in hours. If not specified, or user specifies 0, then default value 10 will be used.
    truststore String
    The Cloud Storage URI of the truststore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.
    truststorePassword String
    The Cloud Storage URI of a KMS encrypted file containing the password to the user provided truststore. For the self-signed certificate, this password is generated by Dataproc.
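
    Putting a few of these fields together, a minimal Kerberized security configuration might be sketched as follows (the KMS key and Cloud Storage URIs are placeholders; the referenced password files must be encrypted with the given KMS key):

    import * as gcp from "@pulumi/gcp";

    // Enable Kerberos and point Dataproc at a KMS-encrypted root principal password.
    const securityConfig: gcp.types.input.dataproc.WorkflowTemplatePlacementManagedClusterConfigSecurityConfig = {
        kerberosConfig: {
            enableKerberos: true,
            kmsKey: "projects/my-project/locations/global/keyRings/my-ring/cryptoKeys/my-key",
            rootPrincipalPassword: "gs://my-bucket/root-principal-password.encrypted",
            tgtLifetimeHours: 10,
        },
    };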

    WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig, WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigArgs

    ImageVersion string
    The version of software inside the cluster. It must be one of the supported Dataproc Versions, such as "1.2" (including a subminor version, such as "1.2.29"), or the "preview" version. If unspecified, it defaults to the latest Debian version.
    OptionalComponents List<string>
    The set of components to activate on the cluster.
    Properties Dictionary<string, string>

    The properties to set on daemon config files.

    Property keys are specified in prefix:property format, for example core:hadoop.tmp.dir. The following are supported prefixes and their mappings:

    • capacity-scheduler: capacity-scheduler.xml
    • core: core-site.xml
    • distcp: distcp-default.xml
    • hdfs: hdfs-site.xml
    • hive: hive-site.xml
    • mapred: mapred-site.xml
    • pig: pig.properties
    • spark: spark-defaults.conf
    • yarn: yarn-site.xml

    For more information, see Cluster properties.

    ImageVersion string
    The version of software inside the cluster. It must be one of the supported Dataproc Versions, such as "1.2" (including a subminor version, such as "1.2.29"), or the "preview" version. If unspecified, it defaults to the latest Debian version.
    OptionalComponents []string
    The set of components to activate on the cluster.
    Properties map[string]string

    The properties to set on daemon config files.

    Property keys are specified in prefix:property format, for example core:hadoop.tmp.dir. The following are supported prefixes and their mappings:

    • capacity-scheduler: capacity-scheduler.xml
    • core: core-site.xml
    • distcp: distcp-default.xml
    • hdfs: hdfs-site.xml
    • hive: hive-site.xml
    • mapred: mapred-site.xml
    • pig: pig.properties
    • spark: spark-defaults.conf
    • yarn: yarn-site.xml

    For more information, see Cluster properties.

    imageVersion String
    The version of software inside the cluster. It must be one of the supported Dataproc Versions, such as "1.2" (including a subminor version, such as "1.2.29"), or the "preview" version. If unspecified, it defaults to the latest Debian version.
    optionalComponents List<String>
    The set of components to activate on the cluster.
    properties Map<String,String>

    The properties to set on daemon config files.

    Property keys are specified in prefix:property format, for example core:hadoop.tmp.dir. The following are supported prefixes and their mappings:

    • capacity-scheduler: capacity-scheduler.xml
    • core: core-site.xml
    • distcp: distcp-default.xml
    • hdfs: hdfs-site.xml
    • hive: hive-site.xml
    • mapred: mapred-site.xml
    • pig: pig.properties
    • spark: spark-defaults.conf
    • yarn: yarn-site.xml

    For more information, see Cluster properties.

    imageVersion string
    The version of software inside the cluster. It must be one of the supported Dataproc Versions, such as "1.2" (including a subminor version, such as "1.2.29"), or the "preview" version. If unspecified, it defaults to the latest Debian version.
    optionalComponents string[]
    The set of components to activate on the cluster.
    properties {[key: string]: string}

    The properties to set on daemon config files.

    Property keys are specified in prefix:property format, for example core:hadoop.tmp.dir. The following are supported prefixes and their mappings:

    • capacity-scheduler: capacity-scheduler.xml
    • core: core-site.xml
    • distcp: distcp-default.xml
    • hdfs: hdfs-site.xml
    • hive: hive-site.xml
    • mapred: mapred-site.xml
    • pig: pig.properties
    • spark: spark-defaults.conf
    • yarn: yarn-site.xml

    For more information, see Cluster properties.

    image_version str
    The version of software inside the cluster. It must be one of the supported Dataproc Versions, such as "1.2" (including a subminor version, such as "1.2.29"), or the "preview" version. If unspecified, it defaults to the latest Debian version.
    optional_components Sequence[str]
    The set of components to activate on the cluster.
    properties Mapping[str, str]

    The properties to set on daemon config files.

    Property keys are specified in prefix:property format, for example core:hadoop.tmp.dir. The following are supported prefixes and their mappings:

    • capacity-scheduler: capacity-scheduler.xml
    • core: core-site.xml
    • distcp: distcp-default.xml
    • hdfs: hdfs-site.xml
    • hive: hive-site.xml
    • mapred: mapred-site.xml
    • pig: pig.properties
    • spark: spark-defaults.conf
    • yarn: yarn-site.xml

    For more information, see Cluster properties.

    imageVersion String
    The version of software inside the cluster. It must be one of the supported Dataproc Versions, such as "1.2" (including a subminor version, such as "1.2.29"), or the "preview" version. If unspecified, it defaults to the latest Debian version.
    optionalComponents List<String>
    The set of components to activate on the cluster.
    properties Map<String>

    The properties to set on daemon config files.

    Property keys are specified in prefix:property format, for example core:hadoop.tmp.dir. The following are supported prefixes and their mappings:

    • capacity-scheduler: capacity-scheduler.xml
    • core: core-site.xml
    • distcp: distcp-default.xml
    • hdfs: hdfs-site.xml
    • hive: hive-site.xml
    • mapred: mapred-site.xml
    • pig: pig.properties
    • spark: spark-defaults.conf
    • yarn: yarn-site.xml

    For more information, see Cluster properties.
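
    As an illustration of the prefix:property key format, the following sketch pins the image version and overrides a couple of daemon properties (the property values are placeholders; the type name comes from the listing above):

    import * as gcp from "@pulumi/gcp";

    // Keys take the form "<prefix>:<property>", e.g. "spark:" maps to spark-defaults.conf.
    const softwareConfig: gcp.types.input.dataproc.WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig = {
        imageVersion: "2.0.35-debian10",
        optionalComponents: ["ZEPPELIN"],
        properties: {
            "core:hadoop.tmp.dir": "/tmp/hadoop",
            "spark:spark.executor.memory": "4g",
        },
    };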

    WorkflowTemplatePlacementManagedClusterConfigWorkerConfig, WorkflowTemplatePlacementManagedClusterConfigWorkerConfigArgs

    Accelerators List<WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerator>
    Optional. The Compute Engine accelerator configuration for these instances.
    DiskConfig WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig
    Optional. Disk option config settings.
    Image string
    Optional. The Compute Engine image resource used for cluster instances. The URI can represent an image or image family. Image examples: * https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/[image-id] * projects/[project_id]/global/images/[image-id] * image-id Image family examples. Dataproc will use the most recent image from the family: * https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/family/[custom-image-family-name] * projects/[project_id]/global/images/family/[custom-image-family-name] If the URI is unspecified, it will be inferred from SoftwareConfig.image_version or the system default.
    InstanceNames List<string>
    Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group.
    IsPreemptible bool
    Output only. Specifies that this instance group contains preemptible instances.
    MachineType string
    Optional. The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name are valid. Examples: * https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2 * projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2 * n1-standard-2 Auto Zone Exception: If you are using the Dataproc Auto Zone Placement feature, you must use the short name of the machine type resource, for example, n1-standard-2.
    ManagedGroupConfigs List<WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig>
    Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups.
    MinCpuPlatform string
    Optional. Specifies the minimum cpu platform for the Instance Group. See Dataproc > Minimum CPU Platform.
    NumInstances int
    Optional. The number of VM instances in the instance group. For HA cluster master_config groups, must be set to 3. For standard cluster master_config groups, must be set to 1.
    Preemptibility string
    Optional. Specifies the preemptibility of the instance group. The default value for master and worker groups is NON_PREEMPTIBLE. This default cannot be changed. The default value for secondary instances is PREEMPTIBLE. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE
    Accelerators []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerator
    Optional. The Compute Engine accelerator configuration for these instances.
    DiskConfig WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig
    Optional. Disk option config settings.
    Image string
    Optional. The Compute Engine image resource used for cluster instances. The URI can represent an image or image family. Image examples: * https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/[image-id] * projects/[project_id]/global/images/[image-id] * image-id Image family examples. Dataproc will use the most recent image from the family: * https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/family/[custom-image-family-name] * projects/[project_id]/global/images/family/[custom-image-family-name] If the URI is unspecified, it will be inferred from SoftwareConfig.image_version or the system default.
    InstanceNames []string
    Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group.
    IsPreemptible bool
    Output only. Specifies that this instance group contains preemptible instances.
    MachineType string
    Optional. The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name are valid. Examples: * https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2 * projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2 * n1-standard-2 Auto Zone Exception: If you are using the Dataproc Auto Zone Placement feature, you must use the short name of the machine type resource, for example, n1-standard-2.
    ManagedGroupConfigs []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig
    Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups.
    MinCpuPlatform string
    Optional. Specifies the minimum cpu platform for the Instance Group. See Dataproc > Minimum CPU Platform.
    NumInstances int
    Optional. The number of VM instances in the instance group. For HA cluster master_config groups, must be set to 3. For standard cluster master_config groups, must be set to 1.
    Preemptibility string
    Optional. Specifies the preemptibility of the instance group. The default value for master and worker groups is NON_PREEMPTIBLE. This default cannot be changed. The default value for secondary instances is PREEMPTIBLE. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE
    accelerators List<WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerator>
    Optional. The Compute Engine accelerator configuration for these instances.
    diskConfig WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig
    Optional. Disk option config settings.
    image String
    Optional. The Compute Engine image resource used for cluster instances. The URI can represent an image or image family. Image examples: * https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/[image-id] * projects/[project_id]/global/images/[image-id] * image-id Image family examples. Dataproc will use the most recent image from the family: * https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/family/[custom-image-family-name] * projects/[project_id]/global/images/family/[custom-image-family-name] If the URI is unspecified, it will be inferred from SoftwareConfig.image_version or the system default.
    instanceNames List<String>
    Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group.
    isPreemptible Boolean
    Output only. Specifies that this instance group contains preemptible instances.
    machineType String
    Optional. The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name are valid. Examples: * https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2 * projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2 * n1-standard-2 Auto Zone Exception: If you are using the Dataproc Auto Zone Placement feature, you must use the short name of the machine type resource, for example, n1-standard-2.
    managedGroupConfigs List<WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig>
    Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups.
    minCpuPlatform String
    Optional. Specifies the minimum cpu platform for the Instance Group. See Dataproc > Minimum CPU Platform.
    numInstances Integer
    Optional. The number of VM instances in the instance group. For HA cluster master_config groups, must be set to 3. For standard cluster master_config groups, must be set to 1.
    preemptibility String
    Optional. Specifies the preemptibility of the instance group. The default value for master and worker groups is NON_PREEMPTIBLE. This default cannot be changed. The default value for secondary instances is PREEMPTIBLE. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE
    accelerators WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerator[]
    Optional. The Compute Engine accelerator configuration for these instances.
    diskConfig WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig
    Optional. Disk option config settings.
    image string
    Optional. The Compute Engine image resource used for cluster instances. The URI can represent an image or image family. Image examples: * https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/[image-id] * projects/[project_id]/global/images/[image-id] * image-id Image family examples. Dataproc will use the most recent image from the family: * https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/family/[custom-image-family-name] * projects/[project_id]/global/images/family/[custom-image-family-name] If the URI is unspecified, it will be inferred from SoftwareConfig.image_version or the system default.
    instanceNames string[]
    Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group.
    isPreemptible boolean
    Output only. Specifies that this instance group contains preemptible instances.
    machineType string
    Optional. The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name are valid. Examples: * https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2 * projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2 * n1-standard-2 Auto Zone Exception: If you are using the Dataproc Auto Zone Placement feature, you must use the short name of the machine type resource, for example, n1-standard-2.
    managedGroupConfigs WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig[]
    Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups.
    minCpuPlatform string
    Optional. Specifies the minimum cpu platform for the Instance Group. See Dataproc > Minimum CPU Platform.
    numInstances number
    Optional. The number of VM instances in the instance group. For HA cluster master_config groups, must be set to 3. For standard cluster master_config groups, must be set to 1.
    preemptibility string
    Optional. Specifies the preemptibility of the instance group. The default value for master and worker groups is NON_PREEMPTIBLE. This default cannot be changed. The default value for secondary instances is PREEMPTIBLE. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE
    accelerators Sequence[WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerator]
    Optional. The Compute Engine accelerator configuration for these instances.
    disk_config WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig
    Optional. Disk option config settings.
    image str
    Optional. The Compute Engine image resource used for cluster instances. The URI can represent an image or image family. Image examples: * https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/[image-id] * projects/[project_id]/global/images/[image-id] * image-id Image family examples. Dataproc will use the most recent image from the family: * https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/family/[custom-image-family-name] * projects/[project_id]/global/images/family/[custom-image-family-name] If the URI is unspecified, it will be inferred from SoftwareConfig.image_version or the system default.
    instance_names Sequence[str]
    Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group.
    is_preemptible bool
    Output only. Specifies that this instance group contains preemptible instances.
    machine_type str
    Optional. The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name are valid. Examples: * https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2 * projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2 * n1-standard-2 Auto Zone Exception: If you are using the Dataproc Auto Zone Placement feature, you must use the short name of the machine type resource, for example, n1-standard-2.
    managed_group_configs Sequence[WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig]
    Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups.
    min_cpu_platform str
    Optional. Specifies the minimum cpu platform for the Instance Group. See Dataproc > Minimum CPU Platform.
    num_instances int
    Optional. The number of VM instances in the instance group. For HA cluster master_config groups, must be set to 3. For standard cluster master_config groups, must be set to 1.
    preemptibility str
    Optional. Specifies the preemptibility of the instance group. The default value for master and worker groups is NON_PREEMPTIBLE. This default cannot be changed. The default value for secondary instances is PREEMPTIBLE. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE
    accelerators List<Property Map>
    Optional. The Compute Engine accelerator configuration for these instances.
    diskConfig Property Map
    Optional. Disk option config settings.
    image String
    Optional. The Compute Engine image resource used for cluster instances. The URI can represent an image or image family. Image examples: * https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/[image-id] * projects/[project_id]/global/images/[image-id] * image-id Image family examples. Dataproc will use the most recent image from the family: * https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/family/[custom-image-family-name] * projects/[project_id]/global/images/family/[custom-image-family-name] If the URI is unspecified, it will be inferred from SoftwareConfig.image_version or the system default.
    instanceNames List<String>
    Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group.
    isPreemptible Boolean
    Output only. Specifies that this instance group contains preemptible instances.
    machineType String
    Optional. The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name are valid. Examples: * https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2 * projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2 * n1-standard-2 Auto Zone Exception: If you are using the Dataproc Auto Zone Placement feature, you must use the short name of the machine type resource, for example, n1-standard-2.
    managedGroupConfigs List<Property Map>
    Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups.
    minCpuPlatform String
    Optional. Specifies the minimum cpu platform for the Instance Group. See Dataproc > Minimum CPU Platform.
    numInstances Number
    Optional. The number of VM instances in the instance group. For HA cluster master_config groups, must be set to 3. For standard cluster master_config groups, must be set to 1.
    preemptibility String
    Optional. Specifies the preemptibility of the instance group. The default value for master and worker groups is NON_PREEMPTIBLE. This default cannot be changed. The default value for secondary instances is PREEMPTIBLE. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE
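
    The primary worker group uses the same shape as the other instance groups; a small sketch with a minimum CPU platform and the short machine type name (required when Auto Zone Placement chooses the zone) might look like this, with illustrative values:

    import * as gcp from "@pulumi/gcp";

    // Three primary workers scheduled on at least Intel Skylake CPUs.
    const workerConfig: gcp.types.input.dataproc.WorkflowTemplatePlacementManagedClusterConfigWorkerConfig = {
        numInstances: 3,
        machineType: "n1-standard-4",
        minCpuPlatform: "Intel Skylake",
    };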

    WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerator, WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorArgs

    AcceleratorCount int
    The number of the accelerator cards of this type exposed to this instance.
    AcceleratorType string
    Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-k80.
    AcceleratorCount int
    The number of the accelerator cards of this type exposed to this instance.
    AcceleratorType string
    Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-k80.
    acceleratorCount Integer
    The number of the accelerator cards of this type exposed to this instance.
    acceleratorType String
    Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-k80.
    acceleratorCount number
    The number of the accelerator cards of this type exposed to this instance.
    acceleratorType string
    Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-k80.
    accelerator_count int
    The number of the accelerator cards of this type exposed to this instance.
    accelerator_type str
    Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-k80.
    acceleratorCount Number
    The number of the accelerator cards of this type exposed to this instance.
    acceleratorType String
    Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-k80.

    WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig, WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigArgs

    BootDiskSizeGb int
    Size in GB of the boot disk (default is 500GB).
    BootDiskType string
    Type of the boot disk (default is "pd-standard"). Valid values: "pd-ssd" (Persistent Disk Solid State Drive) or "pd-standard" (Persistent Disk Hard Disk Drive).
    NumLocalSsds int
    Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.
    BootDiskSizeGb int
    Size in GB of the boot disk (default is 500GB).
    BootDiskType string
    Type of the boot disk (default is "pd-standard"). Valid values: "pd-ssd" (Persistent Disk Solid State Drive) or "pd-standard" (Persistent Disk Hard Disk Drive).
    NumLocalSsds int
    Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.
    bootDiskSizeGb Integer
    Size in GB of the boot disk (default is 500GB).
    bootDiskType String
    Type of the boot disk (default is "pd-standard"). Valid values: "pd-ssd" (Persistent Disk Solid State Drive) or "pd-standard" (Persistent Disk Hard Disk Drive).
    numLocalSsds Integer
    Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.
    bootDiskSizeGb number
    Size in GB of the boot disk (default is 500GB).
    bootDiskType string
    Type of the boot disk (default is "pd-standard"). Valid values: "pd-ssd" (Persistent Disk Solid State Drive) or "pd-standard" (Persistent Disk Hard Disk Drive).
    numLocalSsds number
    Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.
    boot_disk_size_gb int
    Size in GB of the boot disk (default is 500GB).
    boot_disk_type str
    Type of the boot disk (default is "pd-standard"). Valid values: "pd-ssd" (Persistent Disk Solid State Drive) or "pd-standard" (Persistent Disk Hard Disk Drive).
    num_local_ssds int
    Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.
    bootDiskSizeGb Number
    Size in GB of the boot disk (default is 500GB).
    bootDiskType String
    Type of the boot disk (default is "pd-standard"). Valid values: "pd-ssd" (Persistent Disk Solid State Drive) or "pd-standard" (Persistent Disk Hard Disk Drive).
    numLocalSsds Number
    Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.

    WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig, WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigArgs

    InstanceGroupManagerName string
    Output only. The name of the Instance Group Manager for this group.
    InstanceTemplateName string
    Output only. The name of the Instance Template used for the Managed Instance Group.
    InstanceGroupManagerName string
    Output only. The name of the Instance Group Manager for this group.
    InstanceTemplateName string
    Output only. The name of the Instance Template used for the Managed Instance Group.
    instanceGroupManagerName String
    Output only. The name of the Instance Group Manager for this group.
    instanceTemplateName String
    Output only. The name of the Instance Template used for the Managed Instance Group.
    instanceGroupManagerName string
    Output only. The name of the Instance Group Manager for this group.
    instanceTemplateName string
    Output only. The name of the Instance Template used for the Managed Instance Group.
    instance_group_manager_name str
    Output only. The name of the Instance Group Manager for this group.
    instance_template_name str
    Output only. The name of the Instance Template used for the Managed Instance Group.
    instanceGroupManagerName String
    Output only. The name of the Instance Group Manager for this group.
    instanceTemplateName String
    Output only. The name of the Instance Template used for the Managed Instance Group.

    Import

    WorkflowTemplate can be imported using any of these accepted formats:

    • projects/{{project}}/locations/{{location}}/workflowTemplates/{{name}}

    • {{project}}/{{location}}/{{name}}

    • {{location}}/{{name}}

    When using the pulumi import command, WorkflowTemplate can be imported using one of the formats above. For example:

    $ pulumi import gcp:dataproc/workflowTemplate:WorkflowTemplate default projects/{{project}}/locations/{{location}}/workflowTemplates/{{name}}
    
    $ pulumi import gcp:dataproc/workflowTemplate:WorkflowTemplate default {{project}}/{{location}}/{{name}}
    
    $ pulumi import gcp:dataproc/workflowTemplate:WorkflowTemplate default {{location}}/{{name}}
    

    To learn more about importing existing cloud resources, see Importing resources.

    Package Details

    Repository
    Google Cloud (GCP) Classic pulumi/pulumi-gcp
    License
    Apache-2.0
    Notes
    This Pulumi package is based on the google-beta Terraform Provider.