
databricks.Cluster

Databricks v1.35.0 published on Friday, Mar 29, 2024 by Pulumi

    This resource allows you to manage Databricks Clusters.

    Note: In case of a "Cannot access cluster ####-######-####### that was terminated or unpinned more than 30 days ago" error, the referenced cluster can no longer be managed: Databricks permanently removes clusters that have been terminated or unpinned for more than 30 days (see is_pinned below).

    The following example creates a shared autoscaling cluster that terminates automatically after 20 minutes of inactivity:

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";
    
    const smallest = databricks.getNodeType({
        localDisk: true,
    });
    const latestLts = databricks.getSparkVersion({
        longTermSupport: true,
    });
    const sharedAutoscaling = new databricks.Cluster("sharedAutoscaling", {
        clusterName: "Shared Autoscaling",
        sparkVersion: latestLts.then(latestLts => latestLts.id),
        nodeTypeId: smallest.then(smallest => smallest.id),
        autoterminationMinutes: 20,
        autoscale: {
            minWorkers: 1,
            maxWorkers: 50,
        },
    });
    
    import pulumi
    import pulumi_databricks as databricks
    
    smallest = databricks.get_node_type(local_disk=True)
    latest_lts = databricks.get_spark_version(long_term_support=True)
    shared_autoscaling = databricks.Cluster("sharedAutoscaling",
        cluster_name="Shared Autoscaling",
        spark_version=latest_lts.id,
        node_type_id=smallest.id,
        autotermination_minutes=20,
        autoscale=databricks.ClusterAutoscaleArgs(
            min_workers=1,
            max_workers=50,
        ))
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		smallest, err := databricks.GetNodeType(ctx, &databricks.GetNodeTypeArgs{
    			LocalDisk: pulumi.BoolRef(true),
    		}, nil)
    		if err != nil {
    			return err
    		}
    		latestLts, err := databricks.GetSparkVersion(ctx, &databricks.GetSparkVersionArgs{
    			LongTermSupport: pulumi.BoolRef(true),
    		}, nil)
    		if err != nil {
    			return err
    		}
    		_, err = databricks.NewCluster(ctx, "sharedAutoscaling", &databricks.ClusterArgs{
    			ClusterName:            pulumi.String("Shared Autoscaling"),
    			SparkVersion:           pulumi.String(latestLts.Id),
    			NodeTypeId:             pulumi.String(smallest.Id),
    			AutoterminationMinutes: pulumi.Int(20),
    			Autoscale: &databricks.ClusterAutoscaleArgs{
    				MinWorkers: pulumi.Int(1),
    				MaxWorkers: pulumi.Int(50),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Databricks = Pulumi.Databricks;
    
    return await Deployment.RunAsync(() => 
    {
        var smallest = Databricks.GetNodeType.Invoke(new()
        {
            LocalDisk = true,
        });
    
        var latestLts = Databricks.GetSparkVersion.Invoke(new()
        {
            LongTermSupport = true,
        });
    
        var sharedAutoscaling = new Databricks.Cluster("sharedAutoscaling", new()
        {
            ClusterName = "Shared Autoscaling",
            SparkVersion = latestLts.Apply(getSparkVersionResult => getSparkVersionResult.Id),
            NodeTypeId = smallest.Apply(getNodeTypeResult => getNodeTypeResult.Id),
            AutoterminationMinutes = 20,
            Autoscale = new Databricks.Inputs.ClusterAutoscaleArgs
            {
                MinWorkers = 1,
                MaxWorkers = 50,
            },
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.databricks.DatabricksFunctions;
    import com.pulumi.databricks.inputs.GetNodeTypeArgs;
    import com.pulumi.databricks.inputs.GetSparkVersionArgs;
    import com.pulumi.databricks.Cluster;
    import com.pulumi.databricks.ClusterArgs;
    import com.pulumi.databricks.inputs.ClusterAutoscaleArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            final var smallest = DatabricksFunctions.getNodeType(GetNodeTypeArgs.builder()
                .localDisk(true)
                .build());
    
            final var latestLts = DatabricksFunctions.getSparkVersion(GetSparkVersionArgs.builder()
                .longTermSupport(true)
                .build());
    
            var sharedAutoscaling = new Cluster("sharedAutoscaling", ClusterArgs.builder()        
                .clusterName("Shared Autoscaling")
                .sparkVersion(latestLts.applyValue(getSparkVersionResult -> getSparkVersionResult.id()))
                .nodeTypeId(smallest.applyValue(getNodeTypeResult -> getNodeTypeResult.id()))
                .autoterminationMinutes(20)
                .autoscale(ClusterAutoscaleArgs.builder()
                    .minWorkers(1)
                    .maxWorkers(50)
                    .build())
                .build());
    
        }
    }
    
    resources:
      sharedAutoscaling:
        type: databricks:Cluster
        properties:
          clusterName: Shared Autoscaling
          sparkVersion: ${latestLts.id}
          nodeTypeId: ${smallest.id}
          autoterminationMinutes: 20
          autoscale:
            minWorkers: 1
            maxWorkers: 50
    variables:
      smallest:
        fn::invoke:
          Function: databricks:getNodeType
          Arguments:
            localDisk: true
      latestLts:
        fn::invoke:
          Function: databricks:getSparkVersion
          Arguments:
            longTermSupport: true
    

    Access Control

    • databricks.Group and databricks.User can control which groups or individual users can create clusters.
    • databricks.ClusterPolicy can control which kinds of clusters users can create.
    • Users who have access to a Cluster Policy but do not have the allow_cluster_create argument set can still create clusters, but only within the boundaries of the policy.
    • databricks.Permissions can control which groups or individual users can Manage, Restart or Attach to individual clusters (see the sketch after this list).
    • instance_profile_arn (AWS only) can control which data a given cluster can access through cloud-native controls.
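
    As an illustration (a minimal Python sketch, not part of the reference: the "data-scientists" group name is a placeholder and shared_autoscaling refers to the cluster created in the example above), cluster-level permissions could be granted like this:

    import pulumi_databricks as databricks

    # Hypothetical example: allow members of the "data-scientists" group to
    # restart the shared autoscaling cluster defined earlier.
    cluster_usage = databricks.Permissions("cluster-usage",
        cluster_id=shared_autoscaling.id,
        access_controls=[databricks.PermissionsAccessControlArgs(
            group_name="data-scientists",
            permission_level="CAN_RESTART",
        )])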

    The following resources are often used in the same context:

    • Dynamic Passthrough Clusters for a Group guide.
    • End to end workspace management guide.
    • databricks.getClusters data to retrieve a list of databricks.Cluster ids.
    • databricks.ClusterPolicy to create a databricks.Cluster policy, which limits the ability to create clusters based on a set of rules.
    • databricks.getCurrentUser data to retrieve information about the databricks.User or databricks_service_principal that is calling the Databricks REST API.
    • databricks.GlobalInitScript to manage global init scripts, which are run on all databricks.Cluster and databricks_job.
    • databricks.InstancePool to manage instance pools to reduce cluster start and auto-scaling times by maintaining a set of idle, ready-to-use instances.
    • databricks.InstanceProfile to manage AWS EC2 instance profiles with which users can launch databricks.Cluster and access data, like databricks_mount.
    • databricks.Job to manage Databricks Jobs to run non-interactive code in a databricks_cluster.
    • databricks.Library to install a library on databricks_cluster.
    • databricks.Mount to mount your cloud storage on dbfs:/mnt/name.
    • databricks.getNodeType data to get the smallest node type for databricks.Cluster that fits search criteria, like amount of RAM or number of cores.
    • databricks.Pipeline to deploy Delta Live Tables.
    • databricks.getSparkVersion data to get the Databricks Runtime (DBR) version that can be used for the spark_version parameter in databricks.Cluster and other resources.
    • databricks.getZones data to fetch all available AWS availability zones on your workspace on AWS.

    Create Cluster Resource

    Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.

    Constructor syntax

    new Cluster(name: string, args: ClusterArgs, opts?: CustomResourceOptions);
    @overload
    def Cluster(resource_name: str,
                args: ClusterArgs,
                opts: Optional[ResourceOptions] = None)
    
    @overload
    def Cluster(resource_name: str,
                opts: Optional[ResourceOptions] = None,
                spark_version: Optional[str] = None,
                gcp_attributes: Optional[ClusterGcpAttributesArgs] = None,
                workload_type: Optional[ClusterWorkloadTypeArgs] = None,
                enable_local_disk_encryption: Optional[bool] = None,
                azure_attributes: Optional[ClusterAzureAttributesArgs] = None,
                cluster_id: Optional[str] = None,
                cluster_log_conf: Optional[ClusterClusterLogConfArgs] = None,
                cluster_mount_infos: Optional[Sequence[ClusterClusterMountInfoArgs]] = None,
                cluster_name: Optional[str] = None,
                custom_tags: Optional[Mapping[str, Any]] = None,
                data_security_mode: Optional[str] = None,
                docker_image: Optional[ClusterDockerImageArgs] = None,
                driver_instance_pool_id: Optional[str] = None,
                driver_node_type_id: Optional[str] = None,
                enable_elastic_disk: Optional[bool] = None,
                aws_attributes: Optional[ClusterAwsAttributesArgs] = None,
                idempotency_token: Optional[str] = None,
                autotermination_minutes: Optional[int] = None,
                init_scripts: Optional[Sequence[ClusterInitScriptArgs]] = None,
                instance_pool_id: Optional[str] = None,
                is_pinned: Optional[bool] = None,
                libraries: Optional[Sequence[ClusterLibraryArgs]] = None,
                node_type_id: Optional[str] = None,
                num_workers: Optional[int] = None,
                policy_id: Optional[str] = None,
                runtime_engine: Optional[str] = None,
                single_user_name: Optional[str] = None,
                spark_conf: Optional[Mapping[str, Any]] = None,
                spark_env_vars: Optional[Mapping[str, Any]] = None,
                autoscale: Optional[ClusterAutoscaleArgs] = None,
                ssh_public_keys: Optional[Sequence[str]] = None,
                apply_policy_default_values: Optional[bool] = None)
    func NewCluster(ctx *Context, name string, args ClusterArgs, opts ...ResourceOption) (*Cluster, error)
    public Cluster(string name, ClusterArgs args, CustomResourceOptions? opts = null)
    public Cluster(String name, ClusterArgs args)
    public Cluster(String name, ClusterArgs args, CustomResourceOptions options)
    
    type: databricks:Cluster
    properties: # The arguments to resource properties.
    options: # Bag of options to control resource's behavior.
    
    

    Parameters

    TypeScript
    name string
    The unique name of the resource.
    args ClusterArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.

    Python
    resource_name str
    The unique name of the resource.
    args ClusterArgs
    The arguments to resource properties.
    opts ResourceOptions
    Bag of options to control resource's behavior.

    Go
    ctx Context
    Context object for the current deployment.
    name string
    The unique name of the resource.
    args ClusterArgs
    The arguments to resource properties.
    opts ResourceOption
    Bag of options to control resource's behavior.

    C#
    name string
    The unique name of the resource.
    args ClusterArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.

    Java
    name String
    The unique name of the resource.
    args ClusterArgs
    The arguments to resource properties.
    options CustomResourceOptions
    Bag of options to control resource's behavior.

    Example

    The following reference example uses placeholder values for all input properties.

    var clusterResource = new Databricks.Cluster("clusterResource", new()
    {
        SparkVersion = "string",
        GcpAttributes = new Databricks.Inputs.ClusterGcpAttributesArgs
        {
            Availability = "string",
            BootDiskSize = 0,
            GoogleServiceAccount = "string",
            LocalSsdCount = 0,
            UsePreemptibleExecutors = false,
            ZoneId = "string",
        },
        WorkloadType = new Databricks.Inputs.ClusterWorkloadTypeArgs
        {
            Clients = new Databricks.Inputs.ClusterWorkloadTypeClientsArgs
            {
                Jobs = false,
                Notebooks = false,
            },
        },
        EnableLocalDiskEncryption = false,
        AzureAttributes = new Databricks.Inputs.ClusterAzureAttributesArgs
        {
            Availability = "string",
            FirstOnDemand = 0,
            LogAnalyticsInfo = new Databricks.Inputs.ClusterAzureAttributesLogAnalyticsInfoArgs
            {
                LogAnalyticsPrimaryKey = "string",
                LogAnalyticsWorkspaceId = "string",
            },
            SpotBidMaxPrice = 0,
        },
        ClusterId = "string",
        ClusterLogConf = new Databricks.Inputs.ClusterClusterLogConfArgs
        {
            Dbfs = new Databricks.Inputs.ClusterClusterLogConfDbfsArgs
            {
                Destination = "string",
            },
            S3 = new Databricks.Inputs.ClusterClusterLogConfS3Args
            {
                Destination = "string",
                CannedAcl = "string",
                EnableEncryption = false,
                EncryptionType = "string",
                Endpoint = "string",
                KmsKey = "string",
                Region = "string",
            },
        },
        ClusterMountInfos = new[]
        {
            new Databricks.Inputs.ClusterClusterMountInfoArgs
            {
                LocalMountDirPath = "string",
                NetworkFilesystemInfo = new Databricks.Inputs.ClusterClusterMountInfoNetworkFilesystemInfoArgs
                {
                    ServerAddress = "string",
                    MountOptions = "string",
                },
                RemoteMountDirPath = "string",
            },
        },
        ClusterName = "string",
        CustomTags = 
        {
            { "string", "any" },
        },
        DataSecurityMode = "string",
        DockerImage = new Databricks.Inputs.ClusterDockerImageArgs
        {
            Url = "string",
            BasicAuth = new Databricks.Inputs.ClusterDockerImageBasicAuthArgs
            {
                Password = "string",
                Username = "string",
            },
        },
        DriverInstancePoolId = "string",
        DriverNodeTypeId = "string",
        EnableElasticDisk = false,
        AwsAttributes = new Databricks.Inputs.ClusterAwsAttributesArgs
        {
            Availability = "string",
            EbsVolumeCount = 0,
            EbsVolumeIops = 0,
            EbsVolumeSize = 0,
            EbsVolumeThroughput = 0,
            EbsVolumeType = "string",
            FirstOnDemand = 0,
            InstanceProfileArn = "string",
            SpotBidPricePercent = 0,
            ZoneId = "string",
        },
        IdempotencyToken = "string",
        AutoterminationMinutes = 0,
        InitScripts = new[]
        {
            new Databricks.Inputs.ClusterInitScriptArgs
            {
                Abfss = new Databricks.Inputs.ClusterInitScriptAbfssArgs
                {
                    Destination = "string",
                },
                File = new Databricks.Inputs.ClusterInitScriptFileArgs
                {
                    Destination = "string",
                },
                Gcs = new Databricks.Inputs.ClusterInitScriptGcsArgs
                {
                    Destination = "string",
                },
                S3 = new Databricks.Inputs.ClusterInitScriptS3Args
                {
                    Destination = "string",
                    CannedAcl = "string",
                    EnableEncryption = false,
                    EncryptionType = "string",
                    Endpoint = "string",
                    KmsKey = "string",
                    Region = "string",
                },
                Volumes = new Databricks.Inputs.ClusterInitScriptVolumesArgs
                {
                    Destination = "string",
                },
                Workspace = new Databricks.Inputs.ClusterInitScriptWorkspaceArgs
                {
                    Destination = "string",
                },
            },
        },
        InstancePoolId = "string",
        IsPinned = false,
        Libraries = new[]
        {
            new Databricks.Inputs.ClusterLibraryArgs
            {
                Cran = new Databricks.Inputs.ClusterLibraryCranArgs
                {
                    Package = "string",
                    Repo = "string",
                },
                Egg = "string",
                Jar = "string",
                Maven = new Databricks.Inputs.ClusterLibraryMavenArgs
                {
                    Coordinates = "string",
                    Exclusions = new[]
                    {
                        "string",
                    },
                    Repo = "string",
                },
                Pypi = new Databricks.Inputs.ClusterLibraryPypiArgs
                {
                    Package = "string",
                    Repo = "string",
                },
                Whl = "string",
            },
        },
        NodeTypeId = "string",
        NumWorkers = 0,
        PolicyId = "string",
        RuntimeEngine = "string",
        SingleUserName = "string",
        SparkConf = 
        {
            { "string", "any" },
        },
        SparkEnvVars = 
        {
            { "string", "any" },
        },
        Autoscale = new Databricks.Inputs.ClusterAutoscaleArgs
        {
            MaxWorkers = 0,
            MinWorkers = 0,
        },
        SshPublicKeys = new[]
        {
            "string",
        },
        ApplyPolicyDefaultValues = false,
    });
    
    example, err := databricks.NewCluster(ctx, "clusterResource", &databricks.ClusterArgs{
    	SparkVersion: pulumi.String("string"),
    	GcpAttributes: &databricks.ClusterGcpAttributesArgs{
    		Availability:            pulumi.String("string"),
    		BootDiskSize:            pulumi.Int(0),
    		GoogleServiceAccount:    pulumi.String("string"),
    		LocalSsdCount:           pulumi.Int(0),
    		UsePreemptibleExecutors: pulumi.Bool(false),
    		ZoneId:                  pulumi.String("string"),
    	},
    	WorkloadType: &databricks.ClusterWorkloadTypeArgs{
    		Clients: &databricks.ClusterWorkloadTypeClientsArgs{
    			Jobs:      pulumi.Bool(false),
    			Notebooks: pulumi.Bool(false),
    		},
    	},
    	EnableLocalDiskEncryption: pulumi.Bool(false),
    	AzureAttributes: &databricks.ClusterAzureAttributesArgs{
    		Availability:  pulumi.String("string"),
    		FirstOnDemand: pulumi.Int(0),
    		LogAnalyticsInfo: &databricks.ClusterAzureAttributesLogAnalyticsInfoArgs{
    			LogAnalyticsPrimaryKey:  pulumi.String("string"),
    			LogAnalyticsWorkspaceId: pulumi.String("string"),
    		},
    		SpotBidMaxPrice: pulumi.Float64(0),
    	},
    	ClusterId: pulumi.String("string"),
    	ClusterLogConf: &databricks.ClusterClusterLogConfArgs{
    		Dbfs: &databricks.ClusterClusterLogConfDbfsArgs{
    			Destination: pulumi.String("string"),
    		},
    		S3: &databricks.ClusterClusterLogConfS3Args{
    			Destination:      pulumi.String("string"),
    			CannedAcl:        pulumi.String("string"),
    			EnableEncryption: pulumi.Bool(false),
    			EncryptionType:   pulumi.String("string"),
    			Endpoint:         pulumi.String("string"),
    			KmsKey:           pulumi.String("string"),
    			Region:           pulumi.String("string"),
    		},
    	},
    	ClusterMountInfos: databricks.ClusterClusterMountInfoArray{
    		&databricks.ClusterClusterMountInfoArgs{
    			LocalMountDirPath: pulumi.String("string"),
    			NetworkFilesystemInfo: &databricks.ClusterClusterMountInfoNetworkFilesystemInfoArgs{
    				ServerAddress: pulumi.String("string"),
    				MountOptions:  pulumi.String("string"),
    			},
    			RemoteMountDirPath: pulumi.String("string"),
    		},
    	},
    	ClusterName: pulumi.String("string"),
    	CustomTags: pulumi.Map{
    		"string": pulumi.Any("any"),
    	},
    	DataSecurityMode: pulumi.String("string"),
    	DockerImage: &databricks.ClusterDockerImageArgs{
    		Url: pulumi.String("string"),
    		BasicAuth: &databricks.ClusterDockerImageBasicAuthArgs{
    			Password: pulumi.String("string"),
    			Username: pulumi.String("string"),
    		},
    	},
    	DriverInstancePoolId: pulumi.String("string"),
    	DriverNodeTypeId:     pulumi.String("string"),
    	EnableElasticDisk:    pulumi.Bool(false),
    	AwsAttributes: &databricks.ClusterAwsAttributesArgs{
    		Availability:        pulumi.String("string"),
    		EbsVolumeCount:      pulumi.Int(0),
    		EbsVolumeIops:       pulumi.Int(0),
    		EbsVolumeSize:       pulumi.Int(0),
    		EbsVolumeThroughput: pulumi.Int(0),
    		EbsVolumeType:       pulumi.String("string"),
    		FirstOnDemand:       pulumi.Int(0),
    		InstanceProfileArn:  pulumi.String("string"),
    		SpotBidPricePercent: pulumi.Int(0),
    		ZoneId:              pulumi.String("string"),
    	},
    	IdempotencyToken:       pulumi.String("string"),
    	AutoterminationMinutes: pulumi.Int(0),
    	InitScripts: databricks.ClusterInitScriptArray{
    		&databricks.ClusterInitScriptArgs{
    			Abfss: &databricks.ClusterInitScriptAbfssArgs{
    				Destination: pulumi.String("string"),
    			},
    			File: &databricks.ClusterInitScriptFileArgs{
    				Destination: pulumi.String("string"),
    			},
    			Gcs: &databricks.ClusterInitScriptGcsArgs{
    				Destination: pulumi.String("string"),
    			},
    			S3: &databricks.ClusterInitScriptS3Args{
    				Destination:      pulumi.String("string"),
    				CannedAcl:        pulumi.String("string"),
    				EnableEncryption: pulumi.Bool(false),
    				EncryptionType:   pulumi.String("string"),
    				Endpoint:         pulumi.String("string"),
    				KmsKey:           pulumi.String("string"),
    				Region:           pulumi.String("string"),
    			},
    			Volumes: &databricks.ClusterInitScriptVolumesArgs{
    				Destination: pulumi.String("string"),
    			},
    			Workspace: &databricks.ClusterInitScriptWorkspaceArgs{
    				Destination: pulumi.String("string"),
    			},
    		},
    	},
    	InstancePoolId: pulumi.String("string"),
    	IsPinned:       pulumi.Bool(false),
    	Libraries: databricks.ClusterLibraryArray{
    		&databricks.ClusterLibraryArgs{
    			Cran: &databricks.ClusterLibraryCranArgs{
    				Package: pulumi.String("string"),
    				Repo:    pulumi.String("string"),
    			},
    			Egg: pulumi.String("string"),
    			Jar: pulumi.String("string"),
    			Maven: &databricks.ClusterLibraryMavenArgs{
    				Coordinates: pulumi.String("string"),
    				Exclusions: pulumi.StringArray{
    					pulumi.String("string"),
    				},
    				Repo: pulumi.String("string"),
    			},
    			Pypi: &databricks.ClusterLibraryPypiArgs{
    				Package: pulumi.String("string"),
    				Repo:    pulumi.String("string"),
    			},
    			Whl: pulumi.String("string"),
    		},
    	},
    	NodeTypeId:     pulumi.String("string"),
    	NumWorkers:     pulumi.Int(0),
    	PolicyId:       pulumi.String("string"),
    	RuntimeEngine:  pulumi.String("string"),
    	SingleUserName: pulumi.String("string"),
    	SparkConf: pulumi.Map{
    		"string": pulumi.Any("any"),
    	},
    	SparkEnvVars: pulumi.Map{
    		"string": pulumi.Any("any"),
    	},
    	Autoscale: &databricks.ClusterAutoscaleArgs{
    		MaxWorkers: pulumi.Int(0),
    		MinWorkers: pulumi.Int(0),
    	},
    	SshPublicKeys: pulumi.StringArray{
    		pulumi.String("string"),
    	},
    	ApplyPolicyDefaultValues: pulumi.Bool(false),
    })
    
    var clusterResource = new Cluster("clusterResource", ClusterArgs.builder()        
        .sparkVersion("string")
        .gcpAttributes(ClusterGcpAttributesArgs.builder()
            .availability("string")
            .bootDiskSize(0)
            .googleServiceAccount("string")
            .localSsdCount(0)
            .usePreemptibleExecutors(false)
            .zoneId("string")
            .build())
        .workloadType(ClusterWorkloadTypeArgs.builder()
            .clients(ClusterWorkloadTypeClientsArgs.builder()
                .jobs(false)
                .notebooks(false)
                .build())
            .build())
        .enableLocalDiskEncryption(false)
        .azureAttributes(ClusterAzureAttributesArgs.builder()
            .availability("string")
            .firstOnDemand(0)
            .logAnalyticsInfo(ClusterAzureAttributesLogAnalyticsInfoArgs.builder()
                .logAnalyticsPrimaryKey("string")
                .logAnalyticsWorkspaceId("string")
                .build())
            .spotBidMaxPrice(0)
            .build())
        .clusterId("string")
        .clusterLogConf(ClusterClusterLogConfArgs.builder()
            .dbfs(ClusterClusterLogConfDbfsArgs.builder()
                .destination("string")
                .build())
            .s3(ClusterClusterLogConfS3Args.builder()
                .destination("string")
                .cannedAcl("string")
                .enableEncryption(false)
                .encryptionType("string")
                .endpoint("string")
                .kmsKey("string")
                .region("string")
                .build())
            .build())
        .clusterMountInfos(ClusterClusterMountInfoArgs.builder()
            .localMountDirPath("string")
            .networkFilesystemInfo(ClusterClusterMountInfoNetworkFilesystemInfoArgs.builder()
                .serverAddress("string")
                .mountOptions("string")
                .build())
            .remoteMountDirPath("string")
            .build())
        .clusterName("string")
        .customTags(Map.of("string", "any"))
        .dataSecurityMode("string")
        .dockerImage(ClusterDockerImageArgs.builder()
            .url("string")
            .basicAuth(ClusterDockerImageBasicAuthArgs.builder()
                .password("string")
                .username("string")
                .build())
            .build())
        .driverInstancePoolId("string")
        .driverNodeTypeId("string")
        .enableElasticDisk(false)
        .awsAttributes(ClusterAwsAttributesArgs.builder()
            .availability("string")
            .ebsVolumeCount(0)
            .ebsVolumeIops(0)
            .ebsVolumeSize(0)
            .ebsVolumeThroughput(0)
            .ebsVolumeType("string")
            .firstOnDemand(0)
            .instanceProfileArn("string")
            .spotBidPricePercent(0)
            .zoneId("string")
            .build())
        .idempotencyToken("string")
        .autoterminationMinutes(0)
        .initScripts(ClusterInitScriptArgs.builder()
            .abfss(ClusterInitScriptAbfssArgs.builder()
                .destination("string")
                .build())
            .file(ClusterInitScriptFileArgs.builder()
                .destination("string")
                .build())
            .gcs(ClusterInitScriptGcsArgs.builder()
                .destination("string")
                .build())
            .s3(ClusterInitScriptS3Args.builder()
                .destination("string")
                .cannedAcl("string")
                .enableEncryption(false)
                .encryptionType("string")
                .endpoint("string")
                .kmsKey("string")
                .region("string")
                .build())
            .volumes(ClusterInitScriptVolumesArgs.builder()
                .destination("string")
                .build())
            .workspace(ClusterInitScriptWorkspaceArgs.builder()
                .destination("string")
                .build())
            .build())
        .instancePoolId("string")
        .isPinned(false)
        .libraries(ClusterLibraryArgs.builder()
            .cran(ClusterLibraryCranArgs.builder()
                .package_("string")
                .repo("string")
                .build())
            .egg("string")
            .jar("string")
            .maven(ClusterLibraryMavenArgs.builder()
                .coordinates("string")
                .exclusions("string")
                .repo("string")
                .build())
            .pypi(ClusterLibraryPypiArgs.builder()
                .package_("string")
                .repo("string")
                .build())
            .whl("string")
            .build())
        .nodeTypeId("string")
        .numWorkers(0)
        .policyId("string")
        .runtimeEngine("string")
        .singleUserName("string")
        .sparkConf(Map.of("string", "any"))
        .sparkEnvVars(Map.of("string", "any"))
        .autoscale(ClusterAutoscaleArgs.builder()
            .maxWorkers(0)
            .minWorkers(0)
            .build())
        .sshPublicKeys("string")
        .applyPolicyDefaultValues(false)
        .build());
    
    cluster_resource = databricks.Cluster("clusterResource",
        spark_version="string",
        gcp_attributes=databricks.ClusterGcpAttributesArgs(
            availability="string",
            boot_disk_size=0,
            google_service_account="string",
            local_ssd_count=0,
            use_preemptible_executors=False,
            zone_id="string",
        ),
        workload_type=databricks.ClusterWorkloadTypeArgs(
            clients=databricks.ClusterWorkloadTypeClientsArgs(
                jobs=False,
                notebooks=False,
            ),
        ),
        enable_local_disk_encryption=False,
        azure_attributes=databricks.ClusterAzureAttributesArgs(
            availability="string",
            first_on_demand=0,
            log_analytics_info=databricks.ClusterAzureAttributesLogAnalyticsInfoArgs(
                log_analytics_primary_key="string",
                log_analytics_workspace_id="string",
            ),
            spot_bid_max_price=0,
        ),
        cluster_id="string",
        cluster_log_conf=databricks.ClusterClusterLogConfArgs(
            dbfs=databricks.ClusterClusterLogConfDbfsArgs(
                destination="string",
            ),
            s3=databricks.ClusterClusterLogConfS3Args(
                destination="string",
                canned_acl="string",
                enable_encryption=False,
                encryption_type="string",
                endpoint="string",
                kms_key="string",
                region="string",
            ),
        ),
        cluster_mount_infos=[databricks.ClusterClusterMountInfoArgs(
            local_mount_dir_path="string",
            network_filesystem_info=databricks.ClusterClusterMountInfoNetworkFilesystemInfoArgs(
                server_address="string",
                mount_options="string",
            ),
            remote_mount_dir_path="string",
        )],
        cluster_name="string",
        custom_tags={
            "string": "any",
        },
        data_security_mode="string",
        docker_image=databricks.ClusterDockerImageArgs(
            url="string",
            basic_auth=databricks.ClusterDockerImageBasicAuthArgs(
                password="string",
                username="string",
            ),
        ),
        driver_instance_pool_id="string",
        driver_node_type_id="string",
        enable_elastic_disk=False,
        aws_attributes=databricks.ClusterAwsAttributesArgs(
            availability="string",
            ebs_volume_count=0,
            ebs_volume_iops=0,
            ebs_volume_size=0,
            ebs_volume_throughput=0,
            ebs_volume_type="string",
            first_on_demand=0,
            instance_profile_arn="string",
            spot_bid_price_percent=0,
            zone_id="string",
        ),
        idempotency_token="string",
        autotermination_minutes=0,
        init_scripts=[databricks.ClusterInitScriptArgs(
            abfss=databricks.ClusterInitScriptAbfssArgs(
                destination="string",
            ),
            file=databricks.ClusterInitScriptFileArgs(
                destination="string",
            ),
            gcs=databricks.ClusterInitScriptGcsArgs(
                destination="string",
            ),
            s3=databricks.ClusterInitScriptS3Args(
                destination="string",
                canned_acl="string",
                enable_encryption=False,
                encryption_type="string",
                endpoint="string",
                kms_key="string",
                region="string",
            ),
            volumes=databricks.ClusterInitScriptVolumesArgs(
                destination="string",
            ),
            workspace=databricks.ClusterInitScriptWorkspaceArgs(
                destination="string",
            ),
        )],
        instance_pool_id="string",
        is_pinned=False,
        libraries=[databricks.ClusterLibraryArgs(
            cran=databricks.ClusterLibraryCranArgs(
                package="string",
                repo="string",
            ),
            egg="string",
            jar="string",
            maven=databricks.ClusterLibraryMavenArgs(
                coordinates="string",
                exclusions=["string"],
                repo="string",
            ),
            pypi=databricks.ClusterLibraryPypiArgs(
                package="string",
                repo="string",
            ),
            whl="string",
        )],
        node_type_id="string",
        num_workers=0,
        policy_id="string",
        runtime_engine="string",
        single_user_name="string",
        spark_conf={
            "string": "any",
        },
        spark_env_vars={
            "string": "any",
        },
        autoscale=databricks.ClusterAutoscaleArgs(
            max_workers=0,
            min_workers=0,
        ),
        ssh_public_keys=["string"],
        apply_policy_default_values=False)
    
    const clusterResource = new databricks.Cluster("clusterResource", {
        sparkVersion: "string",
        gcpAttributes: {
            availability: "string",
            bootDiskSize: 0,
            googleServiceAccount: "string",
            localSsdCount: 0,
            usePreemptibleExecutors: false,
            zoneId: "string",
        },
        workloadType: {
            clients: {
                jobs: false,
                notebooks: false,
            },
        },
        enableLocalDiskEncryption: false,
        azureAttributes: {
            availability: "string",
            firstOnDemand: 0,
            logAnalyticsInfo: {
                logAnalyticsPrimaryKey: "string",
                logAnalyticsWorkspaceId: "string",
            },
            spotBidMaxPrice: 0,
        },
        clusterId: "string",
        clusterLogConf: {
            dbfs: {
                destination: "string",
            },
            s3: {
                destination: "string",
                cannedAcl: "string",
                enableEncryption: false,
                encryptionType: "string",
                endpoint: "string",
                kmsKey: "string",
                region: "string",
            },
        },
        clusterMountInfos: [{
            localMountDirPath: "string",
            networkFilesystemInfo: {
                serverAddress: "string",
                mountOptions: "string",
            },
            remoteMountDirPath: "string",
        }],
        clusterName: "string",
        customTags: {
            string: "any",
        },
        dataSecurityMode: "string",
        dockerImage: {
            url: "string",
            basicAuth: {
                password: "string",
                username: "string",
            },
        },
        driverInstancePoolId: "string",
        driverNodeTypeId: "string",
        enableElasticDisk: false,
        awsAttributes: {
            availability: "string",
            ebsVolumeCount: 0,
            ebsVolumeIops: 0,
            ebsVolumeSize: 0,
            ebsVolumeThroughput: 0,
            ebsVolumeType: "string",
            firstOnDemand: 0,
            instanceProfileArn: "string",
            spotBidPricePercent: 0,
            zoneId: "string",
        },
        idempotencyToken: "string",
        autoterminationMinutes: 0,
        initScripts: [{
            abfss: {
                destination: "string",
            },
            file: {
                destination: "string",
            },
            gcs: {
                destination: "string",
            },
            s3: {
                destination: "string",
                cannedAcl: "string",
                enableEncryption: false,
                encryptionType: "string",
                endpoint: "string",
                kmsKey: "string",
                region: "string",
            },
            volumes: {
                destination: "string",
            },
            workspace: {
                destination: "string",
            },
        }],
        instancePoolId: "string",
        isPinned: false,
        libraries: [{
            cran: {
                "package": "string",
                repo: "string",
            },
            egg: "string",
            jar: "string",
            maven: {
                coordinates: "string",
                exclusions: ["string"],
                repo: "string",
            },
            pypi: {
                "package": "string",
                repo: "string",
            },
            whl: "string",
        }],
        nodeTypeId: "string",
        numWorkers: 0,
        policyId: "string",
        runtimeEngine: "string",
        singleUserName: "string",
        sparkConf: {
            string: "any",
        },
        sparkEnvVars: {
            string: "any",
        },
        autoscale: {
            maxWorkers: 0,
            minWorkers: 0,
        },
        sshPublicKeys: ["string"],
        applyPolicyDefaultValues: false,
    });
    
    type: databricks:Cluster
    properties:
        applyPolicyDefaultValues: false
        autoscale:
            maxWorkers: 0
            minWorkers: 0
        autoterminationMinutes: 0
        awsAttributes:
            availability: string
            ebsVolumeCount: 0
            ebsVolumeIops: 0
            ebsVolumeSize: 0
            ebsVolumeThroughput: 0
            ebsVolumeType: string
            firstOnDemand: 0
            instanceProfileArn: string
            spotBidPricePercent: 0
            zoneId: string
        azureAttributes:
            availability: string
            firstOnDemand: 0
            logAnalyticsInfo:
                logAnalyticsPrimaryKey: string
                logAnalyticsWorkspaceId: string
            spotBidMaxPrice: 0
        clusterId: string
        clusterLogConf:
            dbfs:
                destination: string
            s3:
                cannedAcl: string
                destination: string
                enableEncryption: false
                encryptionType: string
                endpoint: string
                kmsKey: string
                region: string
        clusterMountInfos:
            - localMountDirPath: string
              networkFilesystemInfo:
                mountOptions: string
                serverAddress: string
              remoteMountDirPath: string
        clusterName: string
        customTags:
            string: any
        dataSecurityMode: string
        dockerImage:
            basicAuth:
                password: string
                username: string
            url: string
        driverInstancePoolId: string
        driverNodeTypeId: string
        enableElasticDisk: false
        enableLocalDiskEncryption: false
        gcpAttributes:
            availability: string
            bootDiskSize: 0
            googleServiceAccount: string
            localSsdCount: 0
            usePreemptibleExecutors: false
            zoneId: string
        idempotencyToken: string
        initScripts:
            - abfss:
                destination: string
              file:
                destination: string
              gcs:
                destination: string
              s3:
                cannedAcl: string
                destination: string
                enableEncryption: false
                encryptionType: string
                endpoint: string
                kmsKey: string
                region: string
              volumes:
                destination: string
              workspace:
                destination: string
        instancePoolId: string
        isPinned: false
        libraries:
            - cran:
                package: string
                repo: string
              egg: string
              jar: string
              maven:
                coordinates: string
                exclusions:
                    - string
                repo: string
              pypi:
                package: string
                repo: string
              whl: string
        nodeTypeId: string
        numWorkers: 0
        policyId: string
        runtimeEngine: string
        singleUserName: string
        sparkConf:
            string: any
        sparkEnvVars:
            string: any
        sparkVersion: string
        sshPublicKeys:
            - string
        workloadType:
            clients:
                jobs: false
                notebooks: false
    

    Cluster Resource Properties

    To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

    Inputs

    The Cluster resource accepts the following input properties:

    SparkVersion string
    Runtime version of the cluster. Any supported databricks.getSparkVersion id. We advise using Cluster Policies to restrict the list of versions for simplicity while maintaining enough control.
    ApplyPolicyDefaultValues bool
    Whether to use policy default values for missing cluster attributes.
    Autoscale ClusterAutoscale
    AutoterminationMinutes int
    Automatically terminate the cluster after being inactive for this time in minutes. If specified, the threshold must be between 10 and 10000 minutes. You can also set this value to 0 to explicitly disable automatic termination. Defaults to 60. We highly recommend having this setting present for Interactive/BI clusters.
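    For instance, a minimal Python sketch (resource and cluster names are arbitrary; latest_lts and smallest reuse the data sources from the examples above) of a cluster with automatic termination disabled:

    import pulumi_databricks as databricks

    # autotermination_minutes=0 disables automatic termination; the cluster
    # keeps running until it is terminated explicitly.
    always_on = databricks.Cluster("always-on",
        cluster_name="Always On",
        spark_version=latest_lts.id,
        node_type_id=smallest.id,
        num_workers=1,
        autotermination_minutes=0)
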
    AwsAttributes ClusterAwsAttributes
    AzureAttributes ClusterAzureAttributes
    ClusterId string
    ClusterLogConf ClusterClusterLogConf
    ClusterMountInfos List<ClusterClusterMountInfo>
    ClusterName string
    Cluster name, which doesn’t have to be unique. If not specified at creation, the cluster name will be an empty string.
    CustomTags Dictionary<string, object>

    To create a cluster with Table Access Control (a High-Concurrency cluster, as in the example below), custom_tags should have the tag ResourceClass set to the value Serverless. For example:

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";
    

    const clusterWithTableAccessControl = new databricks.Cluster("clusterWithTableAccessControl", {
        clusterName: "Shared High-Concurrency",
        sparkVersion: data.databricks_spark_version.latest_lts.id,
        nodeTypeId: data.databricks_node_type.smallest.id,
        autoterminationMinutes: 20,
        sparkConf: {
            "spark.databricks.repl.allowedLanguages": "python,sql",
            "spark.databricks.cluster.profile": "serverless",
        },
        customTags: {
            ResourceClass: "Serverless",
        },
    });

    import pulumi
    import pulumi_databricks as databricks
    
    cluster_with_table_access_control = databricks.Cluster("clusterWithTableAccessControl",
        cluster_name="Shared High-Concurrency",
        spark_version=data["databricks_spark_version"]["latest_lts"]["id"],
        node_type_id=data["databricks_node_type"]["smallest"]["id"],
        autotermination_minutes=20,
        spark_conf={
            "spark.databricks.repl.allowedLanguages": "python,sql",
            "spark.databricks.cluster.profile": "serverless",
        },
        custom_tags={
            "ResourceClass": "Serverless",
        })
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Databricks = Pulumi.Databricks;
    
    return await Deployment.RunAsync(() => 
    {
        var clusterWithTableAccessControl = new Databricks.Cluster("clusterWithTableAccessControl", new()
        {
            ClusterName = "Shared High-Concurrency",
            SparkVersion = data.Databricks_spark_version.Latest_lts.Id,
            NodeTypeId = data.Databricks_node_type.Smallest.Id,
            AutoterminationMinutes = 20,
            SparkConf = 
            {
                { "spark.databricks.repl.allowedLanguages", "python,sql" },
                { "spark.databricks.cluster.profile", "serverless" },
            },
            CustomTags = 
            {
                { "ResourceClass", "Serverless" },
            },
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := databricks.NewCluster(ctx, "clusterWithTableAccessControl", &databricks.ClusterArgs{
    			ClusterName:            pulumi.String("Shared High-Concurrency"),
    			SparkVersion:           pulumi.Any(data.Databricks_spark_version.Latest_lts.Id),
    			NodeTypeId:             pulumi.Any(data.Databricks_node_type.Smallest.Id),
    			AutoterminationMinutes: pulumi.Int(20),
    			SparkConf: pulumi.Map{
    				"spark.databricks.repl.allowedLanguages": pulumi.Any("python,sql"),
    				"spark.databricks.cluster.profile":       pulumi.Any("serverless"),
    			},
    			CustomTags: pulumi.Map{
    				"ResourceClass": pulumi.Any("Serverless"),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.databricks.Cluster;
    import com.pulumi.databricks.ClusterArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var clusterWithTableAccessControl = new Cluster("clusterWithTableAccessControl", ClusterArgs.builder()        
                .clusterName("Shared High-Concurrency")
                .sparkVersion(data.databricks_spark_version().latest_lts().id())
                .nodeTypeId(data.databricks_node_type().smallest().id())
                .autoterminationMinutes(20)
                .sparkConf(Map.ofEntries(
                    Map.entry("spark.databricks.repl.allowedLanguages", "python,sql"),
                    Map.entry("spark.databricks.cluster.profile", "serverless")
                ))
                .customTags(Map.of("ResourceClass", "Serverless"))
                .build());
    
        }
    }
    
    resources:
      clusterWithTableAccessControl:
        type: databricks:Cluster
        properties:
          clusterName: Shared High-Concurrency
          sparkVersion: ${data.databricks_spark_version.latest_lts.id}
          nodeTypeId: ${data.databricks_node_type.smallest.id}
          autoterminationMinutes: 20
          sparkConf:
            spark.databricks.repl.allowedLanguages: python,sql
            spark.databricks.cluster.profile: serverless
          customTags:
            ResourceClass: Serverless
    
    DataSecurityMode string
    Select the security features of the cluster. Unity Catalog requires SINGLE_USER or USER_ISOLATION mode. LEGACY_PASSTHROUGH for passthrough cluster and LEGACY_TABLE_ACL for Table ACL cluster. If omitted, no security features are enabled. In the Databricks UI, this setting has recently been renamed Access Mode and USER_ISOLATION has been renamed Shared, but continue to use these values here.
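    As a minimal Python sketch (the user name is a placeholder; latest_lts and smallest reuse the data sources from the examples above), a single-user cluster could look like:

    import pulumi_databricks as databricks

    # data_security_mode SINGLE_USER requires single_user_name to be set to
    # the user the cluster is assigned to (see single_user_name below).
    single_user = databricks.Cluster("single-user",
        cluster_name="Single User",
        spark_version=latest_lts.id,
        node_type_id=smallest.id,
        autotermination_minutes=20,
        num_workers=1,
        data_security_mode="SINGLE_USER",
        single_user_name="someone@example.com")
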
    DockerImage ClusterDockerImage
    DriverInstancePoolId string
    Similar to instance_pool_id, but for the driver node. If omitted and instance_pool_id is specified, then the driver will be allocated from that pool.
    DriverNodeTypeId string
    The node type of the Spark driver. This field is optional; if unset, the API will set the driver node type to the same value as node_type_id defined above.
    EnableElasticDisk bool
    If you don’t want to allocate a fixed number of EBS volumes at cluster creation time, use autoscaling local storage. With autoscaling local storage, Databricks monitors the amount of free disk space available on your cluster’s Spark workers. If a worker begins to run too low on disk, Databricks automatically attaches a new EBS volume to the worker before it runs out of disk space. EBS volumes are attached up to a limit of 5 TB of total disk space per instance (including the instance’s local storage). To scale down EBS usage, make sure you have autotermination_minutes and autoscale attributes set. More documentation available at cluster configuration page.
    EnableLocalDiskEncryption bool
    Some instance types you use to run clusters may have locally attached disks. Databricks may store shuffle data or temporary data on these locally attached disks. To ensure that all data at rest is encrypted for all storage types, including shuffle data stored temporarily on your cluster’s local disks, you can enable local disk encryption. When local disk encryption is enabled, Databricks generates an encryption key locally unique to each cluster node and uses it to encrypt all data stored on local disks. The scope of the key is local to each cluster node and is destroyed along with the cluster node itself. During its lifetime, the key resides in memory for encryption and decryption and is stored encrypted on the disk. Your workloads may run more slowly because of the performance impact of reading and writing encrypted data to and from local volumes. This feature is not available for all Azure Databricks subscriptions. Contact your Microsoft or Databricks account representative to request access.
    GcpAttributes ClusterGcpAttributes
    IdempotencyToken string
    An optional token to guarantee the idempotency of cluster creation requests. If an active cluster with the provided token already exists, the request will not create a new cluster but will return the existing running cluster's ID instead. If you specify the idempotency token, you can retry upon failure until the request succeeds; the Databricks platform guarantees that exactly one cluster will be launched with that idempotency token. This token should have at most 64 characters.
    InitScripts List<ClusterInitScript>
    InstancePoolId string
    To reduce cluster start time, you can attach a cluster to a predefined pool of idle instances. When attached to a pool, a cluster allocates its driver and worker nodes from the pool. If the pool does not have sufficient idle resources to accommodate the cluster’s request, it expands by allocating new instances from the instance provider. When an attached cluster changes its state to TERMINATED, the instances it used are returned to the pool and reused by a different cluster.
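    For example, a minimal Python sketch (pool and cluster names and sizes are placeholders; latest_lts and smallest reuse the data sources from the examples above) of a cluster attached to an instance pool:

    import pulumi_databricks as databricks

    # A small pool of idle instances for clusters to draw from.
    pool = databricks.InstancePool("pool",
        instance_pool_name="Smallest Nodes",
        node_type_id=smallest.id,
        min_idle_instances=0,
        max_capacity=10,
        idle_instance_autotermination_minutes=10)

    # The cluster takes its nodes from the pool, so node_type_id is not needed.
    pooled = databricks.Cluster("pooled",
        cluster_name="Pooled",
        spark_version=latest_lts.id,
        instance_pool_id=pool.id,
        autotermination_minutes=20,
        num_workers=1)
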
    IsPinned bool

    Boolean value specifying if the cluster is pinned (not pinned by default). You must be a Databricks administrator to use this. The maximum number of pinned clusters is limited to 100, so apply may fail if you have more than that (this number may change over time, so check the Databricks documentation for the actual limit).

    The following example demonstrates how to create an autoscaling cluster with Delta Cache enabled:

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";
    

    const smallest = databricks.getNodeType({
        localDisk: true,
    });
    const latestLts = databricks.getSparkVersion({
        longTermSupport: true,
    });
    const sharedAutoscaling = new databricks.Cluster("sharedAutoscaling", {
        clusterName: "Shared Autoscaling",
        sparkVersion: latestLts.then(latestLts => latestLts.id),
        nodeTypeId: smallest.then(smallest => smallest.id),
        autoterminationMinutes: 20,
        autoscale: {
            minWorkers: 1,
            maxWorkers: 50,
        },
        sparkConf: {
            "spark.databricks.io.cache.enabled": true,
            "spark.databricks.io.cache.maxDiskUsage": "50g",
            "spark.databricks.io.cache.maxMetaDataCache": "1g",
        },
    });

    import pulumi
    import pulumi_databricks as databricks
    
    smallest = databricks.get_node_type(local_disk=True)
    latest_lts = databricks.get_spark_version(long_term_support=True)
    shared_autoscaling = databricks.Cluster("sharedAutoscaling",
        cluster_name="Shared Autoscaling",
        spark_version=latest_lts.id,
        node_type_id=smallest.id,
        autotermination_minutes=20,
        autoscale=databricks.ClusterAutoscaleArgs(
            min_workers=1,
            max_workers=50,
        ),
        spark_conf={
            "spark.databricks.io.cache.enabled": True,
            "spark.databricks.io.cache.maxDiskUsage": "50g",
            "spark.databricks.io.cache.maxMetaDataCache": "1g",
        })
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Databricks = Pulumi.Databricks;
    
    return await Deployment.RunAsync(() => 
    {
        var smallest = Databricks.GetNodeType.Invoke(new()
        {
            LocalDisk = true,
        });
    
        var latestLts = Databricks.GetSparkVersion.Invoke(new()
        {
            LongTermSupport = true,
        });
    
        var sharedAutoscaling = new Databricks.Cluster("sharedAutoscaling", new()
        {
            ClusterName = "Shared Autoscaling",
            SparkVersion = latestLts.Apply(getSparkVersionResult => getSparkVersionResult.Id),
            NodeTypeId = smallest.Apply(getNodeTypeResult => getNodeTypeResult.Id),
            AutoterminationMinutes = 20,
            Autoscale = new Databricks.Inputs.ClusterAutoscaleArgs
            {
                MinWorkers = 1,
                MaxWorkers = 50,
            },
            SparkConf = 
            {
                { "spark.databricks.io.cache.enabled", true },
                { "spark.databricks.io.cache.maxDiskUsage", "50g" },
                { "spark.databricks.io.cache.maxMetaDataCache", "1g" },
            },
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		smallest, err := databricks.GetNodeType(ctx, &databricks.GetNodeTypeArgs{
    			LocalDisk: pulumi.BoolRef(true),
    		}, nil)
    		if err != nil {
    			return err
    		}
    		latestLts, err := databricks.GetSparkVersion(ctx, &databricks.GetSparkVersionArgs{
    			LongTermSupport: pulumi.BoolRef(true),
    		}, nil)
    		if err != nil {
    			return err
    		}
    		_, err = databricks.NewCluster(ctx, "sharedAutoscaling", &databricks.ClusterArgs{
    			ClusterName:            pulumi.String("Shared Autoscaling"),
    			SparkVersion:           pulumi.String(latestLts.Id),
    			NodeTypeId:             pulumi.String(smallest.Id),
    			AutoterminationMinutes: pulumi.Int(20),
    			Autoscale: &databricks.ClusterAutoscaleArgs{
    				MinWorkers: pulumi.Int(1),
    				MaxWorkers: pulumi.Int(50),
    			},
    			SparkConf: pulumi.Map{
    				"spark.databricks.io.cache.enabled":          pulumi.Any(true),
    				"spark.databricks.io.cache.maxDiskUsage":     pulumi.Any("50g"),
    				"spark.databricks.io.cache.maxMetaDataCache": pulumi.Any("1g"),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.databricks.DatabricksFunctions;
    import com.pulumi.databricks.inputs.GetNodeTypeArgs;
    import com.pulumi.databricks.inputs.GetSparkVersionArgs;
    import com.pulumi.databricks.Cluster;
    import com.pulumi.databricks.ClusterArgs;
    import com.pulumi.databricks.inputs.ClusterAutoscaleArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            final var smallest = DatabricksFunctions.getNodeType(GetNodeTypeArgs.builder()
                .localDisk(true)
                .build());
    
            final var latestLts = DatabricksFunctions.getSparkVersion(GetSparkVersionArgs.builder()
                .longTermSupport(true)
                .build());
    
            var sharedAutoscaling = new Cluster("sharedAutoscaling", ClusterArgs.builder()        
                .clusterName("Shared Autoscaling")
                .sparkVersion(latestLts.applyValue(getSparkVersionResult -> getSparkVersionResult.id()))
                .nodeTypeId(smallest.applyValue(getNodeTypeResult -> getNodeTypeResult.id()))
                .autoterminationMinutes(20)
                .autoscale(ClusterAutoscaleArgs.builder()
                    .minWorkers(1)
                    .maxWorkers(50)
                    .build())
                .sparkConf(Map.ofEntries(
                    Map.entry("spark.databricks.io.cache.enabled", true),
                    Map.entry("spark.databricks.io.cache.maxDiskUsage", "50g"),
                    Map.entry("spark.databricks.io.cache.maxMetaDataCache", "1g")
                ))
                .build());
    
        }
    }
    
    resources:
      sharedAutoscaling:
        type: databricks:Cluster
        properties:
          clusterName: Shared Autoscaling
          sparkVersion: ${latestLts.id}
          nodeTypeId: ${smallest.id}
          autoterminationMinutes: 20
          autoscale:
            minWorkers: 1
            maxWorkers: 50
          sparkConf:
            spark.databricks.io.cache.enabled: true
            spark.databricks.io.cache.maxDiskUsage: 50g
            spark.databricks.io.cache.maxMetaDataCache: 1g
    variables:
      smallest:
        fn::invoke:
          Function: databricks:getNodeType
          Arguments:
            localDisk: true
      latestLts:
        fn::invoke:
          Function: databricks:getSparkVersion
          Arguments:
            longTermSupport: true
    
    Libraries List<ClusterLibrary>
    NodeTypeId string
    Any supported databricks.getNodeType id. If instance_pool_id is specified, this field is not needed.
    NumWorkers int
    Number of worker nodes that this cluster should have. A cluster has one Spark driver and num_workers executors for a total of num_workers + 1 Spark nodes.
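    For illustration, a minimal sketch of a fixed-size (non-autoscaling) cluster; the resource and cluster names are placeholders, and the node type and Spark version lookups mirror the examples above:

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";

    const smallest = databricks.getNodeType({ localDisk: true });
    const latestLts = databricks.getSparkVersion({ longTermSupport: true });
    // One driver plus two workers, i.e. num_workers + 1 Spark nodes in total.
    const fixedSize = new databricks.Cluster("fixedSizeCluster", {
        clusterName: "Fixed Size",
        sparkVersion: latestLts.then(v => v.id),
        nodeTypeId: smallest.then(n => n.id),
        autoterminationMinutes: 20,
        numWorkers: 2,
    });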
    PolicyId string
    RuntimeEngine string
    The type of runtime engine to use. If not specified, the runtime engine type is inferred based on the spark_version value. Allowed values include: PHOTON, STANDARD.
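    As a brief, illustrative sketch (names are placeholders), requesting the Photon engine explicitly looks like this:

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";

    const smallest = databricks.getNodeType({ localDisk: true });
    const latestLts = databricks.getSparkVersion({ longTermSupport: true });
    // Explicitly request the Photon runtime instead of letting it be inferred from spark_version.
    const photonCluster = new databricks.Cluster("photonCluster", {
        clusterName: "Photon",
        sparkVersion: latestLts.then(v => v.id),
        nodeTypeId: smallest.then(n => n.id),
        autoterminationMinutes: 20,
        numWorkers: 1,
        runtimeEngine: "PHOTON",
    });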
    SingleUserName string
    The optional user name of the user to assign to an interactive cluster. This field is required when using data_security_mode set to SINGLE_USER or AAD Passthrough for Azure Data Lake Storage (ADLS) with a single-user cluster (i.e., not high-concurrency clusters).
    SparkConf Dictionary<string, object>
    should have the following items:

    • spark.databricks.repl.allowedLanguages set to a list of supported languages, for example: python,sql, or python,sql,r. Scala is not supported!
    • spark.databricks.cluster.profile set to serverless
    SparkEnvVars Dictionary<string, object>
    Map with environment variable key-value pairs to fine-tune Spark clusters. Key-value pairs of the form (X,Y) are exported (i.e., X='Y') while launching the driver and workers.
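    For example, a minimal sketch that exports two environment variables to the driver and workers; the variable names and values are illustrative:

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";

    const smallest = databricks.getNodeType({ localDisk: true });
    const latestLts = databricks.getSparkVersion({ longTermSupport: true });
    const clusterWithEnvVars = new databricks.Cluster("clusterWithEnvVars", {
        clusterName: "With Env Vars",
        sparkVersion: latestLts.then(v => v.id),
        nodeTypeId: smallest.then(n => n.id),
        autoterminationMinutes: 20,
        numWorkers: 1,
        // Exported as ENVIRONMENT='staging' and PYSPARK_PYTHON='...' when the
        // driver and workers are launched.
        sparkEnvVars: {
            ENVIRONMENT: "staging",
            PYSPARK_PYTHON: "/databricks/python3/bin/python3",
        },
    });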
    SshPublicKeys List<string>
    SSH public key contents that will be added to each Spark node in this cluster. The corresponding private keys can be used to log in with the user name ubuntu on port 2200. You can specify up to 10 keys.
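    A minimal sketch, assuming the public key is provided through Pulumi config under an illustrative key name:

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";

    const config = new pulumi.Config();
    // e.g. `pulumi config set sshPublicKey "ssh-ed25519 AAAA..."`
    const sshPublicKey = config.require("sshPublicKey");

    const smallest = databricks.getNodeType({ localDisk: true });
    const latestLts = databricks.getSparkVersion({ longTermSupport: true });
    const sshCluster = new databricks.Cluster("sshCluster", {
        clusterName: "SSH Enabled",
        sparkVersion: latestLts.then(v => v.id),
        nodeTypeId: smallest.then(n => n.id),
        autoterminationMinutes: 20,
        numWorkers: 1,
        // Up to 10 keys; log in as the `ubuntu` user on port 2200 with the matching private key.
        sshPublicKeys: [sshPublicKey],
    });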
    WorkloadType ClusterWorkloadType
    SparkVersion string
    Runtime version of the cluster. Any supported databricks.getSparkVersion id. We advise using Cluster Policies to restrict the list of versions for simplicity while maintaining enough control.
    ApplyPolicyDefaultValues bool
    Whether to use policy default values for missing cluster attributes.
    Autoscale ClusterAutoscaleArgs
    AutoterminationMinutes int
    Automatically terminate the cluster after being inactive for this time in minutes. If specified, the threshold must be between 10 and 10000 minutes. You can also set this value to 0 to explicitly disable automatic termination. Defaults to 60. We highly recommend having this setting present for Interactive/BI clusters.
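    For instance, a short sketch that explicitly disables automatic termination by setting the value to 0 (names are placeholders):

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";

    const smallest = databricks.getNodeType({ localDisk: true });
    const latestLts = databricks.getSparkVersion({ longTermSupport: true });
    const alwaysOn = new databricks.Cluster("alwaysOnCluster", {
        clusterName: "Always On",
        sparkVersion: latestLts.then(v => v.id),
        nodeTypeId: smallest.then(n => n.id),
        // 0 disables automatic termination entirely; omitting the field defaults to 60 minutes.
        autoterminationMinutes: 0,
        numWorkers: 1,
    });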
    AwsAttributes ClusterAwsAttributesArgs
    AzureAttributes ClusterAzureAttributesArgs
    ClusterId string
    ClusterLogConf ClusterClusterLogConfArgs
    ClusterMountInfos []ClusterClusterMountInfoArgs
    ClusterName string
    Cluster name, which doesn’t have to be unique. If not specified at creation, the cluster name will be an empty string.
    CustomTags map[string]interface{}

    should have tag ResourceClass set to value Serverless

    For example:

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";
    

    const clusterWithTableAccessControl = new databricks.Cluster("clusterWithTableAccessControl", {
        clusterName: "Shared High-Concurrency",
        sparkVersion: data.databricks_spark_version.latest_lts.id,
        nodeTypeId: data.databricks_node_type.smallest.id,
        autoterminationMinutes: 20,
        sparkConf: {
            "spark.databricks.repl.allowedLanguages": "python,sql",
            "spark.databricks.cluster.profile": "serverless",
        },
        customTags: {
            ResourceClass: "Serverless",
        },
    });

    import pulumi
    import pulumi_databricks as databricks
    
    cluster_with_table_access_control = databricks.Cluster("clusterWithTableAccessControl",
        cluster_name="Shared High-Concurrency",
        spark_version=data["databricks_spark_version"]["latest_lts"]["id"],
        node_type_id=data["databricks_node_type"]["smallest"]["id"],
        autotermination_minutes=20,
        spark_conf={
            "spark.databricks.repl.allowedLanguages": "python,sql",
            "spark.databricks.cluster.profile": "serverless",
        },
        custom_tags={
            "ResourceClass": "Serverless",
        })
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Databricks = Pulumi.Databricks;
    
    return await Deployment.RunAsync(() => 
    {
        var clusterWithTableAccessControl = new Databricks.Cluster("clusterWithTableAccessControl", new()
        {
            ClusterName = "Shared High-Concurrency",
            SparkVersion = data.Databricks_spark_version.Latest_lts.Id,
            NodeTypeId = data.Databricks_node_type.Smallest.Id,
            AutoterminationMinutes = 20,
            SparkConf = 
            {
                { "spark.databricks.repl.allowedLanguages", "python,sql" },
                { "spark.databricks.cluster.profile", "serverless" },
            },
            CustomTags = 
            {
                { "ResourceClass", "Serverless" },
            },
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := databricks.NewCluster(ctx, "clusterWithTableAccessControl", &databricks.ClusterArgs{
    			ClusterName:            pulumi.String("Shared High-Concurrency"),
    			SparkVersion:           pulumi.Any(data.Databricks_spark_version.Latest_lts.Id),
    			NodeTypeId:             pulumi.Any(data.Databricks_node_type.Smallest.Id),
    			AutoterminationMinutes: pulumi.Int(20),
    			SparkConf: pulumi.Map{
    				"spark.databricks.repl.allowedLanguages": pulumi.Any("python,sql"),
    				"spark.databricks.cluster.profile":       pulumi.Any("serverless"),
    			},
    			CustomTags: pulumi.Map{
    				"ResourceClass": pulumi.Any("Serverless"),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.databricks.Cluster;
    import com.pulumi.databricks.ClusterArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var clusterWithTableAccessControl = new Cluster("clusterWithTableAccessControl", ClusterArgs.builder()        
                .clusterName("Shared High-Concurrency")
                .sparkVersion(data.databricks_spark_version().latest_lts().id())
                .nodeTypeId(data.databricks_node_type().smallest().id())
                .autoterminationMinutes(20)
                .sparkConf(Map.ofEntries(
                    Map.entry("spark.databricks.repl.allowedLanguages", "python,sql"),
                    Map.entry("spark.databricks.cluster.profile", "serverless")
                ))
                .customTags(Map.of("ResourceClass", "Serverless"))
                .build());
    
        }
    }
    
    resources:
      clusterWithTableAccessControl:
        type: databricks:Cluster
        properties:
          clusterName: Shared High-Concurrency
          sparkVersion: ${data.databricks_spark_version.latest_lts.id}
          nodeTypeId: ${data.databricks_node_type.smallest.id}
          autoterminationMinutes: 20
          sparkConf:
            spark.databricks.repl.allowedLanguages: python,sql
            spark.databricks.cluster.profile: serverless
          customTags:
            ResourceClass: Serverless
    
    DataSecurityMode string
    Select the security features of the cluster. Unity Catalog requires SINGLE_USER or USER_ISOLATION mode. Use LEGACY_PASSTHROUGH for passthrough clusters and LEGACY_TABLE_ACL for Table ACL clusters. If omitted, no security features are enabled. In the Databricks UI, this setting has recently been renamed Access Mode and USER_ISOLATION has been renamed Shared, but use these terms here.
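    A minimal sketch of a Unity Catalog single-user cluster; the assigned user name is a placeholder:

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";

    const smallest = databricks.getNodeType({ localDisk: true });
    const latestLts = databricks.getSparkVersion({ longTermSupport: true });
    const singleUser = new databricks.Cluster("singleUserCluster", {
        clusterName: "Single User",
        sparkVersion: latestLts.then(v => v.id),
        nodeTypeId: smallest.then(n => n.id),
        autoterminationMinutes: 20,
        numWorkers: 1,
        // SINGLE_USER (or USER_ISOLATION) is required for Unity Catalog;
        // single_user_name must be set when SINGLE_USER is used.
        dataSecurityMode: "SINGLE_USER",
        singleUserName: "someone@example.com",
    });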
    DockerImage ClusterDockerImageArgs
    DriverInstancePoolId string
    Similar to instance_pool_id, but for the driver node. If omitted and instance_pool_id is specified, the driver will be allocated from that pool.
    DriverNodeTypeId string
    The node type of the Spark driver. This field is optional; if unset, the API will set the driver node type to the same value as node_type_id defined above.
    EnableElasticDisk bool
    If you don’t want to allocate a fixed number of EBS volumes at cluster creation time, use autoscaling local storage. With autoscaling local storage, Databricks monitors the amount of free disk space available on your cluster’s Spark workers. If a worker begins to run too low on disk, Databricks automatically attaches a new EBS volume to the worker before it runs out of disk space. EBS volumes are attached up to a limit of 5 TB of total disk space per instance (including the instance’s local storage). To scale down EBS usage, make sure you have autotermination_minutes and autoscale attributes set. More documentation available at cluster configuration page.
    EnableLocalDiskEncryption bool
    Some instance types you use to run clusters may have locally attached disks. Databricks may store shuffle data or temporary data on these locally attached disks. To ensure that all data at rest is encrypted for all storage types, including shuffle data stored temporarily on your cluster’s local disks, you can enable local disk encryption. When local disk encryption is enabled, Databricks generates an encryption key locally unique to each cluster node and uses it to encrypt all data stored on local disks. The scope of the key is local to each cluster node and is destroyed along with the cluster node itself. During its lifetime, the key resides in memory for encryption and decryption and is stored encrypted on the disk. Your workloads may run more slowly because of the performance impact of reading and writing encrypted data to and from local volumes. This feature is not available for all Azure Databricks subscriptions. Contact your Microsoft or Databricks account representative to request access.
    GcpAttributes ClusterGcpAttributesArgs
    IdempotencyToken string
    An optional token to guarantee the idempotency of cluster creation requests. If an active cluster with the provided token already exists, the request will not create a new cluster, but it will return the existing running cluster's ID instead. If you specify the idempotency token, upon failure you can retry until the request succeeds. The Databricks platform guarantees to launch exactly one cluster with that idempotency token. This token should have at most 64 characters.
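    For example, a sketch that reuses a fixed token so that retried creation requests resolve to the same cluster; the token value is illustrative:

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";

    const smallest = databricks.getNodeType({ localDisk: true });
    const latestLts = databricks.getSparkVersion({ longTermSupport: true });
    const idempotentCluster = new databricks.Cluster("idempotentCluster", {
        clusterName: "Idempotent",
        sparkVersion: latestLts.then(v => v.id),
        nodeTypeId: smallest.then(n => n.id),
        autoterminationMinutes: 20,
        numWorkers: 1,
        // At most 64 characters; a retried creation request with the same token
        // returns the already-running cluster instead of creating a new one.
        idempotencyToken: "shared-etl-cluster-v1",
    });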
    InitScripts []ClusterInitScriptArgs
    InstancePoolId string
    To reduce cluster start time, you can attach a cluster to a predefined pool of idle instances. When attached to a pool, a cluster allocates its driver and worker nodes from the pool. If the pool does not have sufficient idle resources to accommodate the cluster’s request, it expands by allocating new instances from the instance provider. When an attached cluster changes its state to TERMINATED, the instances it used are returned to the pool and reused by a different cluster.
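    A sketch of a cluster that allocates its nodes from a pool, assuming the provider's databricks.InstancePool resource with the arguments named below; the pool sizing values are illustrative:

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";

    const smallest = databricks.getNodeType({ localDisk: true });
    const latestLts = databricks.getSparkVersion({ longTermSupport: true });
    const pool = new databricks.InstancePool("sharedPool", {
        instancePoolName: "Shared Pool",
        nodeTypeId: smallest.then(n => n.id),
        minIdleInstances: 0,
        maxCapacity: 10,
        idleInstanceAutoterminationMinutes: 15,
    });
    const pooledCluster = new databricks.Cluster("pooledCluster", {
        clusterName: "Pooled",
        sparkVersion: latestLts.then(v => v.id),
        autoterminationMinutes: 20,
        numWorkers: 1,
        // node_type_id is not needed when instance_pool_id is set; the driver and workers
        // are both allocated from the pool unless driver_instance_pool_id is given.
        instancePoolId: pool.id,
    });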
    IsPinned bool

    Boolean value specifying whether the cluster is pinned (not pinned by default). You must be a Databricks administrator to use this. The number of pinned clusters is limited to 100, so apply may fail if you have more than that (this limit may change over time, so check the Databricks documentation for the current value).
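    A short sketch of a pinned cluster (requires workspace administrator rights); names are placeholders:

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";

    const smallest = databricks.getNodeType({ localDisk: true });
    const latestLts = databricks.getSparkVersion({ longTermSupport: true });
    const pinned = new databricks.Cluster("pinnedCluster", {
        clusterName: "Pinned",
        sparkVersion: latestLts.then(v => v.id),
        nodeTypeId: smallest.then(n => n.id),
        autoterminationMinutes: 20,
        numWorkers: 1,
        // Pinned clusters remain in the cluster list even after termination;
        // only a Databricks administrator can pin a cluster.
        isPinned: true,
    });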

    The following example demonstrates how to create an autoscaling cluster with Delta Cache enabled:

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";
    

    const smallest = databricks.getNodeType({
        localDisk: true,
    });
    const latestLts = databricks.getSparkVersion({
        longTermSupport: true,
    });
    const sharedAutoscaling = new databricks.Cluster("sharedAutoscaling", {
        clusterName: "Shared Autoscaling",
        sparkVersion: latestLts.then(latestLts => latestLts.id),
        nodeTypeId: smallest.then(smallest => smallest.id),
        autoterminationMinutes: 20,
        autoscale: {
            minWorkers: 1,
            maxWorkers: 50,
        },
        sparkConf: {
            "spark.databricks.io.cache.enabled": true,
            "spark.databricks.io.cache.maxDiskUsage": "50g",
            "spark.databricks.io.cache.maxMetaDataCache": "1g",
        },
    });

    import pulumi
    import pulumi_databricks as databricks
    
    smallest = databricks.get_node_type(local_disk=True)
    latest_lts = databricks.get_spark_version(long_term_support=True)
    shared_autoscaling = databricks.Cluster("sharedAutoscaling",
        cluster_name="Shared Autoscaling",
        spark_version=latest_lts.id,
        node_type_id=smallest.id,
        autotermination_minutes=20,
        autoscale=databricks.ClusterAutoscaleArgs(
            min_workers=1,
            max_workers=50,
        ),
        spark_conf={
            "spark.databricks.io.cache.enabled": True,
            "spark.databricks.io.cache.maxDiskUsage": "50g",
            "spark.databricks.io.cache.maxMetaDataCache": "1g",
        })
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Databricks = Pulumi.Databricks;
    
    return await Deployment.RunAsync(() => 
    {
        var smallest = Databricks.GetNodeType.Invoke(new()
        {
            LocalDisk = true,
        });
    
        var latestLts = Databricks.GetSparkVersion.Invoke(new()
        {
            LongTermSupport = true,
        });
    
        var sharedAutoscaling = new Databricks.Cluster("sharedAutoscaling", new()
        {
            ClusterName = "Shared Autoscaling",
            SparkVersion = latestLts.Apply(getSparkVersionResult => getSparkVersionResult.Id),
            NodeTypeId = smallest.Apply(getNodeTypeResult => getNodeTypeResult.Id),
            AutoterminationMinutes = 20,
            Autoscale = new Databricks.Inputs.ClusterAutoscaleArgs
            {
                MinWorkers = 1,
                MaxWorkers = 50,
            },
            SparkConf = 
            {
                { "spark.databricks.io.cache.enabled", true },
                { "spark.databricks.io.cache.maxDiskUsage", "50g" },
                { "spark.databricks.io.cache.maxMetaDataCache", "1g" },
            },
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		smallest, err := databricks.GetNodeType(ctx, &databricks.GetNodeTypeArgs{
    			LocalDisk: pulumi.BoolRef(true),
    		}, nil)
    		if err != nil {
    			return err
    		}
    		latestLts, err := databricks.GetSparkVersion(ctx, &databricks.GetSparkVersionArgs{
    			LongTermSupport: pulumi.BoolRef(true),
    		}, nil)
    		if err != nil {
    			return err
    		}
    		_, err = databricks.NewCluster(ctx, "sharedAutoscaling", &databricks.ClusterArgs{
    			ClusterName:            pulumi.String("Shared Autoscaling"),
    			SparkVersion:           pulumi.String(latestLts.Id),
    			NodeTypeId:             pulumi.String(smallest.Id),
    			AutoterminationMinutes: pulumi.Int(20),
    			Autoscale: &databricks.ClusterAutoscaleArgs{
    				MinWorkers: pulumi.Int(1),
    				MaxWorkers: pulumi.Int(50),
    			},
    			SparkConf: pulumi.Map{
    				"spark.databricks.io.cache.enabled":          pulumi.Any(true),
    				"spark.databricks.io.cache.maxDiskUsage":     pulumi.Any("50g"),
    				"spark.databricks.io.cache.maxMetaDataCache": pulumi.Any("1g"),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.databricks.DatabricksFunctions;
    import com.pulumi.databricks.inputs.GetNodeTypeArgs;
    import com.pulumi.databricks.inputs.GetSparkVersionArgs;
    import com.pulumi.databricks.Cluster;
    import com.pulumi.databricks.ClusterArgs;
    import com.pulumi.databricks.inputs.ClusterAutoscaleArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            final var smallest = DatabricksFunctions.getNodeType(GetNodeTypeArgs.builder()
                .localDisk(true)
                .build());
    
            final var latestLts = DatabricksFunctions.getSparkVersion(GetSparkVersionArgs.builder()
                .longTermSupport(true)
                .build());
    
            var sharedAutoscaling = new Cluster("sharedAutoscaling", ClusterArgs.builder()        
                .clusterName("Shared Autoscaling")
                .sparkVersion(latestLts.applyValue(getSparkVersionResult -> getSparkVersionResult.id()))
                .nodeTypeId(smallest.applyValue(getNodeTypeResult -> getNodeTypeResult.id()))
                .autoterminationMinutes(20)
                .autoscale(ClusterAutoscaleArgs.builder()
                    .minWorkers(1)
                    .maxWorkers(50)
                    .build())
                .sparkConf(Map.ofEntries(
                    Map.entry("spark.databricks.io.cache.enabled", true),
                    Map.entry("spark.databricks.io.cache.maxDiskUsage", "50g"),
                    Map.entry("spark.databricks.io.cache.maxMetaDataCache", "1g")
                ))
                .build());
    
        }
    }
    
    resources:
      sharedAutoscaling:
        type: databricks:Cluster
        properties:
          clusterName: Shared Autoscaling
          sparkVersion: ${latestLts.id}
          nodeTypeId: ${smallest.id}
          autoterminationMinutes: 20
          autoscale:
            minWorkers: 1
            maxWorkers: 50
          sparkConf:
            spark.databricks.io.cache.enabled: true
            spark.databricks.io.cache.maxDiskUsage: 50g
            spark.databricks.io.cache.maxMetaDataCache: 1g
    variables:
      smallest:
        fn::invoke:
          Function: databricks:getNodeType
          Arguments:
            localDisk: true
      latestLts:
        fn::invoke:
          Function: databricks:getSparkVersion
          Arguments:
            longTermSupport: true
    
    Libraries []ClusterLibraryArgs
    NodeTypeId string
    Any supported databricks.getNodeType id. If instance_pool_id is specified, this field is not needed.
    NumWorkers int
    Number of worker nodes that this cluster should have. A cluster has one Spark driver and num_workers executors for a total of num_workers + 1 Spark nodes.
    PolicyId string
    RuntimeEngine string
    The type of runtime engine to use. If not specified, the runtime engine type is inferred based on the spark_version value. Allowed values include: PHOTON, STANDARD.
    SingleUserName string
    The optional user name of the user to assign to an interactive cluster. This field is required when using data_security_mode set to SINGLE_USER or AAD Passthrough for Azure Data Lake Storage (ADLS) with a single-user cluster (i.e., not high-concurrency clusters).
    SparkConf map[string]interface{}
    should have the following items:

    • spark.databricks.repl.allowedLanguages set to a list of supported languages, for example: python,sql, or python,sql,r. Scala is not supported!
    • spark.databricks.cluster.profile set to serverless
    SparkEnvVars map[string]interface{}
    Map with environment variable key-value pairs to fine-tune Spark clusters. Key-value pairs of the form (X,Y) are exported (i.e., X='Y') while launching the driver and workers.
    SshPublicKeys []string
    SSH public key contents that will be added to each Spark node in this cluster. The corresponding private keys can be used to log in with the user name ubuntu on port 2200. You can specify up to 10 keys.
    WorkloadType ClusterWorkloadTypeArgs
    sparkVersion String
    Runtime version of the cluster. Any supported databricks.getSparkVersion id. We advise using Cluster Policies to restrict the list of versions for simplicity while maintaining enough control.
    applyPolicyDefaultValues Boolean
    Whether to use policy default values for missing cluster attributes.
    autoscale ClusterAutoscale
    autoterminationMinutes Integer
    Automatically terminate the cluster after being inactive for this time in minutes. If specified, the threshold must be between 10 and 10000 minutes. You can also set this value to 0 to explicitly disable automatic termination. Defaults to 60. We highly recommend having this setting present for Interactive/BI clusters.
    awsAttributes ClusterAwsAttributes
    azureAttributes ClusterAzureAttributes
    clusterId String
    clusterLogConf ClusterClusterLogConf
    clusterMountInfos List<ClusterClusterMountInfo>
    clusterName String
    Cluster name, which doesn’t have to be unique. If not specified at creation, the cluster name will be an empty string.
    customTags Map<String,Object>

    should have tag ResourceClass set to value Serverless

    For example:

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";
    

    const clusterWithTableAccessControl = new databricks.Cluster("clusterWithTableAccessControl", {
        clusterName: "Shared High-Concurrency",
        sparkVersion: data.databricks_spark_version.latest_lts.id,
        nodeTypeId: data.databricks_node_type.smallest.id,
        autoterminationMinutes: 20,
        sparkConf: {
            "spark.databricks.repl.allowedLanguages": "python,sql",
            "spark.databricks.cluster.profile": "serverless",
        },
        customTags: {
            ResourceClass: "Serverless",
        },
    });

    import pulumi
    import pulumi_databricks as databricks
    
    cluster_with_table_access_control = databricks.Cluster("clusterWithTableAccessControl",
        cluster_name="Shared High-Concurrency",
        spark_version=data["databricks_spark_version"]["latest_lts"]["id"],
        node_type_id=data["databricks_node_type"]["smallest"]["id"],
        autotermination_minutes=20,
        spark_conf={
            "spark.databricks.repl.allowedLanguages": "python,sql",
            "spark.databricks.cluster.profile": "serverless",
        },
        custom_tags={
            "ResourceClass": "Serverless",
        })
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Databricks = Pulumi.Databricks;
    
    return await Deployment.RunAsync(() => 
    {
        var clusterWithTableAccessControl = new Databricks.Cluster("clusterWithTableAccessControl", new()
        {
            ClusterName = "Shared High-Concurrency",
            SparkVersion = data.Databricks_spark_version.Latest_lts.Id,
            NodeTypeId = data.Databricks_node_type.Smallest.Id,
            AutoterminationMinutes = 20,
            SparkConf = 
            {
                { "spark.databricks.repl.allowedLanguages", "python,sql" },
                { "spark.databricks.cluster.profile", "serverless" },
            },
            CustomTags = 
            {
                { "ResourceClass", "Serverless" },
            },
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := databricks.NewCluster(ctx, "clusterWithTableAccessControl", &databricks.ClusterArgs{
    			ClusterName:            pulumi.String("Shared High-Concurrency"),
    			SparkVersion:           pulumi.Any(data.Databricks_spark_version.Latest_lts.Id),
    			NodeTypeId:             pulumi.Any(data.Databricks_node_type.Smallest.Id),
    			AutoterminationMinutes: pulumi.Int(20),
    			SparkConf: pulumi.Map{
    				"spark.databricks.repl.allowedLanguages": pulumi.Any("python,sql"),
    				"spark.databricks.cluster.profile":       pulumi.Any("serverless"),
    			},
    			CustomTags: pulumi.Map{
    				"ResourceClass": pulumi.Any("Serverless"),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.databricks.Cluster;
    import com.pulumi.databricks.ClusterArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var clusterWithTableAccessControl = new Cluster("clusterWithTableAccessControl", ClusterArgs.builder()        
                .clusterName("Shared High-Concurrency")
                .sparkVersion(data.databricks_spark_version().latest_lts().id())
                .nodeTypeId(data.databricks_node_type().smallest().id())
                .autoterminationMinutes(20)
                .sparkConf(Map.ofEntries(
                    Map.entry("spark.databricks.repl.allowedLanguages", "python,sql"),
                    Map.entry("spark.databricks.cluster.profile", "serverless")
                ))
                .customTags(Map.of("ResourceClass", "Serverless"))
                .build());
    
        }
    }
    
    resources:
      clusterWithTableAccessControl:
        type: databricks:Cluster
        properties:
          clusterName: Shared High-Concurrency
          sparkVersion: ${data.databricks_spark_version.latest_lts.id}
          nodeTypeId: ${data.databricks_node_type.smallest.id}
          autoterminationMinutes: 20
          sparkConf:
            spark.databricks.repl.allowedLanguages: python,sql
            spark.databricks.cluster.profile: serverless
          customTags:
            ResourceClass: Serverless
    
    dataSecurityMode String
    Select the security features of the cluster. Unity Catalog requires SINGLE_USER or USER_ISOLATION mode. Use LEGACY_PASSTHROUGH for passthrough clusters and LEGACY_TABLE_ACL for Table ACL clusters. If omitted, no security features are enabled. In the Databricks UI, this setting has recently been renamed Access Mode and USER_ISOLATION has been renamed Shared, but use these terms here.
    dockerImage ClusterDockerImage
    driverInstancePoolId String
    Similar to instance_pool_id, but for the driver node. If omitted and instance_pool_id is specified, the driver will be allocated from that pool.
    driverNodeTypeId String
    The node type of the Spark driver. This field is optional; if unset, the API will set the driver node type to the same value as node_type_id defined above.
    enableElasticDisk Boolean
    If you don’t want to allocate a fixed number of EBS volumes at cluster creation time, use autoscaling local storage. With autoscaling local storage, Databricks monitors the amount of free disk space available on your cluster’s Spark workers. If a worker begins to run too low on disk, Databricks automatically attaches a new EBS volume to the worker before it runs out of disk space. EBS volumes are attached up to a limit of 5 TB of total disk space per instance (including the instance’s local storage). To scale down EBS usage, make sure you have autotermination_minutes and autoscale attributes set. More documentation available at cluster configuration page.
    enableLocalDiskEncryption Boolean
    Some instance types you use to run clusters may have locally attached disks. Databricks may store shuffle data or temporary data on these locally attached disks. To ensure that all data at rest is encrypted for all storage types, including shuffle data stored temporarily on your cluster’s local disks, you can enable local disk encryption. When local disk encryption is enabled, Databricks generates an encryption key locally unique to each cluster node and uses it to encrypt all data stored on local disks. The scope of the key is local to each cluster node and is destroyed along with the cluster node itself. During its lifetime, the key resides in memory for encryption and decryption and is stored encrypted on the disk. Your workloads may run more slowly because of the performance impact of reading and writing encrypted data to and from local volumes. This feature is not available for all Azure Databricks subscriptions. Contact your Microsoft or Databricks account representative to request access.
    gcpAttributes ClusterGcpAttributes
    idempotencyToken String
    An optional token to guarantee the idempotency of cluster creation requests. If an active cluster with the provided token already exists, the request will not create a new cluster, but it will return the existing running cluster's ID instead. If you specify the idempotency token, upon failure you can retry until the request succeeds. The Databricks platform guarantees to launch exactly one cluster with that idempotency token. This token should have at most 64 characters.
    initScripts List<ClusterInitScript>
    instancePoolId String
    To reduce cluster start time, you can attach a cluster to a predefined pool of idle instances. When attached to a pool, a cluster allocates its driver and worker nodes from the pool. If the pool does not have sufficient idle resources to accommodate the cluster’s request, it expands by allocating new instances from the instance provider. When an attached cluster changes its state to TERMINATED, the instances it used are returned to the pool and reused by a different cluster.
    isPinned Boolean

    Boolean value specifying whether the cluster is pinned (not pinned by default). You must be a Databricks administrator to use this. The number of pinned clusters is limited to 100, so apply may fail if you have more than that (this limit may change over time, so check the Databricks documentation for the current value).

    The following example demonstrates how to create an autoscaling cluster with Delta Cache enabled:

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";
    

    const smallest = databricks.getNodeType({
        localDisk: true,
    });
    const latestLts = databricks.getSparkVersion({
        longTermSupport: true,
    });
    const sharedAutoscaling = new databricks.Cluster("sharedAutoscaling", {
        clusterName: "Shared Autoscaling",
        sparkVersion: latestLts.then(latestLts => latestLts.id),
        nodeTypeId: smallest.then(smallest => smallest.id),
        autoterminationMinutes: 20,
        autoscale: {
            minWorkers: 1,
            maxWorkers: 50,
        },
        sparkConf: {
            "spark.databricks.io.cache.enabled": true,
            "spark.databricks.io.cache.maxDiskUsage": "50g",
            "spark.databricks.io.cache.maxMetaDataCache": "1g",
        },
    });

    import pulumi
    import pulumi_databricks as databricks
    
    smallest = databricks.get_node_type(local_disk=True)
    latest_lts = databricks.get_spark_version(long_term_support=True)
    shared_autoscaling = databricks.Cluster("sharedAutoscaling",
        cluster_name="Shared Autoscaling",
        spark_version=latest_lts.id,
        node_type_id=smallest.id,
        autotermination_minutes=20,
        autoscale=databricks.ClusterAutoscaleArgs(
            min_workers=1,
            max_workers=50,
        ),
        spark_conf={
            "spark.databricks.io.cache.enabled": True,
            "spark.databricks.io.cache.maxDiskUsage": "50g",
            "spark.databricks.io.cache.maxMetaDataCache": "1g",
        })
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Databricks = Pulumi.Databricks;
    
    return await Deployment.RunAsync(() => 
    {
        var smallest = Databricks.GetNodeType.Invoke(new()
        {
            LocalDisk = true,
        });
    
        var latestLts = Databricks.GetSparkVersion.Invoke(new()
        {
            LongTermSupport = true,
        });
    
        var sharedAutoscaling = new Databricks.Cluster("sharedAutoscaling", new()
        {
            ClusterName = "Shared Autoscaling",
            SparkVersion = latestLts.Apply(getSparkVersionResult => getSparkVersionResult.Id),
            NodeTypeId = smallest.Apply(getNodeTypeResult => getNodeTypeResult.Id),
            AutoterminationMinutes = 20,
            Autoscale = new Databricks.Inputs.ClusterAutoscaleArgs
            {
                MinWorkers = 1,
                MaxWorkers = 50,
            },
            SparkConf = 
            {
                { "spark.databricks.io.cache.enabled", true },
                { "spark.databricks.io.cache.maxDiskUsage", "50g" },
                { "spark.databricks.io.cache.maxMetaDataCache", "1g" },
            },
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		smallest, err := databricks.GetNodeType(ctx, &databricks.GetNodeTypeArgs{
    			LocalDisk: pulumi.BoolRef(true),
    		}, nil)
    		if err != nil {
    			return err
    		}
    		latestLts, err := databricks.GetSparkVersion(ctx, &databricks.GetSparkVersionArgs{
    			LongTermSupport: pulumi.BoolRef(true),
    		}, nil)
    		if err != nil {
    			return err
    		}
    		_, err = databricks.NewCluster(ctx, "sharedAutoscaling", &databricks.ClusterArgs{
    			ClusterName:            pulumi.String("Shared Autoscaling"),
    			SparkVersion:           pulumi.String(latestLts.Id),
    			NodeTypeId:             pulumi.String(smallest.Id),
    			AutoterminationMinutes: pulumi.Int(20),
    			Autoscale: &databricks.ClusterAutoscaleArgs{
    				MinWorkers: pulumi.Int(1),
    				MaxWorkers: pulumi.Int(50),
    			},
    			SparkConf: pulumi.Map{
    				"spark.databricks.io.cache.enabled":          pulumi.Any(true),
    				"spark.databricks.io.cache.maxDiskUsage":     pulumi.Any("50g"),
    				"spark.databricks.io.cache.maxMetaDataCache": pulumi.Any("1g"),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.databricks.DatabricksFunctions;
    import com.pulumi.databricks.inputs.GetNodeTypeArgs;
    import com.pulumi.databricks.inputs.GetSparkVersionArgs;
    import com.pulumi.databricks.Cluster;
    import com.pulumi.databricks.ClusterArgs;
    import com.pulumi.databricks.inputs.ClusterAutoscaleArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            final var smallest = DatabricksFunctions.getNodeType(GetNodeTypeArgs.builder()
                .localDisk(true)
                .build());
    
            final var latestLts = DatabricksFunctions.getSparkVersion(GetSparkVersionArgs.builder()
                .longTermSupport(true)
                .build());
    
            var sharedAutoscaling = new Cluster("sharedAutoscaling", ClusterArgs.builder()        
                .clusterName("Shared Autoscaling")
                .sparkVersion(latestLts.applyValue(getSparkVersionResult -> getSparkVersionResult.id()))
                .nodeTypeId(smallest.applyValue(getNodeTypeResult -> getNodeTypeResult.id()))
                .autoterminationMinutes(20)
                .autoscale(ClusterAutoscaleArgs.builder()
                    .minWorkers(1)
                    .maxWorkers(50)
                    .build())
                .sparkConf(Map.ofEntries(
                    Map.entry("spark.databricks.io.cache.enabled", true),
                    Map.entry("spark.databricks.io.cache.maxDiskUsage", "50g"),
                    Map.entry("spark.databricks.io.cache.maxMetaDataCache", "1g")
                ))
                .build());
    
        }
    }
    
    resources:
      sharedAutoscaling:
        type: databricks:Cluster
        properties:
          clusterName: Shared Autoscaling
          sparkVersion: ${latestLts.id}
          nodeTypeId: ${smallest.id}
          autoterminationMinutes: 20
          autoscale:
            minWorkers: 1
            maxWorkers: 50
          sparkConf:
            spark.databricks.io.cache.enabled: true
            spark.databricks.io.cache.maxDiskUsage: 50g
            spark.databricks.io.cache.maxMetaDataCache: 1g
    variables:
      smallest:
        fn::invoke:
          Function: databricks:getNodeType
          Arguments:
            localDisk: true
      latestLts:
        fn::invoke:
          Function: databricks:getSparkVersion
          Arguments:
            longTermSupport: true
    
    libraries List<ClusterLibrary>
    nodeTypeId String
    Any supported databricks.getNodeType id. If instance_pool_id is specified, this field is not needed.
    numWorkers Integer
    Number of worker nodes that this cluster should have. A cluster has one Spark driver and num_workers executors for a total of num_workers + 1 Spark nodes.
    policyId String
    runtimeEngine String
    The type of runtime engine to use. If not specified, the runtime engine type is inferred based on the spark_version value. Allowed values include: PHOTON, STANDARD.
    singleUserName String
    The optional user name of the user to assign to an interactive cluster. This field is required when using data_security_mode set to SINGLE_USER or AAD Passthrough for Azure Data Lake Storage (ADLS) with a single-user cluster (i.e., not high-concurrency clusters).
    sparkConf Map<String,Object>
    should have the following items:

    • spark.databricks.repl.allowedLanguages set to a list of supported languages, for example: python,sql, or python,sql,r. Scala is not supported!
    • spark.databricks.cluster.profile set to serverless
    sparkEnvVars Map<String,Object>
    Map with environment variable key-value pairs to fine-tune Spark clusters. Key-value pairs of the form (X,Y) are exported (i.e., X='Y') while launching the driver and workers.
    sshPublicKeys List<String>
    SSH public key contents that will be added to each Spark node in this cluster. The corresponding private keys can be used to log in with the user name ubuntu on port 2200. You can specify up to 10 keys.
    workloadType ClusterWorkloadType
    sparkVersion string
    Runtime version of the cluster. Any supported databricks.getSparkVersion id. We advise using Cluster Policies to restrict the list of versions for simplicity while maintaining enough control.
    applyPolicyDefaultValues boolean
    Whether to use policy default values for missing cluster attributes.
    autoscale ClusterAutoscale
    autoterminationMinutes number
    Automatically terminate the cluster after being inactive for this time in minutes. If specified, the threshold must be between 10 and 10000 minutes. You can also set this value to 0 to explicitly disable automatic termination. Defaults to 60. We highly recommend having this setting present for Interactive/BI clusters.
    awsAttributes ClusterAwsAttributes
    azureAttributes ClusterAzureAttributes
    clusterId string
    clusterLogConf ClusterClusterLogConf
    clusterMountInfos ClusterClusterMountInfo[]
    clusterName string
    Cluster name, which doesn’t have to be unique. If not specified at creation, the cluster name will be an empty string.
    customTags {[key: string]: any}

    should have tag ResourceClass set to value Serverless

    For example:

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";
    

    const clusterWithTableAccessControl = new databricks.Cluster("clusterWithTableAccessControl", {
        clusterName: "Shared High-Concurrency",
        sparkVersion: data.databricks_spark_version.latest_lts.id,
        nodeTypeId: data.databricks_node_type.smallest.id,
        autoterminationMinutes: 20,
        sparkConf: {
            "spark.databricks.repl.allowedLanguages": "python,sql",
            "spark.databricks.cluster.profile": "serverless",
        },
        customTags: {
            ResourceClass: "Serverless",
        },
    });

    import pulumi
    import pulumi_databricks as databricks
    
    cluster_with_table_access_control = databricks.Cluster("clusterWithTableAccessControl",
        cluster_name="Shared High-Concurrency",
        spark_version=data["databricks_spark_version"]["latest_lts"]["id"],
        node_type_id=data["databricks_node_type"]["smallest"]["id"],
        autotermination_minutes=20,
        spark_conf={
            "spark.databricks.repl.allowedLanguages": "python,sql",
            "spark.databricks.cluster.profile": "serverless",
        },
        custom_tags={
            "ResourceClass": "Serverless",
        })
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Databricks = Pulumi.Databricks;
    
    return await Deployment.RunAsync(() => 
    {
        var clusterWithTableAccessControl = new Databricks.Cluster("clusterWithTableAccessControl", new()
        {
            ClusterName = "Shared High-Concurrency",
            SparkVersion = data.Databricks_spark_version.Latest_lts.Id,
            NodeTypeId = data.Databricks_node_type.Smallest.Id,
            AutoterminationMinutes = 20,
            SparkConf = 
            {
                { "spark.databricks.repl.allowedLanguages", "python,sql" },
                { "spark.databricks.cluster.profile", "serverless" },
            },
            CustomTags = 
            {
                { "ResourceClass", "Serverless" },
            },
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := databricks.NewCluster(ctx, "clusterWithTableAccessControl", &databricks.ClusterArgs{
    			ClusterName:            pulumi.String("Shared High-Concurrency"),
    			SparkVersion:           pulumi.Any(data.Databricks_spark_version.Latest_lts.Id),
    			NodeTypeId:             pulumi.Any(data.Databricks_node_type.Smallest.Id),
    			AutoterminationMinutes: pulumi.Int(20),
    			SparkConf: pulumi.Map{
    				"spark.databricks.repl.allowedLanguages": pulumi.Any("python,sql"),
    				"spark.databricks.cluster.profile":       pulumi.Any("serverless"),
    			},
    			CustomTags: pulumi.Map{
    				"ResourceClass": pulumi.Any("Serverless"),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.databricks.Cluster;
    import com.pulumi.databricks.ClusterArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var clusterWithTableAccessControl = new Cluster("clusterWithTableAccessControl", ClusterArgs.builder()        
                .clusterName("Shared High-Concurrency")
                .sparkVersion(data.databricks_spark_version().latest_lts().id())
                .nodeTypeId(data.databricks_node_type().smallest().id())
                .autoterminationMinutes(20)
                .sparkConf(Map.ofEntries(
                    Map.entry("spark.databricks.repl.allowedLanguages", "python,sql"),
                    Map.entry("spark.databricks.cluster.profile", "serverless")
                ))
                .customTags(Map.of("ResourceClass", "Serverless"))
                .build());
    
        }
    }
    
    resources:
      clusterWithTableAccessControl:
        type: databricks:Cluster
        properties:
          clusterName: Shared High-Concurrency
          sparkVersion: ${data.databricks_spark_version.latest_lts.id}
          nodeTypeId: ${data.databricks_node_type.smallest.id}
          autoterminationMinutes: 20
          sparkConf:
            spark.databricks.repl.allowedLanguages: python,sql
            spark.databricks.cluster.profile: serverless
          customTags:
            ResourceClass: Serverless
    
    dataSecurityMode string
    Select the security features of the cluster. Unity Catalog requires SINGLE_USER or USER_ISOLATION mode. Use LEGACY_PASSTHROUGH for passthrough clusters and LEGACY_TABLE_ACL for Table ACL clusters. If omitted, no security features are enabled. In the Databricks UI, this setting has recently been renamed Access Mode and USER_ISOLATION has been renamed Shared, but use these terms here.
    dockerImage ClusterDockerImage
    driverInstancePoolId string
    Similar to instance_pool_id, but for the driver node. If omitted and instance_pool_id is specified, the driver will be allocated from that pool.
    driverNodeTypeId string
    The node type of the Spark driver. This field is optional; if unset, the API will set the driver node type to the same value as node_type_id defined above.
    enableElasticDisk boolean
    If you don’t want to allocate a fixed number of EBS volumes at cluster creation time, use autoscaling local storage. With autoscaling local storage, Databricks monitors the amount of free disk space available on your cluster’s Spark workers. If a worker begins to run too low on disk, Databricks automatically attaches a new EBS volume to the worker before it runs out of disk space. EBS volumes are attached up to a limit of 5 TB of total disk space per instance (including the instance’s local storage). To scale down EBS usage, make sure you have autotermination_minutes and autoscale attributes set. More documentation available at cluster configuration page.
    enableLocalDiskEncryption boolean
    Some instance types you use to run clusters may have locally attached disks. Databricks may store shuffle data or temporary data on these locally attached disks. To ensure that all data at rest is encrypted for all storage types, including shuffle data stored temporarily on your cluster’s local disks, you can enable local disk encryption. When local disk encryption is enabled, Databricks generates an encryption key locally unique to each cluster node and uses it to encrypt all data stored on local disks. The scope of the key is local to each cluster node and is destroyed along with the cluster node itself. During its lifetime, the key resides in memory for encryption and decryption and is stored encrypted on the disk. Your workloads may run more slowly because of the performance impact of reading and writing encrypted data to and from local volumes. This feature is not available for all Azure Databricks subscriptions. Contact your Microsoft or Databricks account representative to request access.
    gcpAttributes ClusterGcpAttributes
    idempotencyToken string
    An optional token to guarantee the idempotency of cluster creation requests. If an active cluster with the provided token already exists, the request will not create a new cluster, but it will return the existing running cluster's ID instead. If you specify the idempotency token, upon failure you can retry until the request succeeds. The Databricks platform guarantees to launch exactly one cluster with that idempotency token. This token should have at most 64 characters.
    initScripts ClusterInitScript[]
    instancePoolId string
    To reduce cluster start time, you can attach a cluster to a predefined pool of idle instances. When attached to a pool, a cluster allocates its driver and worker nodes from the pool. If the pool does not have sufficient idle resources to accommodate the cluster’s request, it expands by allocating new instances from the instance provider. When an attached cluster changes its state to TERMINATED, the instances it used are returned to the pool and reused by a different cluster.
    isPinned boolean

    Boolean value specifying whether the cluster is pinned (not pinned by default). You must be a Databricks administrator to use this. The number of pinned clusters is limited to 100, so apply may fail if you have more than that (this limit may change over time, so check the Databricks documentation for the current value).

    The following example demonstrates how to create an autoscaling cluster with Delta Cache enabled:

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";
    

    const smallest = databricks.getNodeType({
        localDisk: true,
    });
    const latestLts = databricks.getSparkVersion({
        longTermSupport: true,
    });
    const sharedAutoscaling = new databricks.Cluster("sharedAutoscaling", {
        clusterName: "Shared Autoscaling",
        sparkVersion: latestLts.then(latestLts => latestLts.id),
        nodeTypeId: smallest.then(smallest => smallest.id),
        autoterminationMinutes: 20,
        autoscale: {
            minWorkers: 1,
            maxWorkers: 50,
        },
        sparkConf: {
            "spark.databricks.io.cache.enabled": true,
            "spark.databricks.io.cache.maxDiskUsage": "50g",
            "spark.databricks.io.cache.maxMetaDataCache": "1g",
        },
    });

    import pulumi
    import pulumi_databricks as databricks
    
    smallest = databricks.get_node_type(local_disk=True)
    latest_lts = databricks.get_spark_version(long_term_support=True)
    shared_autoscaling = databricks.Cluster("sharedAutoscaling",
        cluster_name="Shared Autoscaling",
        spark_version=latest_lts.id,
        node_type_id=smallest.id,
        autotermination_minutes=20,
        autoscale=databricks.ClusterAutoscaleArgs(
            min_workers=1,
            max_workers=50,
        ),
        spark_conf={
            "spark.databricks.io.cache.enabled": True,
            "spark.databricks.io.cache.maxDiskUsage": "50g",
            "spark.databricks.io.cache.maxMetaDataCache": "1g",
        })
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Databricks = Pulumi.Databricks;
    
    return await Deployment.RunAsync(() => 
    {
        var smallest = Databricks.GetNodeType.Invoke(new()
        {
            LocalDisk = true,
        });
    
        var latestLts = Databricks.GetSparkVersion.Invoke(new()
        {
            LongTermSupport = true,
        });
    
        var sharedAutoscaling = new Databricks.Cluster("sharedAutoscaling", new()
        {
            ClusterName = "Shared Autoscaling",
            SparkVersion = latestLts.Apply(getSparkVersionResult => getSparkVersionResult.Id),
            NodeTypeId = smallest.Apply(getNodeTypeResult => getNodeTypeResult.Id),
            AutoterminationMinutes = 20,
            Autoscale = new Databricks.Inputs.ClusterAutoscaleArgs
            {
                MinWorkers = 1,
                MaxWorkers = 50,
            },
            SparkConf = 
            {
                { "spark.databricks.io.cache.enabled", true },
                { "spark.databricks.io.cache.maxDiskUsage", "50g" },
                { "spark.databricks.io.cache.maxMetaDataCache", "1g" },
            },
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		smallest, err := databricks.GetNodeType(ctx, &databricks.GetNodeTypeArgs{
    			LocalDisk: pulumi.BoolRef(true),
    		}, nil)
    		if err != nil {
    			return err
    		}
    		latestLts, err := databricks.GetSparkVersion(ctx, &databricks.GetSparkVersionArgs{
    			LongTermSupport: pulumi.BoolRef(true),
    		}, nil)
    		if err != nil {
    			return err
    		}
    		_, err = databricks.NewCluster(ctx, "sharedAutoscaling", &databricks.ClusterArgs{
    			ClusterName:            pulumi.String("Shared Autoscaling"),
    			SparkVersion:           pulumi.String(latestLts.Id),
    			NodeTypeId:             pulumi.String(smallest.Id),
    			AutoterminationMinutes: pulumi.Int(20),
    			Autoscale: &databricks.ClusterAutoscaleArgs{
    				MinWorkers: pulumi.Int(1),
    				MaxWorkers: pulumi.Int(50),
    			},
    			SparkConf: pulumi.Map{
    				"spark.databricks.io.cache.enabled":          pulumi.Any(true),
    				"spark.databricks.io.cache.maxDiskUsage":     pulumi.Any("50g"),
    				"spark.databricks.io.cache.maxMetaDataCache": pulumi.Any("1g"),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.databricks.DatabricksFunctions;
    import com.pulumi.databricks.inputs.GetNodeTypeArgs;
    import com.pulumi.databricks.inputs.GetSparkVersionArgs;
    import com.pulumi.databricks.Cluster;
    import com.pulumi.databricks.ClusterArgs;
    import com.pulumi.databricks.inputs.ClusterAutoscaleArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            final var smallest = DatabricksFunctions.getNodeType(GetNodeTypeArgs.builder()
                .localDisk(true)
                .build());
    
            final var latestLts = DatabricksFunctions.getSparkVersion(GetSparkVersionArgs.builder()
                .longTermSupport(true)
                .build());
    
            var sharedAutoscaling = new Cluster("sharedAutoscaling", ClusterArgs.builder()        
                .clusterName("Shared Autoscaling")
                .sparkVersion(latestLts.applyValue(getSparkVersionResult -> getSparkVersionResult.id()))
                .nodeTypeId(smallest.applyValue(getNodeTypeResult -> getNodeTypeResult.id()))
                .autoterminationMinutes(20)
                .autoscale(ClusterAutoscaleArgs.builder()
                    .minWorkers(1)
                    .maxWorkers(50)
                    .build())
                .sparkConf(Map.ofEntries(
                    Map.entry("spark.databricks.io.cache.enabled", true),
                    Map.entry("spark.databricks.io.cache.maxDiskUsage", "50g"),
                    Map.entry("spark.databricks.io.cache.maxMetaDataCache", "1g")
                ))
                .build());
    
        }
    }
    
    resources:
      sharedAutoscaling:
        type: databricks:Cluster
        properties:
          clusterName: Shared Autoscaling
          sparkVersion: ${latestLts.id}
          nodeTypeId: ${smallest.id}
          autoterminationMinutes: 20
          autoscale:
            minWorkers: 1
            maxWorkers: 50
          sparkConf:
            spark.databricks.io.cache.enabled: true
            spark.databricks.io.cache.maxDiskUsage: 50g
            spark.databricks.io.cache.maxMetaDataCache: 1g
    variables:
      smallest:
        fn::invoke:
          Function: databricks:getNodeType
          Arguments:
            localDisk: true
      latestLts:
        fn::invoke:
          Function: databricks:getSparkVersion
          Arguments:
            longTermSupport: true
    
    libraries ClusterLibrary[]
    nodeTypeId string
    Any supported databricks.getNodeType id. If instance_pool_id is specified, this field is not needed.
    numWorkers number
    Number of worker nodes that this cluster should have. A cluster has one Spark driver and num_workers executors for a total of num_workers + 1 Spark nodes.
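
    For illustration, a minimal TypeScript sketch of a fixed-size cluster that sets numWorkers instead of autoscale; the resource name "fixedSize" and the worker count are placeholders chosen for this example:

    import * as databricks from "@pulumi/databricks";
    
    // Resolve a supported LTS runtime and a small local-disk node type, as in the examples above.
    const smallest = databricks.getNodeType({ localDisk: true });
    const latestLts = databricks.getSparkVersion({ longTermSupport: true });
    
    // Fixed-size cluster: one driver plus two workers (num_workers + 1 Spark nodes).
    const fixedSize = new databricks.Cluster("fixedSize", {
        clusterName: "Fixed Size",
        sparkVersion: latestLts.then(v => v.id),
        nodeTypeId: smallest.then(t => t.id),
        numWorkers: 2,
        autoterminationMinutes: 20,
    });
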
    policyId string
    runtimeEngine string
    The type of runtime engine to use. If not specified, the runtime engine type is inferred based on the spark_version value. Allowed values include: PHOTON, STANDARD.
    singleUserName string
    The optional user name of the user to assign to an interactive cluster. This field is required when using data_security_mode set to SINGLE_USER or AAD Passthrough for Azure Data Lake Storage (ADLS) with a single-user cluster (i.e., not high-concurrency clusters).
    sparkConf {[key: string]: any}
    should have the following items:

    • spark.databricks.repl.allowedLanguages set to a list of supported languages, for example: python,sql, or python,sql,r. Scala is not supported!
    • spark.databricks.cluster.profile set to serverless
    sparkEnvVars {[key: string]: any}
    Map with environment variable key-value pairs to fine-tune Spark clusters. Key-value pairs of the form (X,Y) are exported (i.e., X='Y') while launching the driver and workers.
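
    As an illustration, a hedged TypeScript sketch that passes environment variables to the driver and workers; the variable names and values here are invented for the example:

    import * as databricks from "@pulumi/databricks";
    
    const smallest = databricks.getNodeType({ localDisk: true });
    const latestLts = databricks.getSparkVersion({ longTermSupport: true });
    
    // Each key/value pair is exported as X='Y' when the driver and workers start.
    const withEnvVars = new databricks.Cluster("withEnvVars", {
        clusterName: "With Env Vars",
        sparkVersion: latestLts.then(v => v.id),
        nodeTypeId: smallest.then(t => t.id),
        autoterminationMinutes: 20,
        numWorkers: 1,
        sparkEnvVars: {
            MY_APP_ENV: "staging", // illustrative variable
            JAVA_OPTS: "-Xss16m",  // illustrative JVM option
        },
    });
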
    sshPublicKeys string[]
    SSH public key contents that will be added to each Spark node in this cluster. The corresponding private keys can be used to login with the user name ubuntu on port 2200. You can specify up to 10 keys.
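
    For instance, a TypeScript sketch that adds one SSH public key to every node; the key material below is a truncated placeholder, not a real key:

    import * as databricks from "@pulumi/databricks";
    
    const smallest = databricks.getNodeType({ localDisk: true });
    const latestLts = databricks.getSparkVersion({ longTermSupport: true });
    
    // The matching private key can then be used to log in as `ubuntu` on port 2200.
    const sshCluster = new databricks.Cluster("sshCluster", {
        clusterName: "SSH Enabled",
        sparkVersion: latestLts.then(v => v.id),
        nodeTypeId: smallest.then(t => t.id),
        autoterminationMinutes: 20,
        numWorkers: 1,
        sshPublicKeys: [
            "ssh-ed25519 AAAA... user@example.com", // placeholder public key
        ],
    });
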
    workloadType ClusterWorkloadType
    spark_version str
    Runtime version of the cluster. Any supported databricks.getSparkVersion id. We advise using Cluster Policies to restrict the list of versions for simplicity while maintaining enough control.
    apply_policy_default_values bool
    Whether to use policy default values for missing cluster attributes.
    autoscale ClusterAutoscaleArgs
    autotermination_minutes int
    Automatically terminate the cluster after being inactive for this time in minutes. If specified, the threshold must be between 10 and 10000 minutes. You can also set this value to 0 to explicitly disable automatic termination. Defaults to 60. We highly recommend having this setting present for Interactive/BI clusters.
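
    For example, a TypeScript sketch of an always-on cluster that explicitly disables automatic termination by setting the value to 0; whether that is appropriate depends on your cost constraints:

    import * as databricks from "@pulumi/databricks";
    
    const smallest = databricks.getNodeType({ localDisk: true });
    const latestLts = databricks.getSparkVersion({ longTermSupport: true });
    
    // autoterminationMinutes: 0 disables automatic termination entirely.
    const alwaysOn = new databricks.Cluster("alwaysOn", {
        clusterName: "Always On",
        sparkVersion: latestLts.then(v => v.id),
        nodeTypeId: smallest.then(t => t.id),
        numWorkers: 1,
        autoterminationMinutes: 0,
    });
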
    aws_attributes ClusterAwsAttributesArgs
    azure_attributes ClusterAzureAttributesArgs
    cluster_id str
    cluster_log_conf ClusterClusterLogConfArgs
    cluster_mount_infos Sequence[ClusterClusterMountInfoArgs]
    cluster_name str
    Cluster name, which doesn’t have to be unique. If not specified at creation, the cluster name will be an empty string.
    custom_tags Mapping[str, Any]

    should have tag ResourceClass set to value Serverless

    For example:

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";
    

    const clusterWithTableAccessControl = new databricks.Cluster("clusterWithTableAccessControl", {
        clusterName: "Shared High-Concurrency",
        sparkVersion: data.databricks_spark_version.latest_lts.id,
        nodeTypeId: data.databricks_node_type.smallest.id,
        autoterminationMinutes: 20,
        sparkConf: {
            "spark.databricks.repl.allowedLanguages": "python,sql",
            "spark.databricks.cluster.profile": "serverless",
        },
        customTags: {
            ResourceClass: "Serverless",
        },
    });

    import pulumi
    import pulumi_databricks as databricks
    
    cluster_with_table_access_control = databricks.Cluster("clusterWithTableAccessControl",
        cluster_name="Shared High-Concurrency",
        spark_version=data["databricks_spark_version"]["latest_lts"]["id"],
        node_type_id=data["databricks_node_type"]["smallest"]["id"],
        autotermination_minutes=20,
        spark_conf={
            "spark.databricks.repl.allowedLanguages": "python,sql",
            "spark.databricks.cluster.profile": "serverless",
        },
        custom_tags={
            "ResourceClass": "Serverless",
        })
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Databricks = Pulumi.Databricks;
    
    return await Deployment.RunAsync(() => 
    {
        var clusterWithTableAccessControl = new Databricks.Cluster("clusterWithTableAccessControl", new()
        {
            ClusterName = "Shared High-Concurrency",
            SparkVersion = data.Databricks_spark_version.Latest_lts.Id,
            NodeTypeId = data.Databricks_node_type.Smallest.Id,
            AutoterminationMinutes = 20,
            SparkConf = 
            {
                { "spark.databricks.repl.allowedLanguages", "python,sql" },
                { "spark.databricks.cluster.profile", "serverless" },
            },
            CustomTags = 
            {
                { "ResourceClass", "Serverless" },
            },
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := databricks.NewCluster(ctx, "clusterWithTableAccessControl", &databricks.ClusterArgs{
    			ClusterName:            pulumi.String("Shared High-Concurrency"),
    			SparkVersion:           pulumi.Any(data.Databricks_spark_version.Latest_lts.Id),
    			NodeTypeId:             pulumi.Any(data.Databricks_node_type.Smallest.Id),
    			AutoterminationMinutes: pulumi.Int(20),
    			SparkConf: pulumi.Map{
    				"spark.databricks.repl.allowedLanguages": pulumi.Any("python,sql"),
    				"spark.databricks.cluster.profile":       pulumi.Any("serverless"),
    			},
    			CustomTags: pulumi.Map{
    				"ResourceClass": pulumi.Any("Serverless"),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.databricks.Cluster;
    import com.pulumi.databricks.ClusterArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var clusterWithTableAccessControl = new Cluster("clusterWithTableAccessControl", ClusterArgs.builder()        
                .clusterName("Shared High-Concurrency")
                .sparkVersion(data.databricks_spark_version().latest_lts().id())
                .nodeTypeId(data.databricks_node_type().smallest().id())
                .autoterminationMinutes(20)
                .sparkConf(Map.ofEntries(
                    Map.entry("spark.databricks.repl.allowedLanguages", "python,sql"),
                    Map.entry("spark.databricks.cluster.profile", "serverless")
                ))
                .customTags(Map.of("ResourceClass", "Serverless"))
                .build());
    
        }
    }
    
    resources:
      clusterWithTableAccessControl:
        type: databricks:Cluster
        properties:
          clusterName: Shared High-Concurrency
          sparkVersion: ${data.databricks_spark_version.latest_lts.id}
          nodeTypeId: ${data.databricks_node_type.smallest.id}
          autoterminationMinutes: 20
          sparkConf:
            spark.databricks.repl.allowedLanguages: python,sql
            spark.databricks.cluster.profile: serverless
          customTags:
            ResourceClass: Serverless
    
    data_security_mode str
    Select the security features of the cluster. Unity Catalog requires SINGLE_USER or USER_ISOLATION mode. LEGACY_PASSTHROUGH is for passthrough clusters and LEGACY_TABLE_ACL is for Table ACL clusters. If omitted, no security features are enabled. In the Databricks UI, this setting has recently been renamed Access Mode and USER_ISOLATION has been renamed Shared, but use the API terms listed here.
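
    For example, a hedged TypeScript sketch of a Unity Catalog single-user cluster; the user name is a placeholder, and SINGLE_USER mode requires single_user_name as noted above:

    import * as databricks from "@pulumi/databricks";
    
    const smallest = databricks.getNodeType({ localDisk: true });
    const latestLts = databricks.getSparkVersion({ longTermSupport: true });
    
    // SINGLE_USER mode assigns the cluster to exactly one user.
    const singleUser = new databricks.Cluster("singleUser", {
        clusterName: "Single User",
        sparkVersion: latestLts.then(v => v.id),
        nodeTypeId: smallest.then(t => t.id),
        autoterminationMinutes: 20,
        numWorkers: 1,
        dataSecurityMode: "SINGLE_USER",
        singleUserName: "someone@example.com", // placeholder user name
    });
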
    docker_image ClusterDockerImageArgs
    driver_instance_pool_id str
    Similar to instance_pool_id, but for the driver node. If omitted and instance_pool_id is specified, then the driver will be allocated from that pool.
    driver_node_type_id str
    The node type of the Spark driver. This field is optional; if unset, the API will set the driver node type to the same value as node_type_id defined above.
    enable_elastic_disk bool
    If you don’t want to allocate a fixed number of EBS volumes at cluster creation time, use autoscaling local storage. With autoscaling local storage, Databricks monitors the amount of free disk space available on your cluster’s Spark workers. If a worker begins to run too low on disk, Databricks automatically attaches a new EBS volume to the worker before it runs out of disk space. EBS volumes are attached up to a limit of 5 TB of total disk space per instance (including the instance’s local storage). To scale down EBS usage, make sure you have autotermination_minutes and autoscale attributes set. More documentation is available on the cluster configuration page.
    enable_local_disk_encryption bool
    Some instance types you use to run clusters may have locally attached disks. Databricks may store shuffle data or temporary data on these locally attached disks. To ensure that all data at rest is encrypted for all storage types, including shuffle data stored temporarily on your cluster’s local disks, you can enable local disk encryption. When local disk encryption is enabled, Databricks generates an encryption key locally unique to each cluster node and uses it to encrypt all data stored on local disks. The scope of the key is local to each cluster node and is destroyed along with the cluster node itself. During its lifetime, the key resides in memory for encryption and decryption and is stored encrypted on the disk. Your workloads may run more slowly because of the performance impact of reading and writing encrypted data to and from local volumes. This feature is not available for all Azure Databricks subscriptions. Contact your Microsoft or Databricks account representative to request access.
    gcp_attributes ClusterGcpAttributesArgs
    idempotency_token str
    An optional token to guarantee the idempotency of cluster creation requests. If an active cluster with the provided token already exists, the request will not create a new cluster, but it will return the existing running cluster's ID instead. If you specify the idempotency token, upon failure, you can retry until the request succeeds. Databricks platform guarantees to launch exactly one cluster with that idempotency token. This token should have at most 64 characters.
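
    As a sketch, a TypeScript cluster created with an idempotency token; the token value is arbitrary for this example and only needs to be at most 64 characters and stable across retries:

    import * as databricks from "@pulumi/databricks";
    
    const smallest = databricks.getNodeType({ localDisk: true });
    const latestLts = databricks.getSparkVersion({ longTermSupport: true });
    
    // Retrying with the same token returns the existing cluster instead of creating a second one.
    const idempotent = new databricks.Cluster("idempotent", {
        clusterName: "Idempotent Create",
        sparkVersion: latestLts.then(v => v.id),
        nodeTypeId: smallest.then(t => t.id),
        autoterminationMinutes: 20,
        numWorkers: 1,
        idempotencyToken: "shared-etl-cluster-v1", // placeholder token
    });
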
    init_scripts Sequence[ClusterInitScriptArgs]
    instance_pool_id str
    To reduce cluster start time, you can attach a cluster to a predefined pool of idle instances. When attached to a pool, a cluster allocates its driver and worker nodes from the pool. If the pool does not have sufficient idle resources to accommodate the cluster’s request, it expands by allocating new instances from the instance provider. When an attached cluster changes its state to TERMINATED, the instances it used are returned to the pool and reused by a different cluster.
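
    For example, a TypeScript sketch that attaches the cluster to an existing pool; reading the pool ID from Pulumi config is purely an illustrative choice, and the "poolId" config key is an assumption of this example:

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";
    
    const config = new pulumi.Config();
    // ID of a pre-existing instance pool, supplied via `pulumi config set poolId ...`.
    const poolId = config.require("poolId");
    
    const latestLts = databricks.getSparkVersion({ longTermSupport: true });
    
    // node_type_id can be omitted because the pool already fixes the node type.
    const pooled = new databricks.Cluster("pooled", {
        clusterName: "Pooled",
        sparkVersion: latestLts.then(v => v.id),
        instancePoolId: poolId,
        autoterminationMinutes: 20,
        numWorkers: 1,
    });
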
    is_pinned bool

    Boolean value specifying if the cluster is pinned (not pinned by default). You must be a Databricks administrator to use this. The maximum number of pinned clusters is limited to 100, so apply may fail if you have more than that (this number may change over time, so check the Databricks documentation for the current limit).

    The following example demonstrates how to create an autoscaling cluster with Delta Cache enabled:

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";
    

    const smallest = databricks.getNodeType({
        localDisk: true,
    });
    const latestLts = databricks.getSparkVersion({
        longTermSupport: true,
    });
    const sharedAutoscaling = new databricks.Cluster("sharedAutoscaling", {
        clusterName: "Shared Autoscaling",
        sparkVersion: latestLts.then(latestLts => latestLts.id),
        nodeTypeId: smallest.then(smallest => smallest.id),
        autoterminationMinutes: 20,
        autoscale: {
            minWorkers: 1,
            maxWorkers: 50,
        },
        sparkConf: {
            "spark.databricks.io.cache.enabled": true,
            "spark.databricks.io.cache.maxDiskUsage": "50g",
            "spark.databricks.io.cache.maxMetaDataCache": "1g",
        },
    });

    import pulumi
    import pulumi_databricks as databricks
    
    smallest = databricks.get_node_type(local_disk=True)
    latest_lts = databricks.get_spark_version(long_term_support=True)
    shared_autoscaling = databricks.Cluster("sharedAutoscaling",
        cluster_name="Shared Autoscaling",
        spark_version=latest_lts.id,
        node_type_id=smallest.id,
        autotermination_minutes=20,
        autoscale=databricks.ClusterAutoscaleArgs(
            min_workers=1,
            max_workers=50,
        ),
        spark_conf={
            "spark.databricks.io.cache.enabled": True,
            "spark.databricks.io.cache.maxDiskUsage": "50g",
            "spark.databricks.io.cache.maxMetaDataCache": "1g",
        })
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Databricks = Pulumi.Databricks;
    
    return await Deployment.RunAsync(() => 
    {
        var smallest = Databricks.GetNodeType.Invoke(new()
        {
            LocalDisk = true,
        });
    
        var latestLts = Databricks.GetSparkVersion.Invoke(new()
        {
            LongTermSupport = true,
        });
    
        var sharedAutoscaling = new Databricks.Cluster("sharedAutoscaling", new()
        {
            ClusterName = "Shared Autoscaling",
            SparkVersion = latestLts.Apply(getSparkVersionResult => getSparkVersionResult.Id),
            NodeTypeId = smallest.Apply(getNodeTypeResult => getNodeTypeResult.Id),
            AutoterminationMinutes = 20,
            Autoscale = new Databricks.Inputs.ClusterAutoscaleArgs
            {
                MinWorkers = 1,
                MaxWorkers = 50,
            },
            SparkConf = 
            {
                { "spark.databricks.io.cache.enabled", true },
                { "spark.databricks.io.cache.maxDiskUsage", "50g" },
                { "spark.databricks.io.cache.maxMetaDataCache", "1g" },
            },
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		smallest, err := databricks.GetNodeType(ctx, &databricks.GetNodeTypeArgs{
    			LocalDisk: pulumi.BoolRef(true),
    		}, nil)
    		if err != nil {
    			return err
    		}
    		latestLts, err := databricks.GetSparkVersion(ctx, &databricks.GetSparkVersionArgs{
    			LongTermSupport: pulumi.BoolRef(true),
    		}, nil)
    		if err != nil {
    			return err
    		}
    		_, err = databricks.NewCluster(ctx, "sharedAutoscaling", &databricks.ClusterArgs{
    			ClusterName:            pulumi.String("Shared Autoscaling"),
    			SparkVersion:           pulumi.String(latestLts.Id),
    			NodeTypeId:             pulumi.String(smallest.Id),
    			AutoterminationMinutes: pulumi.Int(20),
    			Autoscale: &databricks.ClusterAutoscaleArgs{
    				MinWorkers: pulumi.Int(1),
    				MaxWorkers: pulumi.Int(50),
    			},
    			SparkConf: pulumi.Map{
    				"spark.databricks.io.cache.enabled":          pulumi.Any(true),
    				"spark.databricks.io.cache.maxDiskUsage":     pulumi.Any("50g"),
    				"spark.databricks.io.cache.maxMetaDataCache": pulumi.Any("1g"),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.databricks.DatabricksFunctions;
    import com.pulumi.databricks.inputs.GetNodeTypeArgs;
    import com.pulumi.databricks.inputs.GetSparkVersionArgs;
    import com.pulumi.databricks.Cluster;
    import com.pulumi.databricks.ClusterArgs;
    import com.pulumi.databricks.inputs.ClusterAutoscaleArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            final var smallest = DatabricksFunctions.getNodeType(GetNodeTypeArgs.builder()
                .localDisk(true)
                .build());
    
            final var latestLts = DatabricksFunctions.getSparkVersion(GetSparkVersionArgs.builder()
                .longTermSupport(true)
                .build());
    
            var sharedAutoscaling = new Cluster("sharedAutoscaling", ClusterArgs.builder()        
                .clusterName("Shared Autoscaling")
                .sparkVersion(latestLts.applyValue(getSparkVersionResult -> getSparkVersionResult.id()))
                .nodeTypeId(smallest.applyValue(getNodeTypeResult -> getNodeTypeResult.id()))
                .autoterminationMinutes(20)
                .autoscale(ClusterAutoscaleArgs.builder()
                    .minWorkers(1)
                    .maxWorkers(50)
                    .build())
                .sparkConf(Map.ofEntries(
                    Map.entry("spark.databricks.io.cache.enabled", true),
                    Map.entry("spark.databricks.io.cache.maxDiskUsage", "50g"),
                    Map.entry("spark.databricks.io.cache.maxMetaDataCache", "1g")
                ))
                .build());
    
        }
    }
    
    resources:
      sharedAutoscaling:
        type: databricks:Cluster
        properties:
          clusterName: Shared Autoscaling
          sparkVersion: ${latestLts.id}
          nodeTypeId: ${smallest.id}
          autoterminationMinutes: 20
          autoscale:
            minWorkers: 1
            maxWorkers: 50
          sparkConf:
            spark.databricks.io.cache.enabled: true
            spark.databricks.io.cache.maxDiskUsage: 50g
            spark.databricks.io.cache.maxMetaDataCache: 1g
    variables:
      smallest:
        fn::invoke:
          Function: databricks:getNodeType
          Arguments:
            localDisk: true
      latestLts:
        fn::invoke:
          Function: databricks:getSparkVersion
          Arguments:
            longTermSupport: true
    
    libraries Sequence[ClusterLibraryArgs]
    node_type_id str
    Any supported databricks.getNodeType id. If instance_pool_id is specified, this field is not needed.
    num_workers int
    Number of worker nodes that this cluster should have. A cluster has one Spark driver and num_workers executors for a total of num_workers + 1 Spark nodes.
    policy_id str
    runtime_engine str
    The type of runtime engine to use. If not specified, the runtime engine type is inferred based on the spark_version value. Allowed values include: PHOTON, STANDARD.
    single_user_name str
    The optional user name of the user to assign to an interactive cluster. This field is required when using data_security_mode set to SINGLE_USER or AAD Passthrough for Azure Data Lake Storage (ADLS) with a single-user cluster (i.e., not high-concurrency clusters).
    spark_conf Mapping[str, Any]
    should have the following items:

    • spark.databricks.repl.allowedLanguages set to a list of supported languages, for example: python,sql, or python,sql,r. Scala is not supported!
    • spark.databricks.cluster.profile set to serverless
    spark_env_vars Mapping[str, Any]
    Map with environment variable key-value pairs to fine-tune Spark clusters. Key-value pairs of the form (X,Y) are exported (i.e., X='Y') while launching the driver and workers.
    ssh_public_keys Sequence[str]
    SSH public key contents that will be added to each Spark node in this cluster. The corresponding private keys can be used to login with the user name ubuntu on port 2200. You can specify up to 10 keys.
    workload_type ClusterWorkloadTypeArgs
    sparkVersion String
    Runtime version of the cluster. Any supported databricks.getSparkVersion id. We advise using Cluster Policies to restrict the list of versions for simplicity while maintaining enough control.
    applyPolicyDefaultValues Boolean
    Whether to use policy default values for missing cluster attributes.
    autoscale Property Map
    autoterminationMinutes Number
    Automatically terminate the cluster after being inactive for this time in minutes. If specified, the threshold must be between 10 and 10000 minutes. You can also set this value to 0 to explicitly disable automatic termination. Defaults to 60. We highly recommend having this setting present for Interactive/BI clusters.
    awsAttributes Property Map
    azureAttributes Property Map
    clusterId String
    clusterLogConf Property Map
    clusterMountInfos List<Property Map>
    clusterName String
    Cluster name, which doesn’t have to be unique. If not specified at creation, the cluster name will be an empty string.
    customTags Map<Any>

    should have tag ResourceClass set to value Serverless

    For example:

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";
    

    const clusterWithTableAccessControl = new databricks.Cluster("clusterWithTableAccessControl", {
        clusterName: "Shared High-Concurrency",
        sparkVersion: data.databricks_spark_version.latest_lts.id,
        nodeTypeId: data.databricks_node_type.smallest.id,
        autoterminationMinutes: 20,
        sparkConf: {
            "spark.databricks.repl.allowedLanguages": "python,sql",
            "spark.databricks.cluster.profile": "serverless",
        },
        customTags: {
            ResourceClass: "Serverless",
        },
    });

    import pulumi
    import pulumi_databricks as databricks
    
    cluster_with_table_access_control = databricks.Cluster("clusterWithTableAccessControl",
        cluster_name="Shared High-Concurrency",
        spark_version=data["databricks_spark_version"]["latest_lts"]["id"],
        node_type_id=data["databricks_node_type"]["smallest"]["id"],
        autotermination_minutes=20,
        spark_conf={
            "spark.databricks.repl.allowedLanguages": "python,sql",
            "spark.databricks.cluster.profile": "serverless",
        },
        custom_tags={
            "ResourceClass": "Serverless",
        })
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Databricks = Pulumi.Databricks;
    
    return await Deployment.RunAsync(() => 
    {
        var clusterWithTableAccessControl = new Databricks.Cluster("clusterWithTableAccessControl", new()
        {
            ClusterName = "Shared High-Concurrency",
            SparkVersion = data.Databricks_spark_version.Latest_lts.Id,
            NodeTypeId = data.Databricks_node_type.Smallest.Id,
            AutoterminationMinutes = 20,
            SparkConf = 
            {
                { "spark.databricks.repl.allowedLanguages", "python,sql" },
                { "spark.databricks.cluster.profile", "serverless" },
            },
            CustomTags = 
            {
                { "ResourceClass", "Serverless" },
            },
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := databricks.NewCluster(ctx, "clusterWithTableAccessControl", &databricks.ClusterArgs{
    			ClusterName:            pulumi.String("Shared High-Concurrency"),
    			SparkVersion:           pulumi.Any(data.Databricks_spark_version.Latest_lts.Id),
    			NodeTypeId:             pulumi.Any(data.Databricks_node_type.Smallest.Id),
    			AutoterminationMinutes: pulumi.Int(20),
    			SparkConf: pulumi.Map{
    				"spark.databricks.repl.allowedLanguages": pulumi.Any("python,sql"),
    				"spark.databricks.cluster.profile":       pulumi.Any("serverless"),
    			},
    			CustomTags: pulumi.Map{
    				"ResourceClass": pulumi.Any("Serverless"),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.databricks.Cluster;
    import com.pulumi.databricks.ClusterArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var clusterWithTableAccessControl = new Cluster("clusterWithTableAccessControl", ClusterArgs.builder()        
                .clusterName("Shared High-Concurrency")
                .sparkVersion(data.databricks_spark_version().latest_lts().id())
                .nodeTypeId(data.databricks_node_type().smallest().id())
                .autoterminationMinutes(20)
                .sparkConf(Map.ofEntries(
                    Map.entry("spark.databricks.repl.allowedLanguages", "python,sql"),
                    Map.entry("spark.databricks.cluster.profile", "serverless")
                ))
                .customTags(Map.of("ResourceClass", "Serverless"))
                .build());
    
        }
    }
    
    resources:
      clusterWithTableAccessControl:
        type: databricks:Cluster
        properties:
          clusterName: Shared High-Concurrency
          sparkVersion: ${data.databricks_spark_version.latest_lts.id}
          nodeTypeId: ${data.databricks_node_type.smallest.id}
          autoterminationMinutes: 20
          sparkConf:
            spark.databricks.repl.allowedLanguages: python,sql
            spark.databricks.cluster.profile: serverless
          customTags:
            ResourceClass: Serverless
    
    dataSecurityMode String
    Select the security features of the cluster. Unity Catalog requires SINGLE_USER or USER_ISOLATION mode. LEGACY_PASSTHROUGH is for passthrough clusters and LEGACY_TABLE_ACL is for Table ACL clusters. If omitted, no security features are enabled. In the Databricks UI, this setting has recently been renamed Access Mode and USER_ISOLATION has been renamed Shared, but use the API terms listed here.
    dockerImage Property Map
    driverInstancePoolId String
    Similar to instance_pool_id, but for the driver node. If omitted and instance_pool_id is specified, then the driver will be allocated from that pool.
    driverNodeTypeId String
    The node type of the Spark driver. This field is optional; if unset, the API will set the driver node type to the same value as node_type_id defined above.
    enableElasticDisk Boolean
    If you don’t want to allocate a fixed number of EBS volumes at cluster creation time, use autoscaling local storage. With autoscaling local storage, Databricks monitors the amount of free disk space available on your cluster’s Spark workers. If a worker begins to run too low on disk, Databricks automatically attaches a new EBS volume to the worker before it runs out of disk space. EBS volumes are attached up to a limit of 5 TB of total disk space per instance (including the instance’s local storage). To scale down EBS usage, make sure you have autotermination_minutes and autoscale attributes set. More documentation is available on the cluster configuration page.
    enableLocalDiskEncryption Boolean
    Some instance types you use to run clusters may have locally attached disks. Databricks may store shuffle data or temporary data on these locally attached disks. To ensure that all data at rest is encrypted for all storage types, including shuffle data stored temporarily on your cluster’s local disks, you can enable local disk encryption. When local disk encryption is enabled, Databricks generates an encryption key locally unique to each cluster node and uses it to encrypt all data stored on local disks. The scope of the key is local to each cluster node and is destroyed along with the cluster node itself. During its lifetime, the key resides in memory for encryption and decryption and is stored encrypted on the disk. Your workloads may run more slowly because of the performance impact of reading and writing encrypted data to and from local volumes. This feature is not available for all Azure Databricks subscriptions. Contact your Microsoft or Databricks account representative to request access.
    gcpAttributes Property Map
    idempotencyToken String
    An optional token to guarantee the idempotency of cluster creation requests. If an active cluster with the provided token already exists, the request will not create a new cluster, but it will return the existing running cluster's ID instead. If you specify the idempotency token, upon failure, you can retry until the request succeeds. Databricks platform guarantees to launch exactly one cluster with that idempotency token. This token should have at most 64 characters.
    initScripts List<Property Map>
    instancePoolId String
    To reduce cluster start time, you can attach a cluster to a predefined pool of idle instances. When attached to a pool, a cluster allocates its driver and worker nodes from the pool. If the pool does not have sufficient idle resources to accommodate the cluster’s request, it expands by allocating new instances from the instance provider. When an attached cluster changes its state to TERMINATED, the instances it used are returned to the pool and reused by a different cluster.
    isPinned Boolean

    Boolean value specifying if the cluster is pinned (not pinned by default). You must be a Databricks administrator to use this. The maximum number of pinned clusters is limited to 100, so apply may fail if you have more than that (this number may change over time, so check the Databricks documentation for the current limit).

    The following example demonstrates how to create an autoscaling cluster with Delta Cache enabled:

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";
    

    const smallest = databricks.getNodeType({
        localDisk: true,
    });
    const latestLts = databricks.getSparkVersion({
        longTermSupport: true,
    });
    const sharedAutoscaling = new databricks.Cluster("sharedAutoscaling", {
        clusterName: "Shared Autoscaling",
        sparkVersion: latestLts.then(latestLts => latestLts.id),
        nodeTypeId: smallest.then(smallest => smallest.id),
        autoterminationMinutes: 20,
        autoscale: {
            minWorkers: 1,
            maxWorkers: 50,
        },
        sparkConf: {
            "spark.databricks.io.cache.enabled": true,
            "spark.databricks.io.cache.maxDiskUsage": "50g",
            "spark.databricks.io.cache.maxMetaDataCache": "1g",
        },
    });

    import pulumi
    import pulumi_databricks as databricks
    
    smallest = databricks.get_node_type(local_disk=True)
    latest_lts = databricks.get_spark_version(long_term_support=True)
    shared_autoscaling = databricks.Cluster("sharedAutoscaling",
        cluster_name="Shared Autoscaling",
        spark_version=latest_lts.id,
        node_type_id=smallest.id,
        autotermination_minutes=20,
        autoscale=databricks.ClusterAutoscaleArgs(
            min_workers=1,
            max_workers=50,
        ),
        spark_conf={
            "spark.databricks.io.cache.enabled": True,
            "spark.databricks.io.cache.maxDiskUsage": "50g",
            "spark.databricks.io.cache.maxMetaDataCache": "1g",
        })
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Databricks = Pulumi.Databricks;
    
    return await Deployment.RunAsync(() => 
    {
        var smallest = Databricks.GetNodeType.Invoke(new()
        {
            LocalDisk = true,
        });
    
        var latestLts = Databricks.GetSparkVersion.Invoke(new()
        {
            LongTermSupport = true,
        });
    
        var sharedAutoscaling = new Databricks.Cluster("sharedAutoscaling", new()
        {
            ClusterName = "Shared Autoscaling",
            SparkVersion = latestLts.Apply(getSparkVersionResult => getSparkVersionResult.Id),
            NodeTypeId = smallest.Apply(getNodeTypeResult => getNodeTypeResult.Id),
            AutoterminationMinutes = 20,
            Autoscale = new Databricks.Inputs.ClusterAutoscaleArgs
            {
                MinWorkers = 1,
                MaxWorkers = 50,
            },
            SparkConf = 
            {
                { "spark.databricks.io.cache.enabled", true },
                { "spark.databricks.io.cache.maxDiskUsage", "50g" },
                { "spark.databricks.io.cache.maxMetaDataCache", "1g" },
            },
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		smallest, err := databricks.GetNodeType(ctx, &databricks.GetNodeTypeArgs{
    			LocalDisk: pulumi.BoolRef(true),
    		}, nil)
    		if err != nil {
    			return err
    		}
    		latestLts, err := databricks.GetSparkVersion(ctx, &databricks.GetSparkVersionArgs{
    			LongTermSupport: pulumi.BoolRef(true),
    		}, nil)
    		if err != nil {
    			return err
    		}
    		_, err = databricks.NewCluster(ctx, "sharedAutoscaling", &databricks.ClusterArgs{
    			ClusterName:            pulumi.String("Shared Autoscaling"),
    			SparkVersion:           pulumi.String(latestLts.Id),
    			NodeTypeId:             pulumi.String(smallest.Id),
    			AutoterminationMinutes: pulumi.Int(20),
    			Autoscale: &databricks.ClusterAutoscaleArgs{
    				MinWorkers: pulumi.Int(1),
    				MaxWorkers: pulumi.Int(50),
    			},
    			SparkConf: pulumi.Map{
    				"spark.databricks.io.cache.enabled":          pulumi.Any(true),
    				"spark.databricks.io.cache.maxDiskUsage":     pulumi.Any("50g"),
    				"spark.databricks.io.cache.maxMetaDataCache": pulumi.Any("1g"),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.databricks.DatabricksFunctions;
    import com.pulumi.databricks.inputs.GetNodeTypeArgs;
    import com.pulumi.databricks.inputs.GetSparkVersionArgs;
    import com.pulumi.databricks.Cluster;
    import com.pulumi.databricks.ClusterArgs;
    import com.pulumi.databricks.inputs.ClusterAutoscaleArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            final var smallest = DatabricksFunctions.getNodeType(GetNodeTypeArgs.builder()
                .localDisk(true)
                .build());
    
            final var latestLts = DatabricksFunctions.getSparkVersion(GetSparkVersionArgs.builder()
                .longTermSupport(true)
                .build());
    
            var sharedAutoscaling = new Cluster("sharedAutoscaling", ClusterArgs.builder()        
                .clusterName("Shared Autoscaling")
                .sparkVersion(latestLts.applyValue(getSparkVersionResult -> getSparkVersionResult.id()))
                .nodeTypeId(smallest.applyValue(getNodeTypeResult -> getNodeTypeResult.id()))
                .autoterminationMinutes(20)
                .autoscale(ClusterAutoscaleArgs.builder()
                    .minWorkers(1)
                    .maxWorkers(50)
                    .build())
                .sparkConf(Map.ofEntries(
                    Map.entry("spark.databricks.io.cache.enabled", true),
                    Map.entry("spark.databricks.io.cache.maxDiskUsage", "50g"),
                    Map.entry("spark.databricks.io.cache.maxMetaDataCache", "1g")
                ))
                .build());
    
        }
    }
    
    resources:
      sharedAutoscaling:
        type: databricks:Cluster
        properties:
          clusterName: Shared Autoscaling
          sparkVersion: ${latestLts.id}
          nodeTypeId: ${smallest.id}
          autoterminationMinutes: 20
          autoscale:
            minWorkers: 1
            maxWorkers: 50
          sparkConf:
            spark.databricks.io.cache.enabled: true
            spark.databricks.io.cache.maxDiskUsage: 50g
            spark.databricks.io.cache.maxMetaDataCache: 1g
    variables:
      smallest:
        fn::invoke:
          Function: databricks:getNodeType
          Arguments:
            localDisk: true
      latestLts:
        fn::invoke:
          Function: databricks:getSparkVersion
          Arguments:
            longTermSupport: true
    
    libraries List<Property Map>
    nodeTypeId String
    Any supported databricks.getNodeType id. If instance_pool_id is specified, this field is not needed.
    numWorkers Number
    Number of worker nodes that this cluster should have. A cluster has one Spark driver and num_workers executors for a total of num_workers + 1 Spark nodes.
    policyId String
    runtimeEngine String
    The type of runtime engine to use. If not specified, the runtime engine type is inferred based on the spark_version value. Allowed values include: PHOTON, STANDARD.
    singleUserName String
    The optional user name of the user to assign to an interactive cluster. This field is required when using data_security_mode set to SINGLE_USER or AAD Passthrough for Azure Data Lake Storage (ADLS) with a single-user cluster (i.e., not high-concurrency clusters).
    sparkConf Map<Any>
    should have the following items:

    • spark.databricks.repl.allowedLanguages set to a list of supported languages, for example: python,sql, or python,sql,r. Scala is not supported!
    • spark.databricks.cluster.profile set to serverless
    sparkEnvVars Map<Any>
    Map with environment variable key-value pairs to fine-tune Spark clusters. Key-value pairs of the form (X,Y) are exported (i.e., X='Y') while launching the driver and workers.
    sshPublicKeys List<String>
    SSH public key contents that will be added to each Spark node in this cluster. The corresponding private keys can be used to login with the user name ubuntu on port 2200. You can specify up to 10 keys.
    workloadType Property Map

    Outputs

    All input properties are implicitly available as output properties. Additionally, the Cluster resource produces the following output properties:
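
    For example, continuing the sharedAutoscaling example above, a short TypeScript sketch that exports a few of these output properties as stack outputs:

    import * as databricks from "@pulumi/databricks";
    
    const smallest = databricks.getNodeType({ localDisk: true });
    const latestLts = databricks.getSparkVersion({ longTermSupport: true });
    
    const sharedAutoscaling = new databricks.Cluster("sharedAutoscaling", {
        clusterName: "Shared Autoscaling",
        sparkVersion: latestLts.then(v => v.id),
        nodeTypeId: smallest.then(t => t.id),
        autoterminationMinutes: 20,
        autoscale: { minWorkers: 1, maxWorkers: 50 },
    });
    
    // Output properties resolve once the cluster has been created.
    export const clusterId = sharedAutoscaling.id;
    export const clusterState = sharedAutoscaling.state;
    export const clusterDefaultTags = sharedAutoscaling.defaultTags;
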

    ClusterSource string
    DefaultTags Dictionary<string, object>
    (map) Tags that are added by Databricks by default, regardless of any custom_tags that may have been added. These include: Vendor: Databricks, Creator: <username_of_creator>, ClusterName: <name_of_cluster>, ClusterId: <id_of_cluster>, Name: , and any workspace and pool tags.
    Id string
    The provider-assigned unique ID for this managed resource.
    State string
    (string) State of the cluster.
    Url string
    URL for the Docker image
    ClusterSource string
    DefaultTags map[string]interface{}
    (map) Tags that are added by Databricks by default, regardless of any custom_tags that may have been added. These include: Vendor: Databricks, Creator: <username_of_creator>, ClusterName: <name_of_cluster>, ClusterId: <id_of_cluster>, Name: , and any workspace and pool tags.
    Id string
    The provider-assigned unique ID for this managed resource.
    State string
    (string) State of the cluster.
    Url string
    URL for the Docker image
    clusterSource String
    defaultTags Map<String,Object>
    (map) Tags that are added by Databricks by default, regardless of any custom_tags that may have been added. These include: Vendor: Databricks, Creator: <username_of_creator>, ClusterName: <name_of_cluster>, ClusterId: <id_of_cluster>, Name: , and any workspace and pool tags.
    id String
    The provider-assigned unique ID for this managed resource.
    state String
    (string) State of the cluster.
    url String
    URL for the Docker image
    clusterSource string
    defaultTags {[key: string]: any}
    (map) Tags that are added by Databricks by default, regardless of any custom_tags that may have been added. These include: Vendor: Databricks, Creator: <username_of_creator>, ClusterName: <name_of_cluster>, ClusterId: <id_of_cluster>, Name: , and any workspace and pool tags.
    id string
    The provider-assigned unique ID for this managed resource.
    state string
    (string) State of the cluster.
    url string
    URL for the Docker image
    cluster_source str
    default_tags Mapping[str, Any]
    (map) Tags that are added by Databricks by default, regardless of any custom_tags that may have been added. These include: Vendor: Databricks, Creator: <username_of_creator>, ClusterName: <name_of_cluster>, ClusterId: <id_of_cluster>, Name: , and any workspace and pool tags.
    id str
    The provider-assigned unique ID for this managed resource.
    state str
    (string) State of the cluster.
    url str
    URL for the Docker image
    clusterSource String
    defaultTags Map<Any>
    (map) Tags that are added by Databricks by default, regardless of any custom_tags that may have been added. These include: Vendor: Databricks, Creator: <username_of_creator>, ClusterName: <name_of_cluster>, ClusterId: <id_of_cluster>, Name: , and any workspace and pool tags.
    id String
    The provider-assigned unique ID for this managed resource.
    state String
    (string) State of the cluster.
    url String
    URL for the Docker image

    Look up Existing Cluster Resource

    Get an existing Cluster resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

    public static get(name: string, id: Input<ID>, state?: ClusterState, opts?: CustomResourceOptions): Cluster
    @staticmethod
    def get(resource_name: str,
            id: str,
            opts: Optional[ResourceOptions] = None,
            apply_policy_default_values: Optional[bool] = None,
            autoscale: Optional[ClusterAutoscaleArgs] = None,
            autotermination_minutes: Optional[int] = None,
            aws_attributes: Optional[ClusterAwsAttributesArgs] = None,
            azure_attributes: Optional[ClusterAzureAttributesArgs] = None,
            cluster_id: Optional[str] = None,
            cluster_log_conf: Optional[ClusterClusterLogConfArgs] = None,
            cluster_mount_infos: Optional[Sequence[ClusterClusterMountInfoArgs]] = None,
            cluster_name: Optional[str] = None,
            cluster_source: Optional[str] = None,
            custom_tags: Optional[Mapping[str, Any]] = None,
            data_security_mode: Optional[str] = None,
            default_tags: Optional[Mapping[str, Any]] = None,
            docker_image: Optional[ClusterDockerImageArgs] = None,
            driver_instance_pool_id: Optional[str] = None,
            driver_node_type_id: Optional[str] = None,
            enable_elastic_disk: Optional[bool] = None,
            enable_local_disk_encryption: Optional[bool] = None,
            gcp_attributes: Optional[ClusterGcpAttributesArgs] = None,
            idempotency_token: Optional[str] = None,
            init_scripts: Optional[Sequence[ClusterInitScriptArgs]] = None,
            instance_pool_id: Optional[str] = None,
            is_pinned: Optional[bool] = None,
            libraries: Optional[Sequence[ClusterLibraryArgs]] = None,
            node_type_id: Optional[str] = None,
            num_workers: Optional[int] = None,
            policy_id: Optional[str] = None,
            runtime_engine: Optional[str] = None,
            single_user_name: Optional[str] = None,
            spark_conf: Optional[Mapping[str, Any]] = None,
            spark_env_vars: Optional[Mapping[str, Any]] = None,
            spark_version: Optional[str] = None,
            ssh_public_keys: Optional[Sequence[str]] = None,
            state: Optional[str] = None,
            url: Optional[str] = None,
            workload_type: Optional[ClusterWorkloadTypeArgs] = None) -> Cluster
    func GetCluster(ctx *Context, name string, id IDInput, state *ClusterState, opts ...ResourceOption) (*Cluster, error)
    public static Cluster Get(string name, Input<string> id, ClusterState? state, CustomResourceOptions? opts = null)
    public static Cluster get(String name, Output<String> id, ClusterState state, CustomResourceOptions options)
    Resource lookup is not supported in YAML
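
    For example, in TypeScript the lookup can be sketched as follows; the cluster resource ID passed as the second argument is a placeholder for a real ID, for instance one exported by another stack:

    import * as databricks from "@pulumi/databricks";
    
    // Look up an existing cluster by its ID; no new cluster is created.
    const existing = databricks.Cluster.get("existing-shared-autoscaling", "0123-456789-abcdefgh");
    
    // The looked-up resource exposes the same output properties as a managed one.
    export const existingClusterName = existing.clusterName;
    export const existingState = existing.state;
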
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    resource_name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    The following state arguments are supported:
    ApplyPolicyDefaultValues bool
    Whether to use policy default values for missing cluster attributes.
    Autoscale ClusterAutoscale
    AutoterminationMinutes int
    Automatically terminate the cluster after being inactive for this time in minutes. If specified, the threshold must be between 10 and 10000 minutes. You can also set this value to 0 to explicitly disable automatic termination. Defaults to 60. We highly recommend having this setting present for Interactive/BI clusters.
    AwsAttributes ClusterAwsAttributes
    AzureAttributes ClusterAzureAttributes
    ClusterId string
    ClusterLogConf ClusterClusterLogConf
    ClusterMountInfos List<ClusterClusterMountInfo>
    ClusterName string
    Cluster name, which doesn’t have to be unique. If not specified at creation, the cluster name will be an empty string.
    ClusterSource string
    CustomTags Dictionary<string, object>

    should have tag ResourceClass set to value Serverless

    For example:

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";
    

    const clusterWithTableAccessControl = new databricks.Cluster("clusterWithTableAccessControl", {
        clusterName: "Shared High-Concurrency",
        sparkVersion: data.databricks_spark_version.latest_lts.id,
        nodeTypeId: data.databricks_node_type.smallest.id,
        autoterminationMinutes: 20,
        sparkConf: {
            "spark.databricks.repl.allowedLanguages": "python,sql",
            "spark.databricks.cluster.profile": "serverless",
        },
        customTags: {
            ResourceClass: "Serverless",
        },
    });

    import pulumi
    import pulumi_databricks as databricks
    
    cluster_with_table_access_control = databricks.Cluster("clusterWithTableAccessControl",
        cluster_name="Shared High-Concurrency",
        spark_version=data["databricks_spark_version"]["latest_lts"]["id"],
        node_type_id=data["databricks_node_type"]["smallest"]["id"],
        autotermination_minutes=20,
        spark_conf={
            "spark.databricks.repl.allowedLanguages": "python,sql",
            "spark.databricks.cluster.profile": "serverless",
        },
        custom_tags={
            "ResourceClass": "Serverless",
        })
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Databricks = Pulumi.Databricks;
    
    return await Deployment.RunAsync(() => 
    {
        var clusterWithTableAccessControl = new Databricks.Cluster("clusterWithTableAccessControl", new()
        {
            ClusterName = "Shared High-Concurrency",
            SparkVersion = data.Databricks_spark_version.Latest_lts.Id,
            NodeTypeId = data.Databricks_node_type.Smallest.Id,
            AutoterminationMinutes = 20,
            SparkConf = 
            {
                { "spark.databricks.repl.allowedLanguages", "python,sql" },
                { "spark.databricks.cluster.profile", "serverless" },
            },
            CustomTags = 
            {
                { "ResourceClass", "Serverless" },
            },
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := databricks.NewCluster(ctx, "clusterWithTableAccessControl", &databricks.ClusterArgs{
    			ClusterName:            pulumi.String("Shared High-Concurrency"),
    			SparkVersion:           pulumi.Any(data.Databricks_spark_version.Latest_lts.Id),
    			NodeTypeId:             pulumi.Any(data.Databricks_node_type.Smallest.Id),
    			AutoterminationMinutes: pulumi.Int(20),
    			SparkConf: pulumi.Map{
    				"spark.databricks.repl.allowedLanguages": pulumi.Any("python,sql"),
    				"spark.databricks.cluster.profile":       pulumi.Any("serverless"),
    			},
    			CustomTags: pulumi.Map{
    				"ResourceClass": pulumi.Any("Serverless"),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.databricks.Cluster;
    import com.pulumi.databricks.ClusterArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var clusterWithTableAccessControl = new Cluster("clusterWithTableAccessControl", ClusterArgs.builder()        
                .clusterName("Shared High-Concurrency")
                .sparkVersion(data.databricks_spark_version().latest_lts().id())
                .nodeTypeId(data.databricks_node_type().smallest().id())
                .autoterminationMinutes(20)
                .sparkConf(Map.ofEntries(
                    Map.entry("spark.databricks.repl.allowedLanguages", "python,sql"),
                    Map.entry("spark.databricks.cluster.profile", "serverless")
                ))
                .customTags(Map.of("ResourceClass", "Serverless"))
                .build());
    
        }
    }
    
    resources:
      clusterWithTableAccessControl:
        type: databricks:Cluster
        properties:
          clusterName: Shared High-Concurrency
          sparkVersion: ${data.databricks_spark_version.latest_lts.id}
          nodeTypeId: ${data.databricks_node_type.smallest.id}
          autoterminationMinutes: 20
          sparkConf:
            spark.databricks.repl.allowedLanguages: python,sql
            spark.databricks.cluster.profile: serverless
          customTags:
            ResourceClass: Serverless
    
    DataSecurityMode string
    Select the security features of the cluster. Unity Catalog requires SINGLE_USER or USER_ISOLATION mode. LEGACY_PASSTHROUGH is for passthrough clusters and LEGACY_TABLE_ACL is for Table ACL clusters. If omitted, no security features are enabled. In the Databricks UI, this setting has recently been renamed Access Mode and USER_ISOLATION has been renamed Shared, but use the API terms listed here.
    DefaultTags Dictionary<string, object>
    (map) Tags that are added by Databricks by default, regardless of any custom_tags that may have been added. These include: Vendor: Databricks, Creator: <username_of_creator>, ClusterName: <name_of_cluster>, ClusterId: <id_of_cluster>, Name: , and any workspace and pool tags.
    DockerImage ClusterDockerImage
    DriverInstancePoolId string
    Similar to instance_pool_id, but for the driver node. If omitted and instance_pool_id is specified, then the driver will be allocated from that pool.
    DriverNodeTypeId string
    The node type of the Spark driver. This field is optional; if unset, the API will set the driver node type to the same value as node_type_id defined above.
    EnableElasticDisk bool
    If you don’t want to allocate a fixed number of EBS volumes at cluster creation time, use autoscaling local storage. With autoscaling local storage, Databricks monitors the amount of free disk space available on your cluster’s Spark workers. If a worker begins to run too low on disk, Databricks automatically attaches a new EBS volume to the worker before it runs out of disk space. EBS volumes are attached up to a limit of 5 TB of total disk space per instance (including the instance’s local storage). To scale down EBS usage, make sure you have autotermination_minutes and autoscale attributes set. More documentation is available on the cluster configuration page.
    EnableLocalDiskEncryption bool
    Some instance types you use to run clusters may have locally attached disks. Databricks may store shuffle data or temporary data on these locally attached disks. To ensure that all data at rest is encrypted for all storage types, including shuffle data stored temporarily on your cluster’s local disks, you can enable local disk encryption. When local disk encryption is enabled, Databricks generates an encryption key locally unique to each cluster node and uses it to encrypt all data stored on local disks. The scope of the key is local to each cluster node and is destroyed along with the cluster node itself. During its lifetime, the key resides in memory for encryption and decryption and is stored encrypted on the disk. Your workloads may run more slowly because of the performance impact of reading and writing encrypted data to and from local volumes. This feature is not available for all Azure Databricks subscriptions. Contact your Microsoft or Databricks account representative to request access.
    GcpAttributes ClusterGcpAttributes
    IdempotencyToken string
    An optional token to guarantee the idempotency of cluster creation requests. If an active cluster with the provided token already exists, the request will not create a new cluster, but it will return the existing running cluster's ID instead. If you specify the idempotency token, upon failure, you can retry until the request succeeds. Databricks platform guarantees to launch exactly one cluster with that idempotency token. This token should have at most 64 characters.
    InitScripts List<ClusterInitScript>
    InstancePoolId string
    To reduce cluster start time, you can attach a cluster to a predefined pool of idle instances. When attached to a pool, a cluster allocates its driver and worker nodes from the pool. If the pool does not have sufficient idle resources to accommodate the cluster’s request, it expands by allocating new instances from the instance provider. When an attached cluster changes its state to TERMINATED, the instances it used are returned to the pool and reused by a different cluster.
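    For example, a sketch that draws both the workers and the driver from a pre-existing pool whose ID is supplied through Pulumi config (the config key instancePoolId is an assumption made for this sketch):

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";

    const config = new pulumi.Config();
    // ID of an existing instance pool, e.g. set with `pulumi config set instancePoolId <pool-id>`.
    const poolId = config.require("instancePoolId");

    const latestLts = databricks.getSparkVersion({ longTermSupport: true });

    const pooled = new databricks.Cluster("pooled", {
        clusterName: "Pooled",
        sparkVersion: latestLts.then(v => v.id),
        autoterminationMinutes: 20,
        numWorkers: 2,
        instancePoolId: poolId,       // node_type_id is not needed when a pool is used
        driverInstancePoolId: poolId, // optional; defaults to instance_pool_id when omitted
    });
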
    IsPinned bool

    Boolean value specifying whether the cluster is pinned (not pinned by default). You must be a Databricks administrator to use this. The number of pinned clusters is limited to 100, so apply may fail if you have more than that (the limit may change over time; check the Databricks documentation for the current value).

    The following example demonstrates how to create an autoscaling cluster with Delta Cache enabled:

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";
    

    const smallest = databricks.getNodeType({
        localDisk: true,
    });
    const latestLts = databricks.getSparkVersion({
        longTermSupport: true,
    });
    const sharedAutoscaling = new databricks.Cluster("sharedAutoscaling", {
        clusterName: "Shared Autoscaling",
        sparkVersion: latestLts.then(latestLts => latestLts.id),
        nodeTypeId: smallest.then(smallest => smallest.id),
        autoterminationMinutes: 20,
        autoscale: {
            minWorkers: 1,
            maxWorkers: 50,
        },
        sparkConf: {
            "spark.databricks.io.cache.enabled": true,
            "spark.databricks.io.cache.maxDiskUsage": "50g",
            "spark.databricks.io.cache.maxMetaDataCache": "1g",
        },
    });

    import pulumi
    import pulumi_databricks as databricks
    
    smallest = databricks.get_node_type(local_disk=True)
    latest_lts = databricks.get_spark_version(long_term_support=True)
    shared_autoscaling = databricks.Cluster("sharedAutoscaling",
        cluster_name="Shared Autoscaling",
        spark_version=latest_lts.id,
        node_type_id=smallest.id,
        autotermination_minutes=20,
        autoscale=databricks.ClusterAutoscaleArgs(
            min_workers=1,
            max_workers=50,
        ),
        spark_conf={
            "spark.databricks.io.cache.enabled": True,
            "spark.databricks.io.cache.maxDiskUsage": "50g",
            "spark.databricks.io.cache.maxMetaDataCache": "1g",
        })
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Databricks = Pulumi.Databricks;
    
    return await Deployment.RunAsync(() => 
    {
        var smallest = Databricks.GetNodeType.Invoke(new()
        {
            LocalDisk = true,
        });
    
        var latestLts = Databricks.GetSparkVersion.Invoke(new()
        {
            LongTermSupport = true,
        });
    
        var sharedAutoscaling = new Databricks.Cluster("sharedAutoscaling", new()
        {
            ClusterName = "Shared Autoscaling",
            SparkVersion = latestLts.Apply(getSparkVersionResult => getSparkVersionResult.Id),
            NodeTypeId = smallest.Apply(getNodeTypeResult => getNodeTypeResult.Id),
            AutoterminationMinutes = 20,
            Autoscale = new Databricks.Inputs.ClusterAutoscaleArgs
            {
                MinWorkers = 1,
                MaxWorkers = 50,
            },
            SparkConf = 
            {
                { "spark.databricks.io.cache.enabled", true },
                { "spark.databricks.io.cache.maxDiskUsage", "50g" },
                { "spark.databricks.io.cache.maxMetaDataCache", "1g" },
            },
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		smallest, err := databricks.GetNodeType(ctx, &databricks.GetNodeTypeArgs{
    			LocalDisk: pulumi.BoolRef(true),
    		}, nil)
    		if err != nil {
    			return err
    		}
    		latestLts, err := databricks.GetSparkVersion(ctx, &databricks.GetSparkVersionArgs{
    			LongTermSupport: pulumi.BoolRef(true),
    		}, nil)
    		if err != nil {
    			return err
    		}
    		_, err = databricks.NewCluster(ctx, "sharedAutoscaling", &databricks.ClusterArgs{
    			ClusterName:            pulumi.String("Shared Autoscaling"),
    			SparkVersion:           pulumi.String(latestLts.Id),
    			NodeTypeId:             pulumi.String(smallest.Id),
    			AutoterminationMinutes: pulumi.Int(20),
    			Autoscale: &databricks.ClusterAutoscaleArgs{
    				MinWorkers: pulumi.Int(1),
    				MaxWorkers: pulumi.Int(50),
    			},
    			SparkConf: pulumi.Map{
    				"spark.databricks.io.cache.enabled":          pulumi.Any(true),
    				"spark.databricks.io.cache.maxDiskUsage":     pulumi.Any("50g"),
    				"spark.databricks.io.cache.maxMetaDataCache": pulumi.Any("1g"),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.databricks.DatabricksFunctions;
    import com.pulumi.databricks.inputs.GetNodeTypeArgs;
    import com.pulumi.databricks.inputs.GetSparkVersionArgs;
    import com.pulumi.databricks.Cluster;
    import com.pulumi.databricks.ClusterArgs;
    import com.pulumi.databricks.inputs.ClusterAutoscaleArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            final var smallest = DatabricksFunctions.getNodeType(GetNodeTypeArgs.builder()
                .localDisk(true)
                .build());
    
            final var latestLts = DatabricksFunctions.getSparkVersion(GetSparkVersionArgs.builder()
                .longTermSupport(true)
                .build());
    
            var sharedAutoscaling = new Cluster("sharedAutoscaling", ClusterArgs.builder()        
                .clusterName("Shared Autoscaling")
                .sparkVersion(latestLts.applyValue(getSparkVersionResult -> getSparkVersionResult.id()))
                .nodeTypeId(smallest.applyValue(getNodeTypeResult -> getNodeTypeResult.id()))
                .autoterminationMinutes(20)
                .autoscale(ClusterAutoscaleArgs.builder()
                    .minWorkers(1)
                    .maxWorkers(50)
                    .build())
                .sparkConf(Map.ofEntries(
                    Map.entry("spark.databricks.io.cache.enabled", true),
                    Map.entry("spark.databricks.io.cache.maxDiskUsage", "50g"),
                    Map.entry("spark.databricks.io.cache.maxMetaDataCache", "1g")
                ))
                .build());
    
        }
    }
    
    resources:
      sharedAutoscaling:
        type: databricks:Cluster
        properties:
          clusterName: Shared Autoscaling
          sparkVersion: ${latestLts.id}
          nodeTypeId: ${smallest.id}
          autoterminationMinutes: 20
          autoscale:
            minWorkers: 1
            maxWorkers: 50
          sparkConf:
            spark.databricks.io.cache.enabled: true
            spark.databricks.io.cache.maxDiskUsage: 50g
            spark.databricks.io.cache.maxMetaDataCache: 1g
    variables:
      smallest:
        fn::invoke:
          Function: databricks:getNodeType
          Arguments:
            localDisk: true
      latestLts:
        fn::invoke:
          Function: databricks:getSparkVersion
          Arguments:
            longTermSupport: true
    
    Libraries List<ClusterLibrary>
    NodeTypeId string
    Any supported databricks.getNodeType id. If instance_pool_id is specified, this field is not needed.
    NumWorkers int
    Number of worker nodes that this cluster should have. A cluster has one Spark driver and num_workers executors for a total of num_workers + 1 Spark nodes.
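    For contrast with the autoscaling examples, a minimal fixed-size sketch (the resource name and worker count are illustrative): three workers plus one driver, i.e. four Spark nodes in total.

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";

    const smallest = databricks.getNodeType({ localDisk: true });
    const latestLts = databricks.getSparkVersion({ longTermSupport: true });

    const fixedSize = new databricks.Cluster("fixedSize", {
        clusterName: "Fixed Size",
        sparkVersion: latestLts.then(v => v.id),
        nodeTypeId: smallest.then(n => n.id),
        autoterminationMinutes: 20,
        numWorkers: 3, // 3 executors + 1 driver = 4 Spark nodes
    });
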
    PolicyId string
    RuntimeEngine string
    The type of runtime engine to use. If not specified, the runtime engine type is inferred based on the spark_version value. Allowed values include: PHOTON, STANDARD.
    SingleUserName string
    The optional user name of the user to assign to an interactive cluster. This field is required when using data_security_mode set to SINGLE_USER or AAD Passthrough for Azure Data Lake Storage (ADLS) with a single-user cluster (i.e., not high-concurrency clusters).
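    For example, a hedged sketch of a Unity Catalog single-user cluster that combines data_security_mode and single_user_name (the user name is a placeholder):

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";

    const smallest = databricks.getNodeType({ localDisk: true });
    const latestLts = databricks.getSparkVersion({ longTermSupport: true });

    const singleUser = new databricks.Cluster("singleUser", {
        clusterName: "Single User",
        sparkVersion: latestLts.then(v => v.id),
        nodeTypeId: smallest.then(n => n.id),
        autoterminationMinutes: 20,
        numWorkers: 1,
        dataSecurityMode: "SINGLE_USER",       // required for Unity Catalog single-user clusters
        singleUserName: "someone@example.com", // the user assigned to this cluster
    });
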
    SparkConf Dictionary<string, object>
    should have the following items:

    • spark.databricks.repl.allowedLanguages set to a list of supported languages, for example: python,sql, or python,sql,r. Scala is not supported!
    • spark.databricks.cluster.profile set to serverless
    SparkEnvVars Dictionary<string, object>
    Map with environment variable key-value pairs to fine-tune Spark clusters. Key-value pairs of the form (X,Y) are exported (i.e., X='Y') while launching the driver and workers.
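    For illustration, a sketch that exports a couple of environment variables to the driver and workers (the variable names and values are illustrative only):

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";

    const smallest = databricks.getNodeType({ localDisk: true });
    const latestLts = databricks.getSparkVersion({ longTermSupport: true });

    const withEnvVars = new databricks.Cluster("withEnvVars", {
        clusterName: "With Env Vars",
        sparkVersion: latestLts.then(v => v.id),
        nodeTypeId: smallest.then(n => n.id),
        autoterminationMinutes: 20,
        numWorkers: 1,
        // Each pair (X, Y) is exported as X='Y' when the driver and workers launch.
        sparkEnvVars: {
            MY_APP_ENVIRONMENT: "staging",
            MY_FEATURE_FLAG: "enabled",
        },
    });
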
    SparkVersion string
    Runtime version of the cluster. Any supported databricks.getSparkVersion id. We advise using Cluster Policies to restrict the list of versions for simplicity while maintaining enough control.
    SshPublicKeys List<string>
    SSH public key contents that will be added to each Spark node in this cluster. The corresponding private keys can be used to log in with the user name ubuntu on port 2200. You can specify up to 10 keys.
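    For example, a sketch that adds a single (truncated, placeholder) public key so you can SSH to the nodes as ubuntu on port 2200:

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";

    const smallest = databricks.getNodeType({ localDisk: true });
    const latestLts = databricks.getSparkVersion({ longTermSupport: true });

    const sshEnabled = new databricks.Cluster("sshEnabled", {
        clusterName: "SSH Enabled",
        sparkVersion: latestLts.then(v => v.id),
        nodeTypeId: smallest.then(n => n.id),
        autoterminationMinutes: 20,
        numWorkers: 1,
        // Up to 10 keys; the matching private key logs in as `ubuntu` on port 2200.
        sshPublicKeys: ["ssh-ed25519 AAAAC3... you@example.com"],
    });
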
    State string
    (string) State of the cluster.
    Url string
    URL for the Docker image
    WorkloadType ClusterWorkloadType
    ApplyPolicyDefaultValues bool
    Whether to use policy default values for missing cluster attributes.
    Autoscale ClusterAutoscaleArgs
    AutoterminationMinutes int
    Automatically terminate the cluster after being inactive for this time in minutes. If specified, the threshold must be between 10 and 10000 minutes. You can also set this value to 0 to explicitly disable automatic termination. Defaults to 60. We highly recommend having this setting present for Interactive/BI clusters.
    AwsAttributes ClusterAwsAttributesArgs
    AzureAttributes ClusterAzureAttributesArgs
    ClusterId string
    ClusterLogConf ClusterClusterLogConfArgs
    ClusterMountInfos []ClusterClusterMountInfoArgs
    ClusterName string
    Cluster name, which doesn’t have to be unique. If not specified at creation, the cluster name will be an empty string.
    ClusterSource string
    CustomTags map[string]interface{}

    should have the tag ResourceClass set to the value Serverless

    For example:

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";
    

    const clusterWithTableAccessControl = new databricks.Cluster("clusterWithTableAccessControl", {
        clusterName: "Shared High-Concurrency",
        sparkVersion: data.databricks_spark_version.latest_lts.id,
        nodeTypeId: data.databricks_node_type.smallest.id,
        autoterminationMinutes: 20,
        sparkConf: {
            "spark.databricks.repl.allowedLanguages": "python,sql",
            "spark.databricks.cluster.profile": "serverless",
        },
        customTags: {
            ResourceClass: "Serverless",
        },
    });

    import pulumi
    import pulumi_databricks as databricks
    
    cluster_with_table_access_control = databricks.Cluster("clusterWithTableAccessControl",
        cluster_name="Shared High-Concurrency",
        spark_version=data["databricks_spark_version"]["latest_lts"]["id"],
        node_type_id=data["databricks_node_type"]["smallest"]["id"],
        autotermination_minutes=20,
        spark_conf={
            "spark.databricks.repl.allowedLanguages": "python,sql",
            "spark.databricks.cluster.profile": "serverless",
        },
        custom_tags={
            "ResourceClass": "Serverless",
        })
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Databricks = Pulumi.Databricks;
    
    return await Deployment.RunAsync(() => 
    {
        var clusterWithTableAccessControl = new Databricks.Cluster("clusterWithTableAccessControl", new()
        {
            ClusterName = "Shared High-Concurrency",
            SparkVersion = data.Databricks_spark_version.Latest_lts.Id,
            NodeTypeId = data.Databricks_node_type.Smallest.Id,
            AutoterminationMinutes = 20,
            SparkConf = 
            {
                { "spark.databricks.repl.allowedLanguages", "python,sql" },
                { "spark.databricks.cluster.profile", "serverless" },
            },
            CustomTags = 
            {
                { "ResourceClass", "Serverless" },
            },
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := databricks.NewCluster(ctx, "clusterWithTableAccessControl", &databricks.ClusterArgs{
    			ClusterName:            pulumi.String("Shared High-Concurrency"),
    			SparkVersion:           pulumi.Any(data.Databricks_spark_version.Latest_lts.Id),
    			NodeTypeId:             pulumi.Any(data.Databricks_node_type.Smallest.Id),
    			AutoterminationMinutes: pulumi.Int(20),
    			SparkConf: pulumi.Map{
    				"spark.databricks.repl.allowedLanguages": pulumi.Any("python,sql"),
    				"spark.databricks.cluster.profile":       pulumi.Any("serverless"),
    			},
    			CustomTags: pulumi.Map{
    				"ResourceClass": pulumi.Any("Serverless"),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.databricks.Cluster;
    import com.pulumi.databricks.ClusterArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var clusterWithTableAccessControl = new Cluster("clusterWithTableAccessControl", ClusterArgs.builder()        
                .clusterName("Shared High-Concurrency")
                .sparkVersion(data.databricks_spark_version().latest_lts().id())
                .nodeTypeId(data.databricks_node_type().smallest().id())
                .autoterminationMinutes(20)
                .sparkConf(Map.ofEntries(
                    Map.entry("spark.databricks.repl.allowedLanguages", "python,sql"),
                    Map.entry("spark.databricks.cluster.profile", "serverless")
                ))
                .customTags(Map.of("ResourceClass", "Serverless"))
                .build());
    
        }
    }
    
    resources:
      clusterWithTableAccessControl:
        type: databricks:Cluster
        properties:
          clusterName: Shared High-Concurrency
          sparkVersion: ${data.databricks_spark_version.latest_lts.id}
          nodeTypeId: ${data.databricks_node_type.smallest.id}
          autoterminationMinutes: 20
          sparkConf:
            spark.databricks.repl.allowedLanguages: python,sql
            spark.databricks.cluster.profile: serverless
          customTags:
            ResourceClass: Serverless
    
    DataSecurityMode string
    Select the security features of the cluster. Unity Catalog requires SINGLE_USER or USER_ISOLATION mode. Use LEGACY_PASSTHROUGH for a passthrough cluster and LEGACY_TABLE_ACL for a Table ACL cluster. If omitted, no security features are enabled. In the Databricks UI, this setting has recently been renamed Access Mode and USER_ISOLATION has been renamed Shared, but these original terms are still used here.
    DefaultTags map[string]interface{}
    (map) Tags that are added by Databricks by default, regardless of any custom_tags that may have been added. These include: Vendor: Databricks, Creator: <username_of_creator>, ClusterName: <name_of_cluster>, ClusterId: <id_of_cluster>, Name: , and any workspace and pool tags.
    DockerImage ClusterDockerImageArgs
    DriverInstancePoolId string
    Similar to instance_pool_id, but for the driver node. If omitted while instance_pool_id is specified, the driver is allocated from that pool.
    DriverNodeTypeId string
    The node type of the Spark driver. This field is optional; if unset, the API sets the driver node type to the same value as node_type_id defined above.
    EnableElasticDisk bool
    If you don’t want to allocate a fixed number of EBS volumes at cluster creation time, use autoscaling local storage. With autoscaling local storage, Databricks monitors the amount of free disk space available on your cluster’s Spark workers. If a worker begins to run too low on disk, Databricks automatically attaches a new EBS volume to the worker before it runs out of disk space. EBS volumes are attached up to a limit of 5 TB of total disk space per instance (including the instance’s local storage). To scale down EBS usage, make sure you have autotermination_minutes and autoscale attributes set. More documentation available at cluster configuration page.
    EnableLocalDiskEncryption bool
    Some instance types you use to run clusters may have locally attached disks. Databricks may store shuffle data or temporary data on these locally attached disks. To ensure that all data at rest is encrypted for all storage types, including shuffle data stored temporarily on your cluster’s local disks, you can enable local disk encryption. When local disk encryption is enabled, Databricks generates an encryption key locally unique to each cluster node and uses it to encrypt all data stored on local disks. The scope of the key is local to each cluster node and is destroyed along with the cluster node itself. During its lifetime, the key resides in memory for encryption and decryption and is stored encrypted on the disk. Your workloads may run more slowly because of the performance impact of reading and writing encrypted data to and from local volumes. This feature is not available for all Azure Databricks subscriptions. Contact your Microsoft or Databricks account representative to request access.
    GcpAttributes ClusterGcpAttributesArgs
    IdempotencyToken string
    An optional token to guarantee the idempotency of cluster creation requests. If an active cluster with the provided token already exists, the request will not create a new cluster, but it will return the existing running cluster's ID instead. If you specify the idempotency token, upon failure, you can retry until the request succeeds. Databricks platform guarantees to launch exactly one cluster with that idempotency token. This token should have at most 64 characters.
    InitScripts []ClusterInitScriptArgs
    InstancePoolId string
    To reduce cluster start time, you can attach a cluster to a predefined pool of idle instances. When attached to a pool, a cluster allocates its driver and worker nodes from the pool. If the pool does not have sufficient idle resources to accommodate the cluster’s request, it expands by allocating new instances from the instance provider. When an attached cluster changes its state to TERMINATED, the instances it used are returned to the pool and reused by a different cluster.
    IsPinned bool

    Boolean value specifying whether the cluster is pinned (not pinned by default). You must be a Databricks administrator to use this. The number of pinned clusters is limited to 100, so apply may fail if you have more than that (the limit may change over time; check the Databricks documentation for the current value).

    The following example demonstrates how to create an autoscaling cluster with Delta Cache enabled:

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";
    

    const smallest = databricks.getNodeType({
        localDisk: true,
    });
    const latestLts = databricks.getSparkVersion({
        longTermSupport: true,
    });
    const sharedAutoscaling = new databricks.Cluster("sharedAutoscaling", {
        clusterName: "Shared Autoscaling",
        sparkVersion: latestLts.then(latestLts => latestLts.id),
        nodeTypeId: smallest.then(smallest => smallest.id),
        autoterminationMinutes: 20,
        autoscale: {
            minWorkers: 1,
            maxWorkers: 50,
        },
        sparkConf: {
            "spark.databricks.io.cache.enabled": true,
            "spark.databricks.io.cache.maxDiskUsage": "50g",
            "spark.databricks.io.cache.maxMetaDataCache": "1g",
        },
    });

    import pulumi
    import pulumi_databricks as databricks
    
    smallest = databricks.get_node_type(local_disk=True)
    latest_lts = databricks.get_spark_version(long_term_support=True)
    shared_autoscaling = databricks.Cluster("sharedAutoscaling",
        cluster_name="Shared Autoscaling",
        spark_version=latest_lts.id,
        node_type_id=smallest.id,
        autotermination_minutes=20,
        autoscale=databricks.ClusterAutoscaleArgs(
            min_workers=1,
            max_workers=50,
        ),
        spark_conf={
            "spark.databricks.io.cache.enabled": True,
            "spark.databricks.io.cache.maxDiskUsage": "50g",
            "spark.databricks.io.cache.maxMetaDataCache": "1g",
        })
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Databricks = Pulumi.Databricks;
    
    return await Deployment.RunAsync(() => 
    {
        var smallest = Databricks.GetNodeType.Invoke(new()
        {
            LocalDisk = true,
        });
    
        var latestLts = Databricks.GetSparkVersion.Invoke(new()
        {
            LongTermSupport = true,
        });
    
        var sharedAutoscaling = new Databricks.Cluster("sharedAutoscaling", new()
        {
            ClusterName = "Shared Autoscaling",
            SparkVersion = latestLts.Apply(getSparkVersionResult => getSparkVersionResult.Id),
            NodeTypeId = smallest.Apply(getNodeTypeResult => getNodeTypeResult.Id),
            AutoterminationMinutes = 20,
            Autoscale = new Databricks.Inputs.ClusterAutoscaleArgs
            {
                MinWorkers = 1,
                MaxWorkers = 50,
            },
            SparkConf = 
            {
                { "spark.databricks.io.cache.enabled", true },
                { "spark.databricks.io.cache.maxDiskUsage", "50g" },
                { "spark.databricks.io.cache.maxMetaDataCache", "1g" },
            },
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		smallest, err := databricks.GetNodeType(ctx, &databricks.GetNodeTypeArgs{
    			LocalDisk: pulumi.BoolRef(true),
    		}, nil)
    		if err != nil {
    			return err
    		}
    		latestLts, err := databricks.GetSparkVersion(ctx, &databricks.GetSparkVersionArgs{
    			LongTermSupport: pulumi.BoolRef(true),
    		}, nil)
    		if err != nil {
    			return err
    		}
    		_, err = databricks.NewCluster(ctx, "sharedAutoscaling", &databricks.ClusterArgs{
    			ClusterName:            pulumi.String("Shared Autoscaling"),
    			SparkVersion:           pulumi.String(latestLts.Id),
    			NodeTypeId:             pulumi.String(smallest.Id),
    			AutoterminationMinutes: pulumi.Int(20),
    			Autoscale: &databricks.ClusterAutoscaleArgs{
    				MinWorkers: pulumi.Int(1),
    				MaxWorkers: pulumi.Int(50),
    			},
    			SparkConf: pulumi.Map{
    				"spark.databricks.io.cache.enabled":          pulumi.Any(true),
    				"spark.databricks.io.cache.maxDiskUsage":     pulumi.Any("50g"),
    				"spark.databricks.io.cache.maxMetaDataCache": pulumi.Any("1g"),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.databricks.DatabricksFunctions;
    import com.pulumi.databricks.inputs.GetNodeTypeArgs;
    import com.pulumi.databricks.inputs.GetSparkVersionArgs;
    import com.pulumi.databricks.Cluster;
    import com.pulumi.databricks.ClusterArgs;
    import com.pulumi.databricks.inputs.ClusterAutoscaleArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            final var smallest = DatabricksFunctions.getNodeType(GetNodeTypeArgs.builder()
                .localDisk(true)
                .build());
    
            final var latestLts = DatabricksFunctions.getSparkVersion(GetSparkVersionArgs.builder()
                .longTermSupport(true)
                .build());
    
            var sharedAutoscaling = new Cluster("sharedAutoscaling", ClusterArgs.builder()        
                .clusterName("Shared Autoscaling")
                .sparkVersion(latestLts.applyValue(getSparkVersionResult -> getSparkVersionResult.id()))
                .nodeTypeId(smallest.applyValue(getNodeTypeResult -> getNodeTypeResult.id()))
                .autoterminationMinutes(20)
                .autoscale(ClusterAutoscaleArgs.builder()
                    .minWorkers(1)
                    .maxWorkers(50)
                    .build())
                .sparkConf(Map.ofEntries(
                    Map.entry("spark.databricks.io.cache.enabled", true),
                    Map.entry("spark.databricks.io.cache.maxDiskUsage", "50g"),
                    Map.entry("spark.databricks.io.cache.maxMetaDataCache", "1g")
                ))
                .build());
    
        }
    }
    
    resources:
      sharedAutoscaling:
        type: databricks:Cluster
        properties:
          clusterName: Shared Autoscaling
          sparkVersion: ${latestLts.id}
          nodeTypeId: ${smallest.id}
          autoterminationMinutes: 20
          autoscale:
            minWorkers: 1
            maxWorkers: 50
          sparkConf:
            spark.databricks.io.cache.enabled: true
            spark.databricks.io.cache.maxDiskUsage: 50g
            spark.databricks.io.cache.maxMetaDataCache: 1g
    variables:
      smallest:
        fn::invoke:
          Function: databricks:getNodeType
          Arguments:
            localDisk: true
      latestLts:
        fn::invoke:
          Function: databricks:getSparkVersion
          Arguments:
            longTermSupport: true
    
    Libraries []ClusterLibraryArgs
    NodeTypeId string
    Any supported databricks.getNodeType id. If instance_pool_id is specified, this field is not needed.
    NumWorkers int
    Number of worker nodes that this cluster should have. A cluster has one Spark driver and num_workers executors for a total of num_workers + 1 Spark nodes.
    PolicyId string
    RuntimeEngine string
    The type of runtime engine to use. If not specified, the runtime engine type is inferred based on the spark_version value. Allowed values include: PHOTON, STANDARD.
    SingleUserName string
    The optional user name of the user to assign to an interactive cluster. This field is required when using data_security_mode set to SINGLE_USER or AAD Passthrough for Azure Data Lake Storage (ADLS) with a single-user cluster (i.e., not high-concurrency clusters).
    SparkConf map[string]interface{}
    should have the following items:

    • spark.databricks.repl.allowedLanguages set to a list of supported languages, for example: python,sql, or python,sql,r. Scala is not supported!
    • spark.databricks.cluster.profile set to serverless
    SparkEnvVars map[string]interface{}
    Map with environment variable key-value pairs to fine-tune Spark clusters. Key-value pairs of the form (X,Y) are exported (i.e., X='Y') while launching the driver and workers.
    SparkVersion string
    Runtime version of the cluster. Any supported databricks.getSparkVersion id. We advise using Cluster Policies to restrict the list of versions for simplicity while maintaining enough control.
    SshPublicKeys []string
    SSH public key contents that will be added to each Spark node in this cluster. The corresponding private keys can be used to log in with the user name ubuntu on port 2200. You can specify up to 10 keys.
    State string
    (string) State of the cluster.
    Url string
    URL for the Docker image
    WorkloadType ClusterWorkloadTypeArgs
    applyPolicyDefaultValues Boolean
    Whether to use policy default values for missing cluster attributes.
    autoscale ClusterAutoscale
    autoterminationMinutes Integer
    Automatically terminate the cluster after being inactive for this time in minutes. If specified, the threshold must be between 10 and 10000 minutes. You can also set this value to 0 to explicitly disable automatic termination. Defaults to 60. We highly recommend having this setting present for Interactive/BI clusters.
    awsAttributes ClusterAwsAttributes
    azureAttributes ClusterAzureAttributes
    clusterId String
    clusterLogConf ClusterClusterLogConf
    clusterMountInfos List<ClusterClusterMountInfo>
    clusterName String
    Cluster name, which doesn’t have to be unique. If not specified at creation, the cluster name will be an empty string.
    clusterSource String
    customTags Map<String,Object>

    should have the tag ResourceClass set to the value Serverless

    For example:

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";
    

    const clusterWithTableAccessControl = new databricks.Cluster("clusterWithTableAccessControl", {
        clusterName: "Shared High-Concurrency",
        sparkVersion: data.databricks_spark_version.latest_lts.id,
        nodeTypeId: data.databricks_node_type.smallest.id,
        autoterminationMinutes: 20,
        sparkConf: {
            "spark.databricks.repl.allowedLanguages": "python,sql",
            "spark.databricks.cluster.profile": "serverless",
        },
        customTags: {
            ResourceClass: "Serverless",
        },
    });

    import pulumi
    import pulumi_databricks as databricks
    
    cluster_with_table_access_control = databricks.Cluster("clusterWithTableAccessControl",
        cluster_name="Shared High-Concurrency",
        spark_version=data["databricks_spark_version"]["latest_lts"]["id"],
        node_type_id=data["databricks_node_type"]["smallest"]["id"],
        autotermination_minutes=20,
        spark_conf={
            "spark.databricks.repl.allowedLanguages": "python,sql",
            "spark.databricks.cluster.profile": "serverless",
        },
        custom_tags={
            "ResourceClass": "Serverless",
        })
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Databricks = Pulumi.Databricks;
    
    return await Deployment.RunAsync(() => 
    {
        var clusterWithTableAccessControl = new Databricks.Cluster("clusterWithTableAccessControl", new()
        {
            ClusterName = "Shared High-Concurrency",
            SparkVersion = data.Databricks_spark_version.Latest_lts.Id,
            NodeTypeId = data.Databricks_node_type.Smallest.Id,
            AutoterminationMinutes = 20,
            SparkConf = 
            {
                { "spark.databricks.repl.allowedLanguages", "python,sql" },
                { "spark.databricks.cluster.profile", "serverless" },
            },
            CustomTags = 
            {
                { "ResourceClass", "Serverless" },
            },
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := databricks.NewCluster(ctx, "clusterWithTableAccessControl", &databricks.ClusterArgs{
    			ClusterName:            pulumi.String("Shared High-Concurrency"),
    			SparkVersion:           pulumi.Any(data.Databricks_spark_version.Latest_lts.Id),
    			NodeTypeId:             pulumi.Any(data.Databricks_node_type.Smallest.Id),
    			AutoterminationMinutes: pulumi.Int(20),
    			SparkConf: pulumi.Map{
    				"spark.databricks.repl.allowedLanguages": pulumi.Any("python,sql"),
    				"spark.databricks.cluster.profile":       pulumi.Any("serverless"),
    			},
    			CustomTags: pulumi.Map{
    				"ResourceClass": pulumi.Any("Serverless"),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.databricks.Cluster;
    import com.pulumi.databricks.ClusterArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var clusterWithTableAccessControl = new Cluster("clusterWithTableAccessControl", ClusterArgs.builder()        
                .clusterName("Shared High-Concurrency")
                .sparkVersion(data.databricks_spark_version().latest_lts().id())
                .nodeTypeId(data.databricks_node_type().smallest().id())
                .autoterminationMinutes(20)
                .sparkConf(Map.ofEntries(
                    Map.entry("spark.databricks.repl.allowedLanguages", "python,sql"),
                    Map.entry("spark.databricks.cluster.profile", "serverless")
                ))
                .customTags(Map.of("ResourceClass", "Serverless"))
                .build());
    
        }
    }
    
    resources:
      clusterWithTableAccessControl:
        type: databricks:Cluster
        properties:
          clusterName: Shared High-Concurrency
          sparkVersion: ${data.databricks_spark_version.latest_lts.id}
          nodeTypeId: ${data.databricks_node_type.smallest.id}
          autoterminationMinutes: 20
          sparkConf:
            spark.databricks.repl.allowedLanguages: python,sql
            spark.databricks.cluster.profile: serverless
          customTags:
            ResourceClass: Serverless
    
    dataSecurityMode String
    Select the security features of the cluster. Unity Catalog requires SINGLE_USER or USER_ISOLATION mode. Use LEGACY_PASSTHROUGH for a passthrough cluster and LEGACY_TABLE_ACL for a Table ACL cluster. If omitted, no security features are enabled. In the Databricks UI, this setting has recently been renamed Access Mode and USER_ISOLATION has been renamed Shared, but these original terms are still used here.
    defaultTags Map<String,Object>
    (map) Tags that are added by Databricks by default, regardless of any custom_tags that may have been added. These include: Vendor: Databricks, Creator: <username_of_creator>, ClusterName: <name_of_cluster>, ClusterId: <id_of_cluster>, Name: , and any workspace and pool tags.
    dockerImage ClusterDockerImage
    driverInstancePoolId String
    Similar to instance_pool_id, but for the driver node. If omitted while instance_pool_id is specified, the driver is allocated from that pool.
    driverNodeTypeId String
    The node type of the Spark driver. This field is optional; if unset, the API sets the driver node type to the same value as node_type_id defined above.
    enableElasticDisk Boolean
    If you don’t want to allocate a fixed number of EBS volumes at cluster creation time, use autoscaling local storage. With autoscaling local storage, Databricks monitors the amount of free disk space available on your cluster’s Spark workers. If a worker begins to run too low on disk, Databricks automatically attaches a new EBS volume to the worker before it runs out of disk space. EBS volumes are attached up to a limit of 5 TB of total disk space per instance (including the instance’s local storage). To scale down EBS usage, make sure you have autotermination_minutes and autoscale attributes set. More documentation available at cluster configuration page.
    enableLocalDiskEncryption Boolean
    Some instance types you use to run clusters may have locally attached disks. Databricks may store shuffle data or temporary data on these locally attached disks. To ensure that all data at rest is encrypted for all storage types, including shuffle data stored temporarily on your cluster’s local disks, you can enable local disk encryption. When local disk encryption is enabled, Databricks generates an encryption key locally unique to each cluster node and uses it to encrypt all data stored on local disks. The scope of the key is local to each cluster node and is destroyed along with the cluster node itself. During its lifetime, the key resides in memory for encryption and decryption and is stored encrypted on the disk. Your workloads may run more slowly because of the performance impact of reading and writing encrypted data to and from local volumes. This feature is not available for all Azure Databricks subscriptions. Contact your Microsoft or Databricks account representative to request access.
    gcpAttributes ClusterGcpAttributes
    idempotencyToken String
    An optional token to guarantee the idempotency of cluster creation requests. If an active cluster with the provided token already exists, the request will not create a new cluster, but it will return the existing running cluster's ID instead. If you specify the idempotency token, upon failure, you can retry until the request succeeds. Databricks platform guarantees to launch exactly one cluster with that idempotency token. This token should have at most 64 characters.
    initScripts List<ClusterInitScript>
    instancePoolId String
    To reduce cluster start time, you can attach a cluster to a predefined pool of idle instances. When attached to a pool, a cluster allocates its driver and worker nodes from the pool. If the pool does not have sufficient idle resources to accommodate the cluster’s request, it expands by allocating new instances from the instance provider. When an attached cluster changes its state to TERMINATED, the instances it used are returned to the pool and reused by a different cluster.
    isPinned Boolean

    Boolean value specifying whether the cluster is pinned (not pinned by default). You must be a Databricks administrator to use this. The number of pinned clusters is limited to 100, so apply may fail if you have more than that (the limit may change over time; check the Databricks documentation for the current value).

    The following example demonstrates how to create an autoscaling cluster with Delta Cache enabled:

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";
    

    const smallest = databricks.getNodeType({
        localDisk: true,
    });
    const latestLts = databricks.getSparkVersion({
        longTermSupport: true,
    });
    const sharedAutoscaling = new databricks.Cluster("sharedAutoscaling", {
        clusterName: "Shared Autoscaling",
        sparkVersion: latestLts.then(latestLts => latestLts.id),
        nodeTypeId: smallest.then(smallest => smallest.id),
        autoterminationMinutes: 20,
        autoscale: {
            minWorkers: 1,
            maxWorkers: 50,
        },
        sparkConf: {
            "spark.databricks.io.cache.enabled": true,
            "spark.databricks.io.cache.maxDiskUsage": "50g",
            "spark.databricks.io.cache.maxMetaDataCache": "1g",
        },
    });

    import pulumi
    import pulumi_databricks as databricks
    
    smallest = databricks.get_node_type(local_disk=True)
    latest_lts = databricks.get_spark_version(long_term_support=True)
    shared_autoscaling = databricks.Cluster("sharedAutoscaling",
        cluster_name="Shared Autoscaling",
        spark_version=latest_lts.id,
        node_type_id=smallest.id,
        autotermination_minutes=20,
        autoscale=databricks.ClusterAutoscaleArgs(
            min_workers=1,
            max_workers=50,
        ),
        spark_conf={
            "spark.databricks.io.cache.enabled": True,
            "spark.databricks.io.cache.maxDiskUsage": "50g",
            "spark.databricks.io.cache.maxMetaDataCache": "1g",
        })
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Databricks = Pulumi.Databricks;
    
    return await Deployment.RunAsync(() => 
    {
        var smallest = Databricks.GetNodeType.Invoke(new()
        {
            LocalDisk = true,
        });
    
        var latestLts = Databricks.GetSparkVersion.Invoke(new()
        {
            LongTermSupport = true,
        });
    
        var sharedAutoscaling = new Databricks.Cluster("sharedAutoscaling", new()
        {
            ClusterName = "Shared Autoscaling",
            SparkVersion = latestLts.Apply(getSparkVersionResult => getSparkVersionResult.Id),
            NodeTypeId = smallest.Apply(getNodeTypeResult => getNodeTypeResult.Id),
            AutoterminationMinutes = 20,
            Autoscale = new Databricks.Inputs.ClusterAutoscaleArgs
            {
                MinWorkers = 1,
                MaxWorkers = 50,
            },
            SparkConf = 
            {
                { "spark.databricks.io.cache.enabled", true },
                { "spark.databricks.io.cache.maxDiskUsage", "50g" },
                { "spark.databricks.io.cache.maxMetaDataCache", "1g" },
            },
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		smallest, err := databricks.GetNodeType(ctx, &databricks.GetNodeTypeArgs{
    			LocalDisk: pulumi.BoolRef(true),
    		}, nil)
    		if err != nil {
    			return err
    		}
    		latestLts, err := databricks.GetSparkVersion(ctx, &databricks.GetSparkVersionArgs{
    			LongTermSupport: pulumi.BoolRef(true),
    		}, nil)
    		if err != nil {
    			return err
    		}
    		_, err = databricks.NewCluster(ctx, "sharedAutoscaling", &databricks.ClusterArgs{
    			ClusterName:            pulumi.String("Shared Autoscaling"),
    			SparkVersion:           pulumi.String(latestLts.Id),
    			NodeTypeId:             pulumi.String(smallest.Id),
    			AutoterminationMinutes: pulumi.Int(20),
    			Autoscale: &databricks.ClusterAutoscaleArgs{
    				MinWorkers: pulumi.Int(1),
    				MaxWorkers: pulumi.Int(50),
    			},
    			SparkConf: pulumi.Map{
    				"spark.databricks.io.cache.enabled":          pulumi.Any(true),
    				"spark.databricks.io.cache.maxDiskUsage":     pulumi.Any("50g"),
    				"spark.databricks.io.cache.maxMetaDataCache": pulumi.Any("1g"),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.databricks.DatabricksFunctions;
    import com.pulumi.databricks.inputs.GetNodeTypeArgs;
    import com.pulumi.databricks.inputs.GetSparkVersionArgs;
    import com.pulumi.databricks.Cluster;
    import com.pulumi.databricks.ClusterArgs;
    import com.pulumi.databricks.inputs.ClusterAutoscaleArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            final var smallest = DatabricksFunctions.getNodeType(GetNodeTypeArgs.builder()
                .localDisk(true)
                .build());
    
            final var latestLts = DatabricksFunctions.getSparkVersion(GetSparkVersionArgs.builder()
                .longTermSupport(true)
                .build());
    
            var sharedAutoscaling = new Cluster("sharedAutoscaling", ClusterArgs.builder()        
                .clusterName("Shared Autoscaling")
                .sparkVersion(latestLts.applyValue(getSparkVersionResult -> getSparkVersionResult.id()))
                .nodeTypeId(smallest.applyValue(getNodeTypeResult -> getNodeTypeResult.id()))
                .autoterminationMinutes(20)
                .autoscale(ClusterAutoscaleArgs.builder()
                    .minWorkers(1)
                    .maxWorkers(50)
                    .build())
                .sparkConf(Map.ofEntries(
                    Map.entry("spark.databricks.io.cache.enabled", true),
                    Map.entry("spark.databricks.io.cache.maxDiskUsage", "50g"),
                    Map.entry("spark.databricks.io.cache.maxMetaDataCache", "1g")
                ))
                .build());
    
        }
    }
    
    resources:
      sharedAutoscaling:
        type: databricks:Cluster
        properties:
          clusterName: Shared Autoscaling
          sparkVersion: ${latestLts.id}
          nodeTypeId: ${smallest.id}
          autoterminationMinutes: 20
          autoscale:
            minWorkers: 1
            maxWorkers: 50
          sparkConf:
            spark.databricks.io.cache.enabled: true
            spark.databricks.io.cache.maxDiskUsage: 50g
            spark.databricks.io.cache.maxMetaDataCache: 1g
    variables:
      smallest:
        fn::invoke:
          Function: databricks:getNodeType
          Arguments:
            localDisk: true
      latestLts:
        fn::invoke:
          Function: databricks:getSparkVersion
          Arguments:
            longTermSupport: true
    
    libraries List<ClusterLibrary>
    nodeTypeId String
    Any supported databricks.getNodeType id. If instance_pool_id is specified, this field is not needed.
    numWorkers Integer
    Number of worker nodes that this cluster should have. A cluster has one Spark driver and num_workers executors for a total of num_workers + 1 Spark nodes.
    policyId String
    runtimeEngine String
    The type of runtime engine to use. If not specified, the runtime engine type is inferred based on the spark_version value. Allowed values include: PHOTON, STANDARD.
    singleUserName String
    The optional user name of the user to assign to an interactive cluster. This field is required when using data_security_mode set to SINGLE_USER or AAD Passthrough for Azure Data Lake Storage (ADLS) with a single-user cluster (i.e., not high-concurrency clusters).
    sparkConf Map<String,Object>
    should have the following items:

    • spark.databricks.repl.allowedLanguages set to a list of supported languages, for example: python,sql, or python,sql,r. Scala is not supported!
    • spark.databricks.cluster.profile set to serverless
    sparkEnvVars Map<String,Object>
    Map with environment variable key-value pairs to fine-tune Spark clusters. Key-value pairs of the form (X,Y) are exported (i.e., X='Y') while launching the driver and workers.
    sparkVersion String
    Runtime version of the cluster. Any supported databricks.getSparkVersion id. We advise using Cluster Policies to restrict the list of versions for simplicity while maintaining enough control.
    sshPublicKeys List<String>
    SSH public key contents that will be added to each Spark node in this cluster. The corresponding private keys can be used to log in with the user name ubuntu on port 2200. You can specify up to 10 keys.
    state String
    (string) State of the cluster.
    url String
    URL for the Docker image
    workloadType ClusterWorkloadType
    applyPolicyDefaultValues boolean
    Whether to use policy default values for missing cluster attributes.
    autoscale ClusterAutoscale
    autoterminationMinutes number
    Automatically terminate the cluster after being inactive for this time in minutes. If specified, the threshold must be between 10 and 10000 minutes. You can also set this value to 0 to explicitly disable automatic termination. Defaults to 60. We highly recommend having this setting present for Interactive/BI clusters.
    awsAttributes ClusterAwsAttributes
    azureAttributes ClusterAzureAttributes
    clusterId string
    clusterLogConf ClusterClusterLogConf
    clusterMountInfos ClusterClusterMountInfo[]
    clusterName string
    Cluster name, which doesn’t have to be unique. If not specified at creation, the cluster name will be an empty string.
    clusterSource string
    customTags {[key: string]: any}

    should have the tag ResourceClass set to the value Serverless

    For example:

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";
    

    const clusterWithTableAccessControl = new databricks.Cluster("clusterWithTableAccessControl", {
        clusterName: "Shared High-Concurrency",
        sparkVersion: data.databricks_spark_version.latest_lts.id,
        nodeTypeId: data.databricks_node_type.smallest.id,
        autoterminationMinutes: 20,
        sparkConf: {
            "spark.databricks.repl.allowedLanguages": "python,sql",
            "spark.databricks.cluster.profile": "serverless",
        },
        customTags: {
            ResourceClass: "Serverless",
        },
    });

    import pulumi
    import pulumi_databricks as databricks
    
    cluster_with_table_access_control = databricks.Cluster("clusterWithTableAccessControl",
        cluster_name="Shared High-Concurrency",
        spark_version=data["databricks_spark_version"]["latest_lts"]["id"],
        node_type_id=data["databricks_node_type"]["smallest"]["id"],
        autotermination_minutes=20,
        spark_conf={
            "spark.databricks.repl.allowedLanguages": "python,sql",
            "spark.databricks.cluster.profile": "serverless",
        },
        custom_tags={
            "ResourceClass": "Serverless",
        })
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Databricks = Pulumi.Databricks;
    
    return await Deployment.RunAsync(() => 
    {
        var clusterWithTableAccessControl = new Databricks.Cluster("clusterWithTableAccessControl", new()
        {
            ClusterName = "Shared High-Concurrency",
            SparkVersion = data.Databricks_spark_version.Latest_lts.Id,
            NodeTypeId = data.Databricks_node_type.Smallest.Id,
            AutoterminationMinutes = 20,
            SparkConf = 
            {
                { "spark.databricks.repl.allowedLanguages", "python,sql" },
                { "spark.databricks.cluster.profile", "serverless" },
            },
            CustomTags = 
            {
                { "ResourceClass", "Serverless" },
            },
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := databricks.NewCluster(ctx, "clusterWithTableAccessControl", &databricks.ClusterArgs{
    			ClusterName:            pulumi.String("Shared High-Concurrency"),
    			SparkVersion:           pulumi.Any(data.Databricks_spark_version.Latest_lts.Id),
    			NodeTypeId:             pulumi.Any(data.Databricks_node_type.Smallest.Id),
    			AutoterminationMinutes: pulumi.Int(20),
    			SparkConf: pulumi.Map{
    				"spark.databricks.repl.allowedLanguages": pulumi.Any("python,sql"),
    				"spark.databricks.cluster.profile":       pulumi.Any("serverless"),
    			},
    			CustomTags: pulumi.Map{
    				"ResourceClass": pulumi.Any("Serverless"),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.databricks.Cluster;
    import com.pulumi.databricks.ClusterArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var clusterWithTableAccessControl = new Cluster("clusterWithTableAccessControl", ClusterArgs.builder()        
                .clusterName("Shared High-Concurrency")
                .sparkVersion(data.databricks_spark_version().latest_lts().id())
                .nodeTypeId(data.databricks_node_type().smallest().id())
                .autoterminationMinutes(20)
                .sparkConf(Map.ofEntries(
                    Map.entry("spark.databricks.repl.allowedLanguages", "python,sql"),
                    Map.entry("spark.databricks.cluster.profile", "serverless")
                ))
                .customTags(Map.of("ResourceClass", "Serverless"))
                .build());
    
        }
    }
    
    resources:
      clusterWithTableAccessControl:
        type: databricks:Cluster
        properties:
          clusterName: Shared High-Concurrency
          sparkVersion: ${data.databricks_spark_version.latest_lts.id}
          nodeTypeId: ${data.databricks_node_type.smallest.id}
          autoterminationMinutes: 20
          sparkConf:
            spark.databricks.repl.allowedLanguages: python,sql
            spark.databricks.cluster.profile: serverless
          customTags:
            ResourceClass: Serverless
    
    dataSecurityMode string
    Select the security features of the cluster. Unity Catalog requires SINGLE_USER or USER_ISOLATION mode. Use LEGACY_PASSTHROUGH for a passthrough cluster and LEGACY_TABLE_ACL for a Table ACL cluster. If omitted, no security features are enabled. In the Databricks UI, this setting has recently been renamed Access Mode and USER_ISOLATION has been renamed Shared, but these original terms are still used here.
    defaultTags {[key: string]: any}
    (map) Tags that are added by Databricks by default, regardless of any custom_tags that may have been added. These include: Vendor: Databricks, Creator: <username_of_creator>, ClusterName: <name_of_cluster>, ClusterId: <id_of_cluster>, Name: , and any workspace and pool tags.
    dockerImage ClusterDockerImage
    driverInstancePoolId string
    Similar to instance_pool_id, but for the driver node. If omitted while instance_pool_id is specified, the driver is allocated from that pool.
    driverNodeTypeId string
    The node type of the Spark driver. This field is optional; if unset, the API sets the driver node type to the same value as node_type_id defined above.
    enableElasticDisk boolean
    If you don’t want to allocate a fixed number of EBS volumes at cluster creation time, use autoscaling local storage. With autoscaling local storage, Databricks monitors the amount of free disk space available on your cluster’s Spark workers. If a worker begins to run too low on disk, Databricks automatically attaches a new EBS volume to the worker before it runs out of disk space. EBS volumes are attached up to a limit of 5 TB of total disk space per instance (including the instance’s local storage). To scale down EBS usage, make sure you have autotermination_minutes and autoscale attributes set. More documentation available at cluster configuration page.
    enableLocalDiskEncryption boolean
    Some instance types you use to run clusters may have locally attached disks. Databricks may store shuffle data or temporary data on these locally attached disks. To ensure that all data at rest is encrypted for all storage types, including shuffle data stored temporarily on your cluster’s local disks, you can enable local disk encryption. When local disk encryption is enabled, Databricks generates an encryption key locally unique to each cluster node and uses it to encrypt all data stored on local disks. The scope of the key is local to each cluster node and is destroyed along with the cluster node itself. During its lifetime, the key resides in memory for encryption and decryption and is stored encrypted on the disk. Your workloads may run more slowly because of the performance impact of reading and writing encrypted data to and from local volumes. This feature is not available for all Azure Databricks subscriptions. Contact your Microsoft or Databricks account representative to request access.
    gcpAttributes ClusterGcpAttributes
    idempotencyToken string
    An optional token to guarantee the idempotency of cluster creation requests. If an active cluster with the provided token already exists, the request will not create a new cluster, but it will return the existing running cluster's ID instead. If you specify the idempotency token, upon failure, you can retry until the request succeeds. Databricks platform guarantees to launch exactly one cluster with that idempotency token. This token should have at most 64 characters.
    initScripts ClusterInitScript[]
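
    For init scripts, a minimal Python sketch that runs a script stored in the workspace; the path is a placeholder, and the nested workspace/destination fields follow the provider's init-script input types (other sources such as cloud storage are also supported):

    import pulumi_databricks as databricks

    smallest = databricks.get_node_type(local_disk=True)
    latest_lts = databricks.get_spark_version(long_term_support=True)
    # Placeholder workspace path for the init script.
    with_init_script = databricks.Cluster("withInitScript",
        cluster_name="With Init Script",
        spark_version=latest_lts.id,
        node_type_id=smallest.id,
        autotermination_minutes=20,
        num_workers=1,
        init_scripts=[databricks.ClusterInitScriptArgs(
            workspace=databricks.ClusterInitScriptWorkspaceArgs(
                destination="/Shared/init-scripts/install-libs.sh",
            ),
        )])
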
    instancePoolId string
    To reduce cluster start time, you can attach a cluster to a predefined pool of idle instances. When attached to a pool, a cluster allocates its driver and worker nodes from the pool. If the pool does not have sufficient idle resources to accommodate the cluster’s request, it expands by allocating new instances from the instance provider. When an attached cluster changes its state to TERMINATED, the instances it used are returned to the pool and reused by a different cluster.
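
    A minimal Python sketch of a cluster backed by an instance pool (the pool and its sizing are illustrative; node_type_id is omitted on the cluster because the pool provides it):

    import pulumi_databricks as databricks

    smallest = databricks.get_node_type(local_disk=True)
    latest_lts = databricks.get_spark_version(long_term_support=True)
    # Pool of warm instances shared by clusters.
    pool = databricks.InstancePool("sharedPool",
        instance_pool_name="Shared Pool",
        node_type_id=smallest.id,
        min_idle_instances=0,
        idle_instance_autotermination_minutes=10)
    pooled_cluster = databricks.Cluster("pooledCluster",
        cluster_name="Pooled",
        spark_version=latest_lts.id,
        autotermination_minutes=20,
        num_workers=2,
        instance_pool_id=pool.id,
        # The driver comes from the same pool here; point this at another pool to separate them.
        driver_instance_pool_id=pool.id)
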
    isPinned boolean

    Boolean value specifying whether the cluster is pinned (not pinned by default). You must be a Databricks administrator to use this. The number of pinned clusters is limited to 100, so apply may fail if you already have that many (this limit may change over time, so check the Databricks documentation for the current number).

    The following example demonstrates how to create an autoscaling cluster with Delta Cache enabled:

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";
    

    const smallest = databricks.getNodeType({
        localDisk: true,
    });
    const latestLts = databricks.getSparkVersion({
        longTermSupport: true,
    });
    const sharedAutoscaling = new databricks.Cluster("sharedAutoscaling", {
        clusterName: "Shared Autoscaling",
        sparkVersion: latestLts.then(latestLts => latestLts.id),
        nodeTypeId: smallest.then(smallest => smallest.id),
        autoterminationMinutes: 20,
        autoscale: {
            minWorkers: 1,
            maxWorkers: 50,
        },
        sparkConf: {
            "spark.databricks.io.cache.enabled": true,
            "spark.databricks.io.cache.maxDiskUsage": "50g",
            "spark.databricks.io.cache.maxMetaDataCache": "1g",
        },
    });

    import pulumi
    import pulumi_databricks as databricks
    
    smallest = databricks.get_node_type(local_disk=True)
    latest_lts = databricks.get_spark_version(long_term_support=True)
    shared_autoscaling = databricks.Cluster("sharedAutoscaling",
        cluster_name="Shared Autoscaling",
        spark_version=latest_lts.id,
        node_type_id=smallest.id,
        autotermination_minutes=20,
        autoscale=databricks.ClusterAutoscaleArgs(
            min_workers=1,
            max_workers=50,
        ),
        spark_conf={
            "spark.databricks.io.cache.enabled": True,
            "spark.databricks.io.cache.maxDiskUsage": "50g",
            "spark.databricks.io.cache.maxMetaDataCache": "1g",
        })
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Databricks = Pulumi.Databricks;
    
    return await Deployment.RunAsync(() => 
    {
        var smallest = Databricks.GetNodeType.Invoke(new()
        {
            LocalDisk = true,
        });
    
        var latestLts = Databricks.GetSparkVersion.Invoke(new()
        {
            LongTermSupport = true,
        });
    
        var sharedAutoscaling = new Databricks.Cluster("sharedAutoscaling", new()
        {
            ClusterName = "Shared Autoscaling",
            SparkVersion = latestLts.Apply(getSparkVersionResult => getSparkVersionResult.Id),
            NodeTypeId = smallest.Apply(getNodeTypeResult => getNodeTypeResult.Id),
            AutoterminationMinutes = 20,
            Autoscale = new Databricks.Inputs.ClusterAutoscaleArgs
            {
                MinWorkers = 1,
                MaxWorkers = 50,
            },
            SparkConf = 
            {
                { "spark.databricks.io.cache.enabled", true },
                { "spark.databricks.io.cache.maxDiskUsage", "50g" },
                { "spark.databricks.io.cache.maxMetaDataCache", "1g" },
            },
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		smallest, err := databricks.GetNodeType(ctx, &databricks.GetNodeTypeArgs{
    			LocalDisk: pulumi.BoolRef(true),
    		}, nil)
    		if err != nil {
    			return err
    		}
    		latestLts, err := databricks.GetSparkVersion(ctx, &databricks.GetSparkVersionArgs{
    			LongTermSupport: pulumi.BoolRef(true),
    		}, nil)
    		if err != nil {
    			return err
    		}
    		_, err = databricks.NewCluster(ctx, "sharedAutoscaling", &databricks.ClusterArgs{
    			ClusterName:            pulumi.String("Shared Autoscaling"),
    			SparkVersion:           pulumi.String(latestLts.Id),
    			NodeTypeId:             pulumi.String(smallest.Id),
    			AutoterminationMinutes: pulumi.Int(20),
    			Autoscale: &databricks.ClusterAutoscaleArgs{
    				MinWorkers: pulumi.Int(1),
    				MaxWorkers: pulumi.Int(50),
    			},
    			SparkConf: pulumi.Map{
    				"spark.databricks.io.cache.enabled":          pulumi.Any(true),
    				"spark.databricks.io.cache.maxDiskUsage":     pulumi.Any("50g"),
    				"spark.databricks.io.cache.maxMetaDataCache": pulumi.Any("1g"),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.databricks.DatabricksFunctions;
    import com.pulumi.databricks.inputs.GetNodeTypeArgs;
    import com.pulumi.databricks.inputs.GetSparkVersionArgs;
    import com.pulumi.databricks.Cluster;
    import com.pulumi.databricks.ClusterArgs;
    import com.pulumi.databricks.inputs.ClusterAutoscaleArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            final var smallest = DatabricksFunctions.getNodeType(GetNodeTypeArgs.builder()
                .localDisk(true)
                .build());
    
            final var latestLts = DatabricksFunctions.getSparkVersion(GetSparkVersionArgs.builder()
                .longTermSupport(true)
                .build());
    
            var sharedAutoscaling = new Cluster("sharedAutoscaling", ClusterArgs.builder()        
                .clusterName("Shared Autoscaling")
                .sparkVersion(latestLts.applyValue(getSparkVersionResult -> getSparkVersionResult.id()))
                .nodeTypeId(smallest.applyValue(getNodeTypeResult -> getNodeTypeResult.id()))
                .autoterminationMinutes(20)
                .autoscale(ClusterAutoscaleArgs.builder()
                    .minWorkers(1)
                    .maxWorkers(50)
                    .build())
                .sparkConf(Map.ofEntries(
                    Map.entry("spark.databricks.io.cache.enabled", true),
                    Map.entry("spark.databricks.io.cache.maxDiskUsage", "50g"),
                    Map.entry("spark.databricks.io.cache.maxMetaDataCache", "1g")
                ))
                .build());
    
        }
    }
    
    resources:
      sharedAutoscaling:
        type: databricks:Cluster
        properties:
          clusterName: Shared Autoscaling
          sparkVersion: ${latestLts.id}
          nodeTypeId: ${smallest.id}
          autoterminationMinutes: 20
          autoscale:
            minWorkers: 1
            maxWorkers: 50
          sparkConf:
            spark.databricks.io.cache.enabled: true
            spark.databricks.io.cache.maxDiskUsage: 50g
            spark.databricks.io.cache.maxMetaDataCache: 1g
    variables:
      smallest:
        fn::invoke:
          Function: databricks:getNodeType
          Arguments:
            localDisk: true
      latestLts:
        fn::invoke:
          Function: databricks:getSparkVersion
          Arguments:
            longTermSupport: true
    
    libraries ClusterLibrary[]
    nodeTypeId string
    Any supported databricks.getNodeType id. If instance_pool_id is specified, this field is not needed.
    numWorkers number
    Number of worker nodes that this cluster should have. A cluster has one Spark driver and num_workers executors for a total of num_workers + 1 Spark nodes.
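
    For example, a minimal Python sketch of a fixed-size cluster (two executors plus the driver, so three Spark nodes in total; names are illustrative):

    import pulumi_databricks as databricks

    smallest = databricks.get_node_type(local_disk=True)
    latest_lts = databricks.get_spark_version(long_term_support=True)
    # Fixed-size cluster: 2 workers + 1 driver = 3 Spark nodes.
    fixed_size = databricks.Cluster("fixedSizeCluster",
        cluster_name="Fixed Size",
        spark_version=latest_lts.id,
        node_type_id=smallest.id,
        autotermination_minutes=20,
        num_workers=2)
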
    policyId string
    runtimeEngine string
    The type of runtime engine to use. If not specified, the runtime engine type is inferred based on the spark_version value. Allowed values include: PHOTON, STANDARD.
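
    A minimal Python sketch that requests the Photon engine explicitly (the chosen node type and runtime must support Photon; names are illustrative):

    import pulumi_databricks as databricks

    smallest = databricks.get_node_type(local_disk=True)
    latest_lts = databricks.get_spark_version(long_term_support=True)
    # Explicitly request the Photon runtime engine.
    photon = databricks.Cluster("photonCluster",
        cluster_name="Photon",
        spark_version=latest_lts.id,
        node_type_id=smallest.id,
        autotermination_minutes=20,
        num_workers=2,
        runtime_engine="PHOTON")
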
    singleUserName string
    The optional user name of the user to assign to an interactive cluster. This field is required when data_security_mode is set to SINGLE_USER, or when using AAD Passthrough for Azure Data Lake Storage (ADLS) with a single-user cluster (i.e., not a high-concurrency cluster).
    sparkConf {[key: string]: any}
    should have the following items:

    • spark.databricks.repl.allowedLanguages set to a list of supported languages, for example: python,sql, or python,sql,r. Scala is not supported!
    • spark.databricks.cluster.profile set to serverless
    sparkEnvVars {[key: string]: any}
    Map with environment variable key-value pairs to fine-tune Spark clusters. Key-value pairs of the form (X,Y) are exported (i.e., X='Y') while launching the driver and workers.
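
    A minimal Python sketch; the variable names and values are illustrative:

    import pulumi_databricks as databricks

    smallest = databricks.get_node_type(local_disk=True)
    latest_lts = databricks.get_spark_version(long_term_support=True)
    # Exported as environment variables on the driver and workers.
    with_env_vars = databricks.Cluster("withEnvVars",
        cluster_name="With Env Vars",
        spark_version=latest_lts.id,
        node_type_id=smallest.id,
        autotermination_minutes=20,
        num_workers=1,
        spark_env_vars={
            "ENVIRONMENT": "staging",
            "LOG_LEVEL": "INFO",
        })
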
    sparkVersion string
    Runtime version of the cluster. Any supported databricks.getSparkVersion id. We advise using Cluster Policies to restrict the list of versions for simplicity while maintaining enough control.
    sshPublicKeys string[]
    SSH public key contents that will be added to each Spark node in this cluster. The corresponding private keys can be used to login with the user name ubuntu on port 2200. You can specify up to 10 keys.
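
    A minimal Python sketch; the key below is a truncated placeholder:

    import pulumi_databricks as databricks

    smallest = databricks.get_node_type(local_disk=True)
    latest_lts = databricks.get_spark_version(long_term_support=True)
    # Connect as user `ubuntu` on port 2200 with the matching private key.
    with_ssh = databricks.Cluster("withSshAccess",
        cluster_name="With SSH Access",
        spark_version=latest_lts.id,
        node_type_id=smallest.id,
        autotermination_minutes=20,
        num_workers=1,
        ssh_public_keys=["ssh-ed25519 AAAAC3... you@example.com"])
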
    state string
    (string) State of the cluster.
    url string
    URL for the Docker image
    workloadType ClusterWorkloadType
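
    workloadType restricts which workloads may run on the cluster. A minimal Python sketch, assuming the nested clients block with jobs/notebooks flags exposed by the provider:

    import pulumi_databricks as databricks

    smallest = databricks.get_node_type(local_disk=True)
    latest_lts = databricks.get_spark_version(long_term_support=True)
    # Allow notebooks but keep automated jobs off this interactive cluster.
    interactive_only = databricks.Cluster("interactiveOnly",
        cluster_name="Interactive Only",
        spark_version=latest_lts.id,
        node_type_id=smallest.id,
        autotermination_minutes=20,
        num_workers=1,
        workload_type=databricks.ClusterWorkloadTypeArgs(
            clients=databricks.ClusterWorkloadTypeClientsArgs(
                jobs=False,
                notebooks=True,
            ),
        ))
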
    apply_policy_default_values bool
    Whether to use policy default values for missing cluster attributes.
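
    A minimal Python sketch pairing policy_id with apply_policy_default_values; the policy resource and its definition are illustrative:

    import json
    import pulumi_databricks as databricks

    smallest = databricks.get_node_type(local_disk=True)
    latest_lts = databricks.get_spark_version(long_term_support=True)
    # Illustrative policy that fixes autotermination to 20 minutes.
    fair_use = databricks.ClusterPolicy("fairUse",
        name="Fair Use",
        definition=json.dumps({
            "autotermination_minutes": {"type": "fixed", "value": 20, "hidden": True},
        }))
    policy_backed = databricks.Cluster("policyBacked",
        cluster_name="Policy Backed",
        spark_version=latest_lts.id,
        node_type_id=smallest.id,
        num_workers=1,
        policy_id=fair_use.id,
        # Let the policy fill attributes (such as autotermination) that are omitted here.
        apply_policy_default_values=True)
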
    autoscale ClusterAutoscaleArgs
    autotermination_minutes int
    Automatically terminate the cluster after being inactive for this time in minutes. If specified, the threshold must be between 10 and 10000 minutes. You can also set this value to 0 to explicitly disable automatic termination. Defaults to 60. We highly recommend having this setting present for Interactive/BI clusters.
    aws_attributes ClusterAwsAttributesArgs
    azure_attributes ClusterAzureAttributesArgs
    cluster_id str
    cluster_log_conf ClusterClusterLogConfArgs
    cluster_mount_infos Sequence[ClusterClusterMountInfoArgs]
    cluster_name str
    Cluster name, which doesn’t have to be unique. If not specified at creation, the cluster name will be an empty string.
    cluster_source str
    custom_tags Mapping[str, Any]

    should have the tag ResourceClass set to the value Serverless

    For example:

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";
    

    const clusterWithTableAccessControl = new databricks.Cluster("clusterWithTableAccessControl", {
        clusterName: "Shared High-Concurrency",
        sparkVersion: data.databricks_spark_version.latest_lts.id,
        nodeTypeId: data.databricks_node_type.smallest.id,
        autoterminationMinutes: 20,
        sparkConf: {
            "spark.databricks.repl.allowedLanguages": "python,sql",
            "spark.databricks.cluster.profile": "serverless",
        },
        customTags: {
            ResourceClass: "Serverless",
        },
    });

    import pulumi
    import pulumi_databricks as databricks
    
    cluster_with_table_access_control = databricks.Cluster("clusterWithTableAccessControl",
        cluster_name="Shared High-Concurrency",
        spark_version=data["databricks_spark_version"]["latest_lts"]["id"],
        node_type_id=data["databricks_node_type"]["smallest"]["id"],
        autotermination_minutes=20,
        spark_conf={
            "spark.databricks.repl.allowedLanguages": "python,sql",
            "spark.databricks.cluster.profile": "serverless",
        },
        custom_tags={
            "ResourceClass": "Serverless",
        })
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Databricks = Pulumi.Databricks;
    
    return await Deployment.RunAsync(() => 
    {
        var clusterWithTableAccessControl = new Databricks.Cluster("clusterWithTableAccessControl", new()
        {
            ClusterName = "Shared High-Concurrency",
            SparkVersion = data.Databricks_spark_version.Latest_lts.Id,
            NodeTypeId = data.Databricks_node_type.Smallest.Id,
            AutoterminationMinutes = 20,
            SparkConf = 
            {
                { "spark.databricks.repl.allowedLanguages", "python,sql" },
                { "spark.databricks.cluster.profile", "serverless" },
            },
            CustomTags = 
            {
                { "ResourceClass", "Serverless" },
            },
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := databricks.NewCluster(ctx, "clusterWithTableAccessControl", &databricks.ClusterArgs{
    			ClusterName:            pulumi.String("Shared High-Concurrency"),
    			SparkVersion:           pulumi.Any(data.Databricks_spark_version.Latest_lts.Id),
    			NodeTypeId:             pulumi.Any(data.Databricks_node_type.Smallest.Id),
    			AutoterminationMinutes: pulumi.Int(20),
    			SparkConf: pulumi.Map{
    				"spark.databricks.repl.allowedLanguages": pulumi.Any("python,sql"),
    				"spark.databricks.cluster.profile":       pulumi.Any("serverless"),
    			},
    			CustomTags: pulumi.Map{
    				"ResourceClass": pulumi.Any("Serverless"),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.databricks.Cluster;
    import com.pulumi.databricks.ClusterArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var clusterWithTableAccessControl = new Cluster("clusterWithTableAccessControl", ClusterArgs.builder()        
                .clusterName("Shared High-Concurrency")
                .sparkVersion(data.databricks_spark_version().latest_lts().id())
                .nodeTypeId(data.databricks_node_type().smallest().id())
                .autoterminationMinutes(20)
                .sparkConf(Map.ofEntries(
                    Map.entry("spark.databricks.repl.allowedLanguages", "python,sql"),
                    Map.entry("spark.databricks.cluster.profile", "serverless")
                ))
                .customTags(Map.of("ResourceClass", "Serverless"))
                .build());
    
        }
    }
    
    resources:
      clusterWithTableAccessControl:
        type: databricks:Cluster
        properties:
          clusterName: Shared High-Concurrency
          sparkVersion: ${data.databricks_spark_version.latest_lts.id}
          nodeTypeId: ${data.databricks_node_type.smallest.id}
          autoterminationMinutes: 20
          sparkConf:
            spark.databricks.repl.allowedLanguages: python,sql
            spark.databricks.cluster.profile: serverless
          customTags:
            ResourceClass: Serverless
    
    data_security_mode str
    Select the security features of the cluster. Unity Catalog requires SINGLE_USER or USER_ISOLATION mode. Use LEGACY_PASSTHROUGH for a passthrough cluster and LEGACY_TABLE_ACL for a Table ACL cluster. If omitted, no security features are enabled. In the Databricks UI, this setting has recently been renamed Access Mode and USER_ISOLATION has been renamed Shared, but continue to use these values here.
    default_tags Mapping[str, Any]
    (map) Tags that are added by Databricks by default, regardless of any custom_tags that may have been added. These include: Vendor: Databricks, Creator: <username_of_creator>, ClusterName: <name_of_cluster>, ClusterId: <id_of_cluster>, Name: , and any workspace and pool tags.
    docker_image ClusterDockerImageArgs
    driver_instance_pool_id str
    Similar to instance_pool_id, but for the driver node. If omitted and instance_pool_id is specified, the driver will be allocated from that pool.
    driver_node_type_id str
    The node type of the Spark driver. This field is optional; if unset, the API will set the driver node type to the same value as node_type_id defined above.
    enable_elastic_disk bool
    If you don’t want to allocate a fixed number of EBS volumes at cluster creation time, use autoscaling local storage. With autoscaling local storage, Databricks monitors the amount of free disk space available on your cluster’s Spark workers. If a worker begins to run too low on disk, Databricks automatically attaches a new EBS volume to the worker before it runs out of disk space. EBS volumes are attached up to a limit of 5 TB of total disk space per instance (including the instance’s local storage). To scale down EBS usage, make sure you have autotermination_minutes and autoscale attributes set. More documentation available at cluster configuration page.
    enable_local_disk_encryption bool
    Some instance types you use to run clusters may have locally attached disks. Databricks may store shuffle data or temporary data on these locally attached disks. To ensure that all data at rest is encrypted for all storage types, including shuffle data stored temporarily on your cluster’s local disks, you can enable local disk encryption. When local disk encryption is enabled, Databricks generates an encryption key locally unique to each cluster node and uses it to encrypt all data stored on local disks. The scope of the key is local to each cluster node and is destroyed along with the cluster node itself. During its lifetime, the key resides in memory for encryption and decryption and is stored encrypted on the disk. Your workloads may run more slowly because of the performance impact of reading and writing encrypted data to and from local volumes. This feature is not available for all Azure Databricks subscriptions. Contact your Microsoft or Databricks account representative to request access.
    gcp_attributes ClusterGcpAttributesArgs
    idempotency_token str
    An optional token to guarantee the idempotency of cluster creation requests. If an active cluster with the provided token already exists, the request will not create a new cluster, but it will return the existing running cluster's ID instead. If you specify the idempotency token, upon failure, you can retry until the request succeeds. Databricks platform guarantees to launch exactly one cluster with that idempotency token. This token should have at most 64 characters.
    init_scripts Sequence[ClusterInitScriptArgs]
    instance_pool_id str
    To reduce cluster start time, you can attach a cluster to a predefined pool of idle instances. When attached to a pool, a cluster allocates its driver and worker nodes from the pool. If the pool does not have sufficient idle resources to accommodate the cluster’s request, it expands by allocating new instances from the instance provider. When an attached cluster changes its state to TERMINATED, the instances it used are returned to the pool and reused by a different cluster.
    is_pinned bool

    Boolean value specifying whether the cluster is pinned (not pinned by default). You must be a Databricks administrator to use this. The number of pinned clusters is limited to 100, so apply may fail if you already have that many (this limit may change over time, so check the Databricks documentation for the current number).

    The following example demonstrates how to create an autoscaling cluster with Delta Cache enabled:

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";
    

    const smallest = databricks.getNodeType({
        localDisk: true,
    });
    const latestLts = databricks.getSparkVersion({
        longTermSupport: true,
    });
    const sharedAutoscaling = new databricks.Cluster("sharedAutoscaling", {
        clusterName: "Shared Autoscaling",
        sparkVersion: latestLts.then(latestLts => latestLts.id),
        nodeTypeId: smallest.then(smallest => smallest.id),
        autoterminationMinutes: 20,
        autoscale: {
            minWorkers: 1,
            maxWorkers: 50,
        },
        sparkConf: {
            "spark.databricks.io.cache.enabled": true,
            "spark.databricks.io.cache.maxDiskUsage": "50g",
            "spark.databricks.io.cache.maxMetaDataCache": "1g",
        },
    });

    import pulumi
    import pulumi_databricks as databricks
    
    smallest = databricks.get_node_type(local_disk=True)
    latest_lts = databricks.get_spark_version(long_term_support=True)
    shared_autoscaling = databricks.Cluster("sharedAutoscaling",
        cluster_name="Shared Autoscaling",
        spark_version=latest_lts.id,
        node_type_id=smallest.id,
        autotermination_minutes=20,
        autoscale=databricks.ClusterAutoscaleArgs(
            min_workers=1,
            max_workers=50,
        ),
        spark_conf={
            "spark.databricks.io.cache.enabled": True,
            "spark.databricks.io.cache.maxDiskUsage": "50g",
            "spark.databricks.io.cache.maxMetaDataCache": "1g",
        })
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Databricks = Pulumi.Databricks;
    
    return await Deployment.RunAsync(() => 
    {
        var smallest = Databricks.GetNodeType.Invoke(new()
        {
            LocalDisk = true,
        });
    
        var latestLts = Databricks.GetSparkVersion.Invoke(new()
        {
            LongTermSupport = true,
        });
    
        var sharedAutoscaling = new Databricks.Cluster("sharedAutoscaling", new()
        {
            ClusterName = "Shared Autoscaling",
            SparkVersion = latestLts.Apply(getSparkVersionResult => getSparkVersionResult.Id),
            NodeTypeId = smallest.Apply(getNodeTypeResult => getNodeTypeResult.Id),
            AutoterminationMinutes = 20,
            Autoscale = new Databricks.Inputs.ClusterAutoscaleArgs
            {
                MinWorkers = 1,
                MaxWorkers = 50,
            },
            SparkConf = 
            {
                { "spark.databricks.io.cache.enabled", true },
                { "spark.databricks.io.cache.maxDiskUsage", "50g" },
                { "spark.databricks.io.cache.maxMetaDataCache", "1g" },
            },
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		smallest, err := databricks.GetNodeType(ctx, &databricks.GetNodeTypeArgs{
    			LocalDisk: pulumi.BoolRef(true),
    		}, nil)
    		if err != nil {
    			return err
    		}
    		latestLts, err := databricks.GetSparkVersion(ctx, &databricks.GetSparkVersionArgs{
    			LongTermSupport: pulumi.BoolRef(true),
    		}, nil)
    		if err != nil {
    			return err
    		}
    		_, err = databricks.NewCluster(ctx, "sharedAutoscaling", &databricks.ClusterArgs{
    			ClusterName:            pulumi.String("Shared Autoscaling"),
    			SparkVersion:           pulumi.String(latestLts.Id),
    			NodeTypeId:             pulumi.String(smallest.Id),
    			AutoterminationMinutes: pulumi.Int(20),
    			Autoscale: &databricks.ClusterAutoscaleArgs{
    				MinWorkers: pulumi.Int(1),
    				MaxWorkers: pulumi.Int(50),
    			},
    			SparkConf: pulumi.Map{
    				"spark.databricks.io.cache.enabled":          pulumi.Any(true),
    				"spark.databricks.io.cache.maxDiskUsage":     pulumi.Any("50g"),
    				"spark.databricks.io.cache.maxMetaDataCache": pulumi.Any("1g"),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.databricks.DatabricksFunctions;
    import com.pulumi.databricks.inputs.GetNodeTypeArgs;
    import com.pulumi.databricks.inputs.GetSparkVersionArgs;
    import com.pulumi.databricks.Cluster;
    import com.pulumi.databricks.ClusterArgs;
    import com.pulumi.databricks.inputs.ClusterAutoscaleArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            final var smallest = DatabricksFunctions.getNodeType(GetNodeTypeArgs.builder()
                .localDisk(true)
                .build());
    
            final var latestLts = DatabricksFunctions.getSparkVersion(GetSparkVersionArgs.builder()
                .longTermSupport(true)
                .build());
    
            var sharedAutoscaling = new Cluster("sharedAutoscaling", ClusterArgs.builder()        
                .clusterName("Shared Autoscaling")
                .sparkVersion(latestLts.applyValue(getSparkVersionResult -> getSparkVersionResult.id()))
                .nodeTypeId(smallest.applyValue(getNodeTypeResult -> getNodeTypeResult.id()))
                .autoterminationMinutes(20)
                .autoscale(ClusterAutoscaleArgs.builder()
                    .minWorkers(1)
                    .maxWorkers(50)
                    .build())
                .sparkConf(Map.ofEntries(
                    Map.entry("spark.databricks.io.cache.enabled", true),
                    Map.entry("spark.databricks.io.cache.maxDiskUsage", "50g"),
                    Map.entry("spark.databricks.io.cache.maxMetaDataCache", "1g")
                ))
                .build());
    
        }
    }
    
    resources:
      sharedAutoscaling:
        type: databricks:Cluster
        properties:
          clusterName: Shared Autoscaling
          sparkVersion: ${latestLts.id}
          nodeTypeId: ${smallest.id}
          autoterminationMinutes: 20
          autoscale:
            minWorkers: 1
            maxWorkers: 50
          sparkConf:
            spark.databricks.io.cache.enabled: true
            spark.databricks.io.cache.maxDiskUsage: 50g
            spark.databricks.io.cache.maxMetaDataCache: 1g
    variables:
      smallest:
        fn::invoke:
          Function: databricks:getNodeType
          Arguments:
            localDisk: true
      latestLts:
        fn::invoke:
          Function: databricks:getSparkVersion
          Arguments:
            longTermSupport: true
    
    libraries Sequence[ClusterLibraryArgs]
    node_type_id str
    Any supported databricks.getNodeType id. If instance_pool_id is specified, this field is not needed.
    num_workers int
    Number of worker nodes that this cluster should have. A cluster has one Spark driver and num_workers executors for a total of num_workers + 1 Spark nodes.
    policy_id str
    runtime_engine str
    The type of runtime engine to use. If not specified, the runtime engine type is inferred based on the spark_version value. Allowed values include: PHOTON, STANDARD.
    single_user_name str
    The optional user name of the user to assign to an interactive cluster. This field is required when data_security_mode is set to SINGLE_USER, or when using AAD Passthrough for Azure Data Lake Storage (ADLS) with a single-user cluster (i.e., not a high-concurrency cluster).
    spark_conf Mapping[str, Any]
    should have the following items:

    • spark.databricks.repl.allowedLanguages set to a list of supported languages, for example: python,sql, or python,sql,r. Scala is not supported!
    • spark.databricks.cluster.profile set to serverless
    spark_env_vars Mapping[str, Any]
    Map with environment variable key-value pairs to fine-tune Spark clusters. Key-value pairs of the form (X,Y) are exported (i.e., X='Y') while launching the driver and workers.
    spark_version str
    Runtime version of the cluster. Any supported databricks.getSparkVersion id. We advise using Cluster Policies to restrict the list of versions for simplicity while maintaining enough control.
    ssh_public_keys Sequence[str]
    SSH public key contents that will be added to each Spark node in this cluster. The corresponding private keys can be used to login with the user name ubuntu on port 2200. You can specify up to 10 keys.
    state str
    (string) State of the cluster.
    url str
    URL for the Docker image
    workload_type ClusterWorkloadTypeArgs
    applyPolicyDefaultValues Boolean
    Whether to use policy default values for missing cluster attributes.
    autoscale Property Map
    autoterminationMinutes Number
    Automatically terminate the cluster after being inactive for this time in minutes. If specified, the threshold must be between 10 and 10000 minutes. You can also set this value to 0 to explicitly disable automatic termination. Defaults to 60. We highly recommend having this setting present for Interactive/BI clusters.
    awsAttributes Property Map
    azureAttributes Property Map
    clusterId String
    clusterLogConf Property Map
    clusterMountInfos List<Property Map>
    clusterName String
    Cluster name, which doesn’t have to be unique. If not specified at creation, the cluster name will be an empty string.
    clusterSource String
    customTags Map<Any>

    should have the tag ResourceClass set to the value Serverless

    For example:

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";
    

    const clusterWithTableAccessControl = new databricks.Cluster("clusterWithTableAccessControl", {
        clusterName: "Shared High-Concurrency",
        sparkVersion: data.databricks_spark_version.latest_lts.id,
        nodeTypeId: data.databricks_node_type.smallest.id,
        autoterminationMinutes: 20,
        sparkConf: {
            "spark.databricks.repl.allowedLanguages": "python,sql",
            "spark.databricks.cluster.profile": "serverless",
        },
        customTags: {
            ResourceClass: "Serverless",
        },
    });

    import pulumi
    import pulumi_databricks as databricks
    
    cluster_with_table_access_control = databricks.Cluster("clusterWithTableAccessControl",
        cluster_name="Shared High-Concurrency",
        spark_version=data["databricks_spark_version"]["latest_lts"]["id"],
        node_type_id=data["databricks_node_type"]["smallest"]["id"],
        autotermination_minutes=20,
        spark_conf={
            "spark.databricks.repl.allowedLanguages": "python,sql",
            "spark.databricks.cluster.profile": "serverless",
        },
        custom_tags={
            "ResourceClass": "Serverless",
        })
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Databricks = Pulumi.Databricks;
    
    return await Deployment.RunAsync(() => 
    {
        var clusterWithTableAccessControl = new Databricks.Cluster("clusterWithTableAccessControl", new()
        {
            ClusterName = "Shared High-Concurrency",
            SparkVersion = data.Databricks_spark_version.Latest_lts.Id,
            NodeTypeId = data.Databricks_node_type.Smallest.Id,
            AutoterminationMinutes = 20,
            SparkConf = 
            {
                { "spark.databricks.repl.allowedLanguages", "python,sql" },
                { "spark.databricks.cluster.profile", "serverless" },
            },
            CustomTags = 
            {
                { "ResourceClass", "Serverless" },
            },
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := databricks.NewCluster(ctx, "clusterWithTableAccessControl", &databricks.ClusterArgs{
    			ClusterName:            pulumi.String("Shared High-Concurrency"),
    			SparkVersion:           pulumi.Any(data.Databricks_spark_version.Latest_lts.Id),
    			NodeTypeId:             pulumi.Any(data.Databricks_node_type.Smallest.Id),
    			AutoterminationMinutes: pulumi.Int(20),
    			SparkConf: pulumi.Map{
    				"spark.databricks.repl.allowedLanguages": pulumi.Any("python,sql"),
    				"spark.databricks.cluster.profile":       pulumi.Any("serverless"),
    			},
    			CustomTags: pulumi.Map{
    				"ResourceClass": pulumi.Any("Serverless"),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.databricks.Cluster;
    import com.pulumi.databricks.ClusterArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var clusterWithTableAccessControl = new Cluster("clusterWithTableAccessControl", ClusterArgs.builder()        
                .clusterName("Shared High-Concurrency")
                .sparkVersion(data.databricks_spark_version().latest_lts().id())
                .nodeTypeId(data.databricks_node_type().smallest().id())
                .autoterminationMinutes(20)
                .sparkConf(Map.ofEntries(
                    Map.entry("spark.databricks.repl.allowedLanguages", "python,sql"),
                    Map.entry("spark.databricks.cluster.profile", "serverless")
                ))
                .customTags(Map.of("ResourceClass", "Serverless"))
                .build());
    
        }
    }
    
    resources:
      clusterWithTableAccessControl:
        type: databricks:Cluster
        properties:
          clusterName: Shared High-Concurrency
          sparkVersion: ${data.databricks_spark_version.latest_lts.id}
          nodeTypeId: ${data.databricks_node_type.smallest.id}
          autoterminationMinutes: 20
          sparkConf:
            spark.databricks.repl.allowedLanguages: python,sql
            spark.databricks.cluster.profile: serverless
          customTags:
            ResourceClass: Serverless
    
    dataSecurityMode String
    Select the security features of the cluster. Unity Catalog requires SINGLE_USER or USER_ISOLATION mode. Use LEGACY_PASSTHROUGH for a passthrough cluster and LEGACY_TABLE_ACL for a Table ACL cluster. If omitted, no security features are enabled. In the Databricks UI, this setting has recently been renamed Access Mode and USER_ISOLATION has been renamed Shared, but continue to use these values here.
    defaultTags Map<Any>
    (map) Tags that are added by Databricks by default, regardless of any custom_tags that may have been added. These include: Vendor: Databricks, Creator: <username_of_creator>, ClusterName: <name_of_cluster>, ClusterId: <id_of_cluster>, Name: , and any workspace and pool tags.
    dockerImage Property Map
    driverInstancePoolId String
    Similar to instance_pool_id, but for the driver node. If omitted and instance_pool_id is specified, the driver will be allocated from that pool.
    driverNodeTypeId String
    The node type of the Spark driver. This field is optional; if unset, the API will set the driver node type to the same value as node_type_id defined above.
    enableElasticDisk Boolean
    If you don’t want to allocate a fixed number of EBS volumes at cluster creation time, use autoscaling local storage. With autoscaling local storage, Databricks monitors the amount of free disk space available on your cluster’s Spark workers. If a worker begins to run too low on disk, Databricks automatically attaches a new EBS volume to the worker before it runs out of disk space. EBS volumes are attached up to a limit of 5 TB of total disk space per instance (including the instance’s local storage). To scale down EBS usage, make sure you have autotermination_minutes and autoscale attributes set. More documentation available at cluster configuration page.
    enableLocalDiskEncryption Boolean
    Some instance types you use to run clusters may have locally attached disks. Databricks may store shuffle data or temporary data on these locally attached disks. To ensure that all data at rest is encrypted for all storage types, including shuffle data stored temporarily on your cluster’s local disks, you can enable local disk encryption. When local disk encryption is enabled, Databricks generates an encryption key locally unique to each cluster node and uses it to encrypt all data stored on local disks. The scope of the key is local to each cluster node and is destroyed along with the cluster node itself. During its lifetime, the key resides in memory for encryption and decryption and is stored encrypted on the disk. Your workloads may run more slowly because of the performance impact of reading and writing encrypted data to and from local volumes. This feature is not available for all Azure Databricks subscriptions. Contact your Microsoft or Databricks account representative to request access.
    gcpAttributes Property Map
    idempotencyToken String
    An optional token to guarantee the idempotency of cluster creation requests. If an active cluster with the provided token already exists, the request will not create a new cluster, but it will return the existing running cluster's ID instead. If you specify the idempotency token, upon failure, you can retry until the request succeeds. Databricks platform guarantees to launch exactly one cluster with that idempotency token. This token should have at most 64 characters.
    initScripts List<Property Map>
    instancePoolId String
    To reduce cluster start time, you can attach a cluster to a predefined pool of idle instances. When attached to a pool, a cluster allocates its driver and worker nodes from the pool. If the pool does not have sufficient idle resources to accommodate the cluster’s request, it expands by allocating new instances from the instance provider. When an attached cluster changes its state to TERMINATED, the instances it used are returned to the pool and reused by a different cluster.
    isPinned Boolean

    Boolean value specifying whether the cluster is pinned (not pinned by default). You must be a Databricks administrator to use this. The number of pinned clusters is limited to 100, so apply may fail if you already have that many (this limit may change over time, so check the Databricks documentation for the current number).

    The following example demonstrates how to create an autoscaling cluster with Delta Cache enabled:

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";
    

    const smallest = databricks.getNodeType({
        localDisk: true,
    });
    const latestLts = databricks.getSparkVersion({
        longTermSupport: true,
    });
    const sharedAutoscaling = new databricks.Cluster("sharedAutoscaling", {
        clusterName: "Shared Autoscaling",
        sparkVersion: latestLts.then(latestLts => latestLts.id),
        nodeTypeId: smallest.then(smallest => smallest.id),
        autoterminationMinutes: 20,
        autoscale: {
            minWorkers: 1,
            maxWorkers: 50,
        },
        sparkConf: {
            "spark.databricks.io.cache.enabled": true,
            "spark.databricks.io.cache.maxDiskUsage": "50g",
            "spark.databricks.io.cache.maxMetaDataCache": "1g",
        },
    });

    import pulumi
    import pulumi_databricks as databricks
    
    smallest = databricks.get_node_type(local_disk=True)
    latest_lts = databricks.get_spark_version(long_term_support=True)
    shared_autoscaling = databricks.Cluster("sharedAutoscaling",
        cluster_name="Shared Autoscaling",
        spark_version=latest_lts.id,
        node_type_id=smallest.id,
        autotermination_minutes=20,
        autoscale=databricks.ClusterAutoscaleArgs(
            min_workers=1,
            max_workers=50,
        ),
        spark_conf={
            "spark.databricks.io.cache.enabled": True,
            "spark.databricks.io.cache.maxDiskUsage": "50g",
            "spark.databricks.io.cache.maxMetaDataCache": "1g",
        })
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Databricks = Pulumi.Databricks;
    
    return await Deployment.RunAsync(() => 
    {
        var smallest = Databricks.GetNodeType.Invoke(new()
        {
            LocalDisk = true,
        });
    
        var latestLts = Databricks.GetSparkVersion.Invoke(new()
        {
            LongTermSupport = true,
        });
    
        var sharedAutoscaling = new Databricks.Cluster("sharedAutoscaling", new()
        {
            ClusterName = "Shared Autoscaling",
            SparkVersion = latestLts.Apply(getSparkVersionResult => getSparkVersionResult.Id),
            NodeTypeId = smallest.Apply(getNodeTypeResult => getNodeTypeResult.Id),
            AutoterminationMinutes = 20,
            Autoscale = new Databricks.Inputs.ClusterAutoscaleArgs
            {
                MinWorkers = 1,
                MaxWorkers = 50,
            },
            SparkConf = 
            {
                { "spark.databricks.io.cache.enabled", true },
                { "spark.databricks.io.cache.maxDiskUsage", "50g" },
                { "spark.databricks.io.cache.maxMetaDataCache", "1g" },
            },
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		smallest, err := databricks.GetNodeType(ctx, &databricks.GetNodeTypeArgs{
    			LocalDisk: pulumi.BoolRef(true),
    		}, nil)
    		if err != nil {
    			return err
    		}
    		latestLts, err := databricks.GetSparkVersion(ctx, &databricks.GetSparkVersionArgs{
    			LongTermSupport: pulumi.BoolRef(true),
    		}, nil)
    		if err != nil {
    			return err
    		}
    		_, err = databricks.NewCluster(ctx, "sharedAutoscaling", &databricks.ClusterArgs{
    			ClusterName:            pulumi.String("Shared Autoscaling"),
    			SparkVersion:           pulumi.String(latestLts.Id),
    			NodeTypeId:             pulumi.String(smallest.Id),
    			AutoterminationMinutes: pulumi.Int(20),
    			Autoscale: &databricks.ClusterAutoscaleArgs{
    				MinWorkers: pulumi.Int(1),
    				MaxWorkers: pulumi.Int(50),
    			},
    			SparkConf: pulumi.Map{
    				"spark.databricks.io.cache.enabled":          pulumi.Any(true),
    				"spark.databricks.io.cache.maxDiskUsage":     pulumi.Any("50g"),
    				"spark.databricks.io.cache.maxMetaDataCache": pulumi.Any("1g"),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.databricks.DatabricksFunctions;
    import com.pulumi.databricks.inputs.GetNodeTypeArgs;
    import com.pulumi.databricks.inputs.GetSparkVersionArgs;
    import com.pulumi.databricks.Cluster;
    import com.pulumi.databricks.ClusterArgs;
    import com.pulumi.databricks.inputs.ClusterAutoscaleArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            final var smallest = DatabricksFunctions.getNodeType(GetNodeTypeArgs.builder()
                .localDisk(true)
                .build());
    
            final var latestLts = DatabricksFunctions.getSparkVersion(GetSparkVersionArgs.builder()
                .longTermSupport(true)
                .build());
    
            var sharedAutoscaling = new Cluster("sharedAutoscaling", ClusterArgs.builder()        
                .clusterName("Shared Autoscaling")
                .sparkVersion(latestLts.applyValue(getSparkVersionResult -> getSparkVersionResult.id()))
                .nodeTypeId(smallest.applyValue(getNodeTypeResult -> getNodeTypeResult.id()))
                .autoterminationMinutes(20)
                .autoscale(ClusterAutoscaleArgs.builder()
                    .minWorkers(1)
                    .maxWorkers(50)
                    .build())
                .sparkConf(Map.ofEntries(
                    Map.entry("spark.databricks.io.cache.enabled", true),
                    Map.entry("spark.databricks.io.cache.maxDiskUsage", "50g"),
                    Map.entry("spark.databricks.io.cache.maxMetaDataCache", "1g")
                ))
                .build());
    
        }
    }
    
    resources:
      sharedAutoscaling:
        type: databricks:Cluster
        properties:
          clusterName: Shared Autoscaling
          sparkVersion: ${latestLts.id}
          nodeTypeId: ${smallest.id}
          autoterminationMinutes: 20
          autoscale:
            minWorkers: 1
            maxWorkers: 50
          sparkConf:
            spark.databricks.io.cache.enabled: true
            spark.databricks.io.cache.maxDiskUsage: 50g
            spark.databricks.io.cache.maxMetaDataCache: 1g
    variables:
      smallest:
        fn::invoke:
          Function: databricks:getNodeType
          Arguments:
            localDisk: true
      latestLts:
        fn::invoke:
          Function: databricks:getSparkVersion
          Arguments:
            longTermSupport: true
    
    libraries List<Property Map>
    nodeTypeId String
    Any supported databricks.getNodeType id. If instance_pool_id is specified, this field is not needed.
    numWorkers Number
    Number of worker nodes that this cluster should have. A cluster has one Spark driver and num_workers executors for a total of num_workers + 1 Spark nodes.
    policyId String
    runtimeEngine String
    The type of runtime engine to use. If not specified, the runtime engine type is inferred based on the spark_version value. Allowed values include: PHOTON, STANDARD.
    singleUserName String
    The optional user name of the user to assign to an interactive cluster. This field is required when data_security_mode is set to SINGLE_USER, or when using AAD Passthrough for Azure Data Lake Storage (ADLS) with a single-user cluster (i.e., not a high-concurrency cluster).
    sparkConf Map<Any>
    should have the following items:

    • spark.databricks.repl.allowedLanguages set to a list of supported languages, for example: python,sql, or python,sql,r. Scala is not supported!
    • spark.databricks.cluster.profile set to serverless
    sparkEnvVars Map<Any>
    Map with environment variable key-value pairs to fine-tune Spark clusters. Key-value pairs of the form (X,Y) are exported (i.e., X='Y') while launching the driver and workers.
    sparkVersion String
    Runtime version of the cluster. Any supported databricks.getSparkVersion id. We advise using Cluster Policies to restrict the list of versions for simplicity while maintaining enough control.
    sshPublicKeys List<String>
    SSH public key contents that will be added to each Spark node in this cluster. The corresponding private keys can be used to login with the user name ubuntu on port 2200. You can specify up to 10 keys.
    state String
    (string) State of the cluster.
    url String
    URL for the Docker image
    workloadType Property Map

    Supporting Types

    ClusterAutoscale, ClusterAutoscaleArgs

    MaxWorkers int

    The maximum number of workers to which the cluster can scale up when overloaded. max_workers must be strictly greater than min_workers.

    When using a Single Node cluster, num_workers needs to be 0. It can be set to 0 explicitly, or simply not specified, as it defaults to 0. When num_workers is 0, the provider checks for the presence of the required Spark configurations:

    • spark.master must have prefix local, like local[*]
    • spark.databricks.cluster.profile must have value singleNode

    and also the custom_tags entry:

    • "ResourceClass" = "SingleNode"

    The following example demonstrates how to create a single node cluster:

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";
    

    const smallest = databricks.getNodeType({
        localDisk: true,
    });
    const latestLts = databricks.getSparkVersion({
        longTermSupport: true,
    });
    const singleNode = new databricks.Cluster("singleNode", {
        clusterName: "Single Node",
        sparkVersion: latestLts.then(latestLts => latestLts.id),
        nodeTypeId: smallest.then(smallest => smallest.id),
        autoterminationMinutes: 20,
        sparkConf: {
            "spark.databricks.cluster.profile": "singleNode",
            "spark.master": "local[*]",
        },
        customTags: {
            ResourceClass: "SingleNode",
        },
    });

    import pulumi
    import pulumi_databricks as databricks
    
    smallest = databricks.get_node_type(local_disk=True)
    latest_lts = databricks.get_spark_version(long_term_support=True)
    single_node = databricks.Cluster("singleNode",
        cluster_name="Single Node",
        spark_version=latest_lts.id,
        node_type_id=smallest.id,
        autotermination_minutes=20,
        spark_conf={
            "spark.databricks.cluster.profile": "singleNode",
            "spark.master": "local[*]",
        },
        custom_tags={
            "ResourceClass": "SingleNode",
        })
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Databricks = Pulumi.Databricks;
    
    return await Deployment.RunAsync(() => 
    {
        var smallest = Databricks.GetNodeType.Invoke(new()
        {
            LocalDisk = true,
        });
    
        var latestLts = Databricks.GetSparkVersion.Invoke(new()
        {
            LongTermSupport = true,
        });
    
        var singleNode = new Databricks.Cluster("singleNode", new()
        {
            ClusterName = "Single Node",
            SparkVersion = latestLts.Apply(getSparkVersionResult => getSparkVersionResult.Id),
            NodeTypeId = smallest.Apply(getNodeTypeResult => getNodeTypeResult.Id),
            AutoterminationMinutes = 20,
            SparkConf = 
            {
                { "spark.databricks.cluster.profile", "singleNode" },
                { "spark.master", "local[*]" },
            },
            CustomTags = 
            {
                { "ResourceClass", "SingleNode" },
            },
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		smallest, err := databricks.GetNodeType(ctx, &databricks.GetNodeTypeArgs{
    			LocalDisk: pulumi.BoolRef(true),
    		}, nil)
    		if err != nil {
    			return err
    		}
    		latestLts, err := databricks.GetSparkVersion(ctx, &databricks.GetSparkVersionArgs{
    			LongTermSupport: pulumi.BoolRef(true),
    		}, nil)
    		if err != nil {
    			return err
    		}
    		_, err = databricks.NewCluster(ctx, "singleNode", &databricks.ClusterArgs{
    			ClusterName:            pulumi.String("Single Node"),
    			SparkVersion:           pulumi.String(latestLts.Id),
    			NodeTypeId:             pulumi.String(smallest.Id),
    			AutoterminationMinutes: pulumi.Int(20),
    			SparkConf: pulumi.Map{
    				"spark.databricks.cluster.profile": pulumi.Any("singleNode"),
    				"spark.master":                     pulumi.Any("local[*]"),
    			},
    			CustomTags: pulumi.Map{
    				"ResourceClass": pulumi.Any("SingleNode"),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.databricks.DatabricksFunctions;
    import com.pulumi.databricks.inputs.GetNodeTypeArgs;
    import com.pulumi.databricks.inputs.GetSparkVersionArgs;
    import com.pulumi.databricks.Cluster;
    import com.pulumi.databricks.ClusterArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            final var smallest = DatabricksFunctions.getNodeType(GetNodeTypeArgs.builder()
                .localDisk(true)
                .build());
    
            final var latestLts = DatabricksFunctions.getSparkVersion(GetSparkVersionArgs.builder()
                .longTermSupport(true)
                .build());
    
            var singleNode = new Cluster("singleNode", ClusterArgs.builder()        
                .clusterName("Single Node")
                .sparkVersion(latestLts.applyValue(getSparkVersionResult -> getSparkVersionResult.id()))
                .nodeTypeId(smallest.applyValue(getNodeTypeResult -> getNodeTypeResult.id()))
                .autoterminationMinutes(20)
                .sparkConf(Map.ofEntries(
                    Map.entry("spark.databricks.cluster.profile", "singleNode"),
                    Map.entry("spark.master", "local[*]")
                ))
                .customTags(Map.of("ResourceClass", "SingleNode"))
                .build());
    
        }
    }
    
    resources:
      singleNode:
        type: databricks:Cluster
        properties:
          clusterName: Single Node
          sparkVersion: ${latestLts.id}
          nodeTypeId: ${smallest.id}
          autoterminationMinutes: 20
          sparkConf:
            spark.databricks.cluster.profile: singleNode
            spark.master: local[*]
          customTags:
            ResourceClass: SingleNode
    variables:
      smallest:
        fn::invoke:
          Function: databricks:getNodeType
          Arguments:
            localDisk: true
      latestLts:
        fn::invoke:
          Function: databricks:getSparkVersion
          Arguments:
            longTermSupport: true
    
    MinWorkers int
    The minimum number of workers to which the cluster can scale down when underutilized. It is also the initial number of workers the cluster will have after creation.
    MaxWorkers int

    The maximum number of workers to which the cluster can scale up when overloaded. max_workers must be strictly greater than min_workers.

    When using a Single Node cluster, num_workers needs to be 0. It can be set to 0 explicitly, or simply not specified, as it defaults to 0. When num_workers is 0, the provider checks for the presence of the required Spark configurations:

    • spark.master must have prefix local, like local[*]
    • spark.databricks.cluster.profile must have value singleNode

    and also the custom_tags entry:

    • "ResourceClass" = "SingleNode"

    The following example demonstrates how to create a single node cluster:

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";
    

    const smallest = databricks.getNodeType({
        localDisk: true,
    });
    const latestLts = databricks.getSparkVersion({
        longTermSupport: true,
    });
    const singleNode = new databricks.Cluster("singleNode", {
        clusterName: "Single Node",
        sparkVersion: latestLts.then(latestLts => latestLts.id),
        nodeTypeId: smallest.then(smallest => smallest.id),
        autoterminationMinutes: 20,
        sparkConf: {
            "spark.databricks.cluster.profile": "singleNode",
            "spark.master": "local[*]",
        },
        customTags: {
            ResourceClass: "SingleNode",
        },
    });

    import pulumi
    import pulumi_databricks as databricks
    
    smallest = databricks.get_node_type(local_disk=True)
    latest_lts = databricks.get_spark_version(long_term_support=True)
    single_node = databricks.Cluster("singleNode",
        cluster_name="Single Node",
        spark_version=latest_lts.id,
        node_type_id=smallest.id,
        autotermination_minutes=20,
        spark_conf={
            "spark.databricks.cluster.profile": "singleNode",
            "spark.master": "local[*]",
        },
        custom_tags={
            "ResourceClass": "SingleNode",
        })
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Databricks = Pulumi.Databricks;
    
    return await Deployment.RunAsync(() => 
    {
        var smallest = Databricks.GetNodeType.Invoke(new()
        {
            LocalDisk = true,
        });
    
        var latestLts = Databricks.GetSparkVersion.Invoke(new()
        {
            LongTermSupport = true,
        });
    
        var singleNode = new Databricks.Cluster("singleNode", new()
        {
            ClusterName = "Single Node",
            SparkVersion = latestLts.Apply(getSparkVersionResult => getSparkVersionResult.Id),
            NodeTypeId = smallest.Apply(getNodeTypeResult => getNodeTypeResult.Id),
            AutoterminationMinutes = 20,
            SparkConf = 
            {
                { "spark.databricks.cluster.profile", "singleNode" },
                { "spark.master", "local[*]" },
            },
            CustomTags = 
            {
                { "ResourceClass", "SingleNode" },
            },
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		smallest, err := databricks.GetNodeType(ctx, &databricks.GetNodeTypeArgs{
    			LocalDisk: pulumi.BoolRef(true),
    		}, nil)
    		if err != nil {
    			return err
    		}
    		latestLts, err := databricks.GetSparkVersion(ctx, &databricks.GetSparkVersionArgs{
    			LongTermSupport: pulumi.BoolRef(true),
    		}, nil)
    		if err != nil {
    			return err
    		}
    		_, err = databricks.NewCluster(ctx, "singleNode", &databricks.ClusterArgs{
    			ClusterName:            pulumi.String("Single Node"),
    			SparkVersion:           pulumi.String(latestLts.Id),
    			NodeTypeId:             pulumi.String(smallest.Id),
    			AutoterminationMinutes: pulumi.Int(20),
    			SparkConf: pulumi.Map{
    				"spark.databricks.cluster.profile": pulumi.Any("singleNode"),
    				"spark.master":                     pulumi.Any("local[*]"),
    			},
    			CustomTags: pulumi.Map{
    				"ResourceClass": pulumi.Any("SingleNode"),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.databricks.DatabricksFunctions;
    import com.pulumi.databricks.inputs.GetNodeTypeArgs;
    import com.pulumi.databricks.inputs.GetSparkVersionArgs;
    import com.pulumi.databricks.Cluster;
    import com.pulumi.databricks.ClusterArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            final var smallest = DatabricksFunctions.getNodeType(GetNodeTypeArgs.builder()
                .localDisk(true)
                .build());
    
            final var latestLts = DatabricksFunctions.getSparkVersion(GetSparkVersionArgs.builder()
                .longTermSupport(true)
                .build());
    
            var singleNode = new Cluster("singleNode", ClusterArgs.builder()        
                .clusterName("Single Node")
                .sparkVersion(latestLts.applyValue(getSparkVersionResult -> getSparkVersionResult.id()))
                .nodeTypeId(smallest.applyValue(getNodeTypeResult -> getNodeTypeResult.id()))
                .autoterminationMinutes(20)
                .sparkConf(Map.ofEntries(
                    Map.entry("spark.databricks.cluster.profile", "singleNode"),
                    Map.entry("spark.master", "local[*]")
                ))
                .customTags(Map.of("ResourceClass", "SingleNode"))
                .build());
    
        }
    }
    
    resources:
      singleNode:
        type: databricks:Cluster
        properties:
          clusterName: Single Node
          sparkVersion: ${latestLts.id}
          nodeTypeId: ${smallest.id}
          autoterminationMinutes: 20
          sparkConf:
            spark.databricks.cluster.profile: singleNode
            spark.master: local[*]
          customTags:
            ResourceClass: SingleNode
    variables:
      smallest:
        fn::invoke:
          Function: databricks:getNodeType
          Arguments:
            localDisk: true
      latestLts:
        fn::invoke:
          Function: databricks:getSparkVersion
          Arguments:
            longTermSupport: true
    
    MinWorkers int
    The minimum number of workers to which the cluster can scale down when underutilized. It is also the initial number of workers the cluster will have after creation.
    maxWorkers Integer
    The maximum number of workers to which the cluster can scale up when overloaded. max_workers must be strictly greater than min_workers.
    minWorkers Integer
    The minimum number of workers to which the cluster can scale down when underutilized. It is also the initial number of workers the cluster will have after creation.
    maxWorkers number
    The maximum number of workers to which the cluster can scale up when overloaded. max_workers must be strictly greater than min_workers.
    minWorkers number
    The minimum number of workers to which the cluster can scale down when underutilized. It is also the initial number of workers the cluster will have after creation.
    max_workers int
    The maximum number of workers to which the cluster can scale up when overloaded. max_workers must be strictly greater than min_workers.
    min_workers int
    The minimum number of workers to which the cluster can scale down when underutilized. It is also the initial number of workers the cluster will have after creation.
    maxWorkers Number
    The maximum number of workers to which the cluster can scale up when overloaded. max_workers must be strictly greater than min_workers.
    minWorkers Number
    The minimum number of workers to which the cluster can scale down when underutilized. It is also the initial number of workers the cluster will have after creation.

    ClusterAwsAttributes, ClusterAwsAttributesArgs

    Availability string
    Availability type used for all subsequent nodes past the first_on_demand ones. Valid values are SPOT, SPOT_WITH_FALLBACK and ON_DEMAND. Note: If first_on_demand is zero, this availability type will be used for the entire cluster. Backend default value is SPOT_WITH_FALLBACK and could change in the future
    EbsVolumeCount int
    The number of volumes launched for each instance. You can choose up to 10 volumes. This feature is only enabled for supported node types. Legacy node types cannot specify custom EBS volumes. For node types with no instance store, at least one EBS volume needs to be specified; otherwise, cluster creation will fail. These EBS volumes will be mounted at /ebs0, /ebs1, etc. Instance store volumes will be mounted at /local_disk0, /local_disk1, etc. If EBS volumes are attached, Databricks will configure Spark to use only the EBS volumes for scratch storage because heterogeneously sized scratch devices can lead to inefficient disk utilization. If no EBS volumes are attached, Databricks will configure Spark to use instance store volumes. If EBS volumes are specified, then the Spark configuration spark.local.dir will be overridden.
    EbsVolumeIops int
    EbsVolumeSize int
    The size of each EBS volume (in GiB) launched for each instance. For general purpose SSD, this value must be within the range 100 - 4096. For throughput optimized HDD, this value must be within the range 500 - 4096. Custom EBS volumes cannot be specified for the legacy node types (memory-optimized and compute-optimized).
    EbsVolumeThroughput int
    EbsVolumeType string
    The type of EBS volumes that will be launched with this cluster. Valid values are GENERAL_PURPOSE_SSD or THROUGHPUT_OPTIMIZED_HDD. Use this option only if you're not picking Delta Optimized i3.* node types.
    FirstOnDemand int
    The first first_on_demand nodes of the cluster will be placed on on-demand instances. If this value is greater than 0, the cluster driver node will be placed on an on-demand instance. If this value is greater than or equal to the current cluster size, all nodes will be placed on on-demand instances. If this value is less than the current cluster size, first_on_demand nodes will be placed on on-demand instances, and the remainder will be placed on availability instances. This value does not affect cluster size and cannot be mutated over the lifetime of a cluster. Backend default value is 1 and could change in the future
    InstanceProfileArn string
    SpotBidPricePercent int
    The max price for AWS spot instances, as a percentage of the corresponding instance type’s on-demand price. For example, if this field is set to 50, and the cluster needs a new i3.xlarge spot instance, then the max price is half of the price of on-demand i3.xlarge instances. Similarly, if this field is set to 200, the max price is twice the price of on-demand i3.xlarge instances. If not specified, the default value is 100. When spot instances are requested for this cluster, only spot instances whose max price percentage matches this field will be considered. For safety, we enforce this field to be no more than 10000.
    ZoneId string
    Identifier for the availability zone/datacenter in which the cluster resides. This string will be of the form us-west-2a. The provided availability zone must be in the same region as the Databricks deployment. For example, us-west-2a is not a valid zone ID if the Databricks deployment resides in the us-east-1 region. Enable automatic availability zone selection ("Auto-AZ") by setting the value auto. Databricks selects the AZ based on available IPs in the workspace subnets and retries in other availability zones if AWS returns insufficient capacity errors.
    Availability string
    Availability type used for all subsequent nodes past the first_on_demand ones. Valid values are SPOT, SPOT_WITH_FALLBACK and ON_DEMAND. Note: If first_on_demand is zero, this availability type will be used for the entire cluster. Backend default value is SPOT_WITH_FALLBACK and could change in the future
    EbsVolumeCount int
    The number of volumes launched for each instance. You can choose up to 10 volumes. This feature is only enabled for supported node types. Legacy node types cannot specify custom EBS volumes. For node types with no instance store, at least one EBS volume needs to be specified; otherwise, cluster creation will fail. These EBS volumes will be mounted at /ebs0, /ebs1, etc. Instance store volumes will be mounted at /local_disk0, /local_disk1, etc. If EBS volumes are attached, Databricks will configure Spark to use only the EBS volumes for scratch storage because heterogeneously sized scratch devices can lead to inefficient disk utilization. If no EBS volumes are attached, Databricks will configure Spark to use instance store volumes. If EBS volumes are specified, then the Spark configuration spark.local.dir will be overridden.
    EbsVolumeIops int
    EbsVolumeSize int
    The size of each EBS volume (in GiB) launched for each instance. For general purpose SSD, this value must be within the range 100 - 4096. For throughput optimized HDD, this value must be within the range 500 - 4096. Custom EBS volumes cannot be specified for the legacy node types (memory-optimized and compute-optimized).
    EbsVolumeThroughput int
    EbsVolumeType string
    The type of EBS volumes that will be launched with this cluster. Valid values are GENERAL_PURPOSE_SSD or THROUGHPUT_OPTIMIZED_HDD. Use this option only if you're not picking Delta Optimized i3.* node types.
    FirstOnDemand int
    The first first_on_demand nodes of the cluster will be placed on on-demand instances. If this value is greater than 0, the cluster driver node will be placed on an on-demand instance. If this value is greater than or equal to the current cluster size, all nodes will be placed on on-demand instances. If this value is less than the current cluster size, first_on_demand nodes will be placed on on-demand instances, and the remainder will be placed on availability instances. This value does not affect cluster size and cannot be mutated over the lifetime of a cluster. Backend default value is 1 and could change in the future
    InstanceProfileArn string
    SpotBidPricePercent int
    The max price for AWS spot instances, as a percentage of the corresponding instance type’s on-demand price. For example, if this field is set to 50, and the cluster needs a new i3.xlarge spot instance, then the max price is half of the price of on-demand i3.xlarge instances. Similarly, if this field is set to 200, the max price is twice the price of on-demand i3.xlarge instances. If not specified, the default value is 100. When spot instances are requested for this cluster, only spot instances whose max price percentage matches this field will be considered. For safety, we enforce this field to be no more than 10000.
    ZoneId string
    Identifier for the availability zone/datacenter in which the cluster resides. This string will be of the form us-west-2a. The provided availability zone must be in the same region as the Databricks deployment. For example, us-west-2a is not a valid zone ID if the Databricks deployment resides in the us-east-1 region. Enable automatic availability zone selection ("Auto-AZ") by setting the value auto. Databricks selects the AZ based on available IPs in the workspace subnets and retries in other availability zones if AWS returns insufficient capacity errors.
    availability String
    Availability type used for all subsequent nodes past the first_on_demand ones. Valid values are SPOT, SPOT_WITH_FALLBACK and ON_DEMAND. Note: If first_on_demand is zero, this availability type will be used for the entire cluster. Backend default value is SPOT_WITH_FALLBACK and could change in the future
    ebsVolumeCount Integer
    The number of volumes launched for each instance. You can choose up to 10 volumes. This feature is only enabled for supported node types. Legacy node types cannot specify custom EBS volumes. For node types with no instance store, at least one EBS volume needs to be specified; otherwise, cluster creation will fail. These EBS volumes will be mounted at /ebs0, /ebs1, etc. Instance store volumes will be mounted at /local_disk0, /local_disk1, etc. If EBS volumes are attached, Databricks will configure Spark to use only the EBS volumes for scratch storage because heterogeneously sized scratch devices can lead to inefficient disk utilization. If no EBS volumes are attached, Databricks will configure Spark to use instance store volumes. If EBS volumes are specified, then the Spark configuration spark.local.dir will be overridden.
    ebsVolumeIops Integer
    ebsVolumeSize Integer
    The size of each EBS volume (in GiB) launched for each instance. For general purpose SSD, this value must be within the range 100 - 4096. For throughput optimized HDD, this value must be within the range 500 - 4096. Custom EBS volumes cannot be specified for the legacy node types (memory-optimized and compute-optimized).
    ebsVolumeThroughput Integer
    ebsVolumeType String
    The type of EBS volumes that will be launched with this cluster. Valid values are GENERAL_PURPOSE_SSD or THROUGHPUT_OPTIMIZED_HDD. Use this option only if you're not picking Delta Optimized i3.* node types.
    firstOnDemand Integer
    The first first_on_demand nodes of the cluster will be placed on on-demand instances. If this value is greater than 0, the cluster driver node will be placed on an on-demand instance. If this value is greater than or equal to the current cluster size, all nodes will be placed on on-demand instances. If this value is less than the current cluster size, first_on_demand nodes will be placed on on-demand instances, and the remainder will be placed on availability instances. This value does not affect cluster size and cannot be mutated over the lifetime of a cluster. Backend default value is 1 and could change in the future
    instanceProfileArn String
    spotBidPricePercent Integer
    The max price for AWS spot instances, as a percentage of the corresponding instance type’s on-demand price. For example, if this field is set to 50, and the cluster needs a new i3.xlarge spot instance, then the max price is half of the price of on-demand i3.xlarge instances. Similarly, if this field is set to 200, the max price is twice the price of on-demand i3.xlarge instances. If not specified, the default value is 100. When spot instances are requested for this cluster, only spot instances whose max price percentage matches this field will be considered. For safety, we enforce this field to be no more than 10000.
    zoneId String
    Identifier for the availability zone/datacenter in which the cluster resides. This string will be of the form us-west-2a. The provided availability zone must be in the same region as the Databricks deployment. For example, us-west-2a is not a valid zone ID if the Databricks deployment resides in the us-east-1 region. Enable automatic availability zone selection ("Auto-AZ") by setting the value auto. Databricks selects the AZ based on available IPs in the workspace subnets and retries in other availability zones if AWS returns insufficient capacity errors.
    availability string
    Availability type used for all subsequent nodes past the first_on_demand ones. Valid values are SPOT, SPOT_WITH_FALLBACK and ON_DEMAND. Note: If first_on_demand is zero, this availability type will be used for the entire cluster. Backend default value is SPOT_WITH_FALLBACK and could change in the future
    ebsVolumeCount number
    The number of volumes launched for each instance. You can choose up to 10 volumes. This feature is only enabled for supported node types. Legacy node types cannot specify custom EBS volumes. For node types with no instance store, at least one EBS volume needs to be specified; otherwise, cluster creation will fail. These EBS volumes will be mounted at /ebs0, /ebs1, etc. Instance store volumes will be mounted at /local_disk0, /local_disk1, etc. If EBS volumes are attached, Databricks will configure Spark to use only the EBS volumes for scratch storage because heterogeneously sized scratch devices can lead to inefficient disk utilization. If no EBS volumes are attached, Databricks will configure Spark to use instance store volumes. If EBS volumes are specified, then the Spark configuration spark.local.dir will be overridden.
    ebsVolumeIops number
    ebsVolumeSize number
    The size of each EBS volume (in GiB) launched for each instance. For general purpose SSD, this value must be within the range 100 - 4096. For throughput optimized HDD, this value must be within the range 500 - 4096. Custom EBS volumes cannot be specified for the legacy node types (memory-optimized and compute-optimized).
    ebsVolumeThroughput number
    ebsVolumeType string
    The type of EBS volumes that will be launched with this cluster. Valid values are GENERAL_PURPOSE_SSD or THROUGHPUT_OPTIMIZED_HDD. Use this option only if you're not picking Delta Optimized i3.* node types.
    firstOnDemand number
    The first first_on_demand nodes of the cluster will be placed on on-demand instances. If this value is greater than 0, the cluster driver node will be placed on an on-demand instance. If this value is greater than or equal to the current cluster size, all nodes will be placed on on-demand instances. If this value is less than the current cluster size, first_on_demand nodes will be placed on on-demand instances, and the remainder will be placed on availability instances. This value does not affect cluster size and cannot be mutated over the lifetime of a cluster. Backend default value is 1 and could change in the future
    instanceProfileArn string
    spotBidPricePercent number
    The max price for AWS spot instances, as a percentage of the corresponding instance type’s on-demand price. For example, if this field is set to 50, and the cluster needs a new i3.xlarge spot instance, then the max price is half of the price of on-demand i3.xlarge instances. Similarly, if this field is set to 200, the max price is twice the price of on-demand i3.xlarge instances. If not specified, the default value is 100. When spot instances are requested for this cluster, only spot instances whose max price percentage matches this field will be considered. For safety, we enforce this field to be no more than 10000.
    zoneId string
    Identifier for the availability zone/datacenter in which the cluster resides. This string will be of the form us-west-2a. The provided availability zone must be in the same region as the Databricks deployment. For example, us-west-2a is not a valid zone ID if the Databricks deployment resides in the us-east-1 region. Enable automatic availability zone selection ("Auto-AZ") by setting the value auto. Databricks selects the AZ based on available IPs in the workspace subnets and retries in other availability zones if AWS returns insufficient capacity errors.
    availability str
    Availability type used for all subsequent nodes past the first_on_demand ones. Valid values are SPOT, SPOT_WITH_FALLBACK and ON_DEMAND. Note: If first_on_demand is zero, this availability type will be used for the entire cluster. Backend default value is SPOT_WITH_FALLBACK and could change in the future
    ebs_volume_count int
    The number of volumes launched for each instance. You can choose up to 10 volumes. This feature is only enabled for supported node types. Legacy node types cannot specify custom EBS volumes. For node types with no instance store, at least one EBS volume needs to be specified; otherwise, cluster creation will fail. These EBS volumes will be mounted at /ebs0, /ebs1, etc. Instance store volumes will be mounted at /local_disk0, /local_disk1, etc. If EBS volumes are attached, Databricks will configure Spark to use only the EBS volumes for scratch storage because heterogeneously sized scratch devices can lead to inefficient disk utilization. If no EBS volumes are attached, Databricks will configure Spark to use instance store volumes. If EBS volumes are specified, then the Spark configuration spark.local.dir will be overridden.
    ebs_volume_iops int
    ebs_volume_size int
    The size of each EBS volume (in GiB) launched for each instance. For general purpose SSD, this value must be within the range 100 - 4096. For throughput optimized HDD, this value must be within the range 500 - 4096. Custom EBS volumes cannot be specified for the legacy node types (memory-optimized and compute-optimized).
    ebs_volume_throughput int
    ebs_volume_type str
    The type of EBS volumes that will be launched with this cluster. Valid values are GENERAL_PURPOSE_SSD or THROUGHPUT_OPTIMIZED_HDD. Use this option only if you're not picking Delta Optimized i3.* node types.
    first_on_demand int
    The first first_on_demand nodes of the cluster will be placed on on-demand instances. If this value is greater than 0, the cluster driver node will be placed on an on-demand instance. If this value is greater than or equal to the current cluster size, all nodes will be placed on on-demand instances. If this value is less than the current cluster size, first_on_demand nodes will be placed on on-demand instances, and the remainder will be placed on availability instances. This value does not affect cluster size and cannot be mutated over the lifetime of a cluster. Backend default value is 1 and could change in the future
    instance_profile_arn str
    spot_bid_price_percent int
    The max price for AWS spot instances, as a percentage of the corresponding instance type’s on-demand price. For example, if this field is set to 50, and the cluster needs a new i3.xlarge spot instance, then the max price is half of the price of on-demand i3.xlarge instances. Similarly, if this field is set to 200, the max price is twice the price of on-demand i3.xlarge instances. If not specified, the default value is 100. When spot instances are requested for this cluster, only spot instances whose max price percentage matches this field will be considered. For safety, we enforce this field to be no more than 10000.
    zone_id str
    Identifier for the availability zone/datacenter in which the cluster resides. This string will be of the form us-west-2a. The provided availability zone must be in the same region as the Databricks deployment. For example, us-west-2a is not a valid zone ID if the Databricks deployment resides in the us-east-1 region. Enable automatic availability zone selection ("Auto-AZ") by setting the value auto. Databricks selects the AZ based on available IPs in the workspace subnets and retries in other availability zones if AWS returns insufficient capacity errors.
    availability String
    Availability type used for all subsequent nodes past the first_on_demand ones. Valid values are SPOT, SPOT_WITH_FALLBACK and ON_DEMAND. Note: If first_on_demand is zero, this availability type will be used for the entire cluster. Backend default value is SPOT_WITH_FALLBACK and could change in the future
    ebsVolumeCount Number
    The number of volumes launched for each instance. You can choose up to 10 volumes. This feature is only enabled for supported node types. Legacy node types cannot specify custom EBS volumes. For node types with no instance store, at least one EBS volume needs to be specified; otherwise, cluster creation will fail. These EBS volumes will be mounted at /ebs0, /ebs1, etc. Instance store volumes will be mounted at /local_disk0, /local_disk1, etc. If EBS volumes are attached, Databricks will configure Spark to use only the EBS volumes for scratch storage because heterogeneously sized scratch devices can lead to inefficient disk utilization. If no EBS volumes are attached, Databricks will configure Spark to use instance store volumes. If EBS volumes are specified, then the Spark configuration spark.local.dir will be overridden.
    ebsVolumeIops Number
    ebsVolumeSize Number
    The size of each EBS volume (in GiB) launched for each instance. For general purpose SSD, this value must be within the range 100 - 4096. For throughput optimized HDD, this value must be within the range 500 - 4096. Custom EBS volumes cannot be specified for the legacy node types (memory-optimized and compute-optimized).
    ebsVolumeThroughput Number
    ebsVolumeType String
    The type of EBS volumes that will be launched with this cluster. Valid values are GENERAL_PURPOSE_SSD or THROUGHPUT_OPTIMIZED_HDD. Use this option only if you're not picking Delta Optimized i3.* node types.
    firstOnDemand Number
    The first first_on_demand nodes of the cluster will be placed on on-demand instances. If this value is greater than 0, the cluster driver node will be placed on an on-demand instance. If this value is greater than or equal to the current cluster size, all nodes will be placed on on-demand instances. If this value is less than the current cluster size, first_on_demand nodes will be placed on on-demand instances, and the remainder will be placed on availability instances. This value does not affect cluster size and cannot be mutated over the lifetime of a cluster. Backend default value is 1 and could change in the future
    instanceProfileArn String
    spotBidPricePercent Number
    The max price for AWS spot instances, as a percentage of the corresponding instance type’s on-demand price. For example, if this field is set to 50, and the cluster needs a new i3.xlarge spot instance, then the max price is half of the price of on-demand i3.xlarge instances. Similarly, if this field is set to 200, the max price is twice the price of on-demand i3.xlarge instances. If not specified, the default value is 100. When spot instances are requested for this cluster, only spot instances whose max price percentage matches this field will be considered. For safety, we enforce this field to be no more than 10000.
    zoneId String
    Identifier for the availability zone/datacenter in which the cluster resides. This string will be of the form us-west-2a. The provided availability zone must be in the same region as the Databricks deployment. For example, us-west-2a is not a valid zone ID if the Databricks deployment resides in the us-east-1 region. Enable automatic availability zone selection ("Auto-AZ") by setting the value auto. Databricks selects the AZ based on available IPs in the workspace subnets and retries in other availability zones if AWS returns insufficient capacity errors.
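
    To show how these attributes fit together, here is a minimal TypeScript sketch that uses only fields documented above; the resource name spotWithFallback and the specific values are illustrative choices, not recommendations from this reference:

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";
    
    const smallest = databricks.getNodeType({
        localDisk: true,
    });
    const latestLts = databricks.getSparkVersion({
        longTermSupport: true,
    });
    // Driver on an on-demand instance, workers on spot with on-demand fallback,
    // and one 100 GiB general purpose SSD EBS volume per node for scratch storage.
    const spotWithFallback = new databricks.Cluster("spotWithFallback", {
        clusterName: "Spot With Fallback",
        sparkVersion: latestLts.then(latestLts => latestLts.id),
        nodeTypeId: smallest.then(smallest => smallest.id),
        autoterminationMinutes: 20,
        autoscale: {
            minWorkers: 1,
            maxWorkers: 8,
        },
        awsAttributes: {
            availability: "SPOT_WITH_FALLBACK",
            zoneId: "auto",                       // let Databricks pick the AZ (Auto-AZ)
            firstOnDemand: 1,                     // only the driver is guaranteed on-demand
            spotBidPricePercent: 100,             // bid up to the on-demand price
            ebsVolumeType: "GENERAL_PURPOSE_SSD",
            ebsVolumeCount: 1,
            ebsVolumeSize: 100,                   // GiB; 100-4096 for general purpose SSD
        },
    });

    Raising first_on_demand shifts more of the cluster onto on-demand capacity at higher cost. These attributes only apply to workspaces deployed on AWS.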

    ClusterAzureAttributes, ClusterAzureAttributesArgs

    Availability string
    Availability type used for all subsequent nodes past the first_on_demand ones. Valid values are SPOT_AZURE, SPOT_WITH_FALLBACK_AZURE, and ON_DEMAND_AZURE. Note: If first_on_demand is zero, this availability type will be used for the entire cluster.
    FirstOnDemand int
    The first first_on_demand nodes of the cluster will be placed on on-demand instances. If this value is greater than 0, the cluster driver node will be placed on an on-demand instance. If this value is greater than or equal to the current cluster size, all nodes will be placed on on-demand instances. If this value is less than the current cluster size, first_on_demand nodes will be placed on on-demand instances, and the remainder will be placed on availability instances. This value does not affect cluster size and cannot be mutated over the lifetime of a cluster.
    LogAnalyticsInfo ClusterAzureAttributesLogAnalyticsInfo
    SpotBidMaxPrice double
    The max price for Azure spot instances. Use -1 to specify the lowest price.
    Availability string
    Availability type used for all subsequent nodes past the first_on_demand ones. Valid values are SPOT_AZURE, SPOT_WITH_FALLBACK_AZURE, and ON_DEMAND_AZURE. Note: If first_on_demand is zero, this availability type will be used for the entire cluster.
    FirstOnDemand int
    The first first_on_demand nodes of the cluster will be placed on on-demand instances. If this value is greater than 0, the cluster driver node will be placed on an on-demand instance. If this value is greater than or equal to the current cluster size, all nodes will be placed on on-demand instances. If this value is less than the current cluster size, first_on_demand nodes will be placed on on-demand instances, and the remainder will be placed on availability instances. This value does not affect cluster size and cannot be mutated over the lifetime of a cluster.
    LogAnalyticsInfo ClusterAzureAttributesLogAnalyticsInfo
    SpotBidMaxPrice float64
    The max price for Azure spot instances. Use -1 to specify the lowest price.
    availability String
    Availability type used for all subsequent nodes past the first_on_demand ones. Valid values are SPOT_AZURE, SPOT_WITH_FALLBACK_AZURE, and ON_DEMAND_AZURE. Note: If first_on_demand is zero, this availability type will be used for the entire cluster.
    firstOnDemand Integer
    The first first_on_demand nodes of the cluster will be placed on on-demand instances. If this value is greater than 0, the cluster driver node will be placed on an on-demand instance. If this value is greater than or equal to the current cluster size, all nodes will be placed on on-demand instances. If this value is less than the current cluster size, first_on_demand nodes will be placed on on-demand instances, and the remainder will be placed on availability instances. This value does not affect cluster size and cannot be mutated over the lifetime of a cluster.
    logAnalyticsInfo ClusterAzureAttributesLogAnalyticsInfo
    spotBidMaxPrice Double
    The max price for Azure spot instances. Use -1 to specify the lowest price.
    availability string
    Availability type used for all subsequent nodes past the first_on_demand ones. Valid values are SPOT_AZURE, SPOT_WITH_FALLBACK_AZURE, and ON_DEMAND_AZURE. Note: If first_on_demand is zero, this availability type will be used for the entire cluster.
    firstOnDemand number
    The first first_on_demand nodes of the cluster will be placed on on-demand instances. If this value is greater than 0, the cluster driver node will be placed on an on-demand instance. If this value is greater than or equal to the current cluster size, all nodes will be placed on on-demand instances. If this value is less than the current cluster size, first_on_demand nodes will be placed on on-demand instances, and the remainder will be placed on availability instances. This value does not affect cluster size and cannot be mutated over the lifetime of a cluster.
    logAnalyticsInfo ClusterAzureAttributesLogAnalyticsInfo
    spotBidMaxPrice number
    The max price for Azure spot instances. Use -1 to specify the lowest price.
    availability str
    Availability type used for all subsequent nodes past the first_on_demand ones. Valid values are SPOT_AZURE, SPOT_WITH_FALLBACK_AZURE, and ON_DEMAND_AZURE. Note: If first_on_demand is zero, this availability type will be used for the entire cluster.
    first_on_demand int
    The first first_on_demand nodes of the cluster will be placed on on-demand instances. If this value is greater than 0, the cluster driver node will be placed on an on-demand instance. If this value is greater than or equal to the current cluster size, all nodes will be placed on on-demand instances. If this value is less than the current cluster size, first_on_demand nodes will be placed on on-demand instances, and the remainder will be placed on availability instances. This value does not affect cluster size and cannot be mutated over the lifetime of a cluster.
    log_analytics_info ClusterAzureAttributesLogAnalyticsInfo
    spot_bid_max_price float
    The max price for Azure spot instances. Use -1 to specify the lowest price.
    availability String
    Availability type used for all subsequent nodes past the first_on_demand ones. Valid values are SPOT_AZURE, SPOT_WITH_FALLBACK_AZURE, and ON_DEMAND_AZURE. Note: If first_on_demand is zero, this availability type will be used for the entire cluster.
    firstOnDemand Number
    The first first_on_demand nodes of the cluster will be placed on on-demand instances. If this value is greater than 0, the cluster driver node will be placed on an on-demand instance. If this value is greater than or equal to the current cluster size, all nodes will be placed on on-demand instances. If this value is less than the current cluster size, first_on_demand nodes will be placed on on-demand instances, and the remainder will be placed on availability instances. This value does not affect cluster size and cannot be mutated over the lifetime of a cluster.
    logAnalyticsInfo Property Map
    spotBidMaxPrice Number
    The max price for Azure spot instances. Use -1 to specify the lowest price.
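
    As a minimal sketch (TypeScript, with assumed runtime and node type values), these Azure attributes can be set as follows to run workers on spot VMs with fallback while keeping the driver on demand:

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";

    // Hypothetical cluster: Azure spot workers with fallback, driver on-demand.
    const azureSpot = new databricks.Cluster("azureSpot", {
        clusterName: "Azure Spot Workers",
        sparkVersion: "14.3.x-scala2.12",  // assumed runtime; resolve via databricks.getSparkVersion in practice
        nodeTypeId: "Standard_DS3_v2",     // assumed Azure node type
        autoterminationMinutes: 20,
        numWorkers: 4,
        azureAttributes: {
            availability: "SPOT_WITH_FALLBACK_AZURE",
            firstOnDemand: 1,              // the driver node stays on an on-demand instance
            spotBidMaxPrice: -1,           // -1 bids up to the on-demand price
        },
    });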

    ClusterAzureAttributesLogAnalyticsInfo, ClusterAzureAttributesLogAnalyticsInfoArgs

    ClusterClusterLogConf, ClusterClusterLogConfArgs

    ClusterClusterLogConfDbfs, ClusterClusterLogConfDbfsArgs

    Destination string
    DBFS destination, e.g. dbfs:/cluster-logs.
    Destination string
    DBFS destination, e.g. dbfs:/cluster-logs.
    destination String
    DBFS destination, e.g. dbfs:/cluster-logs.
    destination string
    DBFS destination, e.g. dbfs:/cluster-logs.
    destination str
    DBFS destination, e.g. dbfs:/cluster-logs.
    destination String
    DBFS destination, e.g. dbfs:/cluster-logs.
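
    As a minimal sketch (TypeScript, with an assumed runtime, node type, and DBFS path), cluster logs can be delivered to DBFS like this:

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";

    // Hypothetical cluster shipping driver and worker logs to a DBFS folder.
    const loggedCluster = new databricks.Cluster("loggedCluster", {
        clusterName: "Logged Cluster",
        sparkVersion: "14.3.x-scala2.12", // assumed runtime
        nodeTypeId: "i3.xlarge",          // assumed node type
        numWorkers: 2,
        clusterLogConf: {
            dbfs: {
                destination: "dbfs:/cluster-logs", // assumed DBFS path
            },
        },
    });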

    ClusterClusterLogConfS3, ClusterClusterLogConfS3Args

    Destination string
    S3 destination, e.g., s3://my-bucket/some-prefix. You must configure the cluster with an instance profile, and the instance profile must have write access to the destination. You cannot use AWS keys.
    CannedAcl string
    Set canned access control list, e.g. bucket-owner-full-control. If canned_acl is set, the cluster instance profile must have s3:PutObjectAcl permission on the destination bucket and prefix. The full list of possible canned ACLs can be found here. By default, only the object owner gets full control. If you are using a cross-account role for writing data, you may want to set bucket-owner-full-control to make bucket owners able to read the logs.
    EnableEncryption bool
    Enable server-side encryption, false by default.
    EncryptionType string
    The encryption type; it can be sse-s3 or sse-kms. It is used only when encryption is enabled, and the default type is sse-s3.
    Endpoint string
    S3 endpoint, e.g. https://s3-us-west-2.amazonaws.com. Either region or endpoint needs to be set. If both are set, the endpoint is used.
    KmsKey string
    KMS key used if encryption is enabled and encryption type is set to sse-kms.
    Region string
    S3 region, e.g. us-west-2. Either region or endpoint must be set. If both are set, the endpoint is used.
    Destination string
    S3 destination, e.g., s3://my-bucket/some-prefix. You must configure the cluster with an instance profile, and the instance profile must have write access to the destination. You cannot use AWS keys.
    CannedAcl string
    Set canned access control list, e.g. bucket-owner-full-control. If canned_acl is set, the cluster instance profile must have s3:PutObjectAcl permission on the destination bucket and prefix. The full list of possible canned ACLs can be found here. By default, only the object owner gets full control. If you are using a cross-account role for writing data, you may want to set bucket-owner-full-control to make bucket owners able to read the logs.
    EnableEncryption bool
    Enable server-side encryption, false by default.
    EncryptionType string
    The encryption type; it can be sse-s3 or sse-kms. It is used only when encryption is enabled, and the default type is sse-s3.
    Endpoint string
    S3 endpoint, e.g. https://s3-us-west-2.amazonaws.com. Either region or endpoint needs to be set. If both are set, the endpoint is used.
    KmsKey string
    KMS key used if encryption is enabled and encryption type is set to sse-kms.
    Region string
    S3 region, e.g. us-west-2. Either region or endpoint must be set. If both are set, the endpoint is used.
    destination String
    S3 destination, e.g., s3://my-bucket/some-prefix. You must configure the cluster with an instance profile, and the instance profile must have write access to the destination. You cannot use AWS keys.
    cannedAcl String
    Set canned access control list, e.g. bucket-owner-full-control. If canned_acl is set, the cluster instance profile must have s3:PutObjectAcl permission on the destination bucket and prefix. The full list of possible canned ACLs can be found here. By default, only the object owner gets full control. If you are using a cross-account role for writing data, you may want to set bucket-owner-full-control to make bucket owners able to read the logs.
    enableEncryption Boolean
    Enable server-side encryption, false by default.
    encryptionType String
    The encryption type; it can be sse-s3 or sse-kms. It is used only when encryption is enabled, and the default type is sse-s3.
    endpoint String
    S3 endpoint, e.g. https://s3-us-west-2.amazonaws.com. Either region or endpoint needs to be set. If both are set, the endpoint is used.
    kmsKey String
    KMS key used if encryption is enabled and encryption type is set to sse-kms.
    region String
    S3 region, e.g. us-west-2. Either region or endpoint must be set. If both are set, the endpoint is used.
    destination string
    S3 destination, e.g., s3://my-bucket/some-prefix. You must configure the cluster with an instance profile, and the instance profile must have write access to the destination. You cannot use AWS keys.
    cannedAcl string
    Set canned access control list, e.g. bucket-owner-full-control. If canned_acl is set, the cluster instance profile must have s3:PutObjectAcl permission on the destination bucket and prefix. The full list of possible canned ACLs can be found here. By default, only the object owner gets full control. If you are using a cross-account role for writing data, you may want to set bucket-owner-full-control to make bucket owners able to read the logs.
    enableEncryption boolean
    Enable server-side encryption, false by default.
    encryptionType string
    The encryption type; it can be sse-s3 or sse-kms. It is used only when encryption is enabled, and the default type is sse-s3.
    endpoint string
    S3 endpoint, e.g. https://s3-us-west-2.amazonaws.com. Either region or endpoint needs to be set. If both are set, the endpoint is used.
    kmsKey string
    KMS key used if encryption is enabled and encryption type is set to sse-kms.
    region string
    S3 region, e.g. us-west-2. Either region or endpoint must be set. If both are set, the endpoint is used.
    destination str
    S3 destination, e.g., s3://my-bucket/some-prefix. You must configure the cluster with an instance profile, and the instance profile must have write access to the destination. You cannot use AWS keys.
    canned_acl str
    Set canned access control list, e.g. bucket-owner-full-control. If canned_acl is set, the cluster instance profile must have s3:PutObjectAcl permission on the destination bucket and prefix. The full list of possible canned ACLs can be found here. By default, only the object owner gets full control. If you are using a cross-account role for writing data, you may want to set bucket-owner-full-control to make bucket owners able to read the logs.
    enable_encryption bool
    Enable server-side encryption, false by default.
    encryption_type str
    The encryption type; it can be sse-s3 or sse-kms. It is used only when encryption is enabled, and the default type is sse-s3.
    endpoint str
    S3 endpoint, e.g. https://s3-us-west-2.amazonaws.com. Either region or endpoint needs to be set. If both are set, the endpoint is used.
    kms_key str
    KMS key used if encryption is enabled and encryption type is set to sse-kms.
    region str
    S3 region, e.g. us-west-2. Either region or endpoint must be set. If both are set, the endpoint is used.
    destination String
    S3 destination, e.g., s3://my-bucket/some-prefix. You must configure the cluster with an instance profile, and the instance profile must have write access to the destination. You cannot use AWS keys.
    cannedAcl String
    Set canned access control list, e.g. bucket-owner-full-control. If canned_acl is set, the cluster instance profile must have s3:PutObjectAcl permission on the destination bucket and prefix. The full list of possible canned ACLs can be found here. By default, only the object owner gets full control. If you are using a cross-account role for writing data, you may want to set bucket-owner-full-control to make bucket owners able to read the logs.
    enableEncryption Boolean
    Enable server-side encryption, false by default.
    encryptionType String
    The encryption type; it can be sse-s3 or sse-kms. It is used only when encryption is enabled, and the default type is sse-s3.
    endpoint String
    S3 endpoint, e.g. https://s3-us-west-2.amazonaws.com. Either region or endpoint needs to be set. If both are set, the endpoint is used.
    kmsKey String
    KMS key used if encryption is enabled and encryption type is set to sse-kms.
    region String
    S3 region, e.g. us-west-2. Either region or endpoint must be set. If both are set, the endpoint is used.
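
    As a minimal sketch (TypeScript, with assumed runtime, node type, bucket, and instance profile ARN values), delivering cluster logs to S3 through an instance profile might look like this:

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";

    // Hypothetical cluster writing logs to S3 via an instance profile (no AWS keys).
    const s3LoggedCluster = new databricks.Cluster("s3LoggedCluster", {
        clusterName: "S3 Logged Cluster",
        sparkVersion: "14.3.x-scala2.12", // assumed runtime
        nodeTypeId: "i3.xlarge",          // assumed node type
        numWorkers: 2,
        awsAttributes: {
            // assumed instance profile that has write access to the destination bucket
            instanceProfileArn: "arn:aws:iam::123456789012:instance-profile/cluster-log-writer",
        },
        clusterLogConf: {
            s3: {
                destination: "s3://my-bucket/cluster-logs", // assumed bucket/prefix
                region: "us-west-2",
                cannedAcl: "bucket-owner-full-control",
                enableEncryption: true,                     // server-side encryption; default type is sse-s3 when enabled
            },
        },
    });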

    ClusterClusterMountInfo, ClusterClusterMountInfoArgs

    LocalMountDirPath string

    path inside the Spark container.

    For example, you can mount an Azure Data Lake Storage container using the following code:

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";
    

    const storageAccount = "ewfw3ggwegwg"; const storageContainer = "test"; const withNfs = new databricks.Cluster("withNfs", {clusterMountInfos: [{ localMountDirPath: "/mnt/nfs-test", networkFilesystemInfo: { mountOptions: "sec=sys,vers=3,nolock,proto=tcp", serverAddress: ${storageAccount}.blob.core.windows.net, }, remoteMountDirPath: ${storageAccount}/${storageContainer}, }]});

    import pulumi
    import pulumi_databricks as databricks
    
    storage_account = "ewfw3ggwegwg"
    storage_container = "test"
    with_nfs = databricks.Cluster("withNfs", cluster_mount_infos=[databricks.ClusterClusterMountInfoArgs(
        local_mount_dir_path="/mnt/nfs-test",
        network_filesystem_info=databricks.ClusterClusterMountInfoNetworkFilesystemInfoArgs(
            mount_options="sec=sys,vers=3,nolock,proto=tcp",
            server_address=f"{storage_account}.blob.core.windows.net",
        ),
        remote_mount_dir_path=f"{storage_account}/{storage_container}",
    )])
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Databricks = Pulumi.Databricks;
    
    return await Deployment.RunAsync(() => 
    {
        var storageAccount = "ewfw3ggwegwg";
    
        var storageContainer = "test";
    
        var withNfs = new Databricks.Cluster("withNfs", new()
        {
            ClusterMountInfos = new[]
            {
                new Databricks.Inputs.ClusterClusterMountInfoArgs
                {
                    LocalMountDirPath = "/mnt/nfs-test",
                    NetworkFilesystemInfo = new Databricks.Inputs.ClusterClusterMountInfoNetworkFilesystemInfoArgs
                    {
                        MountOptions = "sec=sys,vers=3,nolock,proto=tcp",
                        ServerAddress = $"{storageAccount}.blob.core.windows.net",
                    },
                    RemoteMountDirPath = $"{storageAccount}/{storageContainer}",
                },
            },
        });
    
    });
    
    package main
    
    import (
    	"fmt"
    
    	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		storageAccount := "ewfw3ggwegwg"
    		storageContainer := "test"
    		_, err := databricks.NewCluster(ctx, "withNfs", &databricks.ClusterArgs{
    			ClusterMountInfos: databricks.ClusterClusterMountInfoArray{
    				&databricks.ClusterClusterMountInfoArgs{
    					LocalMountDirPath: pulumi.String("/mnt/nfs-test"),
    					NetworkFilesystemInfo: &databricks.ClusterClusterMountInfoNetworkFilesystemInfoArgs{
    						MountOptions:  pulumi.String("sec=sys,vers=3,nolock,proto=tcp"),
    						ServerAddress: pulumi.String(fmt.Sprintf("%v.blob.core.windows.net", storageAccount)),
    					},
    					RemoteMountDirPath: pulumi.String(fmt.Sprintf("%v/%v", storageAccount, storageContainer)),
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.databricks.Cluster;
    import com.pulumi.databricks.ClusterArgs;
    import com.pulumi.databricks.inputs.ClusterClusterMountInfoArgs;
    import com.pulumi.databricks.inputs.ClusterClusterMountInfoNetworkFilesystemInfoArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            final var storageAccount = "ewfw3ggwegwg";
    
            final var storageContainer = "test";
    
            var withNfs = new Cluster("withNfs", ClusterArgs.builder()        
                .clusterMountInfos(ClusterClusterMountInfoArgs.builder()
                    .localMountDirPath("/mnt/nfs-test")
                    .networkFilesystemInfo(ClusterClusterMountInfoNetworkFilesystemInfoArgs.builder()
                        .mountOptions("sec=sys,vers=3,nolock,proto=tcp")
                        .serverAddress(String.format("%s.blob.core.windows.net", storageAccount))
                        .build())
                    .remoteMountDirPath(String.format("%s/%s", storageAccount,storageContainer))
                    .build())
                .build());
    
        }
    }
    
    resources:
      withNfs:
        type: databricks:Cluster
        properties:
          # ...
          clusterMountInfos:
            - localMountDirPath: /mnt/nfs-test
              networkFilesystemInfo:
                mountOptions: sec=sys,vers=3,nolock,proto=tcp
                serverAddress: ${storageAccount}.blob.core.windows.net
              remoteMountDirPath: ${storageAccount}/${storageContainer}
    variables:
      storageAccount: ewfw3ggwegwg
      storageContainer: test
    
    NetworkFilesystemInfo ClusterClusterMountInfoNetworkFilesystemInfo
    block specifying the connection. It consists of:
    RemoteMountDirPath string
    string specifying path to mount on the remote service.
    LocalMountDirPath string

    path inside the Spark container.

    For example, you can mount an Azure Data Lake Storage container using the following code:

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";
    

    const storageAccount = "ewfw3ggwegwg"; const storageContainer = "test"; const withNfs = new databricks.Cluster("withNfs", {clusterMountInfos: [{ localMountDirPath: "/mnt/nfs-test", networkFilesystemInfo: { mountOptions: "sec=sys,vers=3,nolock,proto=tcp", serverAddress: ${storageAccount}.blob.core.windows.net, }, remoteMountDirPath: ${storageAccount}/${storageContainer}, }]});

    import pulumi
    import pulumi_databricks as databricks
    
    storage_account = "ewfw3ggwegwg"
    storage_container = "test"
    with_nfs = databricks.Cluster("withNfs", cluster_mount_infos=[databricks.ClusterClusterMountInfoArgs(
        local_mount_dir_path="/mnt/nfs-test",
        network_filesystem_info=databricks.ClusterClusterMountInfoNetworkFilesystemInfoArgs(
            mount_options="sec=sys,vers=3,nolock,proto=tcp",
            server_address=f"{storage_account}.blob.core.windows.net",
        ),
        remote_mount_dir_path=f"{storage_account}/{storage_container}",
    )])
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Databricks = Pulumi.Databricks;
    
    return await Deployment.RunAsync(() => 
    {
        var storageAccount = "ewfw3ggwegwg";
    
        var storageContainer = "test";
    
        var withNfs = new Databricks.Cluster("withNfs", new()
        {
            ClusterMountInfos = new[]
            {
                new Databricks.Inputs.ClusterClusterMountInfoArgs
                {
                    LocalMountDirPath = "/mnt/nfs-test",
                    NetworkFilesystemInfo = new Databricks.Inputs.ClusterClusterMountInfoNetworkFilesystemInfoArgs
                    {
                        MountOptions = "sec=sys,vers=3,nolock,proto=tcp",
                        ServerAddress = $"{storageAccount}.blob.core.windows.net",
                    },
                    RemoteMountDirPath = $"{storageAccount}/{storageContainer}",
                },
            },
        });
    
    });
    
    package main
    
    import (
    	"fmt"
    
    	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		storageAccount := "ewfw3ggwegwg"
    		storageContainer := "test"
    		_, err := databricks.NewCluster(ctx, "withNfs", &databricks.ClusterArgs{
    			ClusterMountInfos: databricks.ClusterClusterMountInfoArray{
    				&databricks.ClusterClusterMountInfoArgs{
    					LocalMountDirPath: pulumi.String("/mnt/nfs-test"),
    					NetworkFilesystemInfo: &databricks.ClusterClusterMountInfoNetworkFilesystemInfoArgs{
    						MountOptions:  pulumi.String("sec=sys,vers=3,nolock,proto=tcp"),
    						ServerAddress: pulumi.String(fmt.Sprintf("%v.blob.core.windows.net", storageAccount)),
    					},
    					RemoteMountDirPath: pulumi.String(fmt.Sprintf("%v/%v", storageAccount, storageContainer)),
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.databricks.Cluster;
    import com.pulumi.databricks.ClusterArgs;
    import com.pulumi.databricks.inputs.ClusterClusterMountInfoArgs;
    import com.pulumi.databricks.inputs.ClusterClusterMountInfoNetworkFilesystemInfoArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            final var storageAccount = "ewfw3ggwegwg";
    
            final var storageContainer = "test";
    
            var withNfs = new Cluster("withNfs", ClusterArgs.builder()        
                .clusterMountInfos(ClusterClusterMountInfoArgs.builder()
                    .localMountDirPath("/mnt/nfs-test")
                    .networkFilesystemInfo(ClusterClusterMountInfoNetworkFilesystemInfoArgs.builder()
                        .mountOptions("sec=sys,vers=3,nolock,proto=tcp")
                        .serverAddress(String.format("%s.blob.core.windows.net", storageAccount))
                        .build())
                    .remoteMountDirPath(String.format("%s/%s", storageAccount,storageContainer))
                    .build())
                .build());
    
        }
    }
    
    resources:
      withNfs:
        type: databricks:Cluster
        properties:
          # ...
          clusterMountInfos:
            - localMountDirPath: /mnt/nfs-test
              networkFilesystemInfo:
                mountOptions: sec=sys,vers=3,nolock,proto=tcp
                serverAddress: ${storageAccount}.blob.core.windows.net
              remoteMountDirPath: ${storageAccount}/${storageContainer}
    variables:
      storageAccount: ewfw3ggwegwg
      storageContainer: test
    
    NetworkFilesystemInfo ClusterClusterMountInfoNetworkFilesystemInfo
    block specifying the connection. It consists of:
    RemoteMountDirPath string
    string specifying path to mount on the remote service.
    localMountDirPath String

    path inside the Spark container.

    For example, you can mount an Azure Data Lake Storage container using the following code:

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";
    

    const storageAccount = "ewfw3ggwegwg"; const storageContainer = "test"; const withNfs = new databricks.Cluster("withNfs", {clusterMountInfos: [{ localMountDirPath: "/mnt/nfs-test", networkFilesystemInfo: { mountOptions: "sec=sys,vers=3,nolock,proto=tcp", serverAddress: ${storageAccount}.blob.core.windows.net, }, remoteMountDirPath: ${storageAccount}/${storageContainer}, }]});

    import pulumi
    import pulumi_databricks as databricks
    
    storage_account = "ewfw3ggwegwg"
    storage_container = "test"
    with_nfs = databricks.Cluster("withNfs", cluster_mount_infos=[databricks.ClusterClusterMountInfoArgs(
        local_mount_dir_path="/mnt/nfs-test",
        network_filesystem_info=databricks.ClusterClusterMountInfoNetworkFilesystemInfoArgs(
            mount_options="sec=sys,vers=3,nolock,proto=tcp",
            server_address=f"{storage_account}.blob.core.windows.net",
        ),
        remote_mount_dir_path=f"{storage_account}/{storage_container}",
    )])
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Databricks = Pulumi.Databricks;
    
    return await Deployment.RunAsync(() => 
    {
        var storageAccount = "ewfw3ggwegwg";
    
        var storageContainer = "test";
    
        var withNfs = new Databricks.Cluster("withNfs", new()
        {
            ClusterMountInfos = new[]
            {
                new Databricks.Inputs.ClusterClusterMountInfoArgs
                {
                    LocalMountDirPath = "/mnt/nfs-test",
                    NetworkFilesystemInfo = new Databricks.Inputs.ClusterClusterMountInfoNetworkFilesystemInfoArgs
                    {
                        MountOptions = "sec=sys,vers=3,nolock,proto=tcp",
                        ServerAddress = $"{storageAccount}.blob.core.windows.net",
                    },
                    RemoteMountDirPath = $"{storageAccount}/{storageContainer}",
                },
            },
        });
    
    });
    
    package main
    
    import (
    	"fmt"
    
    	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		storageAccount := "ewfw3ggwegwg"
    		storageContainer := "test"
    		_, err := databricks.NewCluster(ctx, "withNfs", &databricks.ClusterArgs{
    			ClusterMountInfos: databricks.ClusterClusterMountInfoArray{
    				&databricks.ClusterClusterMountInfoArgs{
    					LocalMountDirPath: pulumi.String("/mnt/nfs-test"),
    					NetworkFilesystemInfo: &databricks.ClusterClusterMountInfoNetworkFilesystemInfoArgs{
    						MountOptions:  pulumi.String("sec=sys,vers=3,nolock,proto=tcp"),
    						ServerAddress: pulumi.String(fmt.Sprintf("%v.blob.core.windows.net", storageAccount)),
    					},
    					RemoteMountDirPath: pulumi.String(fmt.Sprintf("%v/%v", storageAccount, storageContainer)),
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.databricks.Cluster;
    import com.pulumi.databricks.ClusterArgs;
    import com.pulumi.databricks.inputs.ClusterClusterMountInfoArgs;
    import com.pulumi.databricks.inputs.ClusterClusterMountInfoNetworkFilesystemInfoArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            final var storageAccount = "ewfw3ggwegwg";
    
            final var storageContainer = "test";
    
            var withNfs = new Cluster("withNfs", ClusterArgs.builder()        
                .clusterMountInfos(ClusterClusterMountInfoArgs.builder()
                    .localMountDirPath("/mnt/nfs-test")
                    .networkFilesystemInfo(ClusterClusterMountInfoNetworkFilesystemInfoArgs.builder()
                        .mountOptions("sec=sys,vers=3,nolock,proto=tcp")
                        .serverAddress(String.format("%s.blob.core.windows.net", storageAccount))
                        .build())
                    .remoteMountDirPath(String.format("%s/%s", storageAccount,storageContainer))
                    .build())
                .build());
    
        }
    }
    
    resources:
      withNfs:
        type: databricks:Cluster
        properties:
          # ...
          clusterMountInfos:
            - localMountDirPath: /mnt/nfs-test
              networkFilesystemInfo:
                mountOptions: sec=sys,vers=3,nolock,proto=tcp
                serverAddress: ${storageAccount}.blob.core.windows.net
              remoteMountDirPath: ${storageAccount}/${storageContainer}
    variables:
      storageAccount: ewfw3ggwegwg
      storageContainer: test
    
    networkFilesystemInfo ClusterClusterMountInfoNetworkFilesystemInfo
    block specifying the connection. It consists of:
    remoteMountDirPath String
    string specifying path to mount on the remote service.
    localMountDirPath string

    path inside the Spark container.

    For example, you can mount an Azure Data Lake Storage container using the following code:

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";
    

    const storageAccount = "ewfw3ggwegwg"; const storageContainer = "test"; const withNfs = new databricks.Cluster("withNfs", {clusterMountInfos: [{ localMountDirPath: "/mnt/nfs-test", networkFilesystemInfo: { mountOptions: "sec=sys,vers=3,nolock,proto=tcp", serverAddress: ${storageAccount}.blob.core.windows.net, }, remoteMountDirPath: ${storageAccount}/${storageContainer}, }]});

    import pulumi
    import pulumi_databricks as databricks
    
    storage_account = "ewfw3ggwegwg"
    storage_container = "test"
    with_nfs = databricks.Cluster("withNfs", cluster_mount_infos=[databricks.ClusterClusterMountInfoArgs(
        local_mount_dir_path="/mnt/nfs-test",
        network_filesystem_info=databricks.ClusterClusterMountInfoNetworkFilesystemInfoArgs(
            mount_options="sec=sys,vers=3,nolock,proto=tcp",
            server_address=f"{storage_account}.blob.core.windows.net",
        ),
        remote_mount_dir_path=f"{storage_account}/{storage_container}",
    )])
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Databricks = Pulumi.Databricks;
    
    return await Deployment.RunAsync(() => 
    {
        var storageAccount = "ewfw3ggwegwg";
    
        var storageContainer = "test";
    
        var withNfs = new Databricks.Cluster("withNfs", new()
        {
            ClusterMountInfos = new[]
            {
                new Databricks.Inputs.ClusterClusterMountInfoArgs
                {
                    LocalMountDirPath = "/mnt/nfs-test",
                    NetworkFilesystemInfo = new Databricks.Inputs.ClusterClusterMountInfoNetworkFilesystemInfoArgs
                    {
                        MountOptions = "sec=sys,vers=3,nolock,proto=tcp",
                        ServerAddress = $"{storageAccount}.blob.core.windows.net",
                    },
                    RemoteMountDirPath = $"{storageAccount}/{storageContainer}",
                },
            },
        });
    
    });
    
    package main
    
    import (
    	"fmt"
    
    	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		storageAccount := "ewfw3ggwegwg"
    		storageContainer := "test"
    		_, err := databricks.NewCluster(ctx, "withNfs", &databricks.ClusterArgs{
    			ClusterMountInfos: databricks.ClusterClusterMountInfoArray{
    				&databricks.ClusterClusterMountInfoArgs{
    					LocalMountDirPath: pulumi.String("/mnt/nfs-test"),
    					NetworkFilesystemInfo: &databricks.ClusterClusterMountInfoNetworkFilesystemInfoArgs{
    						MountOptions:  pulumi.String("sec=sys,vers=3,nolock,proto=tcp"),
    						ServerAddress: pulumi.String(fmt.Sprintf("%v.blob.core.windows.net", storageAccount)),
    					},
    					RemoteMountDirPath: pulumi.String(fmt.Sprintf("%v/%v", storageAccount, storageContainer)),
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.databricks.Cluster;
    import com.pulumi.databricks.ClusterArgs;
    import com.pulumi.databricks.inputs.ClusterClusterMountInfoArgs;
    import com.pulumi.databricks.inputs.ClusterClusterMountInfoNetworkFilesystemInfoArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            final var storageAccount = "ewfw3ggwegwg";
    
            final var storageContainer = "test";
    
            var withNfs = new Cluster("withNfs", ClusterArgs.builder()        
                .clusterMountInfos(ClusterClusterMountInfoArgs.builder()
                    .localMountDirPath("/mnt/nfs-test")
                    .networkFilesystemInfo(ClusterClusterMountInfoNetworkFilesystemInfoArgs.builder()
                        .mountOptions("sec=sys,vers=3,nolock,proto=tcp")
                        .serverAddress(String.format("%s.blob.core.windows.net", storageAccount))
                        .build())
                    .remoteMountDirPath(String.format("%s/%s", storageAccount,storageContainer))
                    .build())
                .build());
    
        }
    }
    
    resources:
      withNfs:
        type: databricks:Cluster
        properties:
          # ...
          clusterMountInfos:
            - localMountDirPath: /mnt/nfs-test
              networkFilesystemInfo:
                mountOptions: sec=sys,vers=3,nolock,proto=tcp
                serverAddress: ${storageAccount}.blob.core.windows.net
              remoteMountDirPath: ${storageAccount}/${storageContainer}
    variables:
      storageAccount: ewfw3ggwegwg
      storageContainer: test
    
    networkFilesystemInfo ClusterClusterMountInfoNetworkFilesystemInfo
    block specifying the connection. It consists of:
    remoteMountDirPath string
    string specifying path to mount on the remote service.
    local_mount_dir_path str

    path inside the Spark container.

    For example, you can mount an Azure Data Lake Storage container using the following code:

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";
    

    const storageAccount = "ewfw3ggwegwg"; const storageContainer = "test"; const withNfs = new databricks.Cluster("withNfs", {clusterMountInfos: [{ localMountDirPath: "/mnt/nfs-test", networkFilesystemInfo: { mountOptions: "sec=sys,vers=3,nolock,proto=tcp", serverAddress: ${storageAccount}.blob.core.windows.net, }, remoteMountDirPath: ${storageAccount}/${storageContainer}, }]});

    import pulumi
    import pulumi_databricks as databricks
    
    storage_account = "ewfw3ggwegwg"
    storage_container = "test"
    with_nfs = databricks.Cluster("withNfs", cluster_mount_infos=[databricks.ClusterClusterMountInfoArgs(
        local_mount_dir_path="/mnt/nfs-test",
        network_filesystem_info=databricks.ClusterClusterMountInfoNetworkFilesystemInfoArgs(
            mount_options="sec=sys,vers=3,nolock,proto=tcp",
            server_address=f"{storage_account}.blob.core.windows.net",
        ),
        remote_mount_dir_path=f"{storage_account}/{storage_container}",
    )])
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Databricks = Pulumi.Databricks;
    
    return await Deployment.RunAsync(() => 
    {
        var storageAccount = "ewfw3ggwegwg";
    
        var storageContainer = "test";
    
        var withNfs = new Databricks.Cluster("withNfs", new()
        {
            ClusterMountInfos = new[]
            {
                new Databricks.Inputs.ClusterClusterMountInfoArgs
                {
                    LocalMountDirPath = "/mnt/nfs-test",
                    NetworkFilesystemInfo = new Databricks.Inputs.ClusterClusterMountInfoNetworkFilesystemInfoArgs
                    {
                        MountOptions = "sec=sys,vers=3,nolock,proto=tcp",
                        ServerAddress = $"{storageAccount}.blob.core.windows.net",
                    },
                    RemoteMountDirPath = $"{storageAccount}/{storageContainer}",
                },
            },
        });
    
    });
    
    package main
    
    import (
    	"fmt"
    
    	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		storageAccount := "ewfw3ggwegwg"
    		storageContainer := "test"
    		_, err := databricks.NewCluster(ctx, "withNfs", &databricks.ClusterArgs{
    			ClusterMountInfos: databricks.ClusterClusterMountInfoArray{
    				&databricks.ClusterClusterMountInfoArgs{
    					LocalMountDirPath: pulumi.String("/mnt/nfs-test"),
    					NetworkFilesystemInfo: &databricks.ClusterClusterMountInfoNetworkFilesystemInfoArgs{
    						MountOptions:  pulumi.String("sec=sys,vers=3,nolock,proto=tcp"),
    						ServerAddress: pulumi.String(fmt.Sprintf("%v.blob.core.windows.net", storageAccount)),
    					},
    					RemoteMountDirPath: pulumi.String(fmt.Sprintf("%v/%v", storageAccount, storageContainer)),
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.databricks.Cluster;
    import com.pulumi.databricks.ClusterArgs;
    import com.pulumi.databricks.inputs.ClusterClusterMountInfoArgs;
    import com.pulumi.databricks.inputs.ClusterClusterMountInfoNetworkFilesystemInfoArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            final var storageAccount = "ewfw3ggwegwg";
    
            final var storageContainer = "test";
    
            var withNfs = new Cluster("withNfs", ClusterArgs.builder()        
                .clusterMountInfos(ClusterClusterMountInfoArgs.builder()
                    .localMountDirPath("/mnt/nfs-test")
                    .networkFilesystemInfo(ClusterClusterMountInfoNetworkFilesystemInfoArgs.builder()
                        .mountOptions("sec=sys,vers=3,nolock,proto=tcp")
                        .serverAddress(String.format("%s.blob.core.windows.net", storageAccount))
                        .build())
                    .remoteMountDirPath(String.format("%s/%s", storageAccount,storageContainer))
                    .build())
                .build());
    
        }
    }
    
    resources:
      withNfs:
        type: databricks:Cluster
        properties:
          # ...
          clusterMountInfos:
            - localMountDirPath: /mnt/nfs-test
              networkFilesystemInfo:
                mountOptions: sec=sys,vers=3,nolock,proto=tcp
                serverAddress: ${storageAccount}.blob.core.windows.net
              remoteMountDirPath: ${storageAccount}/${storageContainer}
    variables:
      storageAccount: ewfw3ggwegwg
      storageContainer: test
    
    network_filesystem_info ClusterClusterMountInfoNetworkFilesystemInfo
    block specifying the connection. It consists of:
    remote_mount_dir_path str
    string specifying path to mount on the remote service.
    localMountDirPath String

    path inside the Spark container.

    For example, you can mount an Azure Data Lake Storage container using the following code:

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";
    

    const storageAccount = "ewfw3ggwegwg"; const storageContainer = "test"; const withNfs = new databricks.Cluster("withNfs", {clusterMountInfos: [{ localMountDirPath: "/mnt/nfs-test", networkFilesystemInfo: { mountOptions: "sec=sys,vers=3,nolock,proto=tcp", serverAddress: ${storageAccount}.blob.core.windows.net, }, remoteMountDirPath: ${storageAccount}/${storageContainer}, }]});

    import pulumi
    import pulumi_databricks as databricks
    
    storage_account = "ewfw3ggwegwg"
    storage_container = "test"
    with_nfs = databricks.Cluster("withNfs", cluster_mount_infos=[databricks.ClusterClusterMountInfoArgs(
        local_mount_dir_path="/mnt/nfs-test",
        network_filesystem_info=databricks.ClusterClusterMountInfoNetworkFilesystemInfoArgs(
            mount_options="sec=sys,vers=3,nolock,proto=tcp",
            server_address=f"{storage_account}.blob.core.windows.net",
        ),
        remote_mount_dir_path=f"{storage_account}/{storage_container}",
    )])
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Databricks = Pulumi.Databricks;
    
    return await Deployment.RunAsync(() => 
    {
        var storageAccount = "ewfw3ggwegwg";
    
        var storageContainer = "test";
    
        var withNfs = new Databricks.Cluster("withNfs", new()
        {
            ClusterMountInfos = new[]
            {
                new Databricks.Inputs.ClusterClusterMountInfoArgs
                {
                    LocalMountDirPath = "/mnt/nfs-test",
                    NetworkFilesystemInfo = new Databricks.Inputs.ClusterClusterMountInfoNetworkFilesystemInfoArgs
                    {
                        MountOptions = "sec=sys,vers=3,nolock,proto=tcp",
                        ServerAddress = $"{storageAccount}.blob.core.windows.net",
                    },
                    RemoteMountDirPath = $"{storageAccount}/{storageContainer}",
                },
            },
        });
    
    });
    
    package main
    
    import (
    	"fmt"
    
    	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		storageAccount := "ewfw3ggwegwg"
    		storageContainer := "test"
    		_, err := databricks.NewCluster(ctx, "withNfs", &databricks.ClusterArgs{
    			ClusterMountInfos: databricks.ClusterClusterMountInfoArray{
    				&databricks.ClusterClusterMountInfoArgs{
    					LocalMountDirPath: pulumi.String("/mnt/nfs-test"),
    					NetworkFilesystemInfo: &databricks.ClusterClusterMountInfoNetworkFilesystemInfoArgs{
    						MountOptions:  pulumi.String("sec=sys,vers=3,nolock,proto=tcp"),
    						ServerAddress: pulumi.String(fmt.Sprintf("%v.blob.core.windows.net", storageAccount)),
    					},
    					RemoteMountDirPath: pulumi.String(fmt.Sprintf("%v/%v", storageAccount, storageContainer)),
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.databricks.Cluster;
    import com.pulumi.databricks.ClusterArgs;
    import com.pulumi.databricks.inputs.ClusterClusterMountInfoArgs;
    import com.pulumi.databricks.inputs.ClusterClusterMountInfoNetworkFilesystemInfoArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            final var storageAccount = "ewfw3ggwegwg";
    
            final var storageContainer = "test";
    
            var withNfs = new Cluster("withNfs", ClusterArgs.builder()        
                .clusterMountInfos(ClusterClusterMountInfoArgs.builder()
                    .localMountDirPath("/mnt/nfs-test")
                    .networkFilesystemInfo(ClusterClusterMountInfoNetworkFilesystemInfoArgs.builder()
                        .mountOptions("sec=sys,vers=3,nolock,proto=tcp")
                        .serverAddress(String.format("%s.blob.core.windows.net", storageAccount))
                        .build())
                    .remoteMountDirPath(String.format("%s/%s", storageAccount,storageContainer))
                    .build())
                .build());
    
        }
    }
    
    resources:
      withNfs:
        type: databricks:Cluster
        properties:
          # ...
          clusterMountInfos:
            - localMountDirPath: /mnt/nfs-test
              networkFilesystemInfo:
                mountOptions: sec=sys,vers=3,nolock,proto=tcp
                serverAddress: ${storageAccount}.blob.core.windows.net
              remoteMountDirPath: ${storageAccount}/${storageContainer}
    variables:
      storageAccount: ewfw3ggwegwg
      storageContainer: test
    
    networkFilesystemInfo Property Map
    block specifying the connection. It consists of:
    remoteMountDirPath String
    string specifying path to mount on the remote service.

    ClusterClusterMountInfoNetworkFilesystemInfo, ClusterClusterMountInfoNetworkFilesystemInfoArgs

    ServerAddress string
    host name.
    MountOptions string
    string of options that will be passed to the mount command.
    ServerAddress string
    host name.
    MountOptions string
    string of options that will be passed to the mount command.
    serverAddress String
    host name.
    mountOptions String
    string of options that will be passed to the mount command.
    serverAddress string
    host name.
    mountOptions string
    string of options that will be passed to the mount command.
    server_address str
    host name.
    mount_options str
    string of options that will be passed to the mount command.
    serverAddress String
    host name.
    mountOptions String
    string of options that will be passed to the mount command.

    ClusterDockerImage, ClusterDockerImageArgs

    Url string
    URL for the Docker image
    BasicAuth ClusterDockerImageBasicAuth

    basic_auth.username and basic_auth.password for Docker repository. Docker registry credentials are encrypted when they are stored in Databricks internal storage and when they are passed to a registry upon fetching Docker images at cluster launch. However, other authenticated and authorized API users of this workspace can access the username and password.

    Example usage with azurerm_container_registry, which you can adapt to your specific use case:

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";
    import * as docker from "@pulumi/docker";
    

    const thisdocker_registry_image = new docker.index.Docker_registry_image("thisdocker_registry_image", {
        name: `${azurerm_container_registry["this"].login_server}/sample:latest`,
        build: [{}],
    });
    const thisCluster = new databricks.Cluster("thisCluster", {dockerImage: {
        url: thisdocker_registry_image.name,
        basicAuth: {
            username: azurerm_container_registry["this"].admin_username,
            password: azurerm_container_registry["this"].admin_password,
        },
    }});

    import pulumi
    import pulumi_databricks as databricks
    import pulumi_docker as docker
    
    thisdocker_registry_image = docker.index.Docker_registry_image("thisdocker_registry_image",
        name=f"{azurerm_container_registry.this.login_server}/sample:latest",
        build=[{}])
    this_cluster = databricks.Cluster("thisCluster", docker_image=databricks.ClusterDockerImageArgs(
        url=thisdocker_registry_image["name"],
        basic_auth=databricks.ClusterDockerImageBasicAuthArgs(
            username=azurerm_container_registry["this"]["admin_username"],
            password=azurerm_container_registry["this"]["admin_password"],
        ),
    ))
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Databricks = Pulumi.Databricks;
    using Docker = Pulumi.Docker;
    
    return await Deployment.RunAsync(() => 
    {
        var thisdocker_registry_image = new Docker.Index.Docker_registry_image("thisdocker_registry_image", new()
        {
            Name = $"{azurerm_container_registry.This.Login_server}/sample:latest",
            Build = new[]
            {
                null,
            },
        });
    
        var thisCluster = new Databricks.Cluster("thisCluster", new()
        {
            DockerImage = new Databricks.Inputs.ClusterDockerImageArgs
            {
                Url = thisdocker_registry_image.Name,
                BasicAuth = new Databricks.Inputs.ClusterDockerImageBasicAuthArgs
                {
                    Username = azurerm_container_registry.This.Admin_username,
                    Password = azurerm_container_registry.This.Admin_password,
                },
            },
        });
    
    });
    
    package main
    
    import (
    	"fmt"
    
    	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
    	"github.com/pulumi/pulumi-docker/sdk/v4/go/docker"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		thisdocker_registry_image, err := docker.NewDocker_registry_image(ctx, "thisdocker_registry_image", &docker.Docker_registry_imageArgs{
    			Name: fmt.Sprintf("%v/sample:latest", azurerm_container_registry.This.Login_server),
    			Build: []map[string]interface{}{
    				nil,
    			},
    		})
    		if err != nil {
    			return err
    		}
    		_, err = databricks.NewCluster(ctx, "thisCluster", &databricks.ClusterArgs{
    			DockerImage: &databricks.ClusterDockerImageArgs{
    				Url: thisdocker_registry_image.Name,
    				BasicAuth: &databricks.ClusterDockerImageBasicAuthArgs{
    					Username: pulumi.Any(azurerm_container_registry.This.Admin_username),
    					Password: pulumi.Any(azurerm_container_registry.This.Admin_password),
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.docker.docker_registry_image;
    import com.pulumi.docker.Docker_registry_imageArgs;
    import com.pulumi.databricks.Cluster;
    import com.pulumi.databricks.ClusterArgs;
    import com.pulumi.databricks.inputs.ClusterDockerImageArgs;
    import com.pulumi.databricks.inputs.ClusterDockerImageBasicAuthArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var thisdocker_registry_image = new Docker_registry_image("thisdocker_registry_image", Docker_registry_imageArgs.builder()        
                .name(String.format("%s/sample:latest", azurerm_container_registry.this().login_server()))
                .build()
                .build());
    
            var thisCluster = new Cluster("thisCluster", ClusterArgs.builder()        
                .dockerImage(ClusterDockerImageArgs.builder()
                    .url(thisdocker_registry_image.name())
                    .basicAuth(ClusterDockerImageBasicAuthArgs.builder()
                        .username(azurerm_container_registry.this().admin_username())
                        .password(azurerm_container_registry.this().admin_password())
                        .build())
                    .build())
                .build());
    
        }
    }
    
    resources:
      thisdocker_registry_image:
        type: docker:docker_registry_image
        properties:
          name: ${azurerm_container_registry.this.login_server}/sample:latest
          build:
            - {}
      thisCluster:
        type: databricks:Cluster
        properties:
          dockerImage:
            url: ${thisdocker_registry_image.name}
            basicAuth:
              username: ${azurerm_container_registry.this.admin_username}
              password: ${azurerm_container_registry.this.admin_password}
    
    Url string
    URL for the Docker image
    BasicAuth ClusterDockerImageBasicAuth

    basic_auth.username and basic_auth.password for Docker repository. Docker registry credentials are encrypted when they are stored in Databricks internal storage and when they are passed to a registry upon fetching Docker images at cluster launch. However, other authenticated and authorized API users of this workspace can access the username and password.

    Example usage with azurerm_container_registry, which you can adapt to your specific use case:

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";
    import * as docker from "@pulumi/docker";
    

    const thisdocker_registry_image = new docker.index.Docker_registry_image("thisdocker_registry_image", {
        name: `${azurerm_container_registry["this"].login_server}/sample:latest`,
        build: [{}],
    });
    const thisCluster = new databricks.Cluster("thisCluster", {dockerImage: {
        url: thisdocker_registry_image.name,
        basicAuth: {
            username: azurerm_container_registry["this"].admin_username,
            password: azurerm_container_registry["this"].admin_password,
        },
    }});

    import pulumi
    import pulumi_databricks as databricks
    import pulumi_docker as docker
    
    thisdocker_registry_image = docker.index.Docker_registry_image("thisdocker_registry_image",
        name=f"{azurerm_container_registry.this.login_server}/sample:latest",
        build=[{}])
    this_cluster = databricks.Cluster("thisCluster", docker_image=databricks.ClusterDockerImageArgs(
        url=thisdocker_registry_image["name"],
        basic_auth=databricks.ClusterDockerImageBasicAuthArgs(
            username=azurerm_container_registry["this"]["admin_username"],
            password=azurerm_container_registry["this"]["admin_password"],
        ),
    ))
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Databricks = Pulumi.Databricks;
    using Docker = Pulumi.Docker;
    
    return await Deployment.RunAsync(() => 
    {
        var thisdocker_registry_image = new Docker.Index.Docker_registry_image("thisdocker_registry_image", new()
        {
            Name = $"{azurerm_container_registry.This.Login_server}/sample:latest",
            Build = new[]
            {
                null,
            },
        });
    
        var thisCluster = new Databricks.Cluster("thisCluster", new()
        {
            DockerImage = new Databricks.Inputs.ClusterDockerImageArgs
            {
                Url = thisdocker_registry_image.Name,
                BasicAuth = new Databricks.Inputs.ClusterDockerImageBasicAuthArgs
                {
                    Username = azurerm_container_registry.This.Admin_username,
                    Password = azurerm_container_registry.This.Admin_password,
                },
            },
        });
    
    });
    
    package main
    
    import (
    	"fmt"
    
    	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
    	"github.com/pulumi/pulumi-docker/sdk/v4/go/docker"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		thisdocker_registry_image, err := docker.NewDocker_registry_image(ctx, "thisdocker_registry_image", &docker.Docker_registry_imageArgs{
    			Name: fmt.Sprintf("%v/sample:latest", azurerm_container_registry.This.Login_server),
    			Build: []map[string]interface{}{
    				nil,
    			},
    		})
    		if err != nil {
    			return err
    		}
    		_, err = databricks.NewCluster(ctx, "thisCluster", &databricks.ClusterArgs{
    			DockerImage: &databricks.ClusterDockerImageArgs{
    				Url: thisdocker_registry_image.Name,
    				BasicAuth: &databricks.ClusterDockerImageBasicAuthArgs{
    					Username: pulumi.Any(azurerm_container_registry.This.Admin_username),
    					Password: pulumi.Any(azurerm_container_registry.This.Admin_password),
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.docker.docker_registry_image;
    import com.pulumi.docker.Docker_registry_imageArgs;
    import com.pulumi.databricks.Cluster;
    import com.pulumi.databricks.ClusterArgs;
    import com.pulumi.databricks.inputs.ClusterDockerImageArgs;
    import com.pulumi.databricks.inputs.ClusterDockerImageBasicAuthArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var thisdocker_registry_image = new Docker_registry_image("thisdocker_registry_image", Docker_registry_imageArgs.builder()        
                .name(String.format("%s/sample:latest", azurerm_container_registry.this().login_server()))
                .build()
                .build());
    
            var thisCluster = new Cluster("thisCluster", ClusterArgs.builder()        
                .dockerImage(ClusterDockerImageArgs.builder()
                    .url(thisdocker_registry_image.name())
                    .basicAuth(ClusterDockerImageBasicAuthArgs.builder()
                        .username(azurerm_container_registry.this().admin_username())
                        .password(azurerm_container_registry.this().admin_password())
                        .build())
                    .build())
                .build());
    
        }
    }
    
    resources:
      thisdocker_registry_image:
        type: docker:docker_registry_image
        properties:
          name: ${azurerm_container_registry.this.login_server}/sample:latest
          build:
            - {}
      thisCluster:
        type: databricks:Cluster
        properties:
          dockerImage:
            url: ${thisdocker_registry_image.name}
            basicAuth:
              username: ${azurerm_container_registry.this.admin_username}
              password: ${azurerm_container_registry.this.admin_password}
    
    url String
    URL for the Docker image
    basicAuth ClusterDockerImageBasicAuth

    basic_auth.username and basic_auth.password for Docker repository. Docker registry credentials are encrypted when they are stored in Databricks internal storage and when they are passed to a registry upon fetching Docker images at cluster launch. However, other authenticated and authorized API users of this workspace can access the username and password.

    Example usage with azurerm_container_registry, which you can adapt to your specific use case:

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";
    import * as docker from "@pulumi/docker";
    

    const thisdocker_registry_image = new docker.index.Docker_registry_image("thisdocker_registry_image", {
        name: `${azurerm_container_registry["this"].login_server}/sample:latest`,
        build: [{}],
    });
    const thisCluster = new databricks.Cluster("thisCluster", {dockerImage: {
        url: thisdocker_registry_image.name,
        basicAuth: {
            username: azurerm_container_registry["this"].admin_username,
            password: azurerm_container_registry["this"].admin_password,
        },
    }});

    import pulumi
    import pulumi_databricks as databricks
    import pulumi_docker as docker
    
    thisdocker_registry_image = docker.index.Docker_registry_image("thisdocker_registry_image",
        name=f"{azurerm_container_registry.this.login_server}/sample:latest",
        build=[{}])
    this_cluster = databricks.Cluster("thisCluster", docker_image=databricks.ClusterDockerImageArgs(
        url=thisdocker_registry_image["name"],
        basic_auth=databricks.ClusterDockerImageBasicAuthArgs(
            username=azurerm_container_registry["this"]["admin_username"],
            password=azurerm_container_registry["this"]["admin_password"],
        ),
    ))
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Databricks = Pulumi.Databricks;
    using Docker = Pulumi.Docker;
    
    return await Deployment.RunAsync(() => 
    {
        var thisdocker_registry_image = new Docker.Index.Docker_registry_image("thisdocker_registry_image", new()
        {
            Name = $"{azurerm_container_registry.This.Login_server}/sample:latest",
            Build = new[]
            {
                null,
            },
        });
    
        var thisCluster = new Databricks.Cluster("thisCluster", new()
        {
            DockerImage = new Databricks.Inputs.ClusterDockerImageArgs
            {
                Url = thisdocker_registry_image.Name,
                BasicAuth = new Databricks.Inputs.ClusterDockerImageBasicAuthArgs
                {
                    Username = azurerm_container_registry.This.Admin_username,
                    Password = azurerm_container_registry.This.Admin_password,
                },
            },
        });
    
    });
    
    package main
    
    import (
    	"fmt"
    
    	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
    	"github.com/pulumi/pulumi-docker/sdk/v4/go/docker"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		thisdocker_registry_image, err := docker.NewDocker_registry_image(ctx, "thisdocker_registry_image", &docker.Docker_registry_imageArgs{
    			Name: fmt.Sprintf("%v/sample:latest", azurerm_container_registry.This.Login_server),
    			Build: []map[string]interface{}{
    				nil,
    			},
    		})
    		if err != nil {
    			return err
    		}
    		_, err = databricks.NewCluster(ctx, "thisCluster", &databricks.ClusterArgs{
    			DockerImage: &databricks.ClusterDockerImageArgs{
    				Url: thisdocker_registry_image.Name,
    				BasicAuth: &databricks.ClusterDockerImageBasicAuthArgs{
    					Username: pulumi.Any(azurerm_container_registry.This.Admin_username),
    					Password: pulumi.Any(azurerm_container_registry.This.Admin_password),
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.docker.docker_registry_image;
    import com.pulumi.docker.Docker_registry_imageArgs;
    import com.pulumi.databricks.Cluster;
    import com.pulumi.databricks.ClusterArgs;
    import com.pulumi.databricks.inputs.ClusterDockerImageArgs;
    import com.pulumi.databricks.inputs.ClusterDockerImageBasicAuthArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var thisdocker_registry_image = new Docker_registry_image("thisdocker_registry_image", Docker_registry_imageArgs.builder()        
                .name(String.format("%s/sample:latest", azurerm_container_registry.this().login_server()))
                .build()
                .build());
    
            var thisCluster = new Cluster("thisCluster", ClusterArgs.builder()        
                .dockerImage(ClusterDockerImageArgs.builder()
                    .url(thisdocker_registry_image.name())
                    .basicAuth(ClusterDockerImageBasicAuthArgs.builder()
                        .username(azurerm_container_registry.this().admin_username())
                        .password(azurerm_container_registry.this().admin_password())
                        .build())
                    .build())
                .build());
    
        }
    }
    
    resources:
      thisdocker_registry_image:
        type: docker:docker_registry_image
        properties:
          name: ${azurerm_container_registry.this.login_server}/sample:latest
          build:
            - {}
      thisCluster:
        type: databricks:Cluster
        properties:
          dockerImage:
            url: ${thisdocker_registry_image.name}
            basicAuth:
              username: ${azurerm_container_registry.this.admin_username}
              password: ${azurerm_container_registry.this.admin_password}
    
    url string
    URL for the Docker image
    basicAuth ClusterDockerImageBasicAuth

    basic_auth.username and basic_auth.password for Docker repository. Docker registry credentials are encrypted when they are stored in Databricks internal storage and when they are passed to a registry upon fetching Docker images at cluster launch. However, other authenticated and authorized API users of this workspace can access the username and password.

    Example usage with azurerm_container_registry, which you can adapt to your specific use case:

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";
    import * as docker from "@pulumi/docker";
    

    const thisdocker_registry_image = new docker.index.Docker_registry_image("thisdocker_registry_image", {
        name: `${azurerm_container_registry["this"].login_server}/sample:latest`,
        build: [{}],
    });
    const thisCluster = new databricks.Cluster("thisCluster", {dockerImage: {
        url: thisdocker_registry_image.name,
        basicAuth: {
            username: azurerm_container_registry["this"].admin_username,
            password: azurerm_container_registry["this"].admin_password,
        },
    }});

    import pulumi
    import pulumi_databricks as databricks
    import pulumi_docker as docker
    
    thisdocker_registry_image = docker.index.Docker_registry_image("thisdocker_registry_image",
        name=f"{azurerm_container_registry.this.login_server}/sample:latest",
        build=[{}])
    this_cluster = databricks.Cluster("thisCluster", docker_image=databricks.ClusterDockerImageArgs(
        url=thisdocker_registry_image["name"],
        basic_auth=databricks.ClusterDockerImageBasicAuthArgs(
            username=azurerm_container_registry["this"]["admin_username"],
            password=azurerm_container_registry["this"]["admin_password"],
        ),
    ))
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Databricks = Pulumi.Databricks;
    using Docker = Pulumi.Docker;
    
    return await Deployment.RunAsync(() => 
    {
        var thisdocker_registry_image = new Docker.Index.Docker_registry_image("thisdocker_registry_image", new()
        {
            Name = $"{azurerm_container_registry.This.Login_server}/sample:latest",
            Build = new[]
            {
                null,
            },
        });
    
        var thisCluster = new Databricks.Cluster("thisCluster", new()
        {
            DockerImage = new Databricks.Inputs.ClusterDockerImageArgs
            {
                Url = thisdocker_registry_image.Name,
                BasicAuth = new Databricks.Inputs.ClusterDockerImageBasicAuthArgs
                {
                    Username = azurerm_container_registry.This.Admin_username,
                    Password = azurerm_container_registry.This.Admin_password,
                },
            },
        });
    
    });
    
    package main
    
    import (
    	"fmt"
    
    	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
    	"github.com/pulumi/pulumi-docker/sdk/v4/go/docker"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		thisdocker_registry_image, err := docker.NewDocker_registry_image(ctx, "thisdocker_registry_image", &docker.Docker_registry_imageArgs{
    			Name: fmt.Sprintf("%v/sample:latest", azurerm_container_registry.This.Login_server),
    			Build: []map[string]interface{}{
    				nil,
    			},
    		})
    		if err != nil {
    			return err
    		}
    		_, err = databricks.NewCluster(ctx, "thisCluster", &databricks.ClusterArgs{
    			DockerImage: &databricks.ClusterDockerImageArgs{
    				Url: thisdocker_registry_image.Name,
    				BasicAuth: &databricks.ClusterDockerImageBasicAuthArgs{
    					Username: pulumi.Any(azurerm_container_registry.This.Admin_username),
    					Password: pulumi.Any(azurerm_container_registry.This.Admin_password),
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.docker.docker_registry_image;
    import com.pulumi.docker.Docker_registry_imageArgs;
    import com.pulumi.databricks.Cluster;
    import com.pulumi.databricks.ClusterArgs;
    import com.pulumi.databricks.inputs.ClusterDockerImageArgs;
    import com.pulumi.databricks.inputs.ClusterDockerImageBasicAuthArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var thisdocker_registry_image = new Docker_registry_image("thisdocker_registry_image", Docker_registry_imageArgs.builder()        
                .name(String.format("%s/sample:latest", azurerm_container_registry.this().login_server()))
                .build()
                .build());
    
            var thisCluster = new Cluster("thisCluster", ClusterArgs.builder()        
                .dockerImage(ClusterDockerImageArgs.builder()
                    .url(thisdocker_registry_image.name())
                    .basicAuth(ClusterDockerImageBasicAuthArgs.builder()
                        .username(azurerm_container_registry.this().admin_username())
                        .password(azurerm_container_registry.this().admin_password())
                        .build())
                    .build())
                .build());
    
        }
    }
    
    resources:
      thisdocker_registry_image:
        type: docker:docker_registry_image
        properties:
          name: ${azurerm_container_registry.this.login_server}/sample:latest
          build:
            - {}
      thisCluster:
        type: databricks:Cluster
        properties:
          dockerImage:
            url: ${thisdocker_registry_image.name}
            basicAuth:
              username: ${azurerm_container_registry.this.admin_username}
              password: ${azurerm_container_registry.this.admin_password}
    
    url str
    URL for the Docker image
    basic_auth ClusterDockerImageBasicAuth

    basic_auth.username and basic_auth.password for Docker repository. Docker registry credentials are encrypted when they are stored in Databricks internal storage and when they are passed to a registry upon fetching Docker images at cluster launch. However, other authenticated and authorized API users of this workspace can access the username and password.

    Example usage with azurerm_container_registry, which you can adapt to your specific use case:

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";
    import * as docker from "@pulumi/docker";
    

    const thisdocker_registry_image = new docker.index.Docker_registry_image("thisdocker_registry_image", {
        name: `${azurerm_container_registry["this"].login_server}/sample:latest`,
        build: [{}],
    });
    const thisCluster = new databricks.Cluster("thisCluster", {dockerImage: {
        url: thisdocker_registry_image.name,
        basicAuth: {
            username: azurerm_container_registry["this"].admin_username,
            password: azurerm_container_registry["this"].admin_password,
        },
    }});

    import pulumi
    import pulumi_databricks as databricks
    import pulumi_docker as docker
    
    thisdocker_registry_image = docker.index.Docker_registry_image("thisdocker_registry_image",
        name=f"{azurerm_container_registry.this.login_server}/sample:latest",
        build=[{}])
    this_cluster = databricks.Cluster("thisCluster", docker_image=databricks.ClusterDockerImageArgs(
        url=thisdocker_registry_image["name"],
        basic_auth=databricks.ClusterDockerImageBasicAuthArgs(
            username=azurerm_container_registry["this"]["admin_username"],
            password=azurerm_container_registry["this"]["admin_password"],
        ),
    ))
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Databricks = Pulumi.Databricks;
    using Docker = Pulumi.Docker;
    
    return await Deployment.RunAsync(() => 
    {
        var thisdocker_registry_image = new Docker.Index.Docker_registry_image("thisdocker_registry_image", new()
        {
            Name = $"{azurerm_container_registry.This.Login_server}/sample:latest",
            Build = new[]
            {
                null,
            },
        });
    
        var thisCluster = new Databricks.Cluster("thisCluster", new()
        {
            DockerImage = new Databricks.Inputs.ClusterDockerImageArgs
            {
                Url = thisdocker_registry_image.Name,
                BasicAuth = new Databricks.Inputs.ClusterDockerImageBasicAuthArgs
                {
                    Username = azurerm_container_registry.This.Admin_username,
                    Password = azurerm_container_registry.This.Admin_password,
                },
            },
        });
    
    });
    
    package main
    
    import (
    	"fmt"
    
    	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
    	"github.com/pulumi/pulumi-docker/sdk/v4/go/docker"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		thisdocker_registry_image, err := docker.NewDocker_registry_image(ctx, "thisdocker_registry_image", &docker.Docker_registry_imageArgs{
    			Name: fmt.Sprintf("%v/sample:latest", azurerm_container_registry.This.Login_server),
    			Build: []map[string]interface{}{
    				nil,
    			},
    		})
    		if err != nil {
    			return err
    		}
    		_, err = databricks.NewCluster(ctx, "thisCluster", &databricks.ClusterArgs{
    			DockerImage: &databricks.ClusterDockerImageArgs{
    				Url: thisdocker_registry_image.Name,
    				BasicAuth: &databricks.ClusterDockerImageBasicAuthArgs{
    					Username: pulumi.Any(azurerm_container_registry.This.Admin_username),
    					Password: pulumi.Any(azurerm_container_registry.This.Admin_password),
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.docker.docker_registry_image;
    import com.pulumi.docker.Docker_registry_imageArgs;
    import com.pulumi.databricks.Cluster;
    import com.pulumi.databricks.ClusterArgs;
    import com.pulumi.databricks.inputs.ClusterDockerImageArgs;
    import com.pulumi.databricks.inputs.ClusterDockerImageBasicAuthArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var thisdocker_registry_image = new Docker_registry_image("thisdocker_registry_image", Docker_registry_imageArgs.builder()        
                .name(String.format("%s/sample:latest", azurerm_container_registry.this().login_server()))
                .build()
                .build());
    
            var thisCluster = new Cluster("thisCluster", ClusterArgs.builder()        
                .dockerImage(ClusterDockerImageArgs.builder()
                    .url(thisdocker_registry_image.name())
                    .basicAuth(ClusterDockerImageBasicAuthArgs.builder()
                        .username(azurerm_container_registry.this().admin_username())
                        .password(azurerm_container_registry.this().admin_password())
                        .build())
                    .build())
                .build());
    
        }
    }
    
    resources:
      thisdocker_registry_image:
        type: docker:docker_registry_image
        properties:
          name: ${azurerm_container_registry.this.login_server}/sample:latest
          build:
            - {}
      thisCluster:
        type: databricks:Cluster
        properties:
          dockerImage:
            url: ${thisdocker_registry_image.name}
            basicAuth:
              username: ${azurerm_container_registry.this.admin_username}
              password: ${azurerm_container_registry.this.admin_password}
    
    url String
    URL for the Docker image
    basicAuth Property Map

    basic_auth.username and basic_auth.password for Docker repository. Docker registry credentials are encrypted when they are stored in Databricks internal storage and when they are passed to a registry upon fetching Docker images at cluster launch. However, other authenticated and authorized API users of this workspace can access the username and password.

    Example usage with azurerm_container_registry, which you can adapt to your specific use case:

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";
    import * as docker from "@pulumi/docker";
    

    const thisdocker_registry_image = new docker.index.Docker_registry_image("thisdocker_registry_image", {
        name: `${azurerm_container_registry["this"].login_server}/sample:latest`,
        build: [{}],
    });
    const thisCluster = new databricks.Cluster("thisCluster", {dockerImage: {
        url: thisdocker_registry_image.name,
        basicAuth: {
            username: azurerm_container_registry["this"].admin_username,
            password: azurerm_container_registry["this"].admin_password,
        },
    }});

    import pulumi
    import pulumi_databricks as databricks
    import pulumi_docker as docker
    
    thisdocker_registry_image = docker.index.Docker_registry_image("thisdocker_registry_image",
        name=f"{azurerm_container_registry.this.login_server}/sample:latest",
        build=[{}])
    this_cluster = databricks.Cluster("thisCluster", docker_image=databricks.ClusterDockerImageArgs(
        url=thisdocker_registry_image["name"],
        basic_auth=databricks.ClusterDockerImageBasicAuthArgs(
            username=azurerm_container_registry["this"]["admin_username"],
            password=azurerm_container_registry["this"]["admin_password"],
        ),
    ))
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Databricks = Pulumi.Databricks;
    using Docker = Pulumi.Docker;
    
    return await Deployment.RunAsync(() => 
    {
        var thisdocker_registry_image = new Docker.Index.Docker_registry_image("thisdocker_registry_image", new()
        {
            Name = $"{azurerm_container_registry.This.Login_server}/sample:latest",
            Build = new[]
            {
                null,
            },
        });
    
        var thisCluster = new Databricks.Cluster("thisCluster", new()
        {
            DockerImage = new Databricks.Inputs.ClusterDockerImageArgs
            {
                Url = thisdocker_registry_image.Name,
                BasicAuth = new Databricks.Inputs.ClusterDockerImageBasicAuthArgs
                {
                    Username = azurerm_container_registry.This.Admin_username,
                    Password = azurerm_container_registry.This.Admin_password,
                },
            },
        });
    
    });
    
    package main
    
    import (
    	"fmt"
    
    	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
    	"github.com/pulumi/pulumi-docker/sdk/v4/go/docker"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		thisdocker_registry_image, err := docker.NewDocker_registry_image(ctx, "thisdocker_registry_image", &docker.Docker_registry_imageArgs{
    			Name: fmt.Sprintf("%v/sample:latest", azurerm_container_registry.This.Login_server),
    			Build: []map[string]interface{}{
    				nil,
    			},
    		})
    		if err != nil {
    			return err
    		}
    		_, err = databricks.NewCluster(ctx, "thisCluster", &databricks.ClusterArgs{
    			DockerImage: &databricks.ClusterDockerImageArgs{
    				Url: thisdocker_registry_image.Name,
    				BasicAuth: &databricks.ClusterDockerImageBasicAuthArgs{
    					Username: pulumi.Any(azurerm_container_registry.This.Admin_username),
    					Password: pulumi.Any(azurerm_container_registry.This.Admin_password),
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.docker.docker_registry_image;
    import com.pulumi.docker.Docker_registry_imageArgs;
    import com.pulumi.databricks.Cluster;
    import com.pulumi.databricks.ClusterArgs;
    import com.pulumi.databricks.inputs.ClusterDockerImageArgs;
    import com.pulumi.databricks.inputs.ClusterDockerImageBasicAuthArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var thisdocker_registry_image = new Docker_registry_image("thisdocker_registry_image", Docker_registry_imageArgs.builder()        
                .name(String.format("%s/sample:latest", azurerm_container_registry.this().login_server()))
                .build()
                .build());
    
            var thisCluster = new Cluster("thisCluster", ClusterArgs.builder()        
                .dockerImage(ClusterDockerImageArgs.builder()
                    .url(thisdocker_registry_image.name())
                    .basicAuth(ClusterDockerImageBasicAuthArgs.builder()
                        .username(azurerm_container_registry.this().admin_username())
                        .password(azurerm_container_registry.this().admin_password())
                        .build())
                    .build())
                .build());
    
        }
    }
    
    resources:
      thisdocker_registry_image:
        type: docker:docker_registry_image
        properties:
          name: ${azurerm_container_registry.this.login_server}/sample:latest
          build:
            - {}
      thisCluster:
        type: databricks:Cluster
        properties:
          dockerImage:
            url: ${thisdocker_registry_image.name}
            basicAuth:
              username: ${azurerm_container_registry.this.admin_username}
              password: ${azurerm_container_registry.this.admin_password}
    

    ClusterDockerImageBasicAuth, ClusterDockerImageBasicAuthArgs

    Password string
    Username string
    Password string
    Username string
    password String
    username String
    password string
    username string
    password String
    username String
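
    Rather than hard-coding the registry credentials, they can be supplied from Pulumi configuration secrets. A minimal TypeScript sketch, assuming hypothetical config keys registryUsername and registryPassword and placeholder image URL, runtime version, and node type:

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";
    
    // Hypothetical config keys; set the password with `pulumi config set --secret registryPassword ...`.
    const config = new pulumi.Config();
    const registryUsername = config.require("registryUsername");
    const registryPassword = config.requireSecret("registryPassword");
    
    const dockerCluster = new databricks.Cluster("dockerCluster", {
        sparkVersion: "13.3.x-scala2.12",  // placeholder runtime version
        nodeTypeId: "Standard_DS3_v2",     // placeholder node type
        numWorkers: 1,
        dockerImage: {
            url: "myregistry.azurecr.io/sample:latest",  // placeholder image URL
            basicAuth: {
                username: registryUsername,
                password: registryPassword,
            },
        },
    });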

    ClusterGcpAttributes, ClusterGcpAttributesArgs

    Availability string
    Availability type used for all nodes. Valid values are PREEMPTIBLE_GCP, PREEMPTIBLE_WITH_FALLBACK_GCP and ON_DEMAND_GCP, default: ON_DEMAND_GCP.
    BootDiskSize int
    Boot disk size in GB
    GoogleServiceAccount string
    Google Service Account email address that the cluster uses to authenticate with Google Identity. This field is used for authentication with the GCS and BigQuery data sources.
    LocalSsdCount int
    Number of local SSD disks (each is 375GB in size) that will be attached to each node of the cluster.
    UsePreemptibleExecutors bool
    whether to use preemptible executors (see GCP documentation). Warning: this field is deprecated in favor of availability and will be removed soon.
    ZoneId string
    Identifier for the availability zone in which the cluster resides. This can be one of the following:
    Availability string
    Availability type used for all nodes. Valid values are PREEMPTIBLE_GCP, PREEMPTIBLE_WITH_FALLBACK_GCP and ON_DEMAND_GCP, default: ON_DEMAND_GCP.
    BootDiskSize int
    Boot disk size in GB
    GoogleServiceAccount string
    Google Service Account email address that the cluster uses to authenticate with Google Identity. This field is used for authentication with the GCS and BigQuery data sources.
    LocalSsdCount int
    Number of local SSD disks (each is 375GB in size) that will be attached to each node of the cluster.
    UsePreemptibleExecutors bool
    whether to use preemptible executors (see GCP documentation). Warning: this field is deprecated in favor of availability and will be removed soon.
    ZoneId string
    Identifier for the availability zone in which the cluster resides. This can be one of the following:
    availability String
    Availability type used for all nodes. Valid values are PREEMPTIBLE_GCP, PREEMPTIBLE_WITH_FALLBACK_GCP and ON_DEMAND_GCP, default: ON_DEMAND_GCP.
    bootDiskSize Integer
    Boot disk size in GB
    googleServiceAccount String
    Google Service Account email address that the cluster uses to authenticate with Google Identity. This field is used for authentication with the GCS and BigQuery data sources.
    localSsdCount Integer
    Number of local SSD disks (each is 375GB in size) that will be attached to each node of the cluster.
    usePreemptibleExecutors Boolean
    whether to use preemptible executors (see GCP documentation). Warning: this field is deprecated in favor of availability and will be removed soon.
    zoneId String
    Identifier for the availability zone in which the cluster resides. This can be one of the following:
    availability string
    Availability type used for all nodes. Valid values are PREEMPTIBLE_GCP, PREEMPTIBLE_WITH_FALLBACK_GCP and ON_DEMAND_GCP, default: ON_DEMAND_GCP.
    bootDiskSize number
    Boot disk size in GB
    googleServiceAccount string
    Google Service Account email address that the cluster uses to authenticate with Google Identity. This field is used for authentication with the GCS and BigQuery data sources.
    localSsdCount number
    Number of local SSD disks (each is 375GB in size) that will be attached to each node of the cluster.
    usePreemptibleExecutors boolean
    whether to use preemptible executors (see GCP documentation). Warning: this field is deprecated in favor of availability and will be removed soon.
    zoneId string
    Identifier for the availability zone in which the cluster resides. This can be one of the following:
    availability str
    Availability type used for all nodes. Valid values are PREEMPTIBLE_GCP, PREEMPTIBLE_WITH_FALLBACK_GCP and ON_DEMAND_GCP, default: ON_DEMAND_GCP.
    boot_disk_size int
    Boot disk size in GB
    google_service_account str
    Google Service Account email address that the cluster uses to authenticate with Google Identity. This field is used for authentication with the GCS and BigQuery data sources.
    local_ssd_count int
    Number of local SSD disks (each is 375GB in size) that will be attached to each node of the cluster.
    use_preemptible_executors bool
    whether to use preemptible executors (see GCP documentation). Warning: this field is deprecated in favor of availability and will be removed soon.
    zone_id str
    Identifier for the availability zone in which the cluster resides. This can be one of the following:
    availability String
    Availability type used for all nodes. Valid values are PREEMPTIBLE_GCP, PREEMPTIBLE_WITH_FALLBACK_GCP and ON_DEMAND_GCP, default: ON_DEMAND_GCP.
    bootDiskSize Number
    Boot disk size in GB
    googleServiceAccount String
    Google Service Account email address that the cluster uses to authenticate with Google Identity. This field is used for authentication with the GCS and BigQuery data sources.
    localSsdCount Number
    Number of local SSD disks (each is 375GB in size) that will be attached to each node of the cluster.
    usePreemptibleExecutors Boolean
    whether to use preemptible executors (see GCP documentation). Warning: this field is deprecated in favor of availability and will be removed soon.
    zoneId String
    Identifier for the availability zone in which the cluster resides. This can be one of the following:
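
    A minimal TypeScript sketch of a cluster on a GCP workspace that sets the attributes above; the runtime version, node type, zone, and service account email are placeholder assumptions:

    import * as databricks from "@pulumi/databricks";
    
    const gcpCluster = new databricks.Cluster("gcpCluster", {
        sparkVersion: "13.3.x-scala2.12",  // placeholder runtime version
        nodeTypeId: "n2-standard-4",       // placeholder GCP node type
        autoterminationMinutes: 20,
        autoscale: {
            minWorkers: 1,
            maxWorkers: 4,
        },
        gcpAttributes: {
            availability: "PREEMPTIBLE_WITH_FALLBACK_GCP",
            zoneId: "us-central1-a",
            googleServiceAccount: "cluster-sa@my-project.iam.gserviceaccount.com",  // placeholder
            localSsdCount: 1,
        },
    });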

    ClusterInitScript, ClusterInitScriptArgs

    abfss Property Map
    dbfs Property Map

    Deprecated: For init scripts use 'volumes', 'workspace' or cloud storage location instead of 'dbfs'.

    file Property Map
    gcs Property Map
    s3 Property Map
    volumes Property Map
    workspace Property Map
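
    A minimal TypeScript sketch of attaching init scripts from a workspace file and a Unity Catalog volume; both paths are placeholder assumptions:

    import * as databricks from "@pulumi/databricks";
    
    const withInitScripts = new databricks.Cluster("withInitScripts", {
        sparkVersion: "13.3.x-scala2.12",  // placeholder runtime version
        nodeTypeId: "i3.xlarge",           // placeholder node type
        numWorkers: 1,
        initScripts: [
            { workspace: { destination: "/Users/me@example.com/install-deps.sh" } },  // placeholder path
            { volumes: { destination: "/Volumes/main/default/scripts/init.sh" } },    // placeholder path
        ],
    });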

    ClusterInitScriptAbfss, ClusterInitScriptAbfssArgs

    Destination string
    abfss destination of the init script in ADLS Gen2, e.g. abfss://<container>@<storage-account>.dfs.core.windows.net/<path>/init.sh.
    Destination string
    abfss destination of the init script in ADLS Gen2, e.g. abfss://<container>@<storage-account>.dfs.core.windows.net/<path>/init.sh.
    destination String
    abfss destination of the init script in ADLS Gen2, e.g. abfss://<container>@<storage-account>.dfs.core.windows.net/<path>/init.sh.
    destination string
    abfss destination of the init script in ADLS Gen2, e.g. abfss://<container>@<storage-account>.dfs.core.windows.net/<path>/init.sh.
    destination str
    abfss destination of the init script in ADLS Gen2, e.g. abfss://<container>@<storage-account>.dfs.core.windows.net/<path>/init.sh.
    destination String
    abfss destination of the init script in ADLS Gen2, e.g. abfss://<container>@<storage-account>.dfs.core.windows.net/<path>/init.sh.

    ClusterInitScriptDbfs, ClusterInitScriptDbfsArgs

    Destination string
    DBFS destination of the init script, e.g. dbfs:/FileStore/scripts/init.sh.
    Destination string
    DBFS destination of the init script, e.g. dbfs:/FileStore/scripts/init.sh.
    destination String
    DBFS destination of the init script, e.g. dbfs:/FileStore/scripts/init.sh.
    destination string
    DBFS destination of the init script, e.g. dbfs:/FileStore/scripts/init.sh.
    destination str
    DBFS destination of the init script, e.g. dbfs:/FileStore/scripts/init.sh.
    destination String
    DBFS destination of the init script, e.g. dbfs:/FileStore/scripts/init.sh.

    ClusterInitScriptFile, ClusterInitScriptFileArgs

    Destination string
    Local file destination of the init script, e.g. file:/my/local/file.sh.
    Destination string
    Local file destination of the init script, e.g. file:/my/local/file.sh.
    destination String
    Local file destination of the init script, e.g. file:/my/local/file.sh.
    destination string
    Local file destination of the init script, e.g. file:/my/local/file.sh.
    destination str
    Local file destination of the init script, e.g. file:/my/local/file.sh.
    destination String
    Local file destination of the init script, e.g. file:/my/local/file.sh.

    ClusterInitScriptGcs, ClusterInitScriptGcsArgs

    Destination string
    GCS destination of the init script, e.g. gs://my-bucket/init-scripts/install.sh.
    Destination string
    GCS destination of the init script, e.g. gs://my-bucket/init-scripts/install.sh.
    destination String
    GCS destination of the init script, e.g. gs://my-bucket/init-scripts/install.sh.
    destination string
    GCS destination of the init script, e.g. gs://my-bucket/init-scripts/install.sh.
    destination str
    GCS destination of the init script, e.g. gs://my-bucket/init-scripts/install.sh.
    destination String
    GCS destination of the init script, e.g. gs://my-bucket/init-scripts/install.sh.

    ClusterInitScriptS3, ClusterInitScriptS3Args

    Destination string
    S3 destination, e.g., s3://my-bucket/some-prefix You must configure the cluster with an instance profile, and the instance profile must have write access to the destination. You cannot use AWS keys.
    CannedAcl string
    Set canned access control list, e.g. bucket-owner-full-control. If canned_acl is set, the cluster instance profile must have s3:PutObjectAcl permission on the destination bucket and prefix. The full list of possible canned ACLs can be found here. By default, only the object owner gets full control. If you are using a cross-account role for writing data, you may want to set bucket-owner-full-control so that bucket owners can read the logs.
    EnableEncryption bool
    Enable server-side encryption, false by default.
    EncryptionType string
    The encryption type, it could be sse-s3 or sse-kms. It is used only when encryption is enabled, and the default type is sse-s3.
    Endpoint string
    S3 endpoint, e.g. https://s3-us-west-2.amazonaws.com. Either region or endpoint needs to be set. If both are set, the endpoint is used.
    KmsKey string
    KMS key used if encryption is enabled and encryption type is set to sse-kms.
    Region string
    S3 region, e.g. us-west-2. Either region or endpoint must be set. If both are set, the endpoint is used.
    Destination string
    S3 destination, e.g., s3://my-bucket/some-prefix You must configure the cluster with an instance profile, and the instance profile must have write access to the destination. You cannot use AWS keys.
    CannedAcl string
    Set canned access control list, e.g. bucket-owner-full-control. If canned_acl is set, the cluster instance profile must have s3:PutObjectAcl permission on the destination bucket and prefix. The full list of possible canned ACLs can be found here. By default, only the object owner gets full control. If you are using a cross-account role for writing data, you may want to set bucket-owner-full-control so that bucket owners can read the logs.
    EnableEncryption bool
    Enable server-side encryption, false by default.
    EncryptionType string
    The encryption type, it could be sse-s3 or sse-kms. It is used only when encryption is enabled, and the default type is sse-s3.
    Endpoint string
    S3 endpoint, e.g. https://s3-us-west-2.amazonaws.com. Either region or endpoint needs to be set. If both are set, the endpoint is used.
    KmsKey string
    KMS key used if encryption is enabled and encryption type is set to sse-kms.
    Region string
    S3 region, e.g. us-west-2. Either region or endpoint must be set. If both are set, the endpoint is used.
    destination String
    S3 destination, e.g., s3://my-bucket/some-prefix You must configure the cluster with an instance profile, and the instance profile must have write access to the destination. You cannot use AWS keys.
    cannedAcl String
    Set canned access control list, e.g. bucket-owner-full-control. If canned_acl is set, the cluster instance profile must have s3:PutObjectAcl permission on the destination bucket and prefix. The full list of possible canned ACLs can be found here. By default, only the object owner gets full control. If you are using a cross-account role for writing data, you may want to set bucket-owner-full-control so that bucket owners can read the logs.
    enableEncryption Boolean
    Enable server-side encryption, false by default.
    encryptionType String
    The encryption type, it could be sse-s3 or sse-kms. It is used only when encryption is enabled, and the default type is sse-s3.
    endpoint String
    S3 endpoint, e.g. https://s3-us-west-2.amazonaws.com. Either region or endpoint needs to be set. If both are set, the endpoint is used.
    kmsKey String
    KMS key used if encryption is enabled and encryption type is set to sse-kms.
    region String
    S3 region, e.g. us-west-2. Either region or endpoint must be set. If both are set, the endpoint is used.
    destination string
    S3 destination, e.g., s3://my-bucket/some-prefix You must configure the cluster with an instance profile, and the instance profile must have write access to the destination. You cannot use AWS keys.
    cannedAcl string
    Set canned access control list, e.g. bucket-owner-full-control. If canned_acl is set, the cluster instance profile must have s3:PutObjectAcl permission on the destination bucket and prefix. The full list of possible canned ACLs can be found here. By default, only the object owner gets full control. If you are using a cross-account role for writing data, you may want to set bucket-owner-full-control so that bucket owners can read the logs.
    enableEncryption boolean
    Enable server-side encryption, false by default.
    encryptionType string
    The encryption type, it could be sse-s3 or sse-kms. It is used only when encryption is enabled, and the default type is sse-s3.
    endpoint string
    S3 endpoint, e.g. https://s3-us-west-2.amazonaws.com. Either region or endpoint needs to be set. If both are set, the endpoint is used.
    kmsKey string
    KMS key used if encryption is enabled and encryption type is set to sse-kms.
    region string
    S3 region, e.g. us-west-2. Either region or endpoint must be set. If both are set, the endpoint is used.
    destination str
    S3 destination, e.g., s3://my-bucket/some-prefix You must configure the cluster with an instance profile, and the instance profile must have write access to the destination. You cannot use AWS keys.
    canned_acl str
    Set canned access control list, e.g. bucket-owner-full-control. If canned_acl is set, the cluster instance profile must have s3:PutObjectAcl permission on the destination bucket and prefix. The full list of possible canned ACLs can be found here. By default, only the object owner gets full control. If you are using a cross-account role for writing data, you may want to set bucket-owner-full-control so that bucket owners can read the logs.
    enable_encryption bool
    Enable server-side encryption, false by default.
    encryption_type str
    The encryption type, it could be sse-s3 or sse-kms. It is used only when encryption is enabled, and the default type is sse-s3.
    endpoint str
    S3 endpoint, e.g. https://s3-us-west-2.amazonaws.com. Either region or endpoint needs to be set. If both are set, the endpoint is used.
    kms_key str
    KMS key used if encryption is enabled and encryption type is set to sse-kms.
    region str
    S3 region, e.g. us-west-2. Either region or endpoint must be set. If both are set, the endpoint is used.
    destination String
    S3 destination, e.g., s3://my-bucket/some-prefix You must configure the cluster with an instance profile, and the instance profile must have write access to the destination. You cannot use AWS keys.
    cannedAcl String
    Set canned access control list, e.g. bucket-owner-full-control. If canned_acl is set, the cluster instance profile must have s3:PutObjectAcl permission on the destination bucket and prefix. The full list of possible canned ACLs can be found here. By default, only the object owner gets full control. If you are using a cross-account role for writing data, you may want to set bucket-owner-full-control so that bucket owners can read the logs.
    enableEncryption Boolean
    Enable server-side encryption, false by default.
    encryptionType String
    The encryption type, it could be sse-s3 or sse-kms. It is used only when encryption is enabled, and the default type is sse-s3.
    endpoint String
    S3 endpoint, e.g. https://s3-us-west-2.amazonaws.com. Either region or endpoint needs to be set. If both are set, the endpoint is used.
    kmsKey String
    KMS key used if encryption is enabled and encryption type is set to sse-kms.
    region String
    S3 region, e.g. us-west-2. Either region or endpoint must be set. If both are set, the endpoint is used.
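
    A minimal TypeScript sketch of an S3-hosted init script with SSE-KMS encryption; the bucket, KMS key ARN, and instance profile ARN are placeholders, and the cluster's instance profile must have access to the destination as described above:

    import * as databricks from "@pulumi/databricks";
    
    const s3InitCluster = new databricks.Cluster("s3InitCluster", {
        sparkVersion: "13.3.x-scala2.12",  // placeholder runtime version
        nodeTypeId: "i3.xlarge",           // placeholder node type
        numWorkers: 1,
        awsAttributes: {
            instanceProfileArn: "arn:aws:iam::123456789012:instance-profile/databricks-init",  // placeholder
        },
        initScripts: [{
            s3: {
                destination: "s3://my-bucket/init-scripts/install.sh",  // placeholder bucket/prefix
                region: "us-west-2",
                enableEncryption: true,
                encryptionType: "sse-kms",
                kmsKey: "arn:aws:kms:us-west-2:123456789012:key/00000000-0000-0000-0000-000000000000",  // placeholder
            },
        }],
    });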

    ClusterInitScriptVolumes, ClusterInitScriptVolumesArgs

    Destination string
    Unity Catalog volume destination of the init script, e.g. /Volumes/my_catalog/my_schema/my_volume/init.sh.
    Destination string
    Unity Catalog volume destination of the init script, e.g. /Volumes/my_catalog/my_schema/my_volume/init.sh.
    destination String
    Unity Catalog volume destination of the init script, e.g. /Volumes/my_catalog/my_schema/my_volume/init.sh.
    destination string
    Unity Catalog volume destination of the init script, e.g. /Volumes/my_catalog/my_schema/my_volume/init.sh.
    destination str
    Unity Catalog volume destination of the init script, e.g. /Volumes/my_catalog/my_schema/my_volume/init.sh.
    destination String
    Unity Catalog volume destination of the init script, e.g. /Volumes/my_catalog/my_schema/my_volume/init.sh.

    ClusterInitScriptWorkspace, ClusterInitScriptWorkspaceArgs

    Destination string
    Workspace file destination of the init script, e.g. /Users/user@domain.com/install-elk.sh.
    Destination string
    Workspace file destination of the init script, e.g. /Users/user@domain.com/install-elk.sh.
    destination String
    Workspace file destination of the init script, e.g. /Users/user@domain.com/install-elk.sh.
    destination string
    Workspace file destination of the init script, e.g. /Users/user@domain.com/install-elk.sh.
    destination str
    Workspace file destination of the init script, e.g. /Users/user@domain.com/install-elk.sh.
    destination String
    Workspace file destination of the init script, e.g. /Users/user@domain.com/install-elk.sh.

    ClusterLibrary, ClusterLibraryArgs

    ClusterLibraryCran, ClusterLibraryCranArgs

    Package string
    Repo string
    Package string
    Repo string
    package_ String
    repo String
    package string
    repo string
    package str
    repo str
    package String
    repo String

    ClusterLibraryMaven, ClusterLibraryMavenArgs

    Coordinates string
    Exclusions List<string>
    Repo string
    Coordinates string
    Exclusions []string
    Repo string
    coordinates String
    exclusions List<String>
    repo String
    coordinates string
    exclusions string[]
    repo string
    coordinates str
    exclusions Sequence[str]
    repo str
    coordinates String
    exclusions List<String>
    repo String

    ClusterLibraryPypi, ClusterLibraryPypiArgs

    Package string
    Repo string
    Package string
    Repo string
    package_ String
    repo String
    package string
    repo string
    package str
    repo str
    package String
    repo String
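
    A minimal TypeScript sketch of attaching PyPI, Maven, and CRAN libraries to a cluster through the libraries argument; the package coordinates are illustrative:

    import * as databricks from "@pulumi/databricks";
    
    const withLibraries = new databricks.Cluster("withLibraries", {
        sparkVersion: "13.3.x-scala2.12",  // placeholder runtime version
        nodeTypeId: "i3.xlarge",           // placeholder node type
        numWorkers: 1,
        libraries: [
            { pypi: { "package": "fbprophet==0.6" } },
            { maven: { coordinates: "com.amazon.deequ:deequ:1.0.4", exclusions: ["org.apache.avro:avro"] } },
            { cran: { "package": "rkeops" } },
        ],
    });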

    ClusterWorkloadType, ClusterWorkloadTypeArgs

    ClusterWorkloadTypeClients, ClusterWorkloadTypeClientsArgs

    Jobs bool
    boolean flag defining if it's possible to run Databricks Jobs on this cluster. Default: true.

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";
    

    const withNfs = new databricks.Cluster("withNfs", {workloadType: {
        clients: {
            jobs: false,
            notebooks: true,
        },
    }});

    import pulumi
    import pulumi_databricks as databricks
    
    with_nfs = databricks.Cluster("withNfs", workload_type=databricks.ClusterWorkloadTypeArgs(
        clients=databricks.ClusterWorkloadTypeClientsArgs(
            jobs=False,
            notebooks=True,
        ),
    ))
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Databricks = Pulumi.Databricks;
    
    return await Deployment.RunAsync(() => 
    {
        var withNfs = new Databricks.Cluster("withNfs", new()
        {
            WorkloadType = new Databricks.Inputs.ClusterWorkloadTypeArgs
            {
                Clients = new Databricks.Inputs.ClusterWorkloadTypeClientsArgs
                {
                    Jobs = false,
                    Notebooks = true,
                },
            },
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := databricks.NewCluster(ctx, "withNfs", &databricks.ClusterArgs{
    			WorkloadType: &databricks.ClusterWorkloadTypeArgs{
    				Clients: &databricks.ClusterWorkloadTypeClientsArgs{
    					Jobs:      pulumi.Bool(false),
    					Notebooks: pulumi.Bool(true),
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.databricks.Cluster;
    import com.pulumi.databricks.ClusterArgs;
    import com.pulumi.databricks.inputs.ClusterWorkloadTypeArgs;
    import com.pulumi.databricks.inputs.ClusterWorkloadTypeClientsArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var withNfs = new Cluster("withNfs", ClusterArgs.builder()        
                .workloadType(ClusterWorkloadTypeArgs.builder()
                    .clients(ClusterWorkloadTypeClientsArgs.builder()
                        .jobs(false)
                        .notebooks(true)
                        .build())
                    .build())
                .build());
    
        }
    }
    
    resources:
      withNfs:
        type: databricks:Cluster
        properties:
          # ...
          workloadType:
            clients:
              jobs: false
              notebooks: true
    

    Notebooks bool

    boolean flag defining if it’s possible to run notebooks on this cluster. Default: true.

    Jobs bool
    boolean flag defining if it's possible to run Databricks Jobs on this cluster. Default: true.

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";
    

    const withNfs = new databricks.Cluster("withNfs", {workloadType: {
        clients: {
            jobs: false,
            notebooks: true,
        },
    }});

    import pulumi
    import pulumi_databricks as databricks
    
    with_nfs = databricks.Cluster("withNfs", workload_type=databricks.ClusterWorkloadTypeArgs(
        clients=databricks.ClusterWorkloadTypeClientsArgs(
            jobs=False,
            notebooks=True,
        ),
    ))
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Databricks = Pulumi.Databricks;
    
    return await Deployment.RunAsync(() => 
    {
        var withNfs = new Databricks.Cluster("withNfs", new()
        {
            WorkloadType = new Databricks.Inputs.ClusterWorkloadTypeArgs
            {
                Clients = new Databricks.Inputs.ClusterWorkloadTypeClientsArgs
                {
                    Jobs = false,
                    Notebooks = true,
                },
            },
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := databricks.NewCluster(ctx, "withNfs", &databricks.ClusterArgs{
    			WorkloadType: &databricks.ClusterWorkloadTypeArgs{
    				Clients: &databricks.ClusterWorkloadTypeClientsArgs{
    					Jobs:      pulumi.Bool(false),
    					Notebooks: pulumi.Bool(true),
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.databricks.Cluster;
    import com.pulumi.databricks.ClusterArgs;
    import com.pulumi.databricks.inputs.ClusterWorkloadTypeArgs;
    import com.pulumi.databricks.inputs.ClusterWorkloadTypeClientsArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var withNfs = new Cluster("withNfs", ClusterArgs.builder()        
                .workloadType(ClusterWorkloadTypeArgs.builder()
                    .clients(ClusterWorkloadTypeClientsArgs.builder()
                        .jobs(false)
                        .notebooks(true)
                        .build())
                    .build())
                .build());
    
        }
    }
    
    resources:
      withNfs:
        type: databricks:Cluster
        properties:
          # ...
          workloadType:
            clients:
              jobs: false
              notebooks: true
    
    title="Optional"> <span id="notebooks_go">

    Notebooks bool

    boolean flag defining if it’s possible to run notebooks on this cluster. Default: true.

    jobs Boolean
    boolean flag defining if it's possible to run Databricks Jobs on this cluster. Default: true.

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";
    

    const withNfs = new databricks.Cluster("withNfs", {workloadType: { clients: { jobs: false, notebooks: true, }, }});

    import pulumi
    import pulumi_databricks as databricks
    
    with_nfs = databricks.Cluster("withNfs", workload_type=databricks.ClusterWorkloadTypeArgs(
        clients=databricks.ClusterWorkloadTypeClientsArgs(
            jobs=False,
            notebooks=True,
        ),
    ))
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Databricks = Pulumi.Databricks;
    
    return await Deployment.RunAsync(() => 
    {
        var withNfs = new Databricks.Cluster("withNfs", new()
        {
            WorkloadType = new Databricks.Inputs.ClusterWorkloadTypeArgs
            {
                Clients = new Databricks.Inputs.ClusterWorkloadTypeClientsArgs
                {
                    Jobs = false,
                    Notebooks = true,
                },
            },
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := databricks.NewCluster(ctx, "withNfs", &databricks.ClusterArgs{
    			WorkloadType: &databricks.ClusterWorkloadTypeArgs{
    				Clients: &databricks.ClusterWorkloadTypeClientsArgs{
    					Jobs:      pulumi.Bool(false),
    					Notebooks: pulumi.Bool(true),
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.databricks.Cluster;
    import com.pulumi.databricks.ClusterArgs;
    import com.pulumi.databricks.inputs.ClusterWorkloadTypeArgs;
    import com.pulumi.databricks.inputs.ClusterWorkloadTypeClientsArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var withNfs = new Cluster("withNfs", ClusterArgs.builder()        
                .workloadType(ClusterWorkloadTypeArgs.builder()
                    .clients(ClusterWorkloadTypeClientsArgs.builder()
                        .jobs(false)
                        .notebooks(true)
                        .build())
                    .build())
                .build());
    
        }
    }
    
    resources:
      withNfs:
        type: databricks:Cluster
        properties:
          # ...
          workloadType:
            clients:
              jobs: false
              notebooks: true
    
    title="Optional"> <span id="notebooks_java">

    notebooks Boolean

    boolean flag defining if it’s possible to run notebooks on this cluster. Default: true.

    jobs boolean
    boolean flag defining if it's possible to run Databricks Jobs on this cluster. Default: true.

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";
    

    const withNfs = new databricks.Cluster("withNfs", {workloadType: { clients: { jobs: false, notebooks: true, }, }});

    import pulumi
    import pulumi_databricks as databricks
    
    with_nfs = databricks.Cluster("withNfs", workload_type=databricks.ClusterWorkloadTypeArgs(
        clients=databricks.ClusterWorkloadTypeClientsArgs(
            jobs=False,
            notebooks=True,
        ),
    ))
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Databricks = Pulumi.Databricks;
    
    return await Deployment.RunAsync(() => 
    {
        var withNfs = new Databricks.Cluster("withNfs", new()
        {
            WorkloadType = new Databricks.Inputs.ClusterWorkloadTypeArgs
            {
                Clients = new Databricks.Inputs.ClusterWorkloadTypeClientsArgs
                {
                    Jobs = false,
                    Notebooks = true,
                },
            },
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := databricks.NewCluster(ctx, "withNfs", &databricks.ClusterArgs{
    			WorkloadType: &databricks.ClusterWorkloadTypeArgs{
    				Clients: &databricks.ClusterWorkloadTypeClientsArgs{
    					Jobs:      pulumi.Bool(false),
    					Notebooks: pulumi.Bool(true),
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.databricks.Cluster;
    import com.pulumi.databricks.ClusterArgs;
    import com.pulumi.databricks.inputs.ClusterWorkloadTypeArgs;
    import com.pulumi.databricks.inputs.ClusterWorkloadTypeClientsArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var withNfs = new Cluster("withNfs", ClusterArgs.builder()        
                .workloadType(ClusterWorkloadTypeArgs.builder()
                    .clients(ClusterWorkloadTypeClientsArgs.builder()
                        .jobs(false)
                        .notebooks(true)
                        .build())
                    .build())
                .build());
    
        }
    }
    
    resources:
      withNfs:
        type: databricks:Cluster
        properties:
          # ...
          workloadType:
            clients:
              jobs: false
              notebooks: true
    
    title="Optional"> <span id="notebooks_nodejs">

    notebooks boolean

    boolean flag defining if it’s possible to run notebooks on this cluster. Default: true.

    jobs bool
    boolean flag defining if it's possible to run Databricks Jobs on this cluster. Default: true.

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";
    

    const withNfs = new databricks.Cluster("withNfs", {workloadType: { clients: { jobs: false, notebooks: true, }, }});

    import pulumi
    import pulumi_databricks as databricks
    
    with_nfs = databricks.Cluster("withNfs", workload_type=databricks.ClusterWorkloadTypeArgs(
        clients=databricks.ClusterWorkloadTypeClientsArgs(
            jobs=False,
            notebooks=True,
        ),
    ))
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Databricks = Pulumi.Databricks;
    
    return await Deployment.RunAsync(() => 
    {
        var withNfs = new Databricks.Cluster("withNfs", new()
        {
            WorkloadType = new Databricks.Inputs.ClusterWorkloadTypeArgs
            {
                Clients = new Databricks.Inputs.ClusterWorkloadTypeClientsArgs
                {
                    Jobs = false,
                    Notebooks = true,
                },
            },
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := databricks.NewCluster(ctx, "withNfs", &databricks.ClusterArgs{
    			WorkloadType: &databricks.ClusterWorkloadTypeArgs{
    				Clients: &databricks.ClusterWorkloadTypeClientsArgs{
    					Jobs:      pulumi.Bool(false),
    					Notebooks: pulumi.Bool(true),
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.databricks.Cluster;
    import com.pulumi.databricks.ClusterArgs;
    import com.pulumi.databricks.inputs.ClusterWorkloadTypeArgs;
    import com.pulumi.databricks.inputs.ClusterWorkloadTypeClientsArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var withNfs = new Cluster("withNfs", ClusterArgs.builder()        
                .workloadType(ClusterWorkloadTypeArgs.builder()
                    .clients(ClusterWorkloadTypeClientsArgs.builder()
                        .jobs(false)
                        .notebooks(true)
                        .build())
                    .build())
                .build());
    
        }
    }
    
    resources:
      withNfs:
        type: databricks:Cluster
        properties:
          # ...
          workloadType:
            clients:
              jobs: false
              notebooks: true
    
    title="Optional"> <span id="notebooks_python">

    notebooks bool

    boolean flag defining if it’s possible to run notebooks on this cluster. Default: true.

    jobs Boolean
    boolean flag defining if it's possible to run Databricks Jobs on this cluster. Default: true.

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";
    

    const withNfs = new databricks.Cluster("withNfs", {workloadType: { clients: { jobs: false, notebooks: true, }, }});

    import pulumi
    import pulumi_databricks as databricks
    
    with_nfs = databricks.Cluster("withNfs", workload_type=databricks.ClusterWorkloadTypeArgs(
        clients=databricks.ClusterWorkloadTypeClientsArgs(
            jobs=False,
            notebooks=True,
        ),
    ))
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Databricks = Pulumi.Databricks;
    
    return await Deployment.RunAsync(() => 
    {
        var withNfs = new Databricks.Cluster("withNfs", new()
        {
            WorkloadType = new Databricks.Inputs.ClusterWorkloadTypeArgs
            {
                Clients = new Databricks.Inputs.ClusterWorkloadTypeClientsArgs
                {
                    Jobs = false,
                    Notebooks = true,
                },
            },
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := databricks.NewCluster(ctx, "withNfs", &databricks.ClusterArgs{
    			WorkloadType: &databricks.ClusterWorkloadTypeArgs{
    				Clients: &databricks.ClusterWorkloadTypeClientsArgs{
    					Jobs:      pulumi.Bool(false),
    					Notebooks: pulumi.Bool(true),
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.databricks.Cluster;
    import com.pulumi.databricks.ClusterArgs;
    import com.pulumi.databricks.inputs.ClusterWorkloadTypeArgs;
    import com.pulumi.databricks.inputs.ClusterWorkloadTypeClientsArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var withNfs = new Cluster("withNfs", ClusterArgs.builder()        
                .workloadType(ClusterWorkloadTypeArgs.builder()
                    .clients(ClusterWorkloadTypeClientsArgs.builder()
                        .jobs(false)
                        .notebooks(true)
                        .build())
                    .build())
                .build());
    
        }
    }
    
    resources:
      withNfs:
        type: databricks:Cluster
        properties:
          # ...
          workloadType:
            clients:
              jobs: false
              notebooks: true
    
    title="Optional"> <span id="notebooks_yaml">

    notebooks Boolean

    boolean flag defining if it’s possible to run notebooks on this cluster. Default: true.

    Import

    The cluster resource can be imported using its cluster ID:

    $ pulumi import databricks:index/cluster:Cluster this <cluster-id>
    

    To learn more about importing existing cloud resources, see Importing resources.
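    After importing, the cluster exists only in the Pulumi state; to keep managing it, add a matching resource definition to your program. A minimal TypeScript sketch, assuming placeholder attribute values that must be edited to mirror the real cluster's settings:

    import * as databricks from "@pulumi/databricks";

    // Hypothetical definition for the cluster imported as "this" above. Every value
    // here is a placeholder and must match the actual cluster's configuration,
    // otherwise the next `pulumi up` will plan changes.
    const thisCluster = new databricks.Cluster("this", {
        clusterName: "My Imported Cluster",
        sparkVersion: "14.3.x-scala2.12",
        nodeTypeId: "i3.xlarge",
        autoterminationMinutes: 20,
    }, {
        protect: true, // `pulumi import` generates code with protect enabled by default
    });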

    Package Details

    Repository: pulumi/pulumi-databricks
    License: Apache-2.0
    Notes: This Pulumi package is based on the databricks Terraform Provider.