Databricks v1.34.0 published on Tuesday, Mar 5, 2024 by Pulumi

databricks.Mount

    This resource will mount your cloud storage on dbfs:/mnt/<MOUNT_NAME>. There are two ways to configure it:

    1. Use generic arguments - you are responsible for providing all parameters required to mount the specific storage. This is the most flexible option.

    2. Use a storage-specific block (s3, abfs, gs, adl, wasb) - the provider derives most of the mount configuration for you; these blocks are documented below.

    Common arguments

    • cluster_id - (Optional, String) Cluster to use for mounting. If no cluster is specified, a new cluster will be created and will mount the bucket for all of the clusters in this workspace. If the cluster is not running, it will be started, so be sure to set auto-termination rules on it.
    • name - (Optional, String) Name under which the mount will be accessible in dbfs:/mnt/<MOUNT_NAME>. If not specified, the provider will try to infer it from the resource type:
      • bucket_name for AWS S3 and Google Cloud Storage
      • container_name for ADLS Gen2 and Azure Blob Storage
      • storage_resource_name for ADLS Gen1
    • uri - (Optional, String) the URI for accessing specific storage (s3a://...., abfss://...., gs://...., etc.)
    • extra_configs - (Optional, String map) configuration parameters that are necessary for mounting the specific storage
    • resource_id - (Optional, String) resource ID for a given storage account. Could be used to fill defaults, such as storage account & container names on Azure.
    • encryption_type - (Optional, String) encryption type. Currently used only for AWS S3 mounts.

    Example mounting ADLS Gen2 using uri and extra_configs

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";
    
    const tenantId = "00000000-1111-2222-3333-444444444444";
    const clientId = "55555555-6666-7777-8888-999999999999";
    const secretScope = "some-kv";
    const secretKey = "some-sp-secret";
    const container = "test";
    const storageAcc = "lrs";
    const _this = new databricks.Mount("this", {
        uri: `abfss://${container}@${storageAcc}.dfs.core.windows.net`,
        extraConfigs: {
            "fs.azure.account.auth.type": "OAuth",
            "fs.azure.account.oauth.provider.type": "org.apache.hadoop.fs.azurebfs.oauth2.ClientCredsTokenProvider",
            "fs.azure.account.oauth2.client.id": clientId,
            "fs.azure.account.oauth2.client.secret": `{{secrets/${secretScope}/${secretKey}}}`,
            "fs.azure.account.oauth2.client.endpoint": `https://login.microsoftonline.com/${tenantId}/oauth2/token`,
            "fs.azure.createRemoteFileSystemDuringInitialization": "false",
        },
    });
    
    import pulumi
    import pulumi_databricks as databricks
    
    tenant_id = "00000000-1111-2222-3333-444444444444"
    client_id = "55555555-6666-7777-8888-999999999999"
    secret_scope = "some-kv"
    secret_key = "some-sp-secret"
    container = "test"
    storage_acc = "lrs"
    this = databricks.Mount("this",
        uri=f"abfss://{container}@{storage_acc}.dfs.core.windows.net",
        extra_configs={
            "fs.azure.account.auth.type": "OAuth",
            "fs.azure.account.oauth.provider.type": "org.apache.hadoop.fs.azurebfs.oauth2.ClientCredsTokenProvider",
            "fs.azure.account.oauth2.client.id": client_id,
            "fs.azure.account.oauth2.client.secret": f"{{{{secrets/{secret_scope}/{secret_key}}}}}",
            "fs.azure.account.oauth2.client.endpoint": f"https://login.microsoftonline.com/{tenant_id}/oauth2/token",
            "fs.azure.createRemoteFileSystemDuringInitialization": "false",
        })
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Databricks = Pulumi.Databricks;
    
    return await Deployment.RunAsync(() => 
    {
        var tenantId = "00000000-1111-2222-3333-444444444444";
    
        var clientId = "55555555-6666-7777-8888-999999999999";
    
        var secretScope = "some-kv";
    
        var secretKey = "some-sp-secret";
    
        var container = "test";
    
        var storageAcc = "lrs";
    
        var @this = new Databricks.Mount("this", new()
        {
            Uri = $"abfss://{container}@{storageAcc}.dfs.core.windows.net",
            ExtraConfigs = 
            {
                { "fs.azure.account.auth.type", "OAuth" },
                { "fs.azure.account.oauth.provider.type", "org.apache.hadoop.fs.azurebfs.oauth2.ClientCredsTokenProvider" },
                { "fs.azure.account.oauth2.client.id", clientId },
                { "fs.azure.account.oauth2.client.secret", $"{{{{secrets/{secretScope}/{secretKey}}}}}" },
                { "fs.azure.account.oauth2.client.endpoint", $"https://login.microsoftonline.com/{tenantId}/oauth2/token" },
                { "fs.azure.createRemoteFileSystemDuringInitialization", "false" },
            },
        });
    
    });
    
    package main
    
    import (
    	"fmt"
    
    	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		tenantId := "00000000-1111-2222-3333-444444444444"
    		clientId := "55555555-6666-7777-8888-999999999999"
    		secretScope := "some-kv"
    		secretKey := "some-sp-secret"
    		container := "test"
    		storageAcc := "lrs"
    		_, err := databricks.NewMount(ctx, "this", &databricks.MountArgs{
    			Uri: pulumi.String(fmt.Sprintf("abfss://%v@%v.dfs.core.windows.net", container, storageAcc)),
    			ExtraConfigs: pulumi.Map{
    				"fs.azure.account.auth.type":                          pulumi.Any("OAuth"),
    				"fs.azure.account.oauth.provider.type":                pulumi.Any("org.apache.hadoop.fs.azurebfs.oauth2.ClientCredsTokenProvider"),
    				"fs.azure.account.oauth2.client.id":                   pulumi.String(clientId),
    				"fs.azure.account.oauth2.client.secret":               pulumi.Any(fmt.Sprintf("{{secrets/%v/%v}}", secretScope, secretKey)),
    				"fs.azure.account.oauth2.client.endpoint":             pulumi.Any(fmt.Sprintf("https://login.microsoftonline.com/%v/oauth2/token", tenantId)),
    				"fs.azure.createRemoteFileSystemDuringInitialization": pulumi.Any("false"),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.databricks.Mount;
    import com.pulumi.databricks.MountArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            final var tenantId = "00000000-1111-2222-3333-444444444444";
    
            final var clientId = "55555555-6666-7777-8888-999999999999";
    
            final var secretScope = "some-kv";
    
            final var secretKey = "some-sp-secret";
    
            final var container = "test";
    
            final var storageAcc = "lrs";
    
            var this_ = new Mount("this", MountArgs.builder()        
                .uri(String.format("abfss://%s@%s.dfs.core.windows.net", container,storageAcc))
                .extraConfigs(Map.ofEntries(
                    Map.entry("fs.azure.account.auth.type", "OAuth"),
                    Map.entry("fs.azure.account.oauth.provider.type", "org.apache.hadoop.fs.azurebfs.oauth2.ClientCredsTokenProvider"),
                    Map.entry("fs.azure.account.oauth2.client.id", clientId),
                    Map.entry("fs.azure.account.oauth2.client.secret", String.format("{{{{secrets/%s/%s}}}}", secretScope,secretKey)),
                    Map.entry("fs.azure.account.oauth2.client.endpoint", String.format("https://login.microsoftonline.com/%s/oauth2/token", tenantId)),
                    Map.entry("fs.azure.createRemoteFileSystemDuringInitialization", "false")
                ))
                .build());
    
        }
    }
    
    resources:
      this:
        type: databricks:Mount
        properties:
          uri: abfss://${container}@${storageAcc}.dfs.core.windows.net
          extraConfigs:
            fs.azure.account.auth.type: OAuth
            fs.azure.account.oauth.provider.type: org.apache.hadoop.fs.azurebfs.oauth2.ClientCredsTokenProvider
            fs.azure.account.oauth2.client.id: ${clientId}
            fs.azure.account.oauth2.client.secret: '{{secrets/${secretScope}/${secretKey}}}'
            fs.azure.account.oauth2.client.endpoint: https://login.microsoftonline.com/${tenantId}/oauth2/token
            fs.azure.createRemoteFileSystemDuringInitialization: 'false'
    variables:
      tenantId: 00000000-1111-2222-3333-444444444444
      clientId: 55555555-6666-7777-8888-999999999999
      secretScope: some-kv
      secretKey: some-sp-secret
      container: test
      storageAcc: lrs
    

    Example mounting ADLS Gen2 with AAD passthrough

    Note AAD passthrough is considered a legacy data access pattern. Use Unity Catalog for fine-grained data access control.

    Note Mounts using AAD passthrough cannot be created using a service principal.

    To mount ADLS Gen2 with Azure Active Directory credentials passthrough, we need to execute the mount commands using a cluster configured with AAD credentials passthrough and provide the necessary configuration parameters (see documentation for more details).

    import * as pulumi from "@pulumi/pulumi";
    import * as azure from "@pulumi/azure";
    import * as databricks from "@pulumi/databricks";
    
    const config = new pulumi.Config();
    const resourceGroup = config.require("resourceGroup");
    const workspaceName = config.require("workspaceName");
    const _this = azure.databricks.getWorkspace({
        name: workspaceName,
        resourceGroupName: resourceGroup,
    });
    const smallest = databricks.getNodeType({
        localDisk: true,
    });
    const latest = databricks.getSparkVersion({});
    const sharedPassthrough = new databricks.Cluster("sharedPassthrough", {
        clusterName: "Shared Passthrough for mount",
        sparkVersion: latest.then(latest => latest.id),
        nodeTypeId: smallest.then(smallest => smallest.id),
        autoterminationMinutes: 10,
        numWorkers: 1,
        sparkConf: {
            "spark.databricks.cluster.profile": "serverless",
            "spark.databricks.repl.allowedLanguages": "python,sql",
            "spark.databricks.passthrough.enabled": "true",
            "spark.databricks.pyspark.enableProcessIsolation": "true",
        },
        customTags: {
            ResourceClass: "Serverless",
        },
    });
    const storageAcc = config.require("storageAcc");
    const container = config.require("container");
    const passthrough = new databricks.Mount("passthrough", {
        clusterId: sharedPassthrough.id,
        uri: `abfss://${container}@${storageAcc}.dfs.core.windows.net`,
        extraConfigs: {
            "fs.azure.account.auth.type": "CustomAccessToken",
            "fs.azure.account.custom.token.provider.class": "{{sparkconf/spark.databricks.passthrough.adls.gen2.tokenProviderClassName}}",
        },
    });
    
    import pulumi
    import pulumi_azure as azure
    import pulumi_databricks as databricks
    
    config = pulumi.Config()
    resource_group = config.require("resourceGroup")
    workspace_name = config.require("workspaceName")
    this = azure.databricks.get_workspace(name=workspace_name,
        resource_group_name=resource_group)
    smallest = databricks.get_node_type(local_disk=True)
    latest = databricks.get_spark_version()
    shared_passthrough = databricks.Cluster("sharedPassthrough",
        cluster_name="Shared Passthrough for mount",
        spark_version=latest.id,
        node_type_id=smallest.id,
        autotermination_minutes=10,
        num_workers=1,
        spark_conf={
            "spark.databricks.cluster.profile": "serverless",
            "spark.databricks.repl.allowedLanguages": "python,sql",
            "spark.databricks.passthrough.enabled": "true",
            "spark.databricks.pyspark.enableProcessIsolation": "true",
        },
        custom_tags={
            "ResourceClass": "Serverless",
        })
    storage_acc = config.require("storageAcc")
    container = config.require("container")
    passthrough = databricks.Mount("passthrough",
        cluster_id=shared_passthrough.id,
        uri=f"abfss://{container}@{storage_acc}.dfs.core.windows.net",
        extra_configs={
            "fs.azure.account.auth.type": "CustomAccessToken",
            "fs.azure.account.custom.token.provider.class": "{{sparkconf/spark.databricks.passthrough.adls.gen2.tokenProviderClassName}}",
        })
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Azure = Pulumi.Azure;
    using Databricks = Pulumi.Databricks;
    
    return await Deployment.RunAsync(() => 
    {
        var config = new Config();
        var resourceGroup = config.Require("resourceGroup");
        var workspaceName = config.Require("workspaceName");
        var @this = Azure.DataBricks.GetWorkspace.Invoke(new()
        {
            Name = workspaceName,
            ResourceGroupName = resourceGroup,
        });
    
        var smallest = Databricks.GetNodeType.Invoke(new()
        {
            LocalDisk = true,
        });
    
        var latest = Databricks.GetSparkVersion.Invoke();
    
        var sharedPassthrough = new Databricks.Cluster("sharedPassthrough", new()
        {
            ClusterName = "Shared Passthrough for mount",
            SparkVersion = latest.Apply(getSparkVersionResult => getSparkVersionResult.Id),
            NodeTypeId = smallest.Apply(getNodeTypeResult => getNodeTypeResult.Id),
            AutoterminationMinutes = 10,
            NumWorkers = 1,
            SparkConf = 
            {
                { "spark.databricks.cluster.profile", "serverless" },
                { "spark.databricks.repl.allowedLanguages", "python,sql" },
                { "spark.databricks.passthrough.enabled", "true" },
                { "spark.databricks.pyspark.enableProcessIsolation", "true" },
            },
            CustomTags = 
            {
                { "ResourceClass", "Serverless" },
            },
        });
    
        var storageAcc = config.Require("storageAcc");
        var container = config.Require("container");
        var passthrough = new Databricks.Mount("passthrough", new()
        {
            ClusterId = sharedPassthrough.Id,
            Uri = $"abfss://{container}@{storageAcc}.dfs.core.windows.net",
            ExtraConfigs = 
            {
                { "fs.azure.account.auth.type", "CustomAccessToken" },
                { "fs.azure.account.custom.token.provider.class", "{{sparkconf/spark.databricks.passthrough.adls.gen2.tokenProviderClassName}}" },
            },
        });
    
    });
    
    package main
    
    import (
    	"fmt"
    
    	azuredatabricks "github.com/pulumi/pulumi-azure/sdk/v5/go/azure/databricks"
    	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi/config"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		cfg := config.New(ctx, "")
    		resourceGroup := cfg.Require("resourceGroup")
    		workspaceName := cfg.Require("workspaceName")
    		_, err := azuredatabricks.LookupWorkspace(ctx, &azuredatabricks.LookupWorkspaceArgs{
    			Name:              workspaceName,
    			ResourceGroupName: resourceGroup,
    		}, nil)
    		if err != nil {
    			return err
    		}
    		smallest, err := databricks.GetNodeType(ctx, &databricks.GetNodeTypeArgs{
    			LocalDisk: pulumi.BoolRef(true),
    		}, nil)
    		if err != nil {
    			return err
    		}
    		latest, err := databricks.GetSparkVersion(ctx, nil, nil)
    		if err != nil {
    			return err
    		}
    		sharedPassthrough, err := databricks.NewCluster(ctx, "sharedPassthrough", &databricks.ClusterArgs{
    			ClusterName:            pulumi.String("Shared Passthrough for mount"),
    			SparkVersion:           pulumi.String(latest.Id),
    			NodeTypeId:             pulumi.String(smallest.Id),
    			AutoterminationMinutes: pulumi.Int(10),
    			NumWorkers:             pulumi.Int(1),
    			SparkConf: pulumi.Map{
    				"spark.databricks.cluster.profile":                pulumi.Any("serverless"),
    				"spark.databricks.repl.allowedLanguages":          pulumi.Any("python,sql"),
    				"spark.databricks.passthrough.enabled":            pulumi.Any("true"),
    				"spark.databricks.pyspark.enableProcessIsolation": pulumi.Any("true"),
    			},
    			CustomTags: pulumi.Map{
    				"ResourceClass": pulumi.Any("Serverless"),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		storageAcc := cfg.Require("storageAcc")
    		container := cfg.Require("container")
    		_, err = databricks.NewMount(ctx, "passthrough", &databricks.MountArgs{
    			ClusterId: sharedPassthrough.ID(),
    			Uri:       pulumi.String(fmt.Sprintf("abfss://%v@%v.dfs.core.windows.net", container, storageAcc)),
    			ExtraConfigs: pulumi.Map{
    				"fs.azure.account.auth.type":                   pulumi.Any("CustomAccessToken"),
    				"fs.azure.account.custom.token.provider.class": pulumi.Any("{{sparkconf/spark.databricks.passthrough.adls.gen2.tokenProviderClassName}}"),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.azure.databricks.inputs.GetWorkspaceArgs;
    import com.pulumi.databricks.DatabricksFunctions;
    import com.pulumi.databricks.inputs.GetNodeTypeArgs;
    import com.pulumi.databricks.inputs.GetSparkVersionArgs;
    import com.pulumi.databricks.Cluster;
    import com.pulumi.databricks.ClusterArgs;
    import com.pulumi.databricks.Mount;
    import com.pulumi.databricks.MountArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            final var config = ctx.config();
            final var resourceGroup = config.get("resourceGroup");
            final var workspaceName = config.get("workspaceName");
            final var this_ = com.pulumi.azure.databricks.DatabricksFunctions.getWorkspace(GetWorkspaceArgs.builder()
                .name(workspaceName)
                .resourceGroupName(resourceGroup)
                .build());
    
            final var smallest = DatabricksFunctions.getNodeType(GetNodeTypeArgs.builder()
                .localDisk(true)
                .build());
    
            final var latest = DatabricksFunctions.getSparkVersion();
    
            var sharedPassthrough = new Cluster("sharedPassthrough", ClusterArgs.builder()        
                .clusterName("Shared Passthrough for mount")
                .sparkVersion(latest.applyValue(getSparkVersionResult -> getSparkVersionResult.id()))
                .nodeTypeId(smallest.applyValue(getNodeTypeResult -> getNodeTypeResult.id()))
                .autoterminationMinutes(10)
                .numWorkers(1)
                .sparkConf(Map.ofEntries(
                    Map.entry("spark.databricks.cluster.profile", "serverless"),
                    Map.entry("spark.databricks.repl.allowedLanguages", "python,sql"),
                    Map.entry("spark.databricks.passthrough.enabled", "true"),
                    Map.entry("spark.databricks.pyspark.enableProcessIsolation", "true")
                ))
                .customTags(Map.of("ResourceClass", "Serverless"))
                .build());
    
            final var storageAcc = config.get("storageAcc");
            final var container = config.get("container");
            var passthrough = new Mount("passthrough", MountArgs.builder()        
                .clusterId(sharedPassthrough.id())
                .uri(String.format("abfss://%s@%s.dfs.core.windows.net", container,storageAcc))
                .extraConfigs(Map.ofEntries(
                    Map.entry("fs.azure.account.auth.type", "CustomAccessToken"),
                    Map.entry("fs.azure.account.custom.token.provider.class", "{{sparkconf/spark.databricks.passthrough.adls.gen2.tokenProviderClassName}}")
                ))
                .build());
    
        }
    }
    
    configuration:
      resourceGroup:
        type: string
      workspaceName:
        type: string
      storageAcc:
        type: string
      container:
        type: string
    resources:
      sharedPassthrough:
        type: databricks:Cluster
        properties:
          clusterName: Shared Passthrough for mount
          sparkVersion: ${latest.id}
          nodeTypeId: ${smallest.id}
          autoterminationMinutes: 10
          numWorkers: 1
          sparkConf:
            spark.databricks.cluster.profile: serverless
            spark.databricks.repl.allowedLanguages: python,sql
            spark.databricks.passthrough.enabled: 'true'
            spark.databricks.pyspark.enableProcessIsolation: 'true'
          customTags:
            ResourceClass: Serverless
      passthrough:
        type: databricks:Mount
        properties:
          clusterId: ${sharedPassthrough.id}
          uri: abfss://${container}@${storageAcc}.dfs.core.windows.net
          extraConfigs:
            fs.azure.account.auth.type: CustomAccessToken
            fs.azure.account.custom.token.provider.class: '{{sparkconf/spark.databricks.passthrough.adls.gen2.tokenProviderClassName}}'
    variables:
      this:
        fn::invoke:
          Function: azure:databricks:getWorkspace
          Arguments:
            name: ${workspaceName}
            resourceGroupName: ${resourceGroup}
      smallest:
        fn::invoke:
          Function: databricks:getNodeType
          Arguments:
            localDisk: true
      latest:
        fn::invoke:
          Function: databricks:getSparkVersion
          Arguments: {}
    

    s3 block

    This block allows specifying parameters for mounting of AWS S3. The following arguments are required inside the s3 block:

    • instance_profile - (Optional) (String) ARN of registered instance profile for data access. If it’s not specified, then the cluster_id should be provided, and the cluster should have an instance profile attached to it. If both cluster_id & instance_profile are specified, then cluster_id takes precedence.
    • bucket_name - (Required) (String) S3 bucket name to be mounted.

    Example of mounting S3

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";
    
    // now you can do `%fs ls /mnt/experiments` in notebooks
    const _this = new databricks.Mount("this", {s3: {
        instanceProfile: databricks_instance_profile.ds.id,
        bucketName: aws_s3_bucket["this"].bucket,
    }});
    
    import pulumi
    import pulumi_databricks as databricks
    
    # now you can do `%fs ls /mnt/experiments` in notebooks
    this = databricks.Mount("this", s3=databricks.MountS3Args(
        instance_profile=databricks_instance_profile["ds"]["id"],
        bucket_name=aws_s3_bucket["this"]["bucket"],
    ))
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Databricks = Pulumi.Databricks;
    
    return await Deployment.RunAsync(() => 
    {
        // now you can do `%fs ls /mnt/experiments` in notebooks
        var @this = new Databricks.Mount("this", new()
        {
            S3 = new Databricks.Inputs.MountS3Args
            {
                InstanceProfile = databricks_instance_profile.Ds.Id,
                BucketName = aws_s3_bucket.This.Bucket,
            },
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		// now you can do `%fs ls /mnt/experiments` in notebooks
    		_, err := databricks.NewMount(ctx, "this", &databricks.MountArgs{
    			S3: &databricks.MountS3Args{
    				InstanceProfile: pulumi.Any(databricks_instance_profile.Ds.Id),
    				BucketName:      pulumi.Any(aws_s3_bucket.This.Bucket),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.databricks.Mount;
    import com.pulumi.databricks.MountArgs;
    import com.pulumi.databricks.inputs.MountS3Args;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var this_ = new Mount("this", MountArgs.builder()        
                .s3(MountS3Args.builder()
                    .instanceProfile(databricks_instance_profile.ds().id())
                    .bucketName(aws_s3_bucket.this().bucket())
                    .build())
                .build());
    
        }
    }
    
    resources:
      # now you can do `%fs ls /mnt/experiments` in notebooks
      this:
        type: databricks:Mount
        properties:
          s3:
            instanceProfile: ${databricks_instance_profile.ds.id}
            bucketName: ${aws_s3_bucket.this.bucket}
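
    As noted above, instead of registering an instance profile on the mount itself, the s3 block can be combined with cluster_id, relying on the instance profile that is already attached to that cluster. The following is a minimal sketch in TypeScript; the cluster ID and bucket name are placeholders to replace with your own values.

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";

    // Placeholder ID of an existing cluster that already has an instance
    // profile attached; replace with your own cluster.
    const clusterId = "0123-456789-abcdefgh";

    const viaCluster = new databricks.Mount("viaCluster", {
        clusterId: clusterId,
        s3: {
            bucketName: "my-experiments-bucket",
        },
    });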
    

    abfs block

    This block allows specifying parameters for mounting of ADLS Gen2. The following arguments are required inside the abfs block:

    • client_id - (Required) (String) This is the client_id (Application Object ID) for the enterprise application for the service principal.
    • tenant_id - (Optional) (String) This is your Azure Active Directory tenant ID. It is required for creating the mount. (Could be omitted if Azure authentication is used, and we can extract tenant_id from it).
    • client_secret_key - (Required) (String) This is the secret key in which your service principal/enterprise app client secret will be stored.
    • client_secret_scope - (Required) (String) This is the secret scope in which your service principal/enterprise app client secret will be stored.
    • container_name - (Required) (String) ADLS gen2 container name. (Could be omitted if resource_id is provided)
    • storage_account_name - (Required) (String) The name of the storage resource in which the data is. (Could be omitted if resource_id is provided)
    • directory - (Computed) (String) Optional directory inside the container to mount; if specified, it must start with a "/".
    • initialize_file_system - (Required) (Bool) whether or not to initialize the file system for first use

    Creating mount for ADLS Gen2 using abfs block

    In this example, we’re using Azure authentication, so we can omit some parameters (tenant_id, storage_account_name, and container_name) that will be detected automatically.

    import * as pulumi from "@pulumi/pulumi";
    import * as azure from "@pulumi/azure";
    import * as databricks from "@pulumi/databricks";
    
    const terraform = new databricks.SecretScope("terraform", {initialManagePrincipal: "users"});
    const servicePrincipalKey = new databricks.Secret("servicePrincipalKey", {
        key: "service_principal_key",
        stringValue: _var.ARM_CLIENT_SECRET,
        scope: terraform.name,
    });
    const thisAccount = new azure.storage.Account("thisAccount", {
        resourceGroupName: _var.resource_group_name,
        location: _var.resource_group_location,
        accountTier: "Standard",
        accountReplicationType: "GRS",
        accountKind: "StorageV2",
        isHnsEnabled: true,
    });
    const thisAssignment = new azure.authorization.Assignment("thisAssignment", {
        scope: thisAccount.id,
        roleDefinitionName: "Storage Blob Data Contributor",
        principalId: data.azurerm_client_config.current.object_id,
    });
    const thisContainer = new azure.storage.Container("thisContainer", {
        storageAccountName: thisAccount.name,
        containerAccessType: "private",
    });
    const marketing = new databricks.Mount("marketing", {
        resourceId: thisContainer.resourceManagerId,
        abfs: {
            clientId: data.azurerm_client_config.current.client_id,
            clientSecretScope: terraform.name,
            clientSecretKey: servicePrincipalKey.key,
            initializeFileSystem: true,
        },
    });
    
    import pulumi
    import pulumi_azure as azure
    import pulumi_databricks as databricks
    
    terraform = databricks.SecretScope("terraform", initial_manage_principal="users")
    service_principal_key = databricks.Secret("servicePrincipalKey",
        key="service_principal_key",
        string_value=var["ARM_CLIENT_SECRET"],
        scope=terraform.name)
    this_account = azure.storage.Account("thisAccount",
        resource_group_name=var["resource_group_name"],
        location=var["resource_group_location"],
        account_tier="Standard",
        account_replication_type="GRS",
        account_kind="StorageV2",
        is_hns_enabled=True)
    this_assignment = azure.authorization.Assignment("thisAssignment",
        scope=this_account.id,
        role_definition_name="Storage Blob Data Contributor",
        principal_id=data["azurerm_client_config"]["current"]["object_id"])
    this_container = azure.storage.Container("thisContainer",
        storage_account_name=this_account.name,
        container_access_type="private")
    marketing = databricks.Mount("marketing",
        resource_id=this_container.resource_manager_id,
        abfs=databricks.MountAbfsArgs(
            client_id=data["azurerm_client_config"]["current"]["client_id"],
            client_secret_scope=terraform.name,
            client_secret_key=service_principal_key.key,
            initialize_file_system=True,
        ))
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Azure = Pulumi.Azure;
    using Databricks = Pulumi.Databricks;
    
    return await Deployment.RunAsync(() => 
    {
        var terraform = new Databricks.SecretScope("terraform", new()
        {
            InitialManagePrincipal = "users",
        });
    
        var servicePrincipalKey = new Databricks.Secret("servicePrincipalKey", new()
        {
            Key = "service_principal_key",
            StringValue = @var.ARM_CLIENT_SECRET,
            Scope = terraform.Name,
        });
    
        var thisAccount = new Azure.Storage.Account("thisAccount", new()
        {
            ResourceGroupName = @var.Resource_group_name,
            Location = @var.Resource_group_location,
            AccountTier = "Standard",
            AccountReplicationType = "GRS",
            AccountKind = "StorageV2",
            IsHnsEnabled = true,
        });
    
        var thisAssignment = new Azure.Authorization.Assignment("thisAssignment", new()
        {
            Scope = thisAccount.Id,
            RoleDefinitionName = "Storage Blob Data Contributor",
            PrincipalId = data.Azurerm_client_config.Current.Object_id,
        });
    
        var thisContainer = new Azure.Storage.Container("thisContainer", new()
        {
            StorageAccountName = thisAccount.Name,
            ContainerAccessType = "private",
        });
    
        var marketing = new Databricks.Mount("marketing", new()
        {
            ResourceId = thisContainer.ResourceManagerId,
            Abfs = new Databricks.Inputs.MountAbfsArgs
            {
                ClientId = data.Azurerm_client_config.Current.Client_id,
                ClientSecretScope = terraform.Name,
                ClientSecretKey = servicePrincipalKey.Key,
                InitializeFileSystem = true,
            },
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-azure/sdk/v5/go/azure/authorization"
    	"github.com/pulumi/pulumi-azure/sdk/v5/go/azure/storage"
    	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		terraform, err := databricks.NewSecretScope(ctx, "terraform", &databricks.SecretScopeArgs{
    			InitialManagePrincipal: pulumi.String("users"),
    		})
    		if err != nil {
    			return err
    		}
    		servicePrincipalKey, err := databricks.NewSecret(ctx, "servicePrincipalKey", &databricks.SecretArgs{
    			Key:         pulumi.String("service_principal_key"),
    			StringValue: pulumi.Any(_var.ARM_CLIENT_SECRET),
    			Scope:       terraform.Name,
    		})
    		if err != nil {
    			return err
    		}
    		thisAccount, err := storage.NewAccount(ctx, "thisAccount", &storage.AccountArgs{
    			ResourceGroupName:      pulumi.Any(_var.Resource_group_name),
    			Location:               pulumi.Any(_var.Resource_group_location),
    			AccountTier:            pulumi.String("Standard"),
    			AccountReplicationType: pulumi.String("GRS"),
    			AccountKind:            pulumi.String("StorageV2"),
    			IsHnsEnabled:           pulumi.Bool(true),
    		})
    		if err != nil {
    			return err
    		}
    		_, err = authorization.NewAssignment(ctx, "thisAssignment", &authorization.AssignmentArgs{
    			Scope:              thisAccount.ID(),
    			RoleDefinitionName: pulumi.String("Storage Blob Data Contributor"),
    			PrincipalId:        pulumi.Any(data.Azurerm_client_config.Current.Object_id),
    		})
    		if err != nil {
    			return err
    		}
    		thisContainer, err := storage.NewContainer(ctx, "thisContainer", &storage.ContainerArgs{
    			StorageAccountName:  thisAccount.Name,
    			ContainerAccessType: pulumi.String("private"),
    		})
    		if err != nil {
    			return err
    		}
    		_, err = databricks.NewMount(ctx, "marketing", &databricks.MountArgs{
    			ResourceId: thisContainer.ResourceManagerId,
    			Abfs: &databricks.MountAbfsArgs{
    				ClientId:             pulumi.Any(data.Azurerm_client_config.Current.Client_id),
    				ClientSecretScope:    terraform.Name,
    				ClientSecretKey:      servicePrincipalKey.Key,
    				InitializeFileSystem: pulumi.Bool(true),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.databricks.SecretScope;
    import com.pulumi.databricks.SecretScopeArgs;
    import com.pulumi.databricks.Secret;
    import com.pulumi.databricks.SecretArgs;
    import com.pulumi.azure.storage.Account;
    import com.pulumi.azure.storage.AccountArgs;
    import com.pulumi.azure.authorization.Assignment;
    import com.pulumi.azure.authorization.AssignmentArgs;
    import com.pulumi.azure.storage.Container;
    import com.pulumi.azure.storage.ContainerArgs;
    import com.pulumi.databricks.Mount;
    import com.pulumi.databricks.MountArgs;
    import com.pulumi.databricks.inputs.MountAbfsArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var terraform = new SecretScope("terraform", SecretScopeArgs.builder()        
                .initialManagePrincipal("users")
                .build());
    
            var servicePrincipalKey = new Secret("servicePrincipalKey", SecretArgs.builder()        
                .key("service_principal_key")
                .stringValue(var_.ARM_CLIENT_SECRET())
                .scope(terraform.name())
                .build());
    
            var thisAccount = new Account("thisAccount", AccountArgs.builder()        
                .resourceGroupName(var_.resource_group_name())
                .location(var_.resource_group_location())
                .accountTier("Standard")
                .accountReplicationType("GRS")
                .accountKind("StorageV2")
                .isHnsEnabled(true)
                .build());
    
            var thisAssignment = new Assignment("thisAssignment", AssignmentArgs.builder()        
                .scope(thisAccount.id())
                .roleDefinitionName("Storage Blob Data Contributor")
                .principalId(data.azurerm_client_config().current().object_id())
                .build());
    
            var thisContainer = new Container("thisContainer", ContainerArgs.builder()        
                .storageAccountName(thisAccount.name())
                .containerAccessType("private")
                .build());
    
            var marketing = new Mount("marketing", MountArgs.builder()        
                .resourceId(thisContainer.resourceManagerId())
                .abfs(MountAbfsArgs.builder()
                    .clientId(data.azurerm_client_config().current().client_id())
                    .clientSecretScope(terraform.name())
                    .clientSecretKey(servicePrincipalKey.key())
                    .initializeFileSystem(true)
                    .build())
                .build());
    
        }
    }
    
    resources:
      terraform:
        type: databricks:SecretScope
        properties:
          initialManagePrincipal: users
      servicePrincipalKey:
        type: databricks:Secret
        properties:
          key: service_principal_key
          stringValue: ${var.ARM_CLIENT_SECRET}
          scope: ${terraform.name}
      thisAccount:
        type: azure:storage:Account
        properties:
          resourceGroupName: ${var.resource_group_name}
          location: ${var.resource_group_location}
          accountTier: Standard
          accountReplicationType: GRS
          accountKind: StorageV2
          isHnsEnabled: true
      thisAssignment:
        type: azure:authorization:Assignment
        properties:
          scope: ${thisAccount.id}
          roleDefinitionName: Storage Blob Data Contributor
          principalId: ${data.azurerm_client_config.current.object_id}
      thisContainer:
        type: azure:storage:Container
        properties:
          storageAccountName: ${thisAccount.name}
          containerAccessType: private
      marketing:
        type: databricks:Mount
        properties:
          resourceId: ${thisContainer.resourceManagerId}
          abfs:
            clientId: ${data.azurerm_client_config.current.client_id}
            clientSecretScope: ${terraform.name}
            clientSecretKey: ${servicePrincipalKey.key}
            initializeFileSystem: true
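
    As a variant, when resource_id is not passed, the abfs block can spell out the tenant, storage account, and container explicitly. Below is a minimal TypeScript sketch; the tenant ID, client ID, secret scope and key, storage account, container name, and directory are all placeholder assumptions to replace with your own.

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";

    // All identifiers below are placeholders.
    const explicit = new databricks.Mount("explicit", {
        abfs: {
            tenantId: "00000000-1111-2222-3333-444444444444",
            clientId: "55555555-6666-7777-8888-999999999999",
            clientSecretScope: "terraform",
            clientSecretKey: "service_principal_key",
            storageAccountName: "mystorageaccount",
            containerName: "marketing",
            initializeFileSystem: false,
            directory: "/reports",
        },
    });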
    

    gs block

    This block allows specifying parameters for mounting of Google Cloud Storage. The following arguments are required inside the gs block:

    • service_account - (Optional) (String) email of registered Google Service Account for data access. If it’s not specified, then the cluster_id should be provided, and the cluster should have a Google service account attached to it.
    • bucket_name - (Required) (String) GCS bucket name to be mounted.

    Example mounting Google Cloud Storage

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";
    
    const thisGs = new databricks.Mount("thisGs", {gs: {
        bucketName: "mybucket",
        serviceAccount: "acc@company.iam.gserviceaccount.com",
    }});
    
    import pulumi
    import pulumi_databricks as databricks
    
    this_gs = databricks.Mount("thisGs", gs=databricks.MountGsArgs(
        bucket_name="mybucket",
        service_account="acc@company.iam.gserviceaccount.com",
    ))
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Databricks = Pulumi.Databricks;
    
    return await Deployment.RunAsync(() => 
    {
        var thisGs = new Databricks.Mount("thisGs", new()
        {
            Gs = new Databricks.Inputs.MountGsArgs
            {
                BucketName = "mybucket",
                ServiceAccount = "acc@company.iam.gserviceaccount.com",
            },
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := databricks.NewMount(ctx, "thisGs", &databricks.MountArgs{
    			Gs: &databricks.MountGsArgs{
    				BucketName:     pulumi.String("mybucket"),
    				ServiceAccount: pulumi.String("acc@company.iam.gserviceaccount.com"),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.databricks.Mount;
    import com.pulumi.databricks.MountArgs;
    import com.pulumi.databricks.inputs.MountGsArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var thisGs = new Mount("thisGs", MountArgs.builder()        
                .gs(MountGsArgs.builder()
                    .bucketName("mybucket")
                    .serviceAccount("acc@company.iam.gserviceaccount.com")
                    .build())
                .build());
    
        }
    }
    
    resources:
      thisGs:
        type: databricks:Mount
        properties:
          gs:
            bucketName: mybucket
            serviceAccount: acc@company.iam.gserviceaccount.com
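
    If service_account is omitted, the mount can instead be performed through an existing cluster that runs with a Google service account attached, as noted above. A minimal TypeScript sketch, with a placeholder cluster ID:

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";

    // Placeholder ID of a cluster that already runs with a Google service
    // account attached; replace with your own cluster.
    const clusterId = "0123-456789-abcdefgh";

    const thisGsViaCluster = new databricks.Mount("thisGsViaCluster", {
        clusterId: clusterId,
        gs: {
            bucketName: "mybucket",
        },
    });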
    

    adl block

    This block allows specifying parameters for mounting of ADLS Gen1. The following arguments are required inside the adl block:

    • client_id - (Required) (String) This is the client_id for the enterprise application for the service principal.

    • tenant_id - (Optional) (String) This is your Azure Active Directory tenant ID. It is required for creating the mount. (Could be omitted if Azure authentication is used, and we can extract tenant_id from it)

    • client_secret_key - (Required) (String) This is the secret key in which your service principal/enterprise app client secret will be stored.

    • client_secret_scope - (Required) (String) This is the secret scope in which your service principal/enterprise app client secret will be stored.

    • storage_resource_name - (Required) (String) The name of the storage resource in which the data is for ADLS gen 1. This is what you are trying to mount. (Could be omitted if resource_id is provided)

    • spark_conf_prefix - (Optional) (String) This is the Spark configuration prefix for the ADLS Gen1 mount. The options are fs.adl and dfs.adls. Use fs.adl for clusters on runtime 6.0 and above; otherwise use dfs.adls. The default value is fs.adl.

    • directory - (Computed) (String) Optional directory inside the storage resource to mount; if specified, it must start with a "/".

    Example mounting ADLS Gen1

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";
    
    const mount = new databricks.Mount("mount", {adl: {
        storageResourceName: "{env.TEST_STORAGE_ACCOUNT_NAME}",
        tenantId: data.azurerm_client_config.current.tenant_id,
        clientId: data.azurerm_client_config.current.client_id,
        clientSecretScope: databricks_secret_scope.terraform.name,
        clientSecretKey: databricks_secret.service_principal_key.key,
        sparkConfPrefix: "fs.adl",
    }});
    
    import pulumi
    import pulumi_databricks as databricks
    
    mount = databricks.Mount("mount", adl=databricks.MountAdlArgs(
        storage_resource_name="{env.TEST_STORAGE_ACCOUNT_NAME}",
        tenant_id=data["azurerm_client_config"]["current"]["tenant_id"],
        client_id=data["azurerm_client_config"]["current"]["client_id"],
        client_secret_scope=databricks_secret_scope["terraform"]["name"],
        client_secret_key=databricks_secret["service_principal_key"]["key"],
        spark_conf_prefix="fs.adl",
    ))
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Databricks = Pulumi.Databricks;
    
    return await Deployment.RunAsync(() => 
    {
        var mount = new Databricks.Mount("mount", new()
        {
            Adl = new Databricks.Inputs.MountAdlArgs
            {
                StorageResourceName = "{env.TEST_STORAGE_ACCOUNT_NAME}",
                TenantId = data.Azurerm_client_config.Current.Tenant_id,
                ClientId = data.Azurerm_client_config.Current.Client_id,
                ClientSecretScope = databricks_secret_scope.Terraform.Name,
                ClientSecretKey = databricks_secret.Service_principal_key.Key,
                SparkConfPrefix = "fs.adl",
            },
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := databricks.NewMount(ctx, "mount", &databricks.MountArgs{
    			Adl: &databricks.MountAdlArgs{
    				StorageResourceName: pulumi.String("{env.TEST_STORAGE_ACCOUNT_NAME}"),
    				TenantId:            pulumi.Any(data.Azurerm_client_config.Current.Tenant_id),
    				ClientId:            pulumi.Any(data.Azurerm_client_config.Current.Client_id),
    				ClientSecretScope:   pulumi.Any(databricks_secret_scope.Terraform.Name),
    				ClientSecretKey:     pulumi.Any(databricks_secret.Service_principal_key.Key),
    				SparkConfPrefix:     pulumi.String("fs.adl"),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.databricks.Mount;
    import com.pulumi.databricks.MountArgs;
    import com.pulumi.databricks.inputs.MountAdlArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var mount = new Mount("mount", MountArgs.builder()        
                .adl(MountAdlArgs.builder()
                    .storageResourceName("{env.TEST_STORAGE_ACCOUNT_NAME}")
                    .tenantId(data.azurerm_client_config().current().tenant_id())
                    .clientId(data.azurerm_client_config().current().client_id())
                    .clientSecretScope(databricks_secret_scope.terraform().name())
                    .clientSecretKey(databricks_secret.service_principal_key().key())
                    .sparkConfPrefix("fs.adl")
                    .build())
                .build());
    
        }
    }
    
    resources:
      mount:
        type: databricks:Mount
        properties:
          adl:
            storageResourceName: '{env.TEST_STORAGE_ACCOUNT_NAME}'
            tenantId: ${data.azurerm_client_config.current.tenant_id}
            clientId: ${data.azurerm_client_config.current.client_id}
            clientSecretScope: ${databricks_secret_scope.terraform.name}
            clientSecretKey: ${databricks_secret.service_principal_key.key}
            sparkConfPrefix: fs.adl
    

    wasb block

    This block allows specifying parameters for mounting of Azure Blob Storage. The following arguments are required inside the wasb block:

    • auth_type - (Required) (String) This is the auth type for blob storage. This can either be SAS tokens (SAS) or account access keys (ACCESS_KEY).
    • token_secret_scope - (Required) (String) This is the secret scope in which your auth type token is stored.
    • token_secret_key - (Required) (String) This is the secret key in which your auth type token is stored.
    • container_name - (Required) (String) The container in which the data is. This is what you are trying to mount. (Could be omitted if resource_id is provided)
    • storage_account_name - (Required) (String) The name of the storage resource in which the data is. (Could be omitted if resource_id is provided)
    • directory - (Computed) (String) Optional directory inside the container to mount; if specified, it must start with a "/".

    Example mounting Azure Blob Storage

    import * as pulumi from "@pulumi/pulumi";
    import * as azure from "@pulumi/azure";
    import * as databricks from "@pulumi/databricks";
    
    const blobaccount = new azure.storage.Account("blobaccount", {
        resourceGroupName: _var.resource_group_name,
        location: _var.resource_group_location,
        accountTier: "Standard",
        accountReplicationType: "LRS",
        accountKind: "StorageV2",
    });
    const marketingContainer = new azure.storage.Container("marketingContainer", {
        storageAccountName: blobaccount.name,
        containerAccessType: "private",
    });
    const terraform = new databricks.SecretScope("terraform", {initialManagePrincipal: "users"});
    const storageKey = new databricks.Secret("storageKey", {
        key: "blob_storage_key",
        stringValue: blobaccount.primaryAccessKey,
        scope: terraform.name,
    });
    const marketingMount = new databricks.Mount("marketingMount", {wasb: {
        containerName: marketingContainer.name,
        storageAccountName: blobaccount.name,
        authType: "ACCESS_KEY",
        tokenSecretScope: terraform.name,
        tokenSecretKey: storageKey.key,
    }});
    
    import pulumi
    import pulumi_azure as azure
    import pulumi_databricks as databricks
    
    blobaccount = azure.storage.Account("blobaccount",
        resource_group_name=var["resource_group_name"],
        location=var["resource_group_location"],
        account_tier="Standard",
        account_replication_type="LRS",
        account_kind="StorageV2")
    marketing_container = azure.storage.Container("marketingContainer",
        storage_account_name=blobaccount.name,
        container_access_type="private")
    terraform = databricks.SecretScope("terraform", initial_manage_principal="users")
    storage_key = databricks.Secret("storageKey",
        key="blob_storage_key",
        string_value=blobaccount.primary_access_key,
        scope=terraform.name)
    marketing_mount = databricks.Mount("marketingMount", wasb=databricks.MountWasbArgs(
        container_name=marketing_container.name,
        storage_account_name=blobaccount.name,
        auth_type="ACCESS_KEY",
        token_secret_scope=terraform.name,
        token_secret_key=storage_key.key,
    ))
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Azure = Pulumi.Azure;
    using Databricks = Pulumi.Databricks;
    
    return await Deployment.RunAsync(() => 
    {
        var blobaccount = new Azure.Storage.Account("blobaccount", new()
        {
            ResourceGroupName = @var.Resource_group_name,
            Location = @var.Resource_group_location,
            AccountTier = "Standard",
            AccountReplicationType = "LRS",
            AccountKind = "StorageV2",
        });
    
        var marketingContainer = new Azure.Storage.Container("marketingContainer", new()
        {
            StorageAccountName = blobaccount.Name,
            ContainerAccessType = "private",
        });
    
        var terraform = new Databricks.SecretScope("terraform", new()
        {
            InitialManagePrincipal = "users",
        });
    
        var storageKey = new Databricks.Secret("storageKey", new()
        {
            Key = "blob_storage_key",
            StringValue = blobaccount.PrimaryAccessKey,
            Scope = terraform.Name,
        });
    
        var marketingMount = new Databricks.Mount("marketingMount", new()
        {
            Wasb = new Databricks.Inputs.MountWasbArgs
            {
                ContainerName = marketingContainer.Name,
                StorageAccountName = blobaccount.Name,
                AuthType = "ACCESS_KEY",
                TokenSecretScope = terraform.Name,
                TokenSecretKey = storageKey.Key,
            },
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-azure/sdk/v5/go/azure/storage"
    	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		blobaccount, err := storage.NewAccount(ctx, "blobaccount", &storage.AccountArgs{
    			ResourceGroupName:      pulumi.Any(_var.Resource_group_name),
    			Location:               pulumi.Any(_var.Resource_group_location),
    			AccountTier:            pulumi.String("Standard"),
    			AccountReplicationType: pulumi.String("LRS"),
    			AccountKind:            pulumi.String("StorageV2"),
    		})
    		if err != nil {
    			return err
    		}
    		marketingContainer, err := storage.NewContainer(ctx, "marketingContainer", &storage.ContainerArgs{
    			StorageAccountName:  blobaccount.Name,
    			ContainerAccessType: pulumi.String("private"),
    		})
    		if err != nil {
    			return err
    		}
    		terraform, err := databricks.NewSecretScope(ctx, "terraform", &databricks.SecretScopeArgs{
    			InitialManagePrincipal: pulumi.String("users"),
    		})
    		if err != nil {
    			return err
    		}
    		storageKey, err := databricks.NewSecret(ctx, "storageKey", &databricks.SecretArgs{
    			Key:         pulumi.String("blob_storage_key"),
    			StringValue: blobaccount.PrimaryAccessKey,
    			Scope:       terraform.Name,
    		})
    		if err != nil {
    			return err
    		}
    		_, err = databricks.NewMount(ctx, "marketingMount", &databricks.MountArgs{
    			Wasb: &databricks.MountWasbArgs{
    				ContainerName:      marketingContainer.Name,
    				StorageAccountName: blobaccount.Name,
    				AuthType:           pulumi.String("ACCESS_KEY"),
    				TokenSecretScope:   terraform.Name,
    				TokenSecretKey:     storageKey.Key,
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.azure.storage.Account;
    import com.pulumi.azure.storage.AccountArgs;
    import com.pulumi.azure.storage.Container;
    import com.pulumi.azure.storage.ContainerArgs;
    import com.pulumi.databricks.SecretScope;
    import com.pulumi.databricks.SecretScopeArgs;
    import com.pulumi.databricks.Secret;
    import com.pulumi.databricks.SecretArgs;
    import com.pulumi.databricks.Mount;
    import com.pulumi.databricks.MountArgs;
    import com.pulumi.databricks.inputs.MountWasbArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var blobaccount = new Account("blobaccount", AccountArgs.builder()        
                .resourceGroupName(var_.resource_group_name())
                .location(var_.resource_group_location())
                .accountTier("Standard")
                .accountReplicationType("LRS")
                .accountKind("StorageV2")
                .build());
    
            var marketingContainer = new Container("marketingContainer", ContainerArgs.builder()        
                .storageAccountName(blobaccount.name())
                .containerAccessType("private")
                .build());
    
            var terraform = new SecretScope("terraform", SecretScopeArgs.builder()        
                .initialManagePrincipal("users")
                .build());
    
            var storageKey = new Secret("storageKey", SecretArgs.builder()        
                .key("blob_storage_key")
                .stringValue(blobaccount.primaryAccessKey())
                .scope(terraform.name())
                .build());
    
            var marketingMount = new Mount("marketingMount", MountArgs.builder()        
                .wasb(MountWasbArgs.builder()
                    .containerName(marketingContainer.name())
                    .storageAccountName(blobaccount.name())
                    .authType("ACCESS_KEY")
                    .tokenSecretScope(terraform.name())
                    .tokenSecretKey(storageKey.key())
                    .build())
                .build());
    
        }
    }
    
    config:
      # Stack configuration values referenced below; the key names are placeholders.
      resourceGroupName:
        type: string
      resourceGroupLocation:
        type: string
    resources:
      blobaccount:
        type: azure:storage:Account
        properties:
          resourceGroupName: ${resourceGroupName}
          location: ${resourceGroupLocation}
          accountTier: Standard
          accountReplicationType: LRS
          accountKind: StorageV2
      marketingContainer:
        type: azure:storage:Container
        properties:
          storageAccountName: ${blobaccount.name}
          containerAccessType: private
      terraform:
        type: databricks:SecretScope
        properties:
          initialManagePrincipal: users
      storageKey:
        type: databricks:Secret
        properties:
          key: blob_storage_key
          stringValue: ${blobaccount.primaryAccessKey}
          scope: ${terraform.name}
      marketingMount:
        type: databricks:Mount
        properties:
          wasb:
            containerName: ${marketingContainer.name}
            storageAccountName: ${blobaccount.name}
            authType: ACCESS_KEY
            tokenSecretScope: ${terraform.name}
            tokenSecretKey: ${storageKey.key}
    

    Migration from other mount resources

    Migration from a storage-specific mount resource to the generic databricks.Mount is straightforward (see the sketch after this list):

    • rename mount_name to name
    • wrap storage-specific settings (container_name, …) into the corresponding block (adl, abfs, gs, s3, wasb)
    • for S3 mounts, rename s3_bucket_name to bucket_name
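
    As a rough illustration, a mount previously declared with a storage-specific Azure Blob mount resource maps onto the generic resource as shown in this minimal TypeScript sketch; the container, storage account, secret scope, and secret key names are placeholders mirroring the example above, not required values.

    import * as databricks from "@pulumi/databricks";

    // The old top-level mount_name becomes `name`, and the storage-specific
    // settings move into the corresponding block (here: `wasb`).
    const migrated = new databricks.Mount("migrated", {
        name: "marketing",                      // was: mount_name
        wasb: {
            containerName: "marketing",         // was: top-level container_name
            storageAccountName: "blobaccount",  // was: top-level storage_account_name
            authType: "ACCESS_KEY",
            tokenSecretScope: "terraform",      // placeholder secret scope name
            tokenSecretKey: "blob_storage_key", // placeholder secret key name
        },
    });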

    The following resources are often used in the same context:

    • End-to-end workspace management guide.
    • databricks.getAwsBucketPolicy data to configure a simple access policy for AWS S3 buckets, so that Databricks can access data in them.
    • databricks.Cluster to create Databricks Clusters.
    • databricks.getDbfsFile data to get file content from Databricks File System (DBFS).
    • databricks.getDbfsFilePaths data to get the list of file names from Databricks File System (DBFS).
    • databricks.DbfsFile to manage relatively small files on Databricks File System (DBFS).
    • databricks.InstanceProfile to manage AWS EC2 instance profiles that users can launch a databricks.Cluster with and use to access data, such as a databricks.Mount.
    • databricks.Library to install a library on a databricks.Cluster.

    Create Mount Resource

    new Mount(name: string, args?: MountArgs, opts?: CustomResourceOptions);
    @overload
    def Mount(resource_name: str,
              opts: Optional[ResourceOptions] = None,
              abfs: Optional[MountAbfsArgs] = None,
              adl: Optional[MountAdlArgs] = None,
              cluster_id: Optional[str] = None,
              encryption_type: Optional[str] = None,
              extra_configs: Optional[Mapping[str, Any]] = None,
              gs: Optional[MountGsArgs] = None,
              name: Optional[str] = None,
              resource_id: Optional[str] = None,
              s3: Optional[MountS3Args] = None,
              uri: Optional[str] = None,
              wasb: Optional[MountWasbArgs] = None)
    @overload
    def Mount(resource_name: str,
              args: Optional[MountArgs] = None,
              opts: Optional[ResourceOptions] = None)
    func NewMount(ctx *Context, name string, args *MountArgs, opts ...ResourceOption) (*Mount, error)
    public Mount(string name, MountArgs? args = null, CustomResourceOptions? opts = null)
    public Mount(String name, MountArgs args)
    public Mount(String name, MountArgs args, CustomResourceOptions options)
    
    type: databricks:Mount
    properties: # The arguments to resource properties.
    options: # Bag of options to control resource's behavior.
    
    
    name string
    The unique name of the resource.
    args MountArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    resource_name str
    The unique name of the resource.
    args MountArgs
    The arguments to resource properties.
    opts ResourceOptions
    Bag of options to control resource's behavior.
    ctx Context
    Context object for the current deployment.
    name string
    The unique name of the resource.
    args MountArgs
    The arguments to resource properties.
    opts ResourceOption
    Bag of options to control resource's behavior.
    name string
    The unique name of the resource.
    args MountArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    name String
    The unique name of the resource.
    args MountArgs
    The arguments to resource properties.
    options CustomResourceOptions
    Bag of options to control resource's behavior.
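
    For instance, the constructor shape above (a logical name, optional MountArgs, optional resource options) can be exercised with the following minimal TypeScript sketch; the cluster ID and bucket URI are placeholders, and protect is shown only as an example of a resource option.

    import * as databricks from "@pulumi/databricks";

    // name: logical resource name; args: MountArgs; opts: CustomResourceOptions.
    const rawMount = new databricks.Mount("rawMount", {
        name: "raw",                        // mounted under dbfs:/mnt/raw
        clusterId: "0123-456789-abcdefgh",  // placeholder cluster used for mounting
        uri: "s3a://some-example-bucket",   // placeholder storage URI
    }, {
        protect: true,                      // example resource option
    });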

    Mount Resource Properties

    To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

    Inputs

    The Mount resource accepts the following input properties:

    Abfs MountAbfs
    Adl MountAdl
    ClusterId string
    EncryptionType string
    ExtraConfigs Dictionary<string, object>
    Gs MountGs
    Name string
    ResourceId string
    S3 MountS3
    Uri string
    Wasb MountWasb

    Outputs

    All input properties are implicitly available as output properties. Additionally, the Mount resource produces the following output properties:

    Id string
    The provider-assigned unique ID for this managed resource.
    Source string
    (String) HDFS-compatible url
    Id string
    The provider-assigned unique ID for this managed resource.
    Source string
    (String) HDFS-compatible url
    id String
    The provider-assigned unique ID for this managed resource.
    source String
    (String) HDFS-compatible url
    id string
    The provider-assigned unique ID for this managed resource.
    source string
    (String) HDFS-compatible url
    id str
    The provider-assigned unique ID for this managed resource.
    source str
    (String) HDFS-compatible url
    id String
    The provider-assigned unique ID for this managed resource.
    source String
    (String) HDFS-compatible url
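
    Both outputs behave like any other Pulumi output. A small TypeScript sketch (the mount definition here is a placeholder):

    import * as databricks from "@pulumi/databricks";

    const example = new databricks.Mount("example", {
        uri: "s3a://some-example-bucket", // placeholder
    });

    // `source` is the HDFS-compatible URL backing the mount; `id` is the
    // provider-assigned unique ID for the managed resource.
    export const mountSource = example.source;
    export const mountId = example.id;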

    Look up Existing Mount Resource

    Get an existing Mount resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

    public static get(name: string, id: Input<ID>, state?: MountState, opts?: CustomResourceOptions): Mount
    @staticmethod
    def get(resource_name: str,
            id: str,
            opts: Optional[ResourceOptions] = None,
            abfs: Optional[MountAbfsArgs] = None,
            adl: Optional[MountAdlArgs] = None,
            cluster_id: Optional[str] = None,
            encryption_type: Optional[str] = None,
            extra_configs: Optional[Mapping[str, Any]] = None,
            gs: Optional[MountGsArgs] = None,
            name: Optional[str] = None,
            resource_id: Optional[str] = None,
            s3: Optional[MountS3Args] = None,
            source: Optional[str] = None,
            uri: Optional[str] = None,
            wasb: Optional[MountWasbArgs] = None) -> Mount
    func GetMount(ctx *Context, name string, id IDInput, state *MountState, opts ...ResourceOption) (*Mount, error)
    public static Mount Get(string name, Input<string> id, MountState? state, CustomResourceOptions? opts = null)
    public static Mount get(String name, Output<String> id, MountState state, CustomResourceOptions options)
    Resource lookup is not supported in YAML
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    resource_name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    The following state arguments are supported:
    Abfs MountAbfs
    Adl MountAdl
    ClusterId string
    EncryptionType string
    ExtraConfigs Dictionary<string, object>
    Gs MountGs
    Name string
    ResourceId string
    S3 MountS3
    Source string
    (String) HDFS-compatible url
    Uri string
    Wasb MountWasb
    Abfs MountAbfsArgs
    Adl MountAdlArgs
    ClusterId string
    EncryptionType string
    ExtraConfigs map[string]interface{}
    Gs MountGsArgs
    Name string
    ResourceId string
    S3 MountS3Args
    Source string
    (String) HDFS-compatible url
    Uri string
    Wasb MountWasbArgs
    abfs MountAbfs
    adl MountAdl
    clusterId String
    encryptionType String
    extraConfigs Map<String,Object>
    gs MountGs
    name String
    resourceId String
    s3 MountS3
    source String
    (String) HDFS-compatible url
    uri String
    wasb MountWasb
    abfs MountAbfs
    adl MountAdl
    clusterId string
    encryptionType string
    extraConfigs {[key: string]: any}
    gs MountGs
    name string
    resourceId string
    s3 MountS3
    source string
    (String) HDFS-compatible url
    uri string
    wasb MountWasb
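
    Putting the lookup signature together in TypeScript, a minimal sketch (the ID string is a placeholder for an existing mount's ID):

    import * as databricks from "@pulumi/databricks";

    // Look up an existing mount's state by ID without creating or modifying it.
    const existing = databricks.Mount.get("existing", "existing-mount-id");

    // State properties, e.g. the HDFS-compatible source URL, are then available.
    export const existingSource = existing.source;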

    Supporting Types

    MountAbfs, MountAbfsArgs

    MountAdl, MountAdlArgs

    MountGs, MountGsArgs

    MountS3, MountS3Args

    MountWasb, MountWasbArgs

    Import

    Note: Importing this resource is not currently supported.

    Package Details

    Repository
    databricks pulumi/pulumi-databricks
    License
    Apache-2.0
    Notes
    This Pulumi package is based on the databricks Terraform Provider.