databricks.MwsWorkspaces

Databricks v1.35.0 published on Friday, Mar 29, 2024 by Pulumi

    Example Usage

    Creating a Databricks on AWS workspace

    To get a workspace running, you have to configure a few things:

    • databricks.MwsCredentials - You can share a credentials configuration (cross-account IAM role) ID with multiple workspaces; you do not need to create a new one for each workspace.
    • databricks.MwsStorageConfigurations - You can share a root S3 bucket with multiple workspaces in a single account. You do not have to create new ones for each workspace. If you share a root S3 bucket for multiple workspaces in an account, data on the root S3 bucket is partitioned into separate directories by workspace.
    • databricks.MwsNetworks - (optional, but recommended) You can share one customer-managed VPC with multiple workspaces in a single account. You do not have to create a new VPC for each workspace. However, you cannot reuse subnets or security groups with other resources, including other workspaces or non-Databricks resources. If you plan to share one VPC with multiple workspaces, be sure to size your VPC and subnets accordingly. Because a databricks.MwsNetworks resource encapsulates this information, you cannot reuse it across workspaces.
    • databricks.MwsCustomerManagedKeys - You can share a customer-managed key across workspaces.
    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";
    
    const config = new pulumi.Config();
    const databricksAccountId = config.requireObject("databricksAccountId");
    const mws = new databricks.Provider("mws", {host: "https://accounts.cloud.databricks.com"});
    // register cross-account ARN
    const thisMwsCredentials = new databricks.MwsCredentials("thisMwsCredentials", {
        accountId: databricksAccountId,
        credentialsName: `${_var.prefix}-creds`,
        roleArn: _var.crossaccount_arn,
    }, {
        provider: mws,
    });
    // register root bucket
    const thisMwsStorageConfigurations = new databricks.MwsStorageConfigurations("thisMwsStorageConfigurations", {
        accountId: databricksAccountId,
        storageConfigurationName: `${_var.prefix}-storage`,
        bucketName: _var.root_bucket,
    }, {
        provider: mws,
    });
    // register VPC
    const thisMwsNetworks = new databricks.MwsNetworks("thisMwsNetworks", {
        accountId: databricksAccountId,
        networkName: `${_var.prefix}-network`,
        vpcId: _var.vpc_id,
        subnetIds: _var.subnets_private,
        securityGroupIds: [_var.security_group],
    }, {
        provider: mws,
    });
    // create workspace in given VPC with DBFS on root bucket
    const thisMwsWorkspaces = new databricks.MwsWorkspaces("thisMwsWorkspaces", {
        accountId: databricksAccountId,
        workspaceName: _var.prefix,
        awsRegion: _var.region,
        credentialsId: thisMwsCredentials.credentialsId,
        storageConfigurationId: thisMwsStorageConfigurations.storageConfigurationId,
        networkId: thisMwsNetworks.networkId,
        token: {},
    }, {
        provider: mws,
    });
    export const databricksToken = thisMwsWorkspaces.token.apply(token => token?.tokenValue);
    
    import pulumi
    import pulumi_databricks as databricks
    
    config = pulumi.Config()
    databricks_account_id = config.require_object("databricksAccountId")
    mws = databricks.Provider("mws", host="https://accounts.cloud.databricks.com")
    # register cross-account ARN
    this_mws_credentials = databricks.MwsCredentials("thisMwsCredentials",
        account_id=databricks_account_id,
        credentials_name=f"{var['prefix']}-creds",
        role_arn=var["crossaccount_arn"],
        opts=pulumi.ResourceOptions(provider=mws))
    # register root bucket
    this_mws_storage_configurations = databricks.MwsStorageConfigurations("thisMwsStorageConfigurations",
        account_id=databricks_account_id,
        storage_configuration_name=f"{var['prefix']}-storage",
        bucket_name=var["root_bucket"],
        opts=pulumi.ResourceOptions(provider=mws))
    # register VPC
    this_mws_networks = databricks.MwsNetworks("thisMwsNetworks",
        account_id=databricks_account_id,
        network_name=f"{var['prefix']}-network",
        vpc_id=var["vpc_id"],
        subnet_ids=var["subnets_private"],
        security_group_ids=[var["security_group"]],
        opts=pulumi.ResourceOptions(provider=mws))
    # create workspace in given VPC with DBFS on root bucket
    this_mws_workspaces = databricks.MwsWorkspaces("thisMwsWorkspaces",
        account_id=databricks_account_id,
        workspace_name=var["prefix"],
        aws_region=var["region"],
        credentials_id=this_mws_credentials.credentials_id,
        storage_configuration_id=this_mws_storage_configurations.storage_configuration_id,
        network_id=this_mws_networks.network_id,
        token=databricks.MwsWorkspacesTokenArgs(),
        opts=pulumi.ResourceOptions(provider=mws))
    pulumi.export("databricksToken", this_mws_workspaces.token.token_value)
    
    package main
    
    import (
    	"fmt"
    
    	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi/config"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		cfg := config.New(ctx, "")
    		databricksAccountId := cfg.RequireObject("databricksAccountId")
    		mws, err := databricks.NewProvider(ctx, "mws", &databricks.ProviderArgs{
    			Host: pulumi.String("https://accounts.cloud.databricks.com"),
    		})
    		if err != nil {
    			return err
    		}
    		// register cross-account ARN
    		thisMwsCredentials, err := databricks.NewMwsCredentials(ctx, "thisMwsCredentials", &databricks.MwsCredentialsArgs{
    			AccountId:       pulumi.Any(databricksAccountId),
    			CredentialsName: pulumi.String(fmt.Sprintf("%v-creds", _var.Prefix)),
    			RoleArn:         pulumi.Any(_var.Crossaccount_arn),
    		}, pulumi.Provider(mws))
    		if err != nil {
    			return err
    		}
    		// register root bucket
    		thisMwsStorageConfigurations, err := databricks.NewMwsStorageConfigurations(ctx, "thisMwsStorageConfigurations", &databricks.MwsStorageConfigurationsArgs{
    			AccountId:                pulumi.Any(databricksAccountId),
    			StorageConfigurationName: pulumi.String(fmt.Sprintf("%v-storage", _var.Prefix)),
    			BucketName:               pulumi.Any(_var.Root_bucket),
    		}, pulumi.Provider(mws))
    		if err != nil {
    			return err
    		}
    		// register VPC
    		thisMwsNetworks, err := databricks.NewMwsNetworks(ctx, "thisMwsNetworks", &databricks.MwsNetworksArgs{
    			AccountId:   pulumi.Any(databricksAccountId),
    			NetworkName: pulumi.String(fmt.Sprintf("%v-network", _var.Prefix)),
    			VpcId:       pulumi.Any(_var.Vpc_id),
    			SubnetIds:   pulumi.Any(_var.Subnets_private),
    			SecurityGroupIds: pulumi.StringArray{
    				_var.Security_group,
    			},
    		}, pulumi.Provider(mws))
    		if err != nil {
    			return err
    		}
    		// create workspace in given VPC with DBFS on root bucket
    		thisMwsWorkspaces, err := databricks.NewMwsWorkspaces(ctx, "thisMwsWorkspaces", &databricks.MwsWorkspacesArgs{
    			AccountId:              pulumi.Any(databricksAccountId),
    			WorkspaceName:          pulumi.Any(_var.Prefix),
    			AwsRegion:              pulumi.Any(_var.Region),
    			CredentialsId:          thisMwsCredentials.CredentialsId,
    			StorageConfigurationId: thisMwsStorageConfigurations.StorageConfigurationId,
    			NetworkId:              thisMwsNetworks.NetworkId,
    			Token:                  nil,
    		}, pulumi.Provider(mws))
    		if err != nil {
    			return err
    		}
    		ctx.Export("databricksToken", thisMwsWorkspaces.Token.ApplyT(func(token databricks.MwsWorkspacesToken) (*string, error) {
    			return &token.TokenValue, nil
    		}).(pulumi.StringPtrOutput))
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Databricks = Pulumi.Databricks;
    
    return await Deployment.RunAsync(() => 
    {
        var config = new Config();
        var databricksAccountId = config.RequireObject<dynamic>("databricksAccountId");
        var mws = new Databricks.Provider("mws", new()
        {
            Host = "https://accounts.cloud.databricks.com",
        });
    
        // register cross-account ARN
        var thisMwsCredentials = new Databricks.MwsCredentials("thisMwsCredentials", new()
        {
            AccountId = databricksAccountId,
            CredentialsName = $"{@var.Prefix}-creds",
            RoleArn = @var.Crossaccount_arn,
        }, new CustomResourceOptions
        {
            Provider = mws,
        });
    
        // register root bucket
        var thisMwsStorageConfigurations = new Databricks.MwsStorageConfigurations("thisMwsStorageConfigurations", new()
        {
            AccountId = databricksAccountId,
            StorageConfigurationName = $"{@var.Prefix}-storage",
            BucketName = @var.Root_bucket,
        }, new CustomResourceOptions
        {
            Provider = mws,
        });
    
        // register VPC
        var thisMwsNetworks = new Databricks.MwsNetworks("thisMwsNetworks", new()
        {
            AccountId = databricksAccountId,
            NetworkName = $"{@var.Prefix}-network",
            VpcId = @var.Vpc_id,
            SubnetIds = @var.Subnets_private,
            SecurityGroupIds = new[]
            {
                @var.Security_group,
            },
        }, new CustomResourceOptions
        {
            Provider = mws,
        });
    
        // create workspace in given VPC with DBFS on root bucket
        var thisMwsWorkspaces = new Databricks.MwsWorkspaces("thisMwsWorkspaces", new()
        {
            AccountId = databricksAccountId,
            WorkspaceName = @var.Prefix,
            AwsRegion = @var.Region,
            CredentialsId = thisMwsCredentials.CredentialsId,
            StorageConfigurationId = thisMwsStorageConfigurations.StorageConfigurationId,
            NetworkId = thisMwsNetworks.NetworkId,
            Token = null,
        }, new CustomResourceOptions
        {
            Provider = mws,
        });
    
        return new Dictionary<string, object?>
        {
            ["databricksToken"] = thisMwsWorkspaces.Token.Apply(token => token?.TokenValue),
        };
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.databricks.Provider;
    import com.pulumi.databricks.ProviderArgs;
    import com.pulumi.databricks.MwsCredentials;
    import com.pulumi.databricks.MwsCredentialsArgs;
    import com.pulumi.databricks.MwsStorageConfigurations;
    import com.pulumi.databricks.MwsStorageConfigurationsArgs;
    import com.pulumi.databricks.MwsNetworks;
    import com.pulumi.databricks.MwsNetworksArgs;
    import com.pulumi.databricks.MwsWorkspaces;
    import com.pulumi.databricks.MwsWorkspacesArgs;
    import com.pulumi.databricks.inputs.MwsWorkspacesTokenArgs;
    import com.pulumi.resources.CustomResourceOptions;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            final var config = ctx.config();
            final var databricksAccountId = config.get("databricksAccountId");
            var mws = new Provider("mws", ProviderArgs.builder()        
                .host("https://accounts.cloud.databricks.com")
                .build());
    
            var thisMwsCredentials = new MwsCredentials("thisMwsCredentials", MwsCredentialsArgs.builder()        
                .accountId(databricksAccountId)
                .credentialsName(String.format("%s-creds", var_.prefix()))
                .roleArn(var_.crossaccount_arn())
                .build(), CustomResourceOptions.builder()
                    .provider(mws)
                    .build());
    
            var thisMwsStorageConfigurations = new MwsStorageConfigurations("thisMwsStorageConfigurations", MwsStorageConfigurationsArgs.builder()        
                .accountId(databricksAccountId)
                .storageConfigurationName(String.format("%s-storage", var_.prefix()))
                .bucketName(var_.root_bucket())
                .build(), CustomResourceOptions.builder()
                    .provider(mws)
                    .build());
    
            var thisMwsNetworks = new MwsNetworks("thisMwsNetworks", MwsNetworksArgs.builder()        
                .accountId(databricksAccountId)
                .networkName(String.format("%s-network", var_.prefix()))
                .vpcId(var_.vpc_id())
                .subnetIds(var_.subnets_private())
                .securityGroupIds(var_.security_group())
                .build(), CustomResourceOptions.builder()
                    .provider(mws)
                    .build());
    
            var thisMwsWorkspaces = new MwsWorkspaces("thisMwsWorkspaces", MwsWorkspacesArgs.builder()        
                .accountId(databricksAccountId)
                .workspaceName(var_.prefix())
                .awsRegion(var_.region())
                .credentialsId(thisMwsCredentials.credentialsId())
                .storageConfigurationId(thisMwsStorageConfigurations.storageConfigurationId())
                .networkId(thisMwsNetworks.networkId())
                .token()
                .build(), CustomResourceOptions.builder()
                    .provider(mws)
                    .build());
    
            ctx.export("databricksToken", thisMwsWorkspaces.token().applyValue(token -> token.tokenValue()));
        }
    }
    
    configuration:
      databricksAccountId:
        type: dynamic
    resources:
      mws:
        type: pulumi:providers:databricks
        properties:
          host: https://accounts.cloud.databricks.com
      # register cross-account ARN
      thisMwsCredentials:
        type: databricks:MwsCredentials
        properties:
          accountId: ${databricksAccountId}
          credentialsName: ${var.prefix}-creds
          roleArn: ${var.crossaccount_arn}
        options:
          provider: ${mws}
      # register root bucket
      thisMwsStorageConfigurations:
        type: databricks:MwsStorageConfigurations
        properties:
          accountId: ${databricksAccountId}
          storageConfigurationName: ${var.prefix}-storage
          bucketName: ${var.root_bucket}
        options:
          provider: ${mws}
      # register VPC
      thisMwsNetworks:
        type: databricks:MwsNetworks
        properties:
          accountId: ${databricksAccountId}
          networkName: ${var.prefix}-network
          vpcId: ${var.vpc_id}
          subnetIds: ${var.subnets_private}
          securityGroupIds:
            - ${var.security_group}
        options:
          provider: ${mws}
      # create workspace in given VPC with DBFS on root bucket
      thisMwsWorkspaces:
        type: databricks:MwsWorkspaces
        properties:
          accountId: ${databricksAccountId}
          workspaceName: ${var.prefix}
          awsRegion: ${var.region}
          credentialsId: ${thisMwsCredentials.credentialsId}
          storageConfigurationId: ${thisMwsStorageConfigurations.storageConfigurationId}
          networkId: ${thisMwsNetworks.networkId}
          token: {}
        options:
          provider: ${mws}
    outputs:
      databricksToken: ${thisMwsWorkspaces.token.tokenValue}
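
    As noted in the list above, the credentials and storage configuration are account-level objects that may be shared by several workspaces, while a databricks.MwsNetworks registration may not. The following TypeScript sketch continues the program above to illustrate that reuse; the second workspace, its devPrefix name, and the secondMwsNetworks registration are hypothetical placeholders rather than part of the example.

    // Hypothetical second workspace that reuses the shared account-level objects.
    // Only the network registration has to be created anew, because a
    // databricks.MwsNetworks registration cannot be shared across workspaces.
    const devMwsWorkspaces = new databricks.MwsWorkspaces("devMwsWorkspaces", {
        accountId: databricksAccountId,
        workspaceName: `${devPrefix}-dev`,
        awsRegion: _var.region,
        credentialsId: thisMwsCredentials.credentialsId,
        storageConfigurationId: thisMwsStorageConfigurations.storageConfigurationId,
        networkId: secondMwsNetworks.networkId,
        token: {},
    }, {
        provider: mws,
    });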
    

    Creating a Databricks on AWS workspace with Databricks-Managed VPC

    VPCs

    By default, Databricks creates a VPC in your AWS account for each workspace and uses it to run that workspace's clusters. Optionally, you can supply your own VPC for the workspace using the customer-managed VPC feature. Databricks recommends registering your own VPC with databricks.MwsNetworks so that you can configure it according to your organization's enterprise cloud standards while still conforming to Databricks requirements. You cannot migrate an existing workspace to your own VPC. See the difference between the two setups, described through IAM policy actions, on this page.

    import * as pulumi from "@pulumi/pulumi";
    import * as aws from "@pulumi/aws";
    import * as databricks from "@pulumi/databricks";
    import * as random from "@pulumi/random";
    
    const config = new pulumi.Config();
    const databricksAccountId = config.requireObject("databricksAccountId");
    const naming = new random.RandomString("naming", {
        special: false,
        upper: false,
        length: 6,
    });
    const prefix = pulumi.interpolate`dltp${naming.result}`;
    const thisAwsAssumeRolePolicy = databricks.getAwsAssumeRolePolicy({
        externalId: databricksAccountId,
    });
    const crossAccountRole = new aws.iam.Role("crossAccountRole", {
        assumeRolePolicy: thisAwsAssumeRolePolicy.then(thisAwsAssumeRolePolicy => thisAwsAssumeRolePolicy.json),
        tags: _var.tags,
    });
    const thisAwsCrossAccountPolicy = databricks.getAwsCrossAccountPolicy({});
    const thisRolePolicy = new aws.iam.RolePolicy("thisRolePolicy", {
        role: crossAccountRole.id,
        policy: thisAwsCrossAccountPolicy.then(thisAwsCrossAccountPolicy => thisAwsCrossAccountPolicy.json),
    });
    const thisMwsCredentials = new databricks.MwsCredentials("thisMwsCredentials", {
        accountId: databricksAccountId,
        credentialsName: `${prefix}-creds`,
        roleArn: crossAccountRole.arn,
    }, {
        provider: databricks.mws,
    });
    const rootStorageBucketBucketV2 = new aws.s3.BucketV2("rootStorageBucketBucketV2", {
        acl: "private",
        forceDestroy: true,
        tags: _var.tags,
    });
    const rootVersioning = new aws.s3.BucketVersioningV2("rootVersioning", {
        bucket: rootStorageBucketBucketV2.id,
        versioningConfiguration: {
            status: "Disabled",
        },
    });
    const rootStorageBucketBucketServerSideEncryptionConfigurationV2 = new aws.s3.BucketServerSideEncryptionConfigurationV2("rootStorageBucketBucketServerSideEncryptionConfigurationV2", {
        bucket: rootStorageBucketBucketV2.bucket,
        rules: [{
            applyServerSideEncryptionByDefault: {
                sseAlgorithm: "AES256",
            },
        }],
    });
    const rootStorageBucketBucketPublicAccessBlock = new aws.s3.BucketPublicAccessBlock("rootStorageBucketBucketPublicAccessBlock", {
        bucket: rootStorageBucketBucketV2.id,
        blockPublicAcls: true,
        blockPublicPolicy: true,
        ignorePublicAcls: true,
        restrictPublicBuckets: true,
    }, {
        dependsOn: [rootStorageBucketBucketV2],
    });
    const thisAwsBucketPolicy = databricks.getAwsBucketPolicyOutput({
        bucket: rootStorageBucketBucketV2.bucket,
    });
    const rootBucketPolicy = new aws.s3.BucketPolicy("rootBucketPolicy", {
        bucket: rootStorageBucketBucketV2.id,
        policy: thisAwsBucketPolicy.apply(thisAwsBucketPolicy => thisAwsBucketPolicy.json),
    }, {
        dependsOn: [rootStorageBucketBucketPublicAccessBlock],
    });
    const thisMwsStorageConfigurations = new databricks.MwsStorageConfigurations("thisMwsStorageConfigurations", {
        accountId: databricksAccountId,
        storageConfigurationName: `${prefix}-storage`,
        bucketName: rootStorageBucketBucketV2.bucket,
    }, {
        provider: databricks.mws,
    });
    const thisMwsWorkspaces = new databricks.MwsWorkspaces("thisMwsWorkspaces", {
        accountId: databricksAccountId,
        workspaceName: prefix,
        awsRegion: "us-east-1",
        credentialsId: thisMwsCredentials.credentialsId,
        storageConfigurationId: thisMwsStorageConfigurations.storageConfigurationId,
        token: {},
        customTags: {
            SoldToCode: "1234",
        },
    }, {
        provider: databricks.mws,
    });
    export const databricksToken = thisMwsWorkspaces.token.apply(token => token?.tokenValue);
    
    import pulumi
    import pulumi_aws as aws
    import pulumi_databricks as databricks
    import pulumi_random as random
    
    config = pulumi.Config()
    databricks_account_id = config.require_object("databricksAccountId")
    naming = random.RandomString("naming",
        special=False,
        upper=False,
        length=6)
    prefix = naming.result.apply(lambda result: f"dltp{result}")
    this_aws_assume_role_policy = databricks.get_aws_assume_role_policy(external_id=databricks_account_id)
    cross_account_role = aws.iam.Role("crossAccountRole",
        assume_role_policy=this_aws_assume_role_policy.json,
        tags=var["tags"])
    this_aws_cross_account_policy = databricks.get_aws_cross_account_policy()
    this_role_policy = aws.iam.RolePolicy("thisRolePolicy",
        role=cross_account_role.id,
        policy=this_aws_cross_account_policy.json)
    this_mws_credentials = databricks.MwsCredentials("thisMwsCredentials",
        account_id=databricks_account_id,
        credentials_name=f"{prefix}-creds",
        role_arn=cross_account_role.arn,
        opts=pulumi.ResourceOptions(provider=databricks["mws"]))
    root_storage_bucket_bucket_v2 = aws.s3.BucketV2("rootStorageBucketBucketV2",
        acl="private",
        force_destroy=True,
        tags=var["tags"])
    root_versioning = aws.s3.BucketVersioningV2("rootVersioning",
        bucket=root_storage_bucket_bucket_v2.id,
        versioning_configuration=aws.s3.BucketVersioningV2VersioningConfigurationArgs(
            status="Disabled",
        ))
    root_storage_bucket_bucket_server_side_encryption_configuration_v2 = aws.s3.BucketServerSideEncryptionConfigurationV2("rootStorageBucketBucketServerSideEncryptionConfigurationV2",
        bucket=root_storage_bucket_bucket_v2.bucket,
        rules=[aws.s3.BucketServerSideEncryptionConfigurationV2RuleArgs(
            apply_server_side_encryption_by_default=aws.s3.BucketServerSideEncryptionConfigurationV2RuleApplyServerSideEncryptionByDefaultArgs(
                sse_algorithm="AES256",
            ),
        )])
    root_storage_bucket_bucket_public_access_block = aws.s3.BucketPublicAccessBlock("rootStorageBucketBucketPublicAccessBlock",
        bucket=root_storage_bucket_bucket_v2.id,
        block_public_acls=True,
        block_public_policy=True,
        ignore_public_acls=True,
        restrict_public_buckets=True,
        opts=pulumi.ResourceOptions(depends_on=[root_storage_bucket_bucket_v2]))
    this_aws_bucket_policy = databricks.get_aws_bucket_policy_output(bucket=root_storage_bucket_bucket_v2.bucket)
    root_bucket_policy = aws.s3.BucketPolicy("rootBucketPolicy",
        bucket=root_storage_bucket_bucket_v2.id,
        policy=this_aws_bucket_policy.json,
        opts=pulumi.ResourceOptions(depends_on=[root_storage_bucket_bucket_public_access_block]))
    this_mws_storage_configurations = databricks.MwsStorageConfigurations("thisMwsStorageConfigurations",
        account_id=databricks_account_id,
        storage_configuration_name=f"{prefix}-storage",
        bucket_name=root_storage_bucket_bucket_v2.bucket,
        opts=pulumi.ResourceOptions(provider=databricks["mws"]))
    this_mws_workspaces = databricks.MwsWorkspaces("thisMwsWorkspaces",
        account_id=databricks_account_id,
        workspace_name=prefix,
        aws_region="us-east-1",
        credentials_id=this_mws_credentials.credentials_id,
        storage_configuration_id=this_mws_storage_configurations.storage_configuration_id,
        token=databricks.MwsWorkspacesTokenArgs(),
        custom_tags={
            "SoldToCode": "1234",
        },
        opts=pulumi.ResourceOptions(provider=databricks["mws"]))
    pulumi.export("databricksToken", this_mws_workspaces.token.token_value)
    
    package main
    
    import (
    	"fmt"
    
    	"github.com/pulumi/pulumi-aws/sdk/v5/go/aws/iam"
    	"github.com/pulumi/pulumi-aws/sdk/v5/go/aws/s3"
    	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
    	"github.com/pulumi/pulumi-random/sdk/v4/go/random"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi/config"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		cfg := config.New(ctx, "")
    		databricksAccountId := cfg.RequireObject("databricksAccountId")
    		naming, err := random.NewRandomString(ctx, "naming", &random.RandomStringArgs{
    			Special: pulumi.Bool(false),
    			Upper:   pulumi.Bool(false),
    			Length:  pulumi.Int(6),
    		})
    		if err != nil {
    			return err
    		}
    		prefix := naming.Result.ApplyT(func(result string) (string, error) {
    			return fmt.Sprintf("dltp%v", result), nil
    		}).(pulumi.StringOutput)
    		thisAwsAssumeRolePolicy, err := databricks.GetAwsAssumeRolePolicy(ctx, &databricks.GetAwsAssumeRolePolicyArgs{
    			ExternalId: databricksAccountId,
    		}, nil)
    		if err != nil {
    			return err
    		}
    		crossAccountRole, err := iam.NewRole(ctx, "crossAccountRole", &iam.RoleArgs{
    			AssumeRolePolicy: pulumi.String(thisAwsAssumeRolePolicy.Json),
    			Tags:             pulumi.Any(_var.Tags),
    		})
    		if err != nil {
    			return err
    		}
    		thisAwsCrossAccountPolicy, err := databricks.GetAwsCrossAccountPolicy(ctx, nil, nil)
    		if err != nil {
    			return err
    		}
    		_, err = iam.NewRolePolicy(ctx, "thisRolePolicy", &iam.RolePolicyArgs{
    			Role:   crossAccountRole.ID(),
    			Policy: pulumi.String(thisAwsCrossAccountPolicy.Json),
    		})
    		if err != nil {
    			return err
    		}
    		thisMwsCredentials, err := databricks.NewMwsCredentials(ctx, "thisMwsCredentials", &databricks.MwsCredentialsArgs{
    			AccountId:       pulumi.Any(databricksAccountId),
    			CredentialsName: pulumi.String(fmt.Sprintf("%v-creds", prefix)),
    			RoleArn:         crossAccountRole.Arn,
    		}, pulumi.Provider(databricks.Mws))
    		if err != nil {
    			return err
    		}
    		rootStorageBucketBucketV2, err := s3.NewBucketV2(ctx, "rootStorageBucketBucketV2", &s3.BucketV2Args{
    			Acl:          pulumi.String("private"),
    			ForceDestroy: pulumi.Bool(true),
    			Tags:         pulumi.Any(_var.Tags),
    		})
    		if err != nil {
    			return err
    		}
    		_, err = s3.NewBucketVersioningV2(ctx, "rootVersioning", &s3.BucketVersioningV2Args{
    			Bucket: rootStorageBucketBucketV2.ID(),
    			VersioningConfiguration: &s3.BucketVersioningV2VersioningConfigurationArgs{
    				Status: pulumi.String("Disabled"),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		_, err = s3.NewBucketServerSideEncryptionConfigurationV2(ctx, "rootStorageBucketBucketServerSideEncryptionConfigurationV2", &s3.BucketServerSideEncryptionConfigurationV2Args{
    			Bucket: rootStorageBucketBucketV2.Bucket,
    			Rules: s3.BucketServerSideEncryptionConfigurationV2RuleArray{
    				&s3.BucketServerSideEncryptionConfigurationV2RuleArgs{
    					ApplyServerSideEncryptionByDefault: &s3.BucketServerSideEncryptionConfigurationV2RuleApplyServerSideEncryptionByDefaultArgs{
    						SseAlgorithm: pulumi.String("AES256"),
    					},
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		rootStorageBucketBucketPublicAccessBlock, err := s3.NewBucketPublicAccessBlock(ctx, "rootStorageBucketBucketPublicAccessBlock", &s3.BucketPublicAccessBlockArgs{
    			Bucket:                rootStorageBucketBucketV2.ID(),
    			BlockPublicAcls:       pulumi.Bool(true),
    			BlockPublicPolicy:     pulumi.Bool(true),
    			IgnorePublicAcls:      pulumi.Bool(true),
    			RestrictPublicBuckets: pulumi.Bool(true),
    		}, pulumi.DependsOn([]pulumi.Resource{
    			rootStorageBucketBucketV2,
    		}))
    		if err != nil {
    			return err
    		}
    		thisAwsBucketPolicy := databricks.GetAwsBucketPolicyOutput(ctx, databricks.GetAwsBucketPolicyOutputArgs{
    			Bucket: rootStorageBucketBucketV2.Bucket,
    		}, nil)
    		_, err = s3.NewBucketPolicy(ctx, "rootBucketPolicy", &s3.BucketPolicyArgs{
    			Bucket: rootStorageBucketBucketV2.ID(),
    			Policy: thisAwsBucketPolicy.ApplyT(func(thisAwsBucketPolicy databricks.GetAwsBucketPolicyResult) (*string, error) {
    				return &thisAwsBucketPolicy.Json, nil
    			}).(pulumi.StringPtrOutput),
    		}, pulumi.DependsOn([]pulumi.Resource{
    			rootStorageBucketBucketPublicAccessBlock,
    		}))
    		if err != nil {
    			return err
    		}
    		thisMwsStorageConfigurations, err := databricks.NewMwsStorageConfigurations(ctx, "thisMwsStorageConfigurations", &databricks.MwsStorageConfigurationsArgs{
    			AccountId:                pulumi.Any(databricksAccountId),
    			StorageConfigurationName: pulumi.String(fmt.Sprintf("%v-storage", prefix)),
    			BucketName:               rootStorageBucketBucketV2.Bucket,
    		}, pulumi.Provider(databricks.Mws))
    		if err != nil {
    			return err
    		}
    		thisMwsWorkspaces, err := databricks.NewMwsWorkspaces(ctx, "thisMwsWorkspaces", &databricks.MwsWorkspacesArgs{
    			AccountId:              pulumi.Any(databricksAccountId),
    			WorkspaceName:          pulumi.String(prefix),
    			AwsRegion:              pulumi.String("us-east-1"),
    			CredentialsId:          thisMwsCredentials.CredentialsId,
    			StorageConfigurationId: thisMwsStorageConfigurations.StorageConfigurationId,
    			Token:                  nil,
    			CustomTags: pulumi.Map{
    				"SoldToCode": pulumi.Any("1234"),
    			},
    		}, pulumi.Provider(databricks.Mws))
    		if err != nil {
    			return err
    		}
    		ctx.Export("databricksToken", thisMwsWorkspaces.Token.ApplyT(func(token databricks.MwsWorkspacesToken) (*string, error) {
    			return &token.TokenValue, nil
    		}).(pulumi.StringPtrOutput))
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Aws = Pulumi.Aws;
    using Databricks = Pulumi.Databricks;
    using Random = Pulumi.Random;
    
    return await Deployment.RunAsync(() => 
    {
        var config = new Config();
        var databricksAccountId = config.RequireObject<dynamic>("databricksAccountId");
        var naming = new Random.RandomString("naming", new()
        {
            Special = false,
            Upper = false,
            Length = 6,
        });
    
        var prefix = naming.Result.Apply(result => $"dltp{result}");
    
        var thisAwsAssumeRolePolicy = Databricks.GetAwsAssumeRolePolicy.Invoke(new()
        {
            ExternalId = databricksAccountId,
        });
    
        var crossAccountRole = new Aws.Iam.Role("crossAccountRole", new()
        {
            AssumeRolePolicy = thisAwsAssumeRolePolicy.Apply(getAwsAssumeRolePolicyResult => getAwsAssumeRolePolicyResult.Json),
            Tags = @var.Tags,
        });
    
        var thisAwsCrossAccountPolicy = Databricks.GetAwsCrossAccountPolicy.Invoke();
    
        var thisRolePolicy = new Aws.Iam.RolePolicy("thisRolePolicy", new()
        {
            Role = crossAccountRole.Id,
            Policy = thisAwsCrossAccountPolicy.Apply(getAwsCrossAccountPolicyResult => getAwsCrossAccountPolicyResult.Json),
        });
    
        var thisMwsCredentials = new Databricks.MwsCredentials("thisMwsCredentials", new()
        {
            AccountId = databricksAccountId,
            CredentialsName = $"{prefix}-creds",
            RoleArn = crossAccountRole.Arn,
        }, new CustomResourceOptions
        {
            Provider = databricks.Mws,
        });
    
        var rootStorageBucketBucketV2 = new Aws.S3.BucketV2("rootStorageBucketBucketV2", new()
        {
            Acl = "private",
            ForceDestroy = true,
            Tags = @var.Tags,
        });
    
        var rootVersioning = new Aws.S3.BucketVersioningV2("rootVersioning", new()
        {
            Bucket = rootStorageBucketBucketV2.Id,
            VersioningConfiguration = new Aws.S3.Inputs.BucketVersioningV2VersioningConfigurationArgs
            {
                Status = "Disabled",
            },
        });
    
        var rootStorageBucketBucketServerSideEncryptionConfigurationV2 = new Aws.S3.BucketServerSideEncryptionConfigurationV2("rootStorageBucketBucketServerSideEncryptionConfigurationV2", new()
        {
            Bucket = rootStorageBucketBucketV2.Bucket,
            Rules = new[]
            {
                new Aws.S3.Inputs.BucketServerSideEncryptionConfigurationV2RuleArgs
                {
                    ApplyServerSideEncryptionByDefault = new Aws.S3.Inputs.BucketServerSideEncryptionConfigurationV2RuleApplyServerSideEncryptionByDefaultArgs
                    {
                        SseAlgorithm = "AES256",
                    },
                },
            },
        });
    
        var rootStorageBucketBucketPublicAccessBlock = new Aws.S3.BucketPublicAccessBlock("rootStorageBucketBucketPublicAccessBlock", new()
        {
            Bucket = rootStorageBucketBucketV2.Id,
            BlockPublicAcls = true,
            BlockPublicPolicy = true,
            IgnorePublicAcls = true,
            RestrictPublicBuckets = true,
        }, new CustomResourceOptions
        {
            DependsOn = new[]
            {
                rootStorageBucketBucketV2,
            },
        });
    
        var thisAwsBucketPolicy = Databricks.GetAwsBucketPolicy.Invoke(new()
        {
            Bucket = rootStorageBucketBucketV2.Bucket,
        });
    
        var rootBucketPolicy = new Aws.S3.BucketPolicy("rootBucketPolicy", new()
        {
            Bucket = rootStorageBucketBucketV2.Id,
            Policy = thisAwsBucketPolicy.Apply(getAwsBucketPolicyResult => getAwsBucketPolicyResult.Json),
        }, new CustomResourceOptions
        {
            DependsOn = new[]
            {
                rootStorageBucketBucketPublicAccessBlock,
            },
        });
    
        var thisMwsStorageConfigurations = new Databricks.MwsStorageConfigurations("thisMwsStorageConfigurations", new()
        {
            AccountId = databricksAccountId,
            StorageConfigurationName = $"{prefix}-storage",
            BucketName = rootStorageBucketBucketV2.Bucket,
        }, new CustomResourceOptions
        {
            Provider = databricks.Mws,
        });
    
        var thisMwsWorkspaces = new Databricks.MwsWorkspaces("thisMwsWorkspaces", new()
        {
            AccountId = databricksAccountId,
            WorkspaceName = prefix,
            AwsRegion = "us-east-1",
            CredentialsId = thisMwsCredentials.CredentialsId,
            StorageConfigurationId = thisMwsStorageConfigurations.StorageConfigurationId,
            Token = null,
            CustomTags = 
            {
                { "SoldToCode", "1234" },
            },
        }, new CustomResourceOptions
        {
            Provider = databricks.Mws,
        });
    
        return new Dictionary<string, object?>
        {
            ["databricksToken"] = thisMwsWorkspaces.Token.Apply(token => token?.TokenValue),
        };
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.random.RandomString;
    import com.pulumi.random.RandomStringArgs;
    import com.pulumi.databricks.DatabricksFunctions;
    import com.pulumi.databricks.inputs.GetAwsAssumeRolePolicyArgs;
    import com.pulumi.aws.iam.Role;
    import com.pulumi.aws.iam.RoleArgs;
    import com.pulumi.databricks.inputs.GetAwsCrossAccountPolicyArgs;
    import com.pulumi.aws.iam.RolePolicy;
    import com.pulumi.aws.iam.RolePolicyArgs;
    import com.pulumi.databricks.MwsCredentials;
    import com.pulumi.databricks.MwsCredentialsArgs;
    import com.pulumi.aws.s3.BucketV2;
    import com.pulumi.aws.s3.BucketV2Args;
    import com.pulumi.aws.s3.BucketVersioningV2;
    import com.pulumi.aws.s3.BucketVersioningV2Args;
    import com.pulumi.aws.s3.inputs.BucketVersioningV2VersioningConfigurationArgs;
    import com.pulumi.aws.s3.BucketServerSideEncryptionConfigurationV2;
    import com.pulumi.aws.s3.BucketServerSideEncryptionConfigurationV2Args;
    import com.pulumi.aws.s3.inputs.BucketServerSideEncryptionConfigurationV2RuleArgs;
    import com.pulumi.aws.s3.inputs.BucketServerSideEncryptionConfigurationV2RuleApplyServerSideEncryptionByDefaultArgs;
    import com.pulumi.aws.s3.BucketPublicAccessBlock;
    import com.pulumi.aws.s3.BucketPublicAccessBlockArgs;
    import com.pulumi.databricks.inputs.GetAwsBucketPolicyArgs;
    import com.pulumi.aws.s3.BucketPolicy;
    import com.pulumi.aws.s3.BucketPolicyArgs;
    import com.pulumi.databricks.MwsStorageConfigurations;
    import com.pulumi.databricks.MwsStorageConfigurationsArgs;
    import com.pulumi.databricks.MwsWorkspaces;
    import com.pulumi.databricks.MwsWorkspacesArgs;
    import com.pulumi.databricks.inputs.MwsWorkspacesTokenArgs;
    import com.pulumi.resources.CustomResourceOptions;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            final var config = ctx.config();
            final var databricksAccountId = config.get("databricksAccountId");
            var naming = new RandomString("naming", RandomStringArgs.builder()        
                .special(false)
                .upper(false)
                .length(6)
                .build());
    
            final var prefix = naming.result().applyValue(result -> String.format("dltp%s", result));
    
            final var thisAwsAssumeRolePolicy = DatabricksFunctions.getAwsAssumeRolePolicy(GetAwsAssumeRolePolicyArgs.builder()
                .externalId(databricksAccountId)
                .build());
    
            var crossAccountRole = new Role("crossAccountRole", RoleArgs.builder()        
                .assumeRolePolicy(thisAwsAssumeRolePolicy.applyValue(getAwsAssumeRolePolicyResult -> getAwsAssumeRolePolicyResult.json()))
                .tags(var_.tags())
                .build());
    
            final var thisAwsCrossAccountPolicy = DatabricksFunctions.getAwsCrossAccountPolicy();
    
            var thisRolePolicy = new RolePolicy("thisRolePolicy", RolePolicyArgs.builder()        
                .role(crossAccountRole.id())
                .policy(thisAwsCrossAccountPolicy.applyValue(getAwsCrossAccountPolicyResult -> getAwsCrossAccountPolicyResult.json()))
                .build());
    
            var thisMwsCredentials = new MwsCredentials("thisMwsCredentials", MwsCredentialsArgs.builder()        
                .accountId(databricksAccountId)
                .credentialsName(String.format("%s-creds", prefix))
                .roleArn(crossAccountRole.arn())
                .build(), CustomResourceOptions.builder()
                    .provider(databricks.mws())
                    .build());
    
            var rootStorageBucketBucketV2 = new BucketV2("rootStorageBucketBucketV2", BucketV2Args.builder()        
                .acl("private")
                .forceDestroy(true)
                .tags(var_.tags())
                .build());
    
            var rootVersioning = new BucketVersioningV2("rootVersioning", BucketVersioningV2Args.builder()        
                .bucket(rootStorageBucketBucketV2.id())
                .versioningConfiguration(BucketVersioningV2VersioningConfigurationArgs.builder()
                    .status("Disabled")
                    .build())
                .build());
    
            var rootStorageBucketBucketServerSideEncryptionConfigurationV2 = new BucketServerSideEncryptionConfigurationV2("rootStorageBucketBucketServerSideEncryptionConfigurationV2", BucketServerSideEncryptionConfigurationV2Args.builder()        
                .bucket(rootStorageBucketBucketV2.bucket())
                .rules(BucketServerSideEncryptionConfigurationV2RuleArgs.builder()
                    .applyServerSideEncryptionByDefault(BucketServerSideEncryptionConfigurationV2RuleApplyServerSideEncryptionByDefaultArgs.builder()
                        .sseAlgorithm("AES256")
                        .build())
                    .build())
                .build());
    
            var rootStorageBucketBucketPublicAccessBlock = new BucketPublicAccessBlock("rootStorageBucketBucketPublicAccessBlock", BucketPublicAccessBlockArgs.builder()        
                .bucket(rootStorageBucketBucketV2.id())
                .blockPublicAcls(true)
                .blockPublicPolicy(true)
                .ignorePublicAcls(true)
                .restrictPublicBuckets(true)
                .build(), CustomResourceOptions.builder()
                    .dependsOn(rootStorageBucketBucketV2)
                    .build());
    
            final var thisAwsBucketPolicy = DatabricksFunctions.getAwsBucketPolicy(GetAwsBucketPolicyArgs.builder()
                .bucket(rootStorageBucketBucketV2.bucket())
                .build());
    
            var rootBucketPolicy = new BucketPolicy("rootBucketPolicy", BucketPolicyArgs.builder()        
                .bucket(rootStorageBucketBucketV2.id())
                .policy(thisAwsBucketPolicy.applyValue(getAwsBucketPolicyResult -> getAwsBucketPolicyResult).applyValue(thisAwsBucketPolicy -> thisAwsBucketPolicy.applyValue(getAwsBucketPolicyResult -> getAwsBucketPolicyResult.json())))
                .build(), CustomResourceOptions.builder()
                    .dependsOn(rootStorageBucketBucketPublicAccessBlock)
                    .build());
    
            var thisMwsStorageConfigurations = new MwsStorageConfigurations("thisMwsStorageConfigurations", MwsStorageConfigurationsArgs.builder()        
                .accountId(databricksAccountId)
                .storageConfigurationName(String.format("%s-storage", prefix))
                .bucketName(rootStorageBucketBucketV2.bucket())
                .build(), CustomResourceOptions.builder()
                    .provider(databricks.mws())
                    .build());
    
            var thisMwsWorkspaces = new MwsWorkspaces("thisMwsWorkspaces", MwsWorkspacesArgs.builder()        
                .accountId(databricksAccountId)
                .workspaceName(prefix)
                .awsRegion("us-east-1")
                .credentialsId(thisMwsCredentials.credentialsId())
                .storageConfigurationId(thisMwsStorageConfigurations.storageConfigurationId())
                .token()
                .customTags(Map.of("SoldToCode", "1234"))
                .build(), CustomResourceOptions.builder()
                    .provider(databricks.mws())
                    .build());
    
            ctx.export("databricksToken", thisMwsWorkspaces.token().applyValue(token -> token.tokenValue()));
        }
    }
    
    configuration:
      databricksAccountId:
        type: dynamic
    resources:
      naming:
        type: random:RandomString
        properties:
          special: false
          upper: false
          length: 6
      crossAccountRole:
        type: aws:iam:Role
        properties:
          assumeRolePolicy: ${thisAwsAssumeRolePolicy.json}
          tags: ${var.tags}
      thisRolePolicy:
        type: aws:iam:RolePolicy
        properties:
          role: ${crossAccountRole.id}
          policy: ${thisAwsCrossAccountPolicy.json}
      thisMwsCredentials:
        type: databricks:MwsCredentials
        properties:
          accountId: ${databricksAccountId}
          credentialsName: ${prefix}-creds
          roleArn: ${crossAccountRole.arn}
        options:
          provider: ${databricks.mws}
      rootStorageBucketBucketV2:
        type: aws:s3:BucketV2
        properties:
          acl: private
          forceDestroy: true
          tags: ${var.tags}
      rootVersioning:
        type: aws:s3:BucketVersioningV2
        properties:
          bucket: ${rootStorageBucketBucketV2.id}
          versioningConfiguration:
            status: Disabled
      rootStorageBucketBucketServerSideEncryptionConfigurationV2:
        type: aws:s3:BucketServerSideEncryptionConfigurationV2
        properties:
          bucket: ${rootStorageBucketBucketV2.bucket}
          rules:
            - applyServerSideEncryptionByDefault:
                sseAlgorithm: AES256
      rootStorageBucketBucketPublicAccessBlock:
        type: aws:s3:BucketPublicAccessBlock
        properties:
          bucket: ${rootStorageBucketBucketV2.id}
          blockPublicAcls: true
          blockPublicPolicy: true
          ignorePublicAcls: true
          restrictPublicBuckets: true
        options:
          dependson:
            - ${rootStorageBucketBucketV2}
      rootBucketPolicy:
        type: aws:s3:BucketPolicy
        properties:
          bucket: ${rootStorageBucketBucketV2.id}
          policy: ${thisAwsBucketPolicy.json}
        options:
          dependson:
            - ${rootStorageBucketBucketPublicAccessBlock}
      thisMwsStorageConfigurations:
        type: databricks:MwsStorageConfigurations
        properties:
          accountId: ${databricksAccountId}
          storageConfigurationName: ${prefix}-storage
          bucketName: ${rootStorageBucketBucketV2.bucket}
        options:
          provider: ${databricks.mws}
      thisMwsWorkspaces:
        type: databricks:MwsWorkspaces
        properties:
          accountId: ${databricksAccountId}
          workspaceName: ${prefix}
          awsRegion: us-east-1
          credentialsId: ${thisMwsCredentials.credentialsId}
          storageConfigurationId: ${thisMwsStorageConfigurations.storageConfigurationId}
          token: {}
          # Optional Custom Tags
          customTags:
            SoldToCode: '1234'
        options:
          provider: ${databricks.mws}
    variables:
      prefix: dltp${naming.result}
      thisAwsAssumeRolePolicy:
        fn::invoke:
          Function: databricks:getAwsAssumeRolePolicy
          Arguments:
            externalId: ${databricksAccountId}
      thisAwsCrossAccountPolicy:
        fn::invoke:
          Function: databricks:getAwsCrossAccountPolicy
          Arguments: {}
      thisAwsBucketPolicy:
        fn::invoke:
          Function: databricks:getAwsBucketPolicy
          Arguments:
            bucket: ${rootStorageBucketBucketV2.bucket}
    outputs:
      databricksToken: ${thisMwsWorkspaces.token.tokenValue}
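
    The empty token block in these examples asks Databricks to mint a personal access token for the new workspace, and its value is exported as databricksToken. A common next step is to feed that token, together with the workspace's workspaceUrl output, into a second, workspace-level provider so the same program can manage clusters, jobs, and other in-workspace resources. The TypeScript sketch below continues the example above and is only an outline; confirm property names such as workspaceUrl against the provider version you are using.

    // Workspace-level provider authenticated with the PAT created via `token: {}`.
    const workspaceProvider = new databricks.Provider("workspace", {
        host: thisMwsWorkspaces.workspaceUrl,
        token: thisMwsWorkspaces.token.apply(t => t?.tokenValue ?? ""),
    });
    // In-workspace resources can then opt into it, for example:
    //   new databricks.Cluster("shared", {/* ... */}, { provider: workspaceProvider });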
    

    To create a Databricks workspace that uses AWS PrivateLink, make sure you have read and understood the Enable Private Link documentation, then customise the example above with the relevant examples from mws_vpc_endpoint, mws_private_access_settings and mws_networks. A rough sketch of how those pieces fit together follows.
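
    In rough terms, that wiring registers each AWS VPC endpoint with databricks.MwsVpcEndpoint, creates a databricks.MwsPrivateAccessSettings object, lists the endpoints in the vpcEndpoints block of databricks.MwsNetworks, and passes the resulting privateAccessSettingsId to databricks.MwsWorkspaces. The TypeScript sketch below only outlines that shape; the endpoint IDs, names, and region are placeholders, and the exact arguments should be confirmed against the documentation for each of those resources.

    // Hypothetical VPC endpoint registrations (account-level provider assumed, as above).
    const restApiEndpoint = new databricks.MwsVpcEndpoint("restApiEndpoint", {
        accountId: databricksAccountId,
        awsVpcEndpointId: "vpce-rest-placeholder",
        vpcEndpointName: "rest-api-endpoint",
        region: "us-east-1",
    }, { provider: mws });
    const relayEndpoint = new databricks.MwsVpcEndpoint("relayEndpoint", {
        accountId: databricksAccountId,
        awsVpcEndpointId: "vpce-relay-placeholder",
        vpcEndpointName: "relay-endpoint",
        region: "us-east-1",
    }, { provider: mws });

    // Private access settings referenced by the workspace.
    const pas = new databricks.MwsPrivateAccessSettings("pas", {
        privateAccessSettingsName: "pl-settings",
        region: "us-east-1",
        publicAccessEnabled: true,
    }, { provider: mws });

    // The network registration would list the endpoints in its vpcEndpoints block,
    // and the workspace would additionally set:
    //   privateAccessSettingsId: pas.privateAccessSettingsId,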

    Creating a Databricks on GCP workspace

    To get a workspace running, you have to configure a network object:

    • databricks.MwsNetworks - (optional, but recommended) You can share one customer-managed VPC with multiple workspaces in a single account. You do not have to create a new VPC for each workspace. However, you cannot reuse subnets with other resources, including other workspaces or non-Databricks resources. If you plan to share one VPC with multiple workspaces, be sure to size your VPC and subnets accordingly. Because a databricks.MwsNetworks resource encapsulates this information, you cannot reuse it across workspaces.
    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";
    
    const config = new pulumi.Config();
    const databricksAccountId = config.requireObject("databricksAccountId");
    const databricksGoogleServiceAccount = config.requireObject("databricksGoogleServiceAccount");
    const googleProject = config.requireObject("googleProject");
    const mws = new databricks.Provider("mws", {host: "https://accounts.gcp.databricks.com"});
    // register VPC
    const thisMwsNetworks = new databricks.MwsNetworks("thisMwsNetworks", {
        accountId: databricksAccountId,
        networkName: `${_var.prefix}-network`,
        gcpNetworkInfo: {
            networkProjectId: googleProject,
            vpcId: _var.vpc_id,
            subnetId: _var.subnet_id,
            subnetRegion: _var.subnet_region,
            podIpRangeName: "pods",
            serviceIpRangeName: "svc",
        },
    });
    // create workspace in given VPC
    const thisMwsWorkspaces = new databricks.MwsWorkspaces("thisMwsWorkspaces", {
        accountId: databricksAccountId,
        workspaceName: _var.prefix,
        location: _var.subnet_region,
        cloudResourceContainer: {
            gcp: {
                projectId: googleProject,
            },
        },
        networkId: thisMwsNetworks.networkId,
        gkeConfig: {
            connectivityType: "PRIVATE_NODE_PUBLIC_MASTER",
            masterIpRange: "10.3.0.0/28",
        },
        token: {},
    });
    export const databricksToken = thisMwsWorkspaces.token.apply(token => token?.tokenValue);
    
    import pulumi
    import pulumi_databricks as databricks
    
    config = pulumi.Config()
    databricks_account_id = config.require_object("databricksAccountId")
    databricks_google_service_account = config.require_object("databricksGoogleServiceAccount")
    google_project = config.require_object("googleProject")
    mws = databricks.Provider("mws", host="https://accounts.gcp.databricks.com")
    # register VPC
    this_mws_networks = databricks.MwsNetworks("thisMwsNetworks",
        account_id=databricks_account_id,
        network_name=f"{var['prefix']}-network",
        gcp_network_info=databricks.MwsNetworksGcpNetworkInfoArgs(
            network_project_id=google_project,
            vpc_id=var["vpc_id"],
            subnet_id=var["subnet_id"],
            subnet_region=var["subnet_region"],
            pod_ip_range_name="pods",
            service_ip_range_name="svc",
        ))
    # create workspace in given VPC
    this_mws_workspaces = databricks.MwsWorkspaces("thisMwsWorkspaces",
        account_id=databricks_account_id,
        workspace_name=var["prefix"],
        location=var["subnet_region"],
        cloud_resource_container=databricks.MwsWorkspacesCloudResourceContainerArgs(
            gcp=databricks.MwsWorkspacesCloudResourceContainerGcpArgs(
                project_id=google_project,
            ),
        ),
        network_id=this_mws_networks.network_id,
        gke_config=databricks.MwsWorkspacesGkeConfigArgs(
            connectivity_type="PRIVATE_NODE_PUBLIC_MASTER",
            master_ip_range="10.3.0.0/28",
        ),
        token=databricks.MwsWorkspacesTokenArgs())
    pulumi.export("databricksToken", this_mws_workspaces.token.token_value)
    
    package main
    
    import (
    	"fmt"
    
    	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi/config"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		cfg := config.New(ctx, "")
    		databricksAccountId := cfg.RequireObject("databricksAccountId")
    		databricksGoogleServiceAccount := cfg.RequireObject("databricksGoogleServiceAccount")
    		googleProject := cfg.RequireObject("googleProject")
    		_, err := databricks.NewProvider(ctx, "mws", &databricks.ProviderArgs{
    			Host: pulumi.String("https://accounts.gcp.databricks.com"),
    		})
    		if err != nil {
    			return err
    		}
    		// register VPC
    		thisMwsNetworks, err := databricks.NewMwsNetworks(ctx, "thisMwsNetworks", &databricks.MwsNetworksArgs{
    			AccountId:   pulumi.Any(databricksAccountId),
    			NetworkName: pulumi.String(fmt.Sprintf("%v-network", _var.Prefix)),
    			GcpNetworkInfo: &databricks.MwsNetworksGcpNetworkInfoArgs{
    				NetworkProjectId:   pulumi.Any(googleProject),
    				VpcId:              pulumi.Any(_var.Vpc_id),
    				SubnetId:           pulumi.Any(_var.Subnet_id),
    				SubnetRegion:       pulumi.Any(_var.Subnet_region),
    				PodIpRangeName:     pulumi.String("pods"),
    				ServiceIpRangeName: pulumi.String("svc"),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		// create workspace in given VPC
    		thisMwsWorkspaces, err := databricks.NewMwsWorkspaces(ctx, "thisMwsWorkspaces", &databricks.MwsWorkspacesArgs{
    			AccountId:     pulumi.Any(databricksAccountId),
    			WorkspaceName: pulumi.Any(_var.Prefix),
    			Location:      pulumi.Any(_var.Subnet_region),
    			CloudResourceContainer: &databricks.MwsWorkspacesCloudResourceContainerArgs{
    				Gcp: &databricks.MwsWorkspacesCloudResourceContainerGcpArgs{
    					ProjectId: pulumi.Any(googleProject),
    				},
    			},
    			NetworkId: thisMwsNetworks.NetworkId,
    			GkeConfig: &databricks.MwsWorkspacesGkeConfigArgs{
    				ConnectivityType: pulumi.String("PRIVATE_NODE_PUBLIC_MASTER"),
    				MasterIpRange:    pulumi.String("10.3.0.0/28"),
    			},
    			Token: nil,
    		})
    		if err != nil {
    			return err
    		}
    		ctx.Export("databricksToken", thisMwsWorkspaces.Token.ApplyT(func(token databricks.MwsWorkspacesToken) (*string, error) {
    			return &token.TokenValue, nil
    		}).(pulumi.StringPtrOutput))
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Databricks = Pulumi.Databricks;
    
    return await Deployment.RunAsync(() => 
    {
        var config = new Config();
        var databricksAccountId = config.RequireObject<dynamic>("databricksAccountId");
        var databricksGoogleServiceAccount = config.RequireObject<dynamic>("databricksGoogleServiceAccount");
        var googleProject = config.RequireObject<dynamic>("googleProject");
        var mws = new Databricks.Provider("mws", new()
        {
            Host = "https://accounts.gcp.databricks.com",
        });
    
        // register VPC
        var thisMwsNetworks = new Databricks.MwsNetworks("thisMwsNetworks", new()
        {
            AccountId = databricksAccountId,
            NetworkName = $"{@var.Prefix}-network",
            GcpNetworkInfo = new Databricks.Inputs.MwsNetworksGcpNetworkInfoArgs
            {
                NetworkProjectId = googleProject,
                VpcId = @var.Vpc_id,
                SubnetId = @var.Subnet_id,
                SubnetRegion = @var.Subnet_region,
                PodIpRangeName = "pods",
                ServiceIpRangeName = "svc",
            },
        });
    
        // create workspace in given VPC
        var thisMwsWorkspaces = new Databricks.MwsWorkspaces("thisMwsWorkspaces", new()
        {
            AccountId = databricksAccountId,
            WorkspaceName = @var.Prefix,
            Location = @var.Subnet_region,
            CloudResourceContainer = new Databricks.Inputs.MwsWorkspacesCloudResourceContainerArgs
            {
                Gcp = new Databricks.Inputs.MwsWorkspacesCloudResourceContainerGcpArgs
                {
                    ProjectId = googleProject,
                },
            },
            NetworkId = thisMwsNetworks.NetworkId,
            GkeConfig = new Databricks.Inputs.MwsWorkspacesGkeConfigArgs
            {
                ConnectivityType = "PRIVATE_NODE_PUBLIC_MASTER",
                MasterIpRange = "10.3.0.0/28",
            },
            Token = null,
        });
    
        return new Dictionary<string, object?>
        {
            ["databricksToken"] = thisMwsWorkspaces.Token.Apply(token => token?.TokenValue),
        };
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.databricks.Provider;
    import com.pulumi.databricks.ProviderArgs;
    import com.pulumi.databricks.MwsNetworks;
    import com.pulumi.databricks.MwsNetworksArgs;
    import com.pulumi.databricks.inputs.MwsNetworksGcpNetworkInfoArgs;
    import com.pulumi.databricks.MwsWorkspaces;
    import com.pulumi.databricks.MwsWorkspacesArgs;
    import com.pulumi.databricks.inputs.MwsWorkspacesCloudResourceContainerArgs;
    import com.pulumi.databricks.inputs.MwsWorkspacesCloudResourceContainerGcpArgs;
    import com.pulumi.databricks.inputs.MwsWorkspacesGkeConfigArgs;
    import com.pulumi.databricks.inputs.MwsWorkspacesTokenArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            final var config = ctx.config();
            final var databricksAccountId = config.get("databricksAccountId");
            final var databricksGoogleServiceAccount = config.get("databricksGoogleServiceAccount");
            final var googleProject = config.get("googleProject");
            var mws = new Provider("mws", ProviderArgs.builder()        
                .host("https://accounts.gcp.databricks.com")
                .build());
    
            var thisMwsNetworks = new MwsNetworks("thisMwsNetworks", MwsNetworksArgs.builder()        
                .accountId(databricksAccountId)
                .networkName(String.format("%s-network", var_.prefix()))
                .gcpNetworkInfo(MwsNetworksGcpNetworkInfoArgs.builder()
                    .networkProjectId(googleProject)
                    .vpcId(var_.vpc_id())
                    .subnetId(var_.subnet_id())
                    .subnetRegion(var_.subnet_region())
                    .podIpRangeName("pods")
                    .serviceIpRangeName("svc")
                    .build())
                .build());
    
            var thisMwsWorkspaces = new MwsWorkspaces("thisMwsWorkspaces", MwsWorkspacesArgs.builder()        
                .accountId(databricksAccountId)
                .workspaceName(var_.prefix())
                .location(var_.subnet_region())
                .cloudResourceContainer(MwsWorkspacesCloudResourceContainerArgs.builder()
                    .gcp(MwsWorkspacesCloudResourceContainerGcpArgs.builder()
                        .projectId(googleProject)
                        .build())
                    .build())
                .networkId(thisMwsNetworks.networkId())
                .gkeConfig(MwsWorkspacesGkeConfigArgs.builder()
                    .connectivityType("PRIVATE_NODE_PUBLIC_MASTER")
                    .masterIpRange("10.3.0.0/28")
                    .build())
                .token(MwsWorkspacesTokenArgs.builder().build())
                .build());
    
            ctx.export("databricksToken", thisMwsWorkspaces.token().applyValue(token -> token.tokenValue()));
        }
    }
    
    configuration:
      databricksAccountId:
        type: dynamic
      databricksGoogleServiceAccount:
        type: dynamic
      googleProject:
        type: dynamic
    resources:
      mws:
        type: pulumi:providers:databricks
        properties:
          host: https://accounts.gcp.databricks.com
      # register VPC
      thisMwsNetworks:
        type: databricks:MwsNetworks
        properties:
          accountId: ${databricksAccountId}
          networkName: ${var.prefix}-network
          gcpNetworkInfo:
            networkProjectId: ${googleProject}
            vpcId: ${var.vpc_id}
            subnetId: ${var.subnet_id}
            subnetRegion: ${var.subnet_region}
            podIpRangeName: pods
            serviceIpRangeName: svc
      # create workspace in given VPC
      thisMwsWorkspaces:
        type: databricks:MwsWorkspaces
        properties:
          accountId: ${databricksAccountId}
          workspaceName: ${var.prefix}
          location: ${var.subnet_region}
          cloudResourceContainer:
            gcp:
              projectId: ${googleProject}
          networkId: ${thisMwsNetworks.networkId}
          gkeConfig:
            connectivityType: PRIVATE_NODE_PUBLIC_MASTER
            masterIpRange: 10.3.0.0/28
          token: {}
    outputs:
      databricksToken: ${thisMwsWorkspaces.token.tokenValue}
    

    To create a Databricks workspace that uses GCP Private Service Connect, first read and understand the Enable Private Service Connect documentation, then customise the example above with the relevant examples from mws_vpc_endpoint, mws_private_access_settings and mws_networks.
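
    A minimal, hedged sketch of the private access settings part of that wiring is shown below (TypeScript). The config keys and resource names here are assumptions, and the full Private Service Connect setup described in the linked documentation also involves databricks.MwsVpcEndpoint registrations referenced from databricks.MwsNetworks.

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";
    
    const config = new pulumi.Config();
    const databricksAccountId = config.require("databricksAccountId");
    const prefix = config.require("prefix");
    const region = config.require("region");
    
    // Account-level provider, as in the examples above.
    const mws = new databricks.Provider("mws", {host: "https://accounts.gcp.databricks.com"});
    
    // Private access settings that the workspace will reference. VPC endpoints
    // registered with databricks.MwsVpcEndpoint can be listed in allowedVpcEndpointIds
    // to restrict which endpoints may reach the workspace.
    const pas = new databricks.MwsPrivateAccessSettings("pas", {
        accountId: databricksAccountId,
        privateAccessSettingsName: `${prefix}-pas`,
        region: region,
        publicAccessEnabled: false,
    }, {provider: mws});
    
    // Pass this as `privateAccessSettingsId` on databricks.MwsWorkspaces, alongside the
    // networkId of a databricks.MwsNetworks resource that references your VPC endpoints.
    export const privateAccessSettingsId = pas.privateAccessSettingsId;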

    Create MwsWorkspaces Resource

    Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.

    Constructor syntax

    new MwsWorkspaces(name: string, args: MwsWorkspacesArgs, opts?: CustomResourceOptions);
    @overload
    def MwsWorkspaces(resource_name: str,
                      args: MwsWorkspacesArgs,
                      opts: Optional[ResourceOptions] = None)
    
    @overload
    def MwsWorkspaces(resource_name: str,
                      opts: Optional[ResourceOptions] = None,
                      account_id: Optional[str] = None,
                      workspace_name: Optional[str] = None,
                      is_no_public_ip_enabled: Optional[bool] = None,
                      workspace_status: Optional[str] = None,
                      creation_time: Optional[int] = None,
                      credentials_id: Optional[str] = None,
                      custom_tags: Optional[Mapping[str, Any]] = None,
                      customer_managed_key_id: Optional[str] = None,
                      deployment_name: Optional[str] = None,
                      external_customer_info: Optional[MwsWorkspacesExternalCustomerInfoArgs] = None,
                      gcp_managed_network_config: Optional[MwsWorkspacesGcpManagedNetworkConfigArgs] = None,
                      gke_config: Optional[MwsWorkspacesGkeConfigArgs] = None,
                      workspace_url: Optional[str] = None,
                      cloud_resource_container: Optional[MwsWorkspacesCloudResourceContainerArgs] = None,
                      pricing_tier: Optional[str] = None,
                      network_id: Optional[str] = None,
                      managed_services_customer_managed_key_id: Optional[str] = None,
                      private_access_settings_id: Optional[str] = None,
                      storage_configuration_id: Optional[str] = None,
                      storage_customer_managed_key_id: Optional[str] = None,
                      token: Optional[MwsWorkspacesTokenArgs] = None,
                      workspace_id: Optional[int] = None,
                      aws_region: Optional[str] = None,
                      location: Optional[str] = None,
                      workspace_status_message: Optional[str] = None,
                      cloud: Optional[str] = None)
    func NewMwsWorkspaces(ctx *Context, name string, args MwsWorkspacesArgs, opts ...ResourceOption) (*MwsWorkspaces, error)
    public MwsWorkspaces(string name, MwsWorkspacesArgs args, CustomResourceOptions? opts = null)
    public MwsWorkspaces(String name, MwsWorkspacesArgs args)
    public MwsWorkspaces(String name, MwsWorkspacesArgs args, CustomResourceOptions options)
    
    type: databricks:MwsWorkspaces
    properties: # The arguments to resource properties.
    options: # Bag of options to control resource's behavior.
    
    

    Parameters

    name string
    The unique name of the resource.
    args MwsWorkspacesArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    resource_name str
    The unique name of the resource.
    args MwsWorkspacesArgs
    The arguments to resource properties.
    opts ResourceOptions
    Bag of options to control resource's behavior.
    ctx Context
    Context object for the current deployment.
    name string
    The unique name of the resource.
    args MwsWorkspacesArgs
    The arguments to resource properties.
    opts ResourceOption
    Bag of options to control resource's behavior.
    name string
    The unique name of the resource.
    args MwsWorkspacesArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    name String
    The unique name of the resource.
    args MwsWorkspacesArgs
    The arguments to resource properties.
    options CustomResourceOptions
    Bag of options to control resource's behavior.

    Example

    The following reference example uses placeholder values for all input properties.

    var mwsWorkspacesResource = new Databricks.MwsWorkspaces("mwsWorkspacesResource", new()
    {
        AccountId = "string",
        WorkspaceName = "string",
        IsNoPublicIpEnabled = false,
        WorkspaceStatus = "string",
        CreationTime = 0,
        CredentialsId = "string",
        CustomTags = 
        {
            { "string", "any" },
        },
        DeploymentName = "string",
        ExternalCustomerInfo = new Databricks.Inputs.MwsWorkspacesExternalCustomerInfoArgs
        {
            AuthoritativeUserEmail = "string",
            AuthoritativeUserFullName = "string",
            CustomerName = "string",
        },
        GcpManagedNetworkConfig = new Databricks.Inputs.MwsWorkspacesGcpManagedNetworkConfigArgs
        {
            GkeClusterPodIpRange = "string",
            GkeClusterServiceIpRange = "string",
            SubnetCidr = "string",
        },
        GkeConfig = new Databricks.Inputs.MwsWorkspacesGkeConfigArgs
        {
            ConnectivityType = "string",
            MasterIpRange = "string",
        },
        WorkspaceUrl = "string",
        CloudResourceContainer = new Databricks.Inputs.MwsWorkspacesCloudResourceContainerArgs
        {
            Gcp = new Databricks.Inputs.MwsWorkspacesCloudResourceContainerGcpArgs
            {
                ProjectId = "string",
            },
        },
        PricingTier = "string",
        NetworkId = "string",
        ManagedServicesCustomerManagedKeyId = "string",
        PrivateAccessSettingsId = "string",
        StorageConfigurationId = "string",
        StorageCustomerManagedKeyId = "string",
        Token = new Databricks.Inputs.MwsWorkspacesTokenArgs
        {
            Comment = "string",
            LifetimeSeconds = 0,
            TokenId = "string",
            TokenValue = "string",
        },
        WorkspaceId = 0,
        AwsRegion = "string",
        Location = "string",
        WorkspaceStatusMessage = "string",
        Cloud = "string",
    });
    
    example, err := databricks.NewMwsWorkspaces(ctx, "mwsWorkspacesResource", &databricks.MwsWorkspacesArgs{
    	AccountId:           pulumi.String("string"),
    	WorkspaceName:       pulumi.String("string"),
    	IsNoPublicIpEnabled: pulumi.Bool(false),
    	WorkspaceStatus:     pulumi.String("string"),
    	CreationTime:        pulumi.Int(0),
    	CredentialsId:       pulumi.String("string"),
    	CustomTags: pulumi.Map{
    		"string": pulumi.Any("any"),
    	},
    	DeploymentName: pulumi.String("string"),
    	ExternalCustomerInfo: &databricks.MwsWorkspacesExternalCustomerInfoArgs{
    		AuthoritativeUserEmail:    pulumi.String("string"),
    		AuthoritativeUserFullName: pulumi.String("string"),
    		CustomerName:              pulumi.String("string"),
    	},
    	GcpManagedNetworkConfig: &databricks.MwsWorkspacesGcpManagedNetworkConfigArgs{
    		GkeClusterPodIpRange:     pulumi.String("string"),
    		GkeClusterServiceIpRange: pulumi.String("string"),
    		SubnetCidr:               pulumi.String("string"),
    	},
    	GkeConfig: &databricks.MwsWorkspacesGkeConfigArgs{
    		ConnectivityType: pulumi.String("string"),
    		MasterIpRange:    pulumi.String("string"),
    	},
    	WorkspaceUrl: pulumi.String("string"),
    	CloudResourceContainer: &databricks.MwsWorkspacesCloudResourceContainerArgs{
    		Gcp: &databricks.MwsWorkspacesCloudResourceContainerGcpArgs{
    			ProjectId: pulumi.String("string"),
    		},
    	},
    	PricingTier:                         pulumi.String("string"),
    	NetworkId:                           pulumi.String("string"),
    	ManagedServicesCustomerManagedKeyId: pulumi.String("string"),
    	PrivateAccessSettingsId:             pulumi.String("string"),
    	StorageConfigurationId:              pulumi.String("string"),
    	StorageCustomerManagedKeyId:         pulumi.String("string"),
    	Token: &databricks.MwsWorkspacesTokenArgs{
    		Comment:         pulumi.String("string"),
    		LifetimeSeconds: pulumi.Int(0),
    		TokenId:         pulumi.String("string"),
    		TokenValue:      pulumi.String("string"),
    	},
    	WorkspaceId:            pulumi.Int(0),
    	AwsRegion:              pulumi.String("string"),
    	Location:               pulumi.String("string"),
    	WorkspaceStatusMessage: pulumi.String("string"),
    	Cloud:                  pulumi.String("string"),
    })
    
    var mwsWorkspacesResource = new MwsWorkspaces("mwsWorkspacesResource", MwsWorkspacesArgs.builder()        
        .accountId("string")
        .workspaceName("string")
        .isNoPublicIpEnabled(false)
        .workspaceStatus("string")
        .creationTime(0)
        .credentialsId("string")
        .customTags(Map.of("string", "any"))
        .deploymentName("string")
        .externalCustomerInfo(MwsWorkspacesExternalCustomerInfoArgs.builder()
            .authoritativeUserEmail("string")
            .authoritativeUserFullName("string")
            .customerName("string")
            .build())
        .gcpManagedNetworkConfig(MwsWorkspacesGcpManagedNetworkConfigArgs.builder()
            .gkeClusterPodIpRange("string")
            .gkeClusterServiceIpRange("string")
            .subnetCidr("string")
            .build())
        .gkeConfig(MwsWorkspacesGkeConfigArgs.builder()
            .connectivityType("string")
            .masterIpRange("string")
            .build())
        .workspaceUrl("string")
        .cloudResourceContainer(MwsWorkspacesCloudResourceContainerArgs.builder()
            .gcp(MwsWorkspacesCloudResourceContainerGcpArgs.builder()
                .projectId("string")
                .build())
            .build())
        .pricingTier("string")
        .networkId("string")
        .managedServicesCustomerManagedKeyId("string")
        .privateAccessSettingsId("string")
        .storageConfigurationId("string")
        .storageCustomerManagedKeyId("string")
        .token(MwsWorkspacesTokenArgs.builder()
            .comment("string")
            .lifetimeSeconds(0)
            .tokenId("string")
            .tokenValue("string")
            .build())
        .workspaceId(0)
        .awsRegion("string")
        .location("string")
        .workspaceStatusMessage("string")
        .cloud("string")
        .build());
    
    mws_workspaces_resource = databricks.MwsWorkspaces("mwsWorkspacesResource",
        account_id="string",
        workspace_name="string",
        is_no_public_ip_enabled=False,
        workspace_status="string",
        creation_time=0,
        credentials_id="string",
        custom_tags={
            "string": "any",
        },
        deployment_name="string",
        external_customer_info=databricks.MwsWorkspacesExternalCustomerInfoArgs(
            authoritative_user_email="string",
            authoritative_user_full_name="string",
            customer_name="string",
        ),
        gcp_managed_network_config=databricks.MwsWorkspacesGcpManagedNetworkConfigArgs(
            gke_cluster_pod_ip_range="string",
            gke_cluster_service_ip_range="string",
            subnet_cidr="string",
        ),
        gke_config=databricks.MwsWorkspacesGkeConfigArgs(
            connectivity_type="string",
            master_ip_range="string",
        ),
        workspace_url="string",
        cloud_resource_container=databricks.MwsWorkspacesCloudResourceContainerArgs(
            gcp=databricks.MwsWorkspacesCloudResourceContainerGcpArgs(
                project_id="string",
            ),
        ),
        pricing_tier="string",
        network_id="string",
        managed_services_customer_managed_key_id="string",
        private_access_settings_id="string",
        storage_configuration_id="string",
        storage_customer_managed_key_id="string",
        token=databricks.MwsWorkspacesTokenArgs(
            comment="string",
            lifetime_seconds=0,
            token_id="string",
            token_value="string",
        ),
        workspace_id=0,
        aws_region="string",
        location="string",
        workspace_status_message="string",
        cloud="string")
    
    const mwsWorkspacesResource = new databricks.MwsWorkspaces("mwsWorkspacesResource", {
        accountId: "string",
        workspaceName: "string",
        isNoPublicIpEnabled: false,
        workspaceStatus: "string",
        creationTime: 0,
        credentialsId: "string",
        customTags: {
            string: "any",
        },
        deploymentName: "string",
        externalCustomerInfo: {
            authoritativeUserEmail: "string",
            authoritativeUserFullName: "string",
            customerName: "string",
        },
        gcpManagedNetworkConfig: {
            gkeClusterPodIpRange: "string",
            gkeClusterServiceIpRange: "string",
            subnetCidr: "string",
        },
        gkeConfig: {
            connectivityType: "string",
            masterIpRange: "string",
        },
        workspaceUrl: "string",
        cloudResourceContainer: {
            gcp: {
                projectId: "string",
            },
        },
        pricingTier: "string",
        networkId: "string",
        managedServicesCustomerManagedKeyId: "string",
        privateAccessSettingsId: "string",
        storageConfigurationId: "string",
        storageCustomerManagedKeyId: "string",
        token: {
            comment: "string",
            lifetimeSeconds: 0,
            tokenId: "string",
            tokenValue: "string",
        },
        workspaceId: 0,
        awsRegion: "string",
        location: "string",
        workspaceStatusMessage: "string",
        cloud: "string",
    });
    
    type: databricks:MwsWorkspaces
    properties:
        accountId: string
        awsRegion: string
        cloud: string
        cloudResourceContainer:
            gcp:
                projectId: string
        creationTime: 0
        credentialsId: string
        customTags:
            string: any
        deploymentName: string
        externalCustomerInfo:
            authoritativeUserEmail: string
            authoritativeUserFullName: string
            customerName: string
        gcpManagedNetworkConfig:
            gkeClusterPodIpRange: string
            gkeClusterServiceIpRange: string
            subnetCidr: string
        gkeConfig:
            connectivityType: string
            masterIpRange: string
        isNoPublicIpEnabled: false
        location: string
        managedServicesCustomerManagedKeyId: string
        networkId: string
        pricingTier: string
        privateAccessSettingsId: string
        storageConfigurationId: string
        storageCustomerManagedKeyId: string
        token:
            comment: string
            lifetimeSeconds: 0
            tokenId: string
            tokenValue: string
        workspaceId: 0
        workspaceName: string
        workspaceStatus: string
        workspaceStatusMessage: string
        workspaceUrl: string
    

    MwsWorkspaces Resource Properties

    To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

    Inputs

    The MwsWorkspaces resource accepts the following input properties:

    AccountId string
    Account Id that could be found in the top right corner of Accounts Console.
    WorkspaceName string
    name of the workspace, will appear on UI.
    AwsRegion string
    region of VPC.
    Cloud string
    CloudResourceContainer MwsWorkspacesCloudResourceContainer
    A block that specifies GCP workspace configurations, consisting of following blocks:
    CreationTime int
    (Integer) time when workspace was created
    CredentialsId string
    CustomTags Dictionary<string, object>
    The custom tags key-value pairing that is attached to this workspace. These tags will be applied to clusters automatically in addition to any default_tags or custom_tags on a cluster level. Please note it can take up to an hour for custom_tags to be set due to scheduling on Control Plane. After custom tags are applied, they can be modified however they can never be completely removed.
    CustomerManagedKeyId string

    Deprecated: Use managed_services_customer_managed_key_id instead

    DeploymentName string
    part of URL as in https://<prefix>-<deployment-name>.cloud.databricks.com. Deployment name cannot be used until a deployment name prefix is defined. Please contact your Databricks representative. Once a new deployment prefix is added/updated, it only will affect the new workspaces created.
    ExternalCustomerInfo MwsWorkspacesExternalCustomerInfo
    GcpManagedNetworkConfig MwsWorkspacesGcpManagedNetworkConfig
    GkeConfig MwsWorkspacesGkeConfig
    A block that specifies GKE configuration for the Databricks workspace:
    IsNoPublicIpEnabled bool
    Location string
    region of the subnet.
    ManagedServicesCustomerManagedKeyId string
    customer_managed_key_id from customer managed keys with use_cases set to MANAGED_SERVICES. This is used to encrypt the workspace's notebook and secret data in the control plane.
    NetworkId string
    network_id from networks.
    PricingTier string
    PrivateAccessSettingsId string
    Canonical unique identifier of databricks.MwsPrivateAccessSettings in Databricks Account.
    StorageConfigurationId string
    storage_configuration_id from storage configuration.
    StorageCustomerManagedKeyId string
    customer_managed_key_id from customer managed keys with use_cases set to STORAGE. This is used to encrypt the DBFS Storage & Cluster Volumes.
    Token MwsWorkspacesToken
    WorkspaceId int
    (String) workspace id
    WorkspaceStatus string
    (String) workspace status
    WorkspaceStatusMessage string
    (String) updates on workspace status
    WorkspaceUrl string
    (String) URL of the workspace
    AccountId string
    Account Id that could be found in the top right corner of Accounts Console.
    WorkspaceName string
    name of the workspace, will appear on UI.
    AwsRegion string
    region of VPC.
    Cloud string
    CloudResourceContainer MwsWorkspacesCloudResourceContainerArgs
    A block that specifies GCP workspace configurations, consisting of following blocks:
    CreationTime int
    (Integer) time when workspace was created
    CredentialsId string
    CustomTags map[string]interface{}
    The custom tags key-value pairing that is attached to this workspace. These tags will be applied to clusters automatically in addition to any default_tags or custom_tags on a cluster level. Please note it can take up to an hour for custom_tags to be set due to scheduling on Control Plane. After custom tags are applied, they can be modified however they can never be completely removed.
    CustomerManagedKeyId string

    Deprecated: Use managed_services_customer_managed_key_id instead

    DeploymentName string
    part of URL as in https://<prefix>-<deployment-name>.cloud.databricks.com. Deployment name cannot be used until a deployment name prefix is defined. Please contact your Databricks representative. Once a new deployment prefix is added/updated, it only will affect the new workspaces created.
    ExternalCustomerInfo MwsWorkspacesExternalCustomerInfoArgs
    GcpManagedNetworkConfig MwsWorkspacesGcpManagedNetworkConfigArgs
    GkeConfig MwsWorkspacesGkeConfigArgs
    A block that specifies GKE configuration for the Databricks workspace:
    IsNoPublicIpEnabled bool
    Location string
    region of the subnet.
    ManagedServicesCustomerManagedKeyId string
    customer_managed_key_id from customer managed keys with use_cases set to MANAGED_SERVICES. This is used to encrypt the workspace's notebook and secret data in the control plane.
    NetworkId string
    network_id from networks.
    PricingTier string
    PrivateAccessSettingsId string
    Canonical unique identifier of databricks.MwsPrivateAccessSettings in Databricks Account.
    StorageConfigurationId string
    storage_configuration_id from storage configuration.
    StorageCustomerManagedKeyId string
    customer_managed_key_id from customer managed keys with use_cases set to STORAGE. This is used to encrypt the DBFS Storage & Cluster Volumes.
    Token MwsWorkspacesTokenArgs
    WorkspaceId int
    (String) workspace id
    WorkspaceStatus string
    (String) workspace status
    WorkspaceStatusMessage string
    (String) updates on workspace status
    WorkspaceUrl string
    (String) URL of the workspace
    accountId String
    Account Id that could be found in the top right corner of Accounts Console.
    workspaceName String
    name of the workspace, will appear on UI.
    awsRegion String
    region of VPC.
    cloud String
    cloudResourceContainer MwsWorkspacesCloudResourceContainer
    A block that specifies GCP workspace configurations, consisting of following blocks:
    creationTime Integer
    (Integer) time when workspace was created
    credentialsId String
    customTags Map<String,Object>
    The custom tags key-value pairing that is attached to this workspace. These tags will be applied to clusters automatically in addition to any default_tags or custom_tags on a cluster level. Please note it can take up to an hour for custom_tags to be set due to scheduling on Control Plane. After custom tags are applied, they can be modified however they can never be completely removed.
    customerManagedKeyId String

    Deprecated: Use managed_services_customer_managed_key_id instead

    deploymentName String
    part of URL as in https://<prefix>-<deployment-name>.cloud.databricks.com. Deployment name cannot be used until a deployment name prefix is defined. Please contact your Databricks representative. Once a new deployment prefix is added/updated, it only will affect the new workspaces created.
    externalCustomerInfo MwsWorkspacesExternalCustomerInfo
    gcpManagedNetworkConfig MwsWorkspacesGcpManagedNetworkConfig
    gkeConfig MwsWorkspacesGkeConfig
    A block that specifies GKE configuration for the Databricks workspace:
    isNoPublicIpEnabled Boolean
    location String
    region of the subnet.
    managedServicesCustomerManagedKeyId String
    customer_managed_key_id from customer managed keys with use_cases set to MANAGED_SERVICES. This is used to encrypt the workspace's notebook and secret data in the control plane.
    networkId String
    network_id from networks.
    pricingTier String
    privateAccessSettingsId String
    Canonical unique identifier of databricks.MwsPrivateAccessSettings in Databricks Account.
    storageConfigurationId String
    storage_configuration_id from storage configuration.
    storageCustomerManagedKeyId String
    customer_managed_key_id from customer managed keys with use_cases set to STORAGE. This is used to encrypt the DBFS Storage & Cluster Volumes.
    token MwsWorkspacesToken
    workspaceId Integer
    (String) workspace id
    workspaceStatus String
    (String) workspace status
    workspaceStatusMessage String
    (String) updates on workspace status
    workspaceUrl String
    (String) URL of the workspace
    accountId string
    Account Id that could be found in the top right corner of Accounts Console.
    workspaceName string
    name of the workspace, will appear on UI.
    awsRegion string
    region of VPC.
    cloud string
    cloudResourceContainer MwsWorkspacesCloudResourceContainer
    A block that specifies GCP workspace configurations, consisting of following blocks:
    creationTime number
    (Integer) time when workspace was created
    credentialsId string
    customTags {[key: string]: any}
    The custom tags key-value pairing that is attached to this workspace. These tags will be applied to clusters automatically in addition to any default_tags or custom_tags on a cluster level. Please note it can take up to an hour for custom_tags to be set due to scheduling on Control Plane. After custom tags are applied, they can be modified however they can never be completely removed.
    customerManagedKeyId string

    Deprecated: Use managed_services_customer_managed_key_id instead

    deploymentName string
    part of URL as in https://<prefix>-<deployment-name>.cloud.databricks.com. Deployment name cannot be used until a deployment name prefix is defined. Please contact your Databricks representative. Once a new deployment prefix is added/updated, it only will affect the new workspaces created.
    externalCustomerInfo MwsWorkspacesExternalCustomerInfo
    gcpManagedNetworkConfig MwsWorkspacesGcpManagedNetworkConfig
    gkeConfig MwsWorkspacesGkeConfig
    A block that specifies GKE configuration for the Databricks workspace:
    isNoPublicIpEnabled boolean
    location string
    region of the subnet.
    managedServicesCustomerManagedKeyId string
    customer_managed_key_id from customer managed keys with use_cases set to MANAGED_SERVICES. This is used to encrypt the workspace's notebook and secret data in the control plane.
    networkId string
    network_id from networks.
    pricingTier string
    privateAccessSettingsId string
    Canonical unique identifier of databricks.MwsPrivateAccessSettings in Databricks Account.
    storageConfigurationId string
    storage_configuration_id from storage configuration.
    storageCustomerManagedKeyId string
    customer_managed_key_id from customer managed keys with use_cases set to STORAGE. This is used to encrypt the DBFS Storage & Cluster Volumes.
    token MwsWorkspacesToken
    workspaceId number
    (String) workspace id
    workspaceStatus string
    (String) workspace status
    workspaceStatusMessage string
    (String) updates on workspace status
    workspaceUrl string
    (String) URL of the workspace
    account_id str
    Account Id that could be found in the top right corner of Accounts Console.
    workspace_name str
    name of the workspace, will appear on UI.
    aws_region str
    region of VPC.
    cloud str
    cloud_resource_container MwsWorkspacesCloudResourceContainerArgs
    A block that specifies GCP workspace configurations, consisting of following blocks:
    creation_time int
    (Integer) time when workspace was created
    credentials_id str
    custom_tags Mapping[str, Any]
    The custom tags key-value pairing that is attached to this workspace. These tags will be applied to clusters automatically in addition to any default_tags or custom_tags on a cluster level. Please note it can take up to an hour for custom_tags to be set due to scheduling on Control Plane. After custom tags are applied, they can be modified however they can never be completely removed.
    customer_managed_key_id str

    Deprecated: Use managed_services_customer_managed_key_id instead

    deployment_name str
    part of URL as in https://<prefix>-<deployment-name>.cloud.databricks.com. Deployment name cannot be used until a deployment name prefix is defined. Please contact your Databricks representative. Once a new deployment prefix is added/updated, it only will affect the new workspaces created.
    external_customer_info MwsWorkspacesExternalCustomerInfoArgs
    gcp_managed_network_config MwsWorkspacesGcpManagedNetworkConfigArgs
    gke_config MwsWorkspacesGkeConfigArgs
    A block that specifies GKE configuration for the Databricks workspace:
    is_no_public_ip_enabled bool
    location str
    region of the subnet.
    managed_services_customer_managed_key_id str
    customer_managed_key_id from customer managed keys with use_cases set to MANAGED_SERVICES. This is used to encrypt the workspace's notebook and secret data in the control plane.
    network_id str
    network_id from networks.
    pricing_tier str
    private_access_settings_id str
    Canonical unique identifier of databricks.MwsPrivateAccessSettings in Databricks Account.
    storage_configuration_id str
    storage_configuration_id from storage configuration.
    storage_customer_managed_key_id str
    customer_managed_key_id from customer managed keys with use_cases set to STORAGE. This is used to encrypt the DBFS Storage & Cluster Volumes.
    token MwsWorkspacesTokenArgs
    workspace_id int
    (String) workspace id
    workspace_status str
    (String) workspace status
    workspace_status_message str
    (String) updates on workspace status
    workspace_url str
    (String) URL of the workspace
    accountId String
    Account Id that could be found in the top right corner of Accounts Console.
    workspaceName String
    name of the workspace, will appear on UI.
    awsRegion String
    region of VPC.
    cloud String
    cloudResourceContainer Property Map
    A block that specifies GCP workspace configurations, consisting of following blocks:
    creationTime Number
    (Integer) time when workspace was created
    credentialsId String
    customTags Map<Any>
    The custom tags key-value pairing that is attached to this workspace. These tags will be applied to clusters automatically in addition to any default_tags or custom_tags on a cluster level. Please note it can take up to an hour for custom_tags to be set due to scheduling on Control Plane. After custom tags are applied, they can be modified however they can never be completely removed.
    customerManagedKeyId String

    Deprecated: Use managed_services_customer_managed_key_id instead

    deploymentName String
    part of URL as in https://<prefix>-<deployment-name>.cloud.databricks.com. Deployment name cannot be used until a deployment name prefix is defined. Please contact your Databricks representative. Once a new deployment prefix is added/updated, it only will affect the new workspaces created.
    externalCustomerInfo Property Map
    gcpManagedNetworkConfig Property Map
    gkeConfig Property Map
    A block that specifies GKE configuration for the Databricks workspace:
    isNoPublicIpEnabled Boolean
    location String
    region of the subnet.
    managedServicesCustomerManagedKeyId String
    customer_managed_key_id from customer managed keys with use_cases set to MANAGED_SERVICES. This is used to encrypt the workspace's notebook and secret data in the control plane.
    networkId String
    network_id from networks.
    pricingTier String
    privateAccessSettingsId String
    Canonical unique identifier of databricks.MwsPrivateAccessSettings in Databricks Account.
    storageConfigurationId String
    storage_configuration_id from storage configuration.
    storageCustomerManagedKeyId String
    customer_managed_key_id from customer managed keys with use_cases set to STORAGE. This is used to encrypt the DBFS Storage & Cluster Volumes.
    token Property Map
    workspaceId Number
    (String) workspace id
    workspaceStatus String
    (String) workspace status
    workspaceStatusMessage String
    (String) updates on workspace status
    workspaceUrl String
    (String) URL of the workspace

    Outputs

    All input properties are implicitly available as output properties. Additionally, the MwsWorkspaces resource produces the following output properties:

    Id string
    The provider-assigned unique ID for this managed resource.
    Id string
    The provider-assigned unique ID for this managed resource.
    id String
    The provider-assigned unique ID for this managed resource.
    id string
    The provider-assigned unique ID for this managed resource.
    id str
    The provider-assigned unique ID for this managed resource.
    id String
    The provider-assigned unique ID for this managed resource.

    Look up Existing MwsWorkspaces Resource

    Get an existing MwsWorkspaces resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

    public static get(name: string, id: Input<ID>, state?: MwsWorkspacesState, opts?: CustomResourceOptions): MwsWorkspaces
    @staticmethod
    def get(resource_name: str,
            id: str,
            opts: Optional[ResourceOptions] = None,
            account_id: Optional[str] = None,
            aws_region: Optional[str] = None,
            cloud: Optional[str] = None,
            cloud_resource_container: Optional[MwsWorkspacesCloudResourceContainerArgs] = None,
            creation_time: Optional[int] = None,
            credentials_id: Optional[str] = None,
            custom_tags: Optional[Mapping[str, Any]] = None,
            customer_managed_key_id: Optional[str] = None,
            deployment_name: Optional[str] = None,
            external_customer_info: Optional[MwsWorkspacesExternalCustomerInfoArgs] = None,
            gcp_managed_network_config: Optional[MwsWorkspacesGcpManagedNetworkConfigArgs] = None,
            gke_config: Optional[MwsWorkspacesGkeConfigArgs] = None,
            is_no_public_ip_enabled: Optional[bool] = None,
            location: Optional[str] = None,
            managed_services_customer_managed_key_id: Optional[str] = None,
            network_id: Optional[str] = None,
            pricing_tier: Optional[str] = None,
            private_access_settings_id: Optional[str] = None,
            storage_configuration_id: Optional[str] = None,
            storage_customer_managed_key_id: Optional[str] = None,
            token: Optional[MwsWorkspacesTokenArgs] = None,
            workspace_id: Optional[int] = None,
            workspace_name: Optional[str] = None,
            workspace_status: Optional[str] = None,
            workspace_status_message: Optional[str] = None,
            workspace_url: Optional[str] = None) -> MwsWorkspaces
    func GetMwsWorkspaces(ctx *Context, name string, id IDInput, state *MwsWorkspacesState, opts ...ResourceOption) (*MwsWorkspaces, error)
    public static MwsWorkspaces Get(string name, Input<string> id, MwsWorkspacesState? state, CustomResourceOptions? opts = null)
    public static MwsWorkspaces get(String name, Output<String> id, MwsWorkspacesState state, CustomResourceOptions options)
    Resource lookup is not supported in YAML
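
    For example, a hedged TypeScript lookup; the resource ID format shown is an assumption based on the provider's <account_id>/<workspace_id> import convention, so check the actual ID in your stack state.

    import * as databricks from "@pulumi/databricks";
    
    // Adopt the state of an already-provisioned workspace without managing it.
    const existing = databricks.MwsWorkspaces.get(
        "existing",
        "00000000-0000-0000-0000-000000000000/1234567890123456",
    );
    
    export const existingWorkspaceUrl = existing.workspaceUrl;
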
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    resource_name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    The following state arguments are supported:
    AccountId string
    Account Id that could be found in the top right corner of Accounts Console.
    AwsRegion string
    region of VPC.
    Cloud string
    CloudResourceContainer MwsWorkspacesCloudResourceContainer
    A block that specifies GCP workspace configurations, consisting of following blocks:
    CreationTime int
    (Integer) time when workspace was created
    CredentialsId string
    CustomTags Dictionary<string, object>
    The custom tags key-value pairing that is attached to this workspace. These tags will be applied to clusters automatically in addition to any default_tags or custom_tags on a cluster level. Please note it can take up to an hour for custom_tags to be set due to scheduling on Control Plane. After custom tags are applied, they can be modified however they can never be completely removed.
    CustomerManagedKeyId string

    Deprecated: Use managed_services_customer_managed_key_id instead

    DeploymentName string
    part of URL as in https://<prefix>-<deployment-name>.cloud.databricks.com. Deployment name cannot be used until a deployment name prefix is defined. Please contact your Databricks representative. Once a new deployment prefix is added/updated, it only will affect the new workspaces created.
    ExternalCustomerInfo MwsWorkspacesExternalCustomerInfo
    GcpManagedNetworkConfig MwsWorkspacesGcpManagedNetworkConfig
    GkeConfig MwsWorkspacesGkeConfig
    A block that specifies GKE configuration for the Databricks workspace:
    IsNoPublicIpEnabled bool
    Location string
    region of the subnet.
    ManagedServicesCustomerManagedKeyId string
    customer_managed_key_id from customer managed keys with use_cases set to MANAGED_SERVICES. This is used to encrypt the workspace's notebook and secret data in the control plane.
    NetworkId string
    network_id from networks.
    PricingTier string
    PrivateAccessSettingsId string
    Canonical unique identifier of databricks.MwsPrivateAccessSettings in Databricks Account.
    StorageConfigurationId string
    storage_configuration_id from storage configuration.
    StorageCustomerManagedKeyId string
    customer_managed_key_id from customer managed keys with use_cases set to STORAGE. This is used to encrypt the DBFS Storage & Cluster Volumes.
    Token MwsWorkspacesToken
    WorkspaceId int
    (String) workspace id
    WorkspaceName string
    name of the workspace, will appear on UI.
    WorkspaceStatus string
    (String) workspace status
    WorkspaceStatusMessage string
    (String) updates on workspace status
    WorkspaceUrl string
    (String) URL of the workspace
    AccountId string
    Account Id that could be found in the top right corner of Accounts Console.
    AwsRegion string
    region of VPC.
    Cloud string
    CloudResourceContainer MwsWorkspacesCloudResourceContainerArgs
    A block that specifies GCP workspace configurations, consisting of following blocks:
    CreationTime int
    (Integer) time when workspace was created
    CredentialsId string
    CustomTags map[string]interface{}
    The custom tags key-value pairing that is attached to this workspace. These tags will be applied to clusters automatically in addition to any default_tags or custom_tags on a cluster level. Please note it can take up to an hour for custom_tags to be set due to scheduling on Control Plane. After custom tags are applied, they can be modified however they can never be completely removed.
    CustomerManagedKeyId string

    Deprecated: Use managed_services_customer_managed_key_id instead

    DeploymentName string
    part of URL as in https://<prefix>-<deployment-name>.cloud.databricks.com. Deployment name cannot be used until a deployment name prefix is defined. Please contact your Databricks representative. Once a new deployment prefix is added/updated, it only will affect the new workspaces created.
    ExternalCustomerInfo MwsWorkspacesExternalCustomerInfoArgs
    GcpManagedNetworkConfig MwsWorkspacesGcpManagedNetworkConfigArgs
    GkeConfig MwsWorkspacesGkeConfigArgs
    A block that specifies GKE configuration for the Databricks workspace:
    IsNoPublicIpEnabled bool
    Location string
    region of the subnet.
    ManagedServicesCustomerManagedKeyId string
    customer_managed_key_id from customer managed keys with use_cases set to MANAGED_SERVICES. This is used to encrypt the workspace's notebook and secret data in the control plane.
    NetworkId string
    network_id from networks.
    PricingTier string
    PrivateAccessSettingsId string
    Canonical unique identifier of databricks.MwsPrivateAccessSettings in Databricks Account.
    StorageConfigurationId string
    storage_configuration_id from storage configuration.
    StorageCustomerManagedKeyId string
    customer_managed_key_id from customer managed keys with use_cases set to STORAGE. This is used to encrypt the DBFS Storage & Cluster Volumes.
    Token MwsWorkspacesTokenArgs
    WorkspaceId int
    (String) workspace id
    WorkspaceName string
    name of the workspace, will appear on UI.
    WorkspaceStatus string
    (String) workspace status
    WorkspaceStatusMessage string
    (String) updates on workspace status
    WorkspaceUrl string
    (String) URL of the workspace
    accountId String
    Account Id that could be found in the top right corner of Accounts Console.
    awsRegion String
    region of VPC.
    cloud String
    cloudResourceContainer MwsWorkspacesCloudResourceContainer
    A block that specifies GCP workspace configurations, consisting of following blocks:
    creationTime Integer
    (Integer) time when workspace was created
    credentialsId String
    customTags Map<String,Object>
    The custom tags key-value pairing that is attached to this workspace. These tags will be applied to clusters automatically in addition to any default_tags or custom_tags on a cluster level. Please note it can take up to an hour for custom_tags to be set due to scheduling on Control Plane. After custom tags are applied, they can be modified however they can never be completely removed.
    customerManagedKeyId String

    Deprecated: Use managed_services_customer_managed_key_id instead

    deploymentName String
    part of URL as in https://<prefix>-<deployment-name>.cloud.databricks.com. Deployment name cannot be used until a deployment name prefix is defined. Please contact your Databricks representative. Once a new deployment prefix is added/updated, it only will affect the new workspaces created.
    externalCustomerInfo MwsWorkspacesExternalCustomerInfo
    gcpManagedNetworkConfig MwsWorkspacesGcpManagedNetworkConfig
    gkeConfig MwsWorkspacesGkeConfig
    A block that specifies GKE configuration for the Databricks workspace:
    isNoPublicIpEnabled Boolean
    location String
    region of the subnet.
    managedServicesCustomerManagedKeyId String
    customer_managed_key_id from customer managed keys with use_cases set to MANAGED_SERVICES. This is used to encrypt the workspace's notebook and secret data in the control plane.
    networkId String
    network_id from networks.
    pricingTier String
    privateAccessSettingsId String
    Canonical unique identifier of databricks.MwsPrivateAccessSettings in Databricks Account.
    storageConfigurationId String
    storage_configuration_id from storage configuration.
    storageCustomerManagedKeyId String
    customer_managed_key_id from customer managed keys with use_cases set to STORAGE. This is used to encrypt the DBFS Storage & Cluster Volumes.
    token MwsWorkspacesToken
    workspaceId Integer
    (String) workspace id
    workspaceName String
    name of the workspace, will appear on UI.
    workspaceStatus String
    (String) workspace status
    workspaceStatusMessage String
    (String) updates on workspace status
    workspaceUrl String
    (String) URL of the workspace
    accountId string
    Account Id that could be found in the top right corner of Accounts Console.
    awsRegion string
    region of VPC.
    cloud string
    cloudResourceContainer MwsWorkspacesCloudResourceContainer
    A block that specifies GCP workspace configurations, consisting of following blocks:
    creationTime number
    (Integer) time when workspace was created
    credentialsId string
    customTags {[key: string]: any}
    The custom tags key-value pairing that is attached to this workspace. These tags will be applied to clusters automatically in addition to any default_tags or custom_tags on a cluster level. Please note it can take up to an hour for custom_tags to be set due to scheduling on Control Plane. After custom tags are applied, they can be modified however they can never be completely removed.
    customerManagedKeyId string

    Deprecated: Use managed_services_customer_managed_key_id instead

    deploymentName string
    part of URL as in https://<prefix>-<deployment-name>.cloud.databricks.com. Deployment name cannot be used until a deployment name prefix is defined. Please contact your Databricks representative. Once a new deployment prefix is added/updated, it only will affect the new workspaces created.
    externalCustomerInfo MwsWorkspacesExternalCustomerInfo
    gcpManagedNetworkConfig MwsWorkspacesGcpManagedNetworkConfig
    gkeConfig MwsWorkspacesGkeConfig
    A block that specifies GKE configuration for the Databricks workspace:
    isNoPublicIpEnabled boolean
    location string
    region of the subnet.
    managedServicesCustomerManagedKeyId string
    customer_managed_key_id from customer managed keys with use_cases set to MANAGED_SERVICES. This is used to encrypt the workspace's notebook and secret data in the control plane.
    networkId string
    network_id from networks.
    pricingTier string
    privateAccessSettingsId string
    Canonical unique identifier of databricks.MwsPrivateAccessSettings in Databricks Account.
    storageConfigurationId string
    storage_configuration_id from storage configuration.
    storageCustomerManagedKeyId string
    customer_managed_key_id from customer managed keys with use_cases set to STORAGE. This is used to encrypt the DBFS Storage & Cluster Volumes.
    token MwsWorkspacesToken
    workspaceId number
    (String) workspace id
    workspaceName string
    name of the workspace, will appear on UI.
    workspaceStatus string
    (String) workspace status
    workspaceStatusMessage string
    (String) updates on workspace status
    workspaceUrl string
    (String) URL of the workspace
    account_id str
    Account Id that could be found in the top right corner of Accounts Console.
    aws_region str
    region of VPC.
    cloud str
    cloud_resource_container MwsWorkspacesCloudResourceContainerArgs
    A block that specifies GCP workspace configurations, consisting of following blocks:
    creation_time int
    (Integer) time when workspace was created
    credentials_id str
    custom_tags Mapping[str, Any]
    The custom tags key-value pairing that is attached to this workspace. These tags will be applied to clusters automatically in addition to any default_tags or custom_tags on a cluster level. Please note it can take up to an hour for custom_tags to be set due to scheduling on Control Plane. After custom tags are applied, they can be modified however they can never be completely removed.
    customer_managed_key_id str

    Deprecated: Use managed_services_customer_managed_key_id instead

    deployment_name str
    part of URL as in https://<prefix>-<deployment-name>.cloud.databricks.com. Deployment name cannot be used until a deployment name prefix is defined. Please contact your Databricks representative. Once a new deployment prefix is added/updated, it only will affect the new workspaces created.
    external_customer_info MwsWorkspacesExternalCustomerInfoArgs
    gcp_managed_network_config MwsWorkspacesGcpManagedNetworkConfigArgs
    gke_config MwsWorkspacesGkeConfigArgs
    A block that specifies GKE configuration for the Databricks workspace:
    is_no_public_ip_enabled bool
    location str
    region of the subnet.
    managed_services_customer_managed_key_id str
    customer_managed_key_id from customer managed keys with use_cases set to MANAGED_SERVICES. This is used to encrypt the workspace's notebook and secret data in the control plane.
    network_id str
    network_id from networks.
    pricing_tier str
    private_access_settings_id str
    Canonical unique identifier of databricks.MwsPrivateAccessSettings in Databricks Account.
    storage_configuration_id str
    storage_configuration_id from storage configuration.
    storage_customer_managed_key_id str
    customer_managed_key_id from customer managed keys with use_cases set to STORAGE. This is used to encrypt the DBFS Storage & Cluster Volumes.
    token MwsWorkspacesTokenArgs
    workspace_id int
    (String) workspace id
    workspace_name str
    name of the workspace, will appear on UI.
    workspace_status str
    (String) workspace status
    workspace_status_message str
    (String) updates on workspace status
    workspace_url str
    (String) URL of the workspace
    accountId String
    Account Id that could be found in the top right corner of Accounts Console.
    awsRegion String
    region of VPC.
    cloud String
    cloudResourceContainer Property Map
    A block that specifies GCP workspace configurations, consisting of following blocks:
    creationTime Number
    (Integer) time when workspace was created
    credentialsId String
    customTags Map<Any>
    The custom tags key-value pairing that is attached to this workspace. These tags will be applied to clusters automatically in addition to any default_tags or custom_tags on a cluster level. Please note it can take up to an hour for custom_tags to be set due to scheduling on Control Plane. After custom tags are applied, they can be modified however they can never be completely removed.
    customerManagedKeyId String

    Deprecated: Use managed_services_customer_managed_key_id instead

    deploymentName String
    part of URL as in https://<prefix>-<deployment-name>.cloud.databricks.com. Deployment name cannot be used until a deployment name prefix is defined. Please contact your Databricks representative. Once a new deployment prefix is added/updated, it only will affect the new workspaces created.
    externalCustomerInfo Property Map
    gcpManagedNetworkConfig Property Map
    gkeConfig Property Map
    A block that specifies GKE configuration for the Databricks workspace:
    isNoPublicIpEnabled Boolean
    location String
    region of the subnet.
    managedServicesCustomerManagedKeyId String
    customer_managed_key_id from customer managed keys with use_cases set to MANAGED_SERVICES. This is used to encrypt the workspace's notebook and secret data in the control plane.
    networkId String
    network_id from networks.
    pricingTier String
    privateAccessSettingsId String
    Canonical unique identifier of databricks.MwsPrivateAccessSettings in Databricks Account.
    storageConfigurationId String
    storage_configuration_id from storage configuration.
    storageCustomerManagedKeyId String
    customer_managed_key_id from customer managed keys with use_cases set to STORAGE. This is used to encrypt the DBFS Storage & Cluster Volumes.
    token Property Map
    workspaceId Number
    (String) workspace id
    workspaceName String
    name of the workspace, will appear on UI.
    workspaceStatus String
    (String) workspace status
    workspaceStatusMessage String
    (String) updates on workspace status
    workspaceUrl String
    (String) URL of the workspace

    Supporting Types

    MwsWorkspacesCloudResourceContainer, MwsWorkspacesCloudResourceContainerArgs

    Gcp MwsWorkspacesCloudResourceContainerGcp
    A block that consists of the following field:
    Gcp MwsWorkspacesCloudResourceContainerGcp
    A block that consists of the following field:
    gcp MwsWorkspacesCloudResourceContainerGcp
    A block that consists of the following field:
    gcp MwsWorkspacesCloudResourceContainerGcp
    A block that consists of the following field:
    gcp MwsWorkspacesCloudResourceContainerGcp
    A block that consists of the following field:
    gcp Property Map
    A block that consists of the following field:

    MwsWorkspacesCloudResourceContainerGcp, MwsWorkspacesCloudResourceContainerGcpArgs

    ProjectId string
    The Google Cloud project ID, which the workspace uses to instantiate cloud resources for your workspace.
    ProjectId string
    The Google Cloud project ID, which the workspace uses to instantiate cloud resources for your workspace.
    projectId String
    The Google Cloud project ID, which the workspace uses to instantiate cloud resources for your workspace.
    projectId string
    The Google Cloud project ID, which the workspace uses to instantiate cloud resources for your workspace.
    project_id str
    The Google Cloud project ID, which the workspace uses to instantiate cloud resources for your workspace.
    projectId String
    The Google Cloud project ID, which the workspace uses to instantiate cloud resources for your workspace.

    MwsWorkspacesExternalCustomerInfo, MwsWorkspacesExternalCustomerInfoArgs

    MwsWorkspacesGcpManagedNetworkConfig, MwsWorkspacesGcpManagedNetworkConfigArgs

    MwsWorkspacesGkeConfig, MwsWorkspacesGkeConfigArgs

    ConnectivityType string
    Specifies the network connectivity types for the GKE nodes and the GKE master network. Possible values are: PRIVATE_NODE_PUBLIC_MASTER, PUBLIC_NODE_PUBLIC_MASTER.
    MasterIpRange string
    The IP range from which to allocate GKE cluster master resources. This field is ignored if the GKE private cluster is not enabled. The range must be exactly a /28 block.
    ConnectivityType string
    Specifies the network connectivity types for the GKE nodes and the GKE master network. Possible values are: PRIVATE_NODE_PUBLIC_MASTER, PUBLIC_NODE_PUBLIC_MASTER.
    MasterIpRange string
    The IP range from which to allocate GKE cluster master resources. This field is ignored if the GKE private cluster is not enabled. The range must be exactly a /28 block.
    connectivityType String
    Specifies the network connectivity types for the GKE nodes and the GKE master network. Possible values are: PRIVATE_NODE_PUBLIC_MASTER, PUBLIC_NODE_PUBLIC_MASTER.
    masterIpRange String
    The IP range from which to allocate GKE cluster master resources. This field is ignored if the GKE private cluster is not enabled. The range must be exactly a /28 block.
    connectivityType string
    Specifies the network connectivity types for the GKE nodes and the GKE master network. Possible values are: PRIVATE_NODE_PUBLIC_MASTER, PUBLIC_NODE_PUBLIC_MASTER.
    masterIpRange string
    The IP range from which to allocate GKE cluster master resources. This field is ignored if the GKE private cluster is not enabled. The range must be exactly a /28 block.
    connectivity_type str
    Specifies the network connectivity types for the GKE nodes and the GKE master network. Possible values are: PRIVATE_NODE_PUBLIC_MASTER, PUBLIC_NODE_PUBLIC_MASTER.
    master_ip_range str
    The IP range from which to allocate GKE cluster master resources. This field is ignored if the GKE private cluster is not enabled. The range must be exactly a /28 block.
    connectivityType String
    Specifies the network connectivity types for the GKE nodes and the GKE master network. Possible values are: PRIVATE_NODE_PUBLIC_MASTER, PUBLIC_NODE_PUBLIC_MASTER.
    masterIpRange String
    The IP range from which to allocate GKE cluster master resources. This field is ignored if the GKE private cluster is not enabled. The range must be exactly a /28 block.
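
    The two fields above are typically supplied together on a GCP workspace. Below is a minimal sketch of just the gkeConfig block, assuming the generated TypeScript input type name matches the supporting type listed on this page; the /28 range shown is only an illustration and must be an otherwise unused range in your network.

import * as databricks from "@pulumi/databricks";

// A gkeConfig block to attach to the MwsWorkspaces arguments of a GCP workspace,
// such as the one sketched above.
const gkeConfig: databricks.types.input.MwsWorkspacesGkeConfig = {
    // Private GKE nodes with a publicly reachable master endpoint.
    connectivityType: "PRIVATE_NODE_PUBLIC_MASTER",
    // Ignored unless the GKE private cluster is enabled; must be exactly a /28.
    masterIpRange: "10.3.0.0/28",
};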

    MwsWorkspacesToken, MwsWorkspacesTokenArgs

    Comment string
    LifetimeSeconds int
    Token expiry lifetime in seconds. The default is 2592000 (30 days).
    TokenId string
    TokenValue string
    Comment string
    LifetimeSeconds int
    Token expiry lifetime in seconds. The default is 2592000 (30 days).
    TokenId string
    TokenValue string
    comment String
    lifetimeSeconds Integer
    Token expiry lifetime in seconds. The default is 2592000 (30 days).
    tokenId String
    tokenValue String
    comment string
    lifetimeSeconds number
    Token expiry lifetime in seconds. The default is 2592000 (30 days).
    tokenId string
    tokenValue string
    comment str
    lifetime_seconds int
    Token expiry lifetime in seconds. The default is 2592000 (30 days).
    token_id str
    token_value str
    comment String
    lifetimeSeconds Number
    Token expiry lifetime in seconds. The default is 2592000 (30 days).
    tokenId String
    tokenValue String
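
    The sketch below illustrates requesting a shorter-lived PAT through the token block and exporting its value as a secret. The comment, the 7-day lifetime, the region, and the config keys are illustrative; the credentials and storage configuration IDs would come from the corresponding account-level resources.

import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

const config = new pulumi.Config();

// Assumes the default provider is configured against https://accounts.cloud.databricks.com.
const workspaceWithToken = new databricks.MwsWorkspaces("workspaceWithToken", {
    accountId: config.require("databricksAccountId"),
    workspaceName: "automation",
    awsRegion: "us-east-1",
    credentialsId: config.require("credentialsId"),                   // from databricks.MwsCredentials
    storageConfigurationId: config.require("storageConfigurationId"), // from databricks.MwsStorageConfigurations
    token: {
        comment: "pat for automation",  // free-form label for the token
        lifetimeSeconds: 7 * 24 * 3600, // 7 days instead of the 2592000-second default
    },
});

// token_value is sensitive, so wrap the export in pulumi.secret.
export const automationToken = pulumi.secret(
    workspaceWithToken.token.apply(t => t?.tokenValue));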

    Package Details

    Repository
    databricks pulumi/pulumi-databricks
    License
    Apache-2.0
    Notes
    This Pulumi package is based on the databricks Terraform Provider.