Databricks v1.10.0 (published Mar 15, 2023)

databricks.Entitlements

This resource allows you to set entitlements on an existing databricks.User, databricks.Group, or databricks.ServicePrincipal.

Note: You must define a principal's entitlements either with databricks.Entitlements or directly within one of databricks.User, databricks.Group, or databricks.ServicePrincipal. Defining entitlements in both places will result in non-deterministic behaviour.

The following resources are often used in the same context:

  • End to end workspace management guide.
  • databricks.Group to manage groups in the Databricks Workspace or Account Console (for AWS deployments).
  • databricks.Group data source to retrieve information about databricks.Group members, entitlements, and instance profiles.
  • databricks.GroupInstanceProfile to attach databricks.InstanceProfile (AWS) to databricks.Group.
  • databricks.GroupMember to attach users and groups as group members.
  • databricks.InstanceProfile to manage AWS EC2 instance profiles with which users can launch databricks.Cluster and access data, such as databricks.Mount.
  • databricks.User data source to retrieve information about databricks.User.

Example Usage

Setting entitlements for a regular user

using System.Collections.Generic;
using Pulumi;
using Databricks = Pulumi.Databricks;

return await Deployment.RunAsync(() => 
{
    var meUser = Databricks.GetUser.Invoke(new()
    {
        UserName = "me@example.com",
    });

    var meEntitlements = new Databricks.Entitlements("meEntitlements", new()
    {
        UserId = meUser.Apply(getUserResult => getUserResult.Id),
        AllowClusterCreate = true,
        AllowInstancePoolCreate = true,
    });

});
package main

import (
	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		meUser, err := databricks.LookupUser(ctx, &databricks.LookupUserArgs{
			UserName: pulumi.StringRef("me@example.com"),
		}, nil)
		if err != nil {
			return err
		}
		_, err = databricks.NewEntitlements(ctx, "meEntitlements", &databricks.EntitlementsArgs{
			UserId:                  pulumi.String(meUser.Id),
			AllowClusterCreate:      pulumi.Bool(true),
			AllowInstancePoolCreate: pulumi.Bool(true),
		})
		if err != nil {
			return err
		}
		return nil
	})
}
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.databricks.DatabricksFunctions;
import com.pulumi.databricks.inputs.GetUserArgs;
import com.pulumi.databricks.Entitlements;
import com.pulumi.databricks.EntitlementsArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        final var meUser = DatabricksFunctions.getUser(GetUserArgs.builder()
            .userName("me@example.com")
            .build());

        var meEntitlements = new Entitlements("meEntitlements", EntitlementsArgs.builder()        
            .userId(meUser.applyValue(getUserResult -> getUserResult.id()))
            .allowClusterCreate(true)
            .allowInstancePoolCreate(true)
            .build());

    }
}
import pulumi
import pulumi_databricks as databricks

me_user = databricks.get_user(user_name="me@example.com")
me_entitlements = databricks.Entitlements("meEntitlements",
    user_id=me_user.id,
    allow_cluster_create=True,
    allow_instance_pool_create=True)
import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

const meUser = databricks.getUser({
    userName: "me@example.com",
});
const meEntitlements = new databricks.Entitlements("meEntitlements", {
    userId: meUser.then(meUser => meUser.id),
    allowClusterCreate: true,
    allowInstancePoolCreate: true,
});
resources:
  meEntitlements:
    type: databricks:Entitlements
    properties:
      userId: ${meUser.id}
      allowClusterCreate: true
      allowInstancePoolCreate: true
variables:
  meUser:
    fn::invoke:
      Function: databricks:getUser
      Arguments:
        userName: me@example.com

Setting entitlements for a service principal

using System.Collections.Generic;
using Pulumi;
using Databricks = Pulumi.Databricks;

return await Deployment.RunAsync(() => 
{
    var thisServicePrincipal = Databricks.GetServicePrincipal.Invoke(new()
    {
        ApplicationId = "11111111-2222-3333-4444-555666777888",
    });

    var thisEntitlements = new Databricks.Entitlements("thisEntitlements", new()
    {
        ServicePrincipalId = thisServicePrincipal.Apply(getServicePrincipalResult => getServicePrincipalResult.SpId),
        AllowClusterCreate = true,
        AllowInstancePoolCreate = true,
    });

});
package main

import (
	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		thisServicePrincipal, err := databricks.LookupServicePrincipal(ctx, &databricks.LookupServicePrincipalArgs{
			ApplicationId: pulumi.StringRef("11111111-2222-3333-4444-555666777888"),
		}, nil)
		if err != nil {
			return err
		}
		_, err = databricks.NewEntitlements(ctx, "thisEntitlements", &databricks.EntitlementsArgs{
			ServicePrincipalId:      pulumi.String(thisServicePrincipal.SpId),
			AllowClusterCreate:      pulumi.Bool(true),
			AllowInstancePoolCreate: pulumi.Bool(true),
		})
		if err != nil {
			return err
		}
		return nil
	})
}
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.databricks.DatabricksFunctions;
import com.pulumi.databricks.inputs.GetServicePrincipalArgs;
import com.pulumi.databricks.Entitlements;
import com.pulumi.databricks.EntitlementsArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        final var thisServicePrincipal = DatabricksFunctions.getServicePrincipal(GetServicePrincipalArgs.builder()
            .applicationId("11111111-2222-3333-4444-555666777888")
            .build());

        var thisEntitlements = new Entitlements("thisEntitlements", EntitlementsArgs.builder()        
            .servicePrincipalId(thisServicePrincipal.applyValue(getServicePrincipalResult -> getServicePrincipalResult.spId()))
            .allowClusterCreate(true)
            .allowInstancePoolCreate(true)
            .build());

    }
}
import pulumi
import pulumi_databricks as databricks

this_service_principal = databricks.get_service_principal(application_id="11111111-2222-3333-4444-555666777888")
this_entitlements = databricks.Entitlements("thisEntitlements",
    service_principal_id=this_service_principal.sp_id,
    allow_cluster_create=True,
    allow_instance_pool_create=True)
import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

const thisServicePrincipal = databricks.getServicePrincipal({
    applicationId: "11111111-2222-3333-4444-555666777888",
});
const thisEntitlements = new databricks.Entitlements("thisEntitlements", {
    servicePrincipalId: thisServicePrincipal.then(thisServicePrincipal => thisServicePrincipal.spId),
    allowClusterCreate: true,
    allowInstancePoolCreate: true,
});
resources:
  thisEntitlements:
    type: databricks:Entitlements
    properties:
      servicePrincipalId: ${thisServicePrincipal.spId}
      allowClusterCreate: true
      allowInstancePoolCreate: true
variables:
  thisServicePrincipal:
    fn::invoke:
      Function: databricks:getServicePrincipal
      Arguments:
        applicationId: 11111111-2222-3333-4444-555666777888

databricks.Group

using System.Collections.Generic;
using Pulumi;
using Databricks = Pulumi.Databricks;

return await Deployment.RunAsync(() => 
{
    var users = Databricks.GetGroup.Invoke(new()
    {
        DisplayName = "users",
    });

    var workspace_users = new Databricks.Entitlements("workspace-users", new()
    {
        GroupId = users.Apply(getGroupResult => getGroupResult.Id),
        AllowClusterCreate = true,
        AllowInstancePoolCreate = true,
    });

});
package main

import (
	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		users, err := databricks.LookupGroup(ctx, &databricks.LookupGroupArgs{
			DisplayName: "users",
		}, nil)
		if err != nil {
			return err
		}
		_, err = databricks.NewEntitlements(ctx, "workspace-users", &databricks.EntitlementsArgs{
			GroupId:                 pulumi.String(users.Id),
			AllowClusterCreate:      pulumi.Bool(true),
			AllowInstancePoolCreate: pulumi.Bool(true),
		})
		if err != nil {
			return err
		}
		return nil
	})
}
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.databricks.DatabricksFunctions;
import com.pulumi.databricks.inputs.GetGroupArgs;
import com.pulumi.databricks.Entitlements;
import com.pulumi.databricks.EntitlementsArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        final var users = DatabricksFunctions.getGroup(GetGroupArgs.builder()
            .displayName("users")
            .build());

        var workspace_users = new Entitlements("workspace-users", EntitlementsArgs.builder()        
            .groupId(users.applyValue(getGroupResult -> getGroupResult.id()))
            .allowClusterCreate(true)
            .allowInstancePoolCreate(true)
            .build());

    }
}
import pulumi
import pulumi_databricks as databricks

users = databricks.get_group(display_name="users")
workspace_users = databricks.Entitlements("workspace-users",
    group_id=users.id,
    allow_cluster_create=True,
    allow_instance_pool_create=True)
import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

const users = databricks.getGroup({
    displayName: "users",
});
const workspace_users = new databricks.Entitlements("workspace-users", {
    groupId: users.then(users => users.id),
    allowClusterCreate: true,
    allowInstancePoolCreate: true,
});
resources:
  workspace-users:
    type: databricks:Entitlements
    properties:
      groupId: ${users.id}
      allowClusterCreate: true
      allowInstancePoolCreate: true
variables:
  users:
    fn::invoke:
      Function: databricks:getGroup
      Arguments:
        displayName: users

Create Entitlements Resource

new Entitlements(name: string, args?: EntitlementsArgs, opts?: CustomResourceOptions);
@overload
def Entitlements(resource_name: str,
                 opts: Optional[ResourceOptions] = None,
                 allow_cluster_create: Optional[bool] = None,
                 allow_instance_pool_create: Optional[bool] = None,
                 databricks_sql_access: Optional[bool] = None,
                 group_id: Optional[str] = None,
                 service_principal_id: Optional[str] = None,
                 user_id: Optional[str] = None,
                 workspace_access: Optional[bool] = None)
@overload
def Entitlements(resource_name: str,
                 args: Optional[EntitlementsArgs] = None,
                 opts: Optional[ResourceOptions] = None)
func NewEntitlements(ctx *Context, name string, args *EntitlementsArgs, opts ...ResourceOption) (*Entitlements, error)
public Entitlements(string name, EntitlementsArgs? args = null, CustomResourceOptions? opts = null)
public Entitlements(String name, EntitlementsArgs args)
public Entitlements(String name, EntitlementsArgs args, CustomResourceOptions options)
type: databricks:Entitlements
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.

name string
The unique name of the resource.
args EntitlementsArgs
The arguments to resource properties.
opts CustomResourceOptions
Bag of options to control resource's behavior.
resource_name str
The unique name of the resource.
args EntitlementsArgs
The arguments to resource properties.
opts ResourceOptions
Bag of options to control resource's behavior.
ctx Context
Context object for the current deployment.
name string
The unique name of the resource.
args EntitlementsArgs
The arguments to resource properties.
opts ResourceOption
Bag of options to control resource's behavior.
name string
The unique name of the resource.
args EntitlementsArgs
The arguments to resource properties.
opts CustomResourceOptions
Bag of options to control resource's behavior.
name String
The unique name of the resource.
args EntitlementsArgs
The arguments to resource properties.
options CustomResourceOptions
Bag of options to control resource's behavior.
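
The usage examples above only exercise the cluster and instance pool entitlements; the constructor also accepts databricks_sql_access and workspace_access (documented under Inputs below). A minimal Python sketch, using an illustrative group name, might look like this:

import pulumi_databricks as databricks

# Illustrative: grant a hypothetical "analysts" group SQL and workspace access only.
analysts = databricks.get_group(display_name="analysts")

analysts_entitlements = databricks.Entitlements("analysts-entitlements",
    group_id=analysts.id,
    databricks_sql_access=True,
    workspace_access=True)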

Entitlements Resource Properties

To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

Inputs

The Entitlements resource accepts the following input properties:

AllowClusterCreate bool

Allow the principal to create clusters. Defaults to false. More fine-grained permissions can be assigned with databricks.Permissions and the cluster_id argument. Principals without allow_cluster_create set, but with permission to use a cluster policy, can still create clusters within the boundaries of that policy.

AllowInstancePoolCreate bool

Allow the principal to create instance pools. Defaults to false. More fine-grained permissions can be assigned with databricks.Permissions and the instance_pool_id argument.

DatabricksSqlAccess bool

Allow the principal to access the Databricks SQL feature, both in the user interface and through databricks.SqlEndpoint.

GroupId string

Canonical unique identifier for the group.

ServicePrincipalId string

Canonical unique identifier for the service principal.

UserId string

Canonical unique identifier for the user.

WorkspaceAccess bool

Allow the principal to access the Databricks workspace.

AllowClusterCreate bool

Allow the principal to create clusters. Defaults to false. More fine-grained permissions can be assigned with databricks.Permissions and the cluster_id argument. Principals without allow_cluster_create set, but with permission to use a cluster policy, can still create clusters within the boundaries of that policy.

AllowInstancePoolCreate bool

Allow the principal to create instance pools. Defaults to false. More fine-grained permissions can be assigned with databricks.Permissions and the instance_pool_id argument.

DatabricksSqlAccess bool

Allow the principal to access the Databricks SQL feature, both in the user interface and through databricks.SqlEndpoint.

GroupId string

Canonical unique identifier for the group.

ServicePrincipalId string

Canonical unique identifier for the service principal.

UserId string

Canonical unique identifier for the user.

WorkspaceAccess bool

Allow the principal to access the Databricks workspace.

allowClusterCreate Boolean

Allow the principal to create clusters. Defaults to false. More fine-grained permissions can be assigned with databricks.Permissions and the cluster_id argument. Principals without allow_cluster_create set, but with permission to use a cluster policy, can still create clusters within the boundaries of that policy.

allowInstancePoolCreate Boolean

Allow the principal to create instance pools. Defaults to false. More fine-grained permissions can be assigned with databricks.Permissions and the instance_pool_id argument.

databricksSqlAccess Boolean

Allow the principal to access the Databricks SQL feature, both in the user interface and through databricks.SqlEndpoint.

groupId String

Canonical unique identifier for the group.

servicePrincipalId String

Canonical unique identifier for the service principal.

userId String

Canonical unique identifier for the user.

workspaceAccess Boolean

Allow the principal to access the Databricks workspace.

allowClusterCreate boolean

Allow the principal to create clusters. Defaults to false. More fine-grained permissions can be assigned with databricks.Permissions and the cluster_id argument. Principals without allow_cluster_create set, but with permission to use a cluster policy, can still create clusters within the boundaries of that policy.

allowInstancePoolCreate boolean

Allow the principal to create instance pools. Defaults to false. More fine-grained permissions can be assigned with databricks.Permissions and the instance_pool_id argument.

databricksSqlAccess boolean

Allow the principal to access the Databricks SQL feature, both in the user interface and through databricks.SqlEndpoint.

groupId string

Canonical unique identifier for the group.

servicePrincipalId string

Canonical unique identifier for the service principal.

userId string

Canonical unique identifier for the user.

workspaceAccess boolean

Allow the principal to access the Databricks workspace.

allow_cluster_create bool

Allow the principal to create clusters. Defaults to false. More fine-grained permissions can be assigned with databricks.Permissions and the cluster_id argument. Principals without allow_cluster_create set, but with permission to use a cluster policy, can still create clusters within the boundaries of that policy.

allow_instance_pool_create bool

Allow the principal to create instance pools. Defaults to false. More fine-grained permissions can be assigned with databricks.Permissions and the instance_pool_id argument.

databricks_sql_access bool

Allow the principal to access the Databricks SQL feature, both in the user interface and through databricks.SqlEndpoint.

group_id str

Canonical unique identifier for the group.

service_principal_id str

Canonical unique identifier for the service principal.

user_id str

Canonical unique identifier for the user.

workspace_access bool

Allow the principal to access the Databricks workspace.

allowClusterCreate Boolean

Allow the principal to create clusters. Defaults to false. More fine-grained permissions can be assigned with databricks.Permissions and the cluster_id argument. Principals without allow_cluster_create set, but with permission to use a cluster policy, can still create clusters within the boundaries of that policy.

allowInstancePoolCreate Boolean

Allow the principal to create instance pools. Defaults to false. More fine-grained permissions can be assigned with databricks.Permissions and the instance_pool_id argument.

databricksSqlAccess Boolean

Allow the principal to access the Databricks SQL feature, both in the user interface and through databricks.SqlEndpoint.

groupId String

Canonical unique identifier for the group.

servicePrincipalId String

Canonical unique identifier for the service principal.

userId String

Canonical unique identifier for the user.

workspaceAccess Boolean

Allow the principal to access the Databricks workspace.
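
As noted for allow_cluster_create above, a principal without that entitlement can still create clusters if it is permitted to use a cluster policy; the clusters are then constrained by that policy. A minimal Python sketch of that setup follows; the policy definition and the names used are illustrative and not part of this resource's schema:

import json
import pulumi_databricks as databricks

# An illustrative cluster policy that pins auto-termination.
shared_policy = databricks.ClusterPolicy("shared-policy",
    name="Shared Autoscaling",
    definition=json.dumps({
        "autotermination_minutes": {"type": "fixed", "value": 20, "hidden": True},
    }))

# Grant the workspace `users` group CAN_USE on the policy. Members can then
# create clusters within the policy's boundaries even without allow_cluster_create.
policy_usage = databricks.Permissions("policy-usage",
    cluster_policy_id=shared_policy.id,
    access_controls=[databricks.PermissionsAccessControlArgs(
        group_name="users",
        permission_level="CAN_USE",
    )])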

Outputs

All input properties are implicitly available as output properties. Additionally, the Entitlements resource produces the following output properties:

Id string

The provider-assigned unique ID for this managed resource.

Id string

The provider-assigned unique ID for this managed resource.

id String

The provider-assigned unique ID for this managed resource.

id string

The provider-assigned unique ID for this managed resource.

id str

The provider-assigned unique ID for this managed resource.

id String

The provider-assigned unique ID for this managed resource.

Look up Existing Entitlements Resource

Get an existing Entitlements resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

public static get(name: string, id: Input<ID>, state?: EntitlementsState, opts?: CustomResourceOptions): Entitlements
@staticmethod
def get(resource_name: str,
        id: str,
        opts: Optional[ResourceOptions] = None,
        allow_cluster_create: Optional[bool] = None,
        allow_instance_pool_create: Optional[bool] = None,
        databricks_sql_access: Optional[bool] = None,
        group_id: Optional[str] = None,
        service_principal_id: Optional[str] = None,
        user_id: Optional[str] = None,
        workspace_access: Optional[bool] = None) -> Entitlements
func GetEntitlements(ctx *Context, name string, id IDInput, state *EntitlementsState, opts ...ResourceOption) (*Entitlements, error)
public static Entitlements Get(string name, Input<string> id, EntitlementsState? state, CustomResourceOptions? opts = null)
public static Entitlements get(String name, Output<String> id, EntitlementsState state, CustomResourceOptions options)
Resource lookup is not supported in YAML
name
The unique name of the resulting resource.
id
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
resource_name
The unique name of the resulting resource.
id
The unique provider ID of the resource to lookup.
name
The unique name of the resulting resource.
id
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
name
The unique name of the resulting resource.
id
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
name
The unique name of the resulting resource.
id
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
The following state arguments are supported:
AllowClusterCreate bool

Allow the principal to create clusters. Defaults to false. More fine-grained permissions can be assigned with databricks.Permissions and the cluster_id argument. Principals without allow_cluster_create set, but with permission to use a cluster policy, can still create clusters within the boundaries of that policy.

AllowInstancePoolCreate bool

Allow the principal to create instance pools. Defaults to false. More fine-grained permissions can be assigned with databricks.Permissions and the instance_pool_id argument.

DatabricksSqlAccess bool

Allow the principal to access the Databricks SQL feature, both in the user interface and through databricks.SqlEndpoint.

GroupId string

Canonical unique identifier for the group.

ServicePrincipalId string

Canonical unique identifier for the service principal.

UserId string

Canonical unique identifier for the user.

WorkspaceAccess bool

Allow the principal to access the Databricks workspace.

AllowClusterCreate bool

Allow the principal to create clusters. Defaults to false. More fine-grained permissions can be assigned with databricks.Permissions and the cluster_id argument. Principals without allow_cluster_create set, but with permission to use a cluster policy, can still create clusters within the boundaries of that policy.

AllowInstancePoolCreate bool

Allow the principal to create instance pools. Defaults to false. More fine-grained permissions can be assigned with databricks.Permissions and the instance_pool_id argument.

DatabricksSqlAccess bool

Allow the principal to access the Databricks SQL feature, both in the user interface and through databricks.SqlEndpoint.

GroupId string

Canonical unique identifier for the group.

ServicePrincipalId string

Canonical unique identifier for the service principal.

UserId string

Canonical unique identifier for the user.

WorkspaceAccess bool

Allow the principal to access the Databricks workspace.

allowClusterCreate Boolean

Allow the principal to create clusters. Defaults to false. More fine-grained permissions can be assigned with databricks.Permissions and the cluster_id argument. Principals without allow_cluster_create set, but with permission to use a cluster policy, can still create clusters within the boundaries of that policy.

allowInstancePoolCreate Boolean

Allow the principal to create instance pools. Defaults to false. More fine-grained permissions can be assigned with databricks.Permissions and the instance_pool_id argument.

databricksSqlAccess Boolean

Allow the principal to access the Databricks SQL feature, both in the user interface and through databricks.SqlEndpoint.

groupId String

Canonical unique identifier for the group.

servicePrincipalId String

Canonical unique identifier for the service principal.

userId String

Canonical unique identifier for the user.

workspaceAccess Boolean

Allow the principal to access the Databricks workspace.

allowClusterCreate boolean

Allow the principal to create clusters. Defaults to false. More fine-grained permissions can be assigned with databricks.Permissions and the cluster_id argument. Principals without allow_cluster_create set, but with permission to use a cluster policy, can still create clusters within the boundaries of that policy.

allowInstancePoolCreate boolean

Allow the principal to create instance pools. Defaults to false. More fine-grained permissions can be assigned with databricks.Permissions and the instance_pool_id argument.

databricksSqlAccess boolean

Allow the principal to access the Databricks SQL feature, both in the user interface and through databricks.SqlEndpoint.

groupId string

Canonical unique identifier for the group.

servicePrincipalId string

Canonical unique identifier for the service principal.

userId string

Canonical unique identifier for the user.

workspaceAccess boolean

Allow the principal to access the Databricks workspace.

allow_cluster_create bool

Allow the principal to create clusters. Defaults to false. More fine-grained permissions can be assigned with databricks.Permissions and the cluster_id argument. Principals without allow_cluster_create set, but with permission to use a cluster policy, can still create clusters within the boundaries of that policy.

allow_instance_pool_create bool

Allow the principal to create instance pools. Defaults to false. More fine-grained permissions can be assigned with databricks.Permissions and the instance_pool_id argument.

databricks_sql_access bool

Allow the principal to access the Databricks SQL feature, both in the user interface and through databricks.SqlEndpoint.

group_id str

Canonical unique identifier for the group.

service_principal_id str

Canonical unique identifier for the service principal.

user_id str

Canonical unique identifier for the user.

workspace_access bool

Allow the principal to access the Databricks workspace.

allowClusterCreate Boolean

Allow the principal to create clusters. Defaults to false. More fine-grained permissions can be assigned with databricks.Permissions and the cluster_id argument. Principals without allow_cluster_create set, but with permission to use a cluster policy, can still create clusters within the boundaries of that policy.

allowInstancePoolCreate Boolean

Allow the principal to create instance pools. Defaults to false. More fine-grained permissions can be assigned with databricks.Permissions and the instance_pool_id argument.

databricksSqlAccess Boolean

Allow the principal to access the Databricks SQL feature, both in the user interface and through databricks.SqlEndpoint.

groupId String

Canonical unique identifier for the group.

servicePrincipalId String

Canonical unique identifier for the service principal.

userId String

Canonical unique identifier for the user.

workspaceAccess Boolean

Allow the principal to access the Databricks workspace.
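
As a sketch of the lookup described above, an existing entitlements assignment can be read back in Python by its provider ID. This assumes the provider ID matches the synthetic identifier used for import (see Import below); the value shown is purely illustrative:

import pulumi
import pulumi_databricks as databricks

# Look up the state of an existing entitlements assignment by its provider ID.
# The ID value is a placeholder; substitute the real synthetic identifier.
existing = databricks.Entitlements.get("existing-user-entitlements", "user/1234567890")

pulumi.export("workspaceAccess", existing.workspace_access)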

Import

The resource can be imported using a synthetic identifier. Examples of valid synthetic identifiers:

  • user/user_id - entitlements of user user_id.
  • group/group_id - entitlements of group group_id.
  • spn/spn_id - entitlements of service principal spn_id.

 $ pulumi import databricks:index/entitlements:Entitlements me user/<user-id>
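
Group and service principal entitlements follow the same pattern; the resource names below are illustrative:

 $ pulumi import databricks:index/entitlements:Entitlements workspace-users group/<group-id>
 $ pulumi import databricks:index/entitlements:Entitlements automation-sp spn/<spn-id>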

Package Details

Repository: databricks pulumi/pulumi-databricks
License: Apache-2.0
Notes: This Pulumi package is based on the databricks Terraform Provider.