databricks.ServicePrincipal
Explore with Pulumi AI
Directly manage Service Principals that could be added to databricks.Group in Databricks workspace or account.
Note To assign account level service principals to workspace use databricks_mws_permission_assignment.
To create service principals at the Databricks account level, the provider must be configured with host = "https://accounts.cloud.databricks.com" on AWS deployments, or with host = "https://accounts.azuredatabricks.net" and AAD-token authentication on Azure deployments.
Related Resources
The following resources are often used in the same context:
- End to end workspace management guide.
- databricks.Group to manage groups in Databricks Workspace or Account Console (for AWS deployments).
- databricks.Group data to retrieve information about databricks.Group members, entitlements and instance profiles.
- databricks.GroupMember to attach users and groups as group members.
- databricks.Permissions to manage access control in Databricks workspace.
- databricks.SqlPermissions to manage data object access control lists in Databricks workspaces for things like tables, views, databases, and more.
- databricks.OboToken to manage secrets for the service principal (only for AWS deployments).
Example Usage
Creating regular service principal
// Example: create a Databricks service principal identified by its application ID.
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Databricks = Pulumi.Databricks;

return await Deployment.RunAsync(() =>
{
    // The UUID below is a placeholder; on Azure it is the AAD application ID,
    // on other clouds it is auto-generated when omitted.
    var sp = new Databricks.ServicePrincipal("sp", new()
    {
        ApplicationId = "00000000-0000-0000-0000-000000000000",
    });
});
// Example: create a Databricks service principal identified by its application ID.
package main

import (
	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// The UUID is a placeholder; auto-generated on non-Azure clouds when omitted.
		_, err := databricks.NewServicePrincipal(ctx, "sp", &databricks.ServicePrincipalArgs{
			ApplicationId: pulumi.String("00000000-0000-0000-0000-000000000000"),
		})
		if err != nil {
			return err
		}
		return nil
	})
}
// Example: create a Databricks service principal identified by its application ID.
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.databricks.ServicePrincipal;
import com.pulumi.databricks.ServicePrincipalArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        // The UUID is a placeholder; on Azure it is the AAD application ID.
        var sp = new ServicePrincipal("sp", ServicePrincipalArgs.builder()
            .applicationId("00000000-0000-0000-0000-000000000000")
            .build());
    }
}
# Example: create a Databricks service principal identified by its application ID.
import pulumi
import pulumi_databricks as databricks

# The UUID is a placeholder; auto-generated on non-Azure clouds when omitted.
sp = databricks.ServicePrincipal("sp", application_id="00000000-0000-0000-0000-000000000000")
// Example: create a Databricks service principal identified by its application ID.
import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

// The UUID is a placeholder; auto-generated on non-Azure clouds when omitted.
const sp = new databricks.ServicePrincipal("sp", {applicationId: "00000000-0000-0000-0000-000000000000"});
# Example: create a Databricks service principal identified by its application ID.
resources:
  sp:
    type: databricks:ServicePrincipal
    properties:
      # Placeholder UUID; auto-generated on non-Azure clouds when omitted.
      applicationId: 00000000-0000-0000-0000-000000000000
databricks.Group in databricks.GroupMember resource
// Create a service principal and add it to the existing "admins" group.
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Databricks = Pulumi.Databricks;

return await Deployment.RunAsync(() =>
{
    // Look up the built-in "admins" group by display name.
    var admins = Databricks.GetGroup.Invoke(new()
    {
        DisplayName = "admins",
    });

    var sp = new Databricks.ServicePrincipal("sp", new()
    {
        ApplicationId = "00000000-0000-0000-0000-000000000000",
    });

    // Attach the service principal to the group as a member.
    var i_am_admin = new Databricks.GroupMember("i-am-admin", new()
    {
        GroupId = admins.Apply(getGroupResult => getGroupResult.Id),
        MemberId = sp.Id,
    });
});
package main
import (
"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
admins, err := databricks.LookupGroup(ctx, &databricks.LookupGroupArgs{
DisplayName: "admins",
}, nil)
if err != nil {
return err
}
sp, err := databricks.NewServicePrincipal(ctx, "sp", &databricks.ServicePrincipalArgs{
ApplicationId: pulumi.String("00000000-0000-0000-0000-000000000000"),
})
if err != nil {
return err
}
_, err = databricks.NewGroupMember(ctx, "i-am-admin", &databricks.GroupMemberArgs{
GroupId: *pulumi.String(admins.Id),
MemberId: sp.ID(),
})
if err != nil {
return err
}
return nil
})
}
// Create a service principal and add it to the existing "admins" group.
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.databricks.DatabricksFunctions;
import com.pulumi.databricks.inputs.GetGroupArgs;
import com.pulumi.databricks.ServicePrincipal;
import com.pulumi.databricks.ServicePrincipalArgs;
import com.pulumi.databricks.GroupMember;
import com.pulumi.databricks.GroupMemberArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        // Look up the built-in "admins" group by display name.
        final var admins = DatabricksFunctions.getGroup(GetGroupArgs.builder()
            .displayName("admins")
            .build());

        var sp = new ServicePrincipal("sp", ServicePrincipalArgs.builder()
            .applicationId("00000000-0000-0000-0000-000000000000")
            .build());

        // Attach the service principal to the group as a member.
        var i_am_admin = new GroupMember("i-am-admin", GroupMemberArgs.builder()
            .groupId(admins.applyValue(getGroupResult -> getGroupResult.id()))
            .memberId(sp.id())
            .build());
    }
}
# Create a service principal and add it to the existing "admins" group.
import pulumi
import pulumi_databricks as databricks

# Look up the built-in "admins" group by display name.
admins = databricks.get_group(display_name="admins")
sp = databricks.ServicePrincipal("sp", application_id="00000000-0000-0000-0000-000000000000")
# Attach the service principal to the group as a member.
i_am_admin = databricks.GroupMember("i-am-admin",
    group_id=admins.id,
    member_id=sp.id)
// Create a service principal and add it to the existing "admins" group.
import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

// Look up the built-in "admins" group by display name.
const admins = databricks.getGroup({
    displayName: "admins",
});
const sp = new databricks.ServicePrincipal("sp", {applicationId: "00000000-0000-0000-0000-000000000000"});
// Attach the service principal to the group as a member.
const i_am_admin = new databricks.GroupMember("i-am-admin", {
    groupId: admins.then(admins => admins.id),
    memberId: sp.id,
});
# Create a service principal and add it to the existing "admins" group.
resources:
  sp:
    type: databricks:ServicePrincipal
    properties:
      applicationId: 00000000-0000-0000-0000-000000000000
  # Attach the service principal to the group as a member.
  i-am-admin:
    type: databricks:GroupMember
    properties:
      groupId: ${admins.id}
      memberId: ${sp.id}
variables:
  # Look up the built-in "admins" group by display name.
  admins:
    fn::invoke:
      Function: databricks:getGroup
      Arguments:
        displayName: admins
Creating service principal with cluster create permissions
// Create a service principal that is allowed to create clusters.
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Databricks = Pulumi.Databricks;

return await Deployment.RunAsync(() =>
{
    var sp = new Databricks.ServicePrincipal("sp", new()
    {
        // Grants the cluster-create entitlement to this service principal.
        AllowClusterCreate = true,
        ApplicationId = "00000000-0000-0000-0000-000000000000",
        DisplayName = "Example service principal",
    });
});
// Create a service principal that is allowed to create clusters.
package main

import (
	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := databricks.NewServicePrincipal(ctx, "sp", &databricks.ServicePrincipalArgs{
			// Grants the cluster-create entitlement to this service principal.
			AllowClusterCreate: pulumi.Bool(true),
			ApplicationId:      pulumi.String("00000000-0000-0000-0000-000000000000"),
			DisplayName:        pulumi.String("Example service principal"),
		})
		if err != nil {
			return err
		}
		return nil
	})
}
// Create a service principal that is allowed to create clusters.
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.databricks.ServicePrincipal;
import com.pulumi.databricks.ServicePrincipalArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var sp = new ServicePrincipal("sp", ServicePrincipalArgs.builder()
            // Grants the cluster-create entitlement to this service principal.
            .allowClusterCreate(true)
            .applicationId("00000000-0000-0000-0000-000000000000")
            .displayName("Example service principal")
            .build());
    }
}
# Create a service principal that is allowed to create clusters.
import pulumi
import pulumi_databricks as databricks

sp = databricks.ServicePrincipal("sp",
    # Grants the cluster-create entitlement to this service principal.
    allow_cluster_create=True,
    application_id="00000000-0000-0000-0000-000000000000",
    display_name="Example service principal")
// Create a service principal that is allowed to create clusters.
import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

const sp = new databricks.ServicePrincipal("sp", {
    // Grants the cluster-create entitlement to this service principal.
    allowClusterCreate: true,
    applicationId: "00000000-0000-0000-0000-000000000000",
    displayName: "Example service principal",
});
# Create a service principal that is allowed to create clusters.
resources:
  sp:
    type: databricks:ServicePrincipal
    properties:
      # Grants the cluster-create entitlement to this service principal.
      allowClusterCreate: true
      applicationId: 00000000-0000-0000-0000-000000000000
      displayName: Example service principal
Creating service principal in AWS Databricks account
// Create a service principal at the AWS Databricks account level.
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Databricks = Pulumi.Databricks;

return await Deployment.RunAsync(() =>
{
    // Initialize an explicit provider at the account level.
    // NOTE(review): @var.* values are generator placeholders; supply real
    // credentials via Pulumi config.
    var mws = new Databricks.Provider("mws", new()
    {
        Host = "https://accounts.cloud.databricks.com",
        AccountId = "00000000-0000-0000-0000-000000000000",
        Username = @var.Databricks_account_username,
        Password = @var.Databricks_account_password,
    });

    // BUG FIX: the generated example referenced the undefined symbol
    // `databricks.Mws`; the resource must use the provider instance above.
    var sp = new Databricks.ServicePrincipal("sp", new()
    {
        DisplayName = "Automation-only SP",
    }, new CustomResourceOptions
    {
        Provider = mws,
    });
});
package main
import (
"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := databricks.NewProvider(ctx, "mws", &databricks.ProviderArgs{
Host: pulumi.String("https://accounts.cloud.databricks.com"),
AccountId: pulumi.String("00000000-0000-0000-0000-000000000000"),
Username: pulumi.Any(_var.Databricks_account_username),
Password: pulumi.Any(_var.Databricks_account_password),
})
if err != nil {
return err
}
_, err = databricks.NewServicePrincipal(ctx, "sp", &databricks.ServicePrincipalArgs{
DisplayName: pulumi.String("Automation-only SP"),
}, pulumi.Provider(databricks.Mws))
if err != nil {
return err
}
return nil
})
}
// Create a service principal at the AWS Databricks account level.
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.databricks.Provider;
import com.pulumi.databricks.ProviderArgs;
import com.pulumi.databricks.ServicePrincipal;
import com.pulumi.databricks.ServicePrincipalArgs;
import com.pulumi.resources.CustomResourceOptions;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        // Initialize an explicit provider at the account level.
        // NOTE(review): var_.* accessors are generator placeholders; supply
        // real credentials via Pulumi config.
        var mws = new Provider("mws", ProviderArgs.builder()
            .host("https://accounts.cloud.databricks.com")
            .accountId("00000000-0000-0000-0000-000000000000")
            .username(var_.databricks_account_username())
            .password(var_.databricks_account_password())
            .build());

        // BUG FIX: the generated example referenced the undefined symbol
        // databricks.mws(); use the provider instance created above.
        var sp = new ServicePrincipal("sp", ServicePrincipalArgs.builder()
            .displayName("Automation-only SP")
            .build(), CustomResourceOptions.builder()
            .provider(mws)
            .build());
    }
}
# Create a service principal at the AWS Databricks account level.
import pulumi
import pulumi_databricks as databricks

# Initialize an explicit provider at the account level.
# NOTE(review): var[...] entries are generator placeholders; supply real
# credentials via Pulumi config.
mws = databricks.Provider("mws",
    host="https://accounts.cloud.databricks.com",
    account_id="00000000-0000-0000-0000-000000000000",
    username=var["databricks_account_username"],
    password=var["databricks_account_password"])
# BUG FIX: the generated example referenced the undefined name
# databricks["mws"]; use the provider object created above.
sp = databricks.ServicePrincipal("sp",
    display_name="Automation-only SP",
    opts=pulumi.ResourceOptions(provider=mws))
// Create a service principal at the AWS Databricks account level.
import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

// Initialize an explicit provider at the account level.
// NOTE(review): _var.* values are generator placeholders; supply real
// credentials via Pulumi config.
const mws = new databricks.Provider("mws", {
    host: "https://accounts.cloud.databricks.com",
    accountId: "00000000-0000-0000-0000-000000000000",
    username: _var.databricks_account_username,
    password: _var.databricks_account_password,
});
// BUG FIX: the generated example referenced the undefined value
// `databricks.mws`; use the provider instance created above.
const sp = new databricks.ServicePrincipal("sp", {displayName: "Automation-only SP"}, {
    provider: mws,
});
# Create a service principal at the AWS Databricks account level.
resources:
  # Initialize an explicit provider at the account level.
  mws:
    type: pulumi:providers:databricks
    properties:
      host: https://accounts.cloud.databricks.com
      accountId: 00000000-0000-0000-0000-000000000000
      username: ${var.databricks_account_username}
      password: ${var.databricks_account_password}
  sp:
    type: databricks:ServicePrincipal
    properties:
      displayName: Automation-only SP
    options:
      # BUG FIX: reference the provider resource declared above, not the
      # non-existent ${databricks.mws}.
      provider: ${mws}
Creating service principal in Azure Databricks account
// Create a service principal at the Azure Databricks account level.
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Databricks = Pulumi.Databricks;

return await Deployment.RunAsync(() =>
{
    // Initialize an explicit provider at the Azure account level,
    // authenticating with the Azure CLI.
    var azureAccount = new Databricks.Provider("azureAccount", new()
    {
        Host = "https://accounts.azuredatabricks.net",
        AccountId = "00000000-0000-0000-0000-000000000000",
        AuthType = "azure-cli",
    });

    // BUG FIX: the generated example referenced the undefined symbol
    // `databricks.Azure_account`; use the provider instance above.
    var sp = new Databricks.ServicePrincipal("sp", new()
    {
        ApplicationId = "00000000-0000-0000-0000-000000000000",
    }, new CustomResourceOptions
    {
        Provider = azureAccount,
    });
});
package main
import (
"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := databricks.NewProvider(ctx, "azureAccount", &databricks.ProviderArgs{
Host: pulumi.String("https://accounts.azuredatabricks.net"),
AccountId: pulumi.String("00000000-0000-0000-0000-000000000000"),
AuthType: pulumi.String("azure-cli"),
})
if err != nil {
return err
}
_, err = databricks.NewServicePrincipal(ctx, "sp", &databricks.ServicePrincipalArgs{
ApplicationId: pulumi.String("00000000-0000-0000-0000-000000000000"),
}, pulumi.Provider(databricks.Azure_account))
if err != nil {
return err
}
return nil
})
}
// Create a service principal at the Azure Databricks account level.
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.databricks.Provider;
import com.pulumi.databricks.ProviderArgs;
import com.pulumi.databricks.ServicePrincipal;
import com.pulumi.databricks.ServicePrincipalArgs;
import com.pulumi.resources.CustomResourceOptions;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        // Initialize an explicit provider at the Azure account level,
        // authenticating with the Azure CLI.
        var azureAccount = new Provider("azureAccount", ProviderArgs.builder()
            .host("https://accounts.azuredatabricks.net")
            .accountId("00000000-0000-0000-0000-000000000000")
            .authType("azure-cli")
            .build());

        // BUG FIX: the generated example referenced the undefined symbol
        // databricks.azure_account(); use the provider instance created above.
        var sp = new ServicePrincipal("sp", ServicePrincipalArgs.builder()
            .applicationId("00000000-0000-0000-0000-000000000000")
            .build(), CustomResourceOptions.builder()
            .provider(azureAccount)
            .build());
    }
}
# Create a service principal at the Azure Databricks account level.
import pulumi
import pulumi_databricks as databricks

# Initialize an explicit provider at the Azure account level,
# authenticating with the Azure CLI.
azure_account = databricks.Provider("azureAccount",
    host="https://accounts.azuredatabricks.net",
    account_id="00000000-0000-0000-0000-000000000000",
    auth_type="azure-cli")
# BUG FIX: the generated example referenced the undefined name
# databricks["azure_account"]; use the provider object created above.
sp = databricks.ServicePrincipal("sp",
    application_id="00000000-0000-0000-0000-000000000000",
    opts=pulumi.ResourceOptions(provider=azure_account))
// Create a service principal at the Azure Databricks account level.
import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

// Initialize an explicit provider at the Azure account level,
// authenticating with the Azure CLI.
const azureAccount = new databricks.Provider("azureAccount", {
    host: "https://accounts.azuredatabricks.net",
    accountId: "00000000-0000-0000-0000-000000000000",
    authType: "azure-cli",
});
// BUG FIX: the generated example referenced the undefined value
// `databricks.azure_account`; use the provider instance created above.
const sp = new databricks.ServicePrincipal("sp", {applicationId: "00000000-0000-0000-0000-000000000000"}, {
    provider: azureAccount,
});
# Create a service principal at the Azure Databricks account level.
resources:
  # Initialize an explicit provider at the Azure account level (azure-cli auth).
  azureAccount:
    type: pulumi:providers:databricks
    properties:
      host: https://accounts.azuredatabricks.net
      accountId: 00000000-0000-0000-0000-000000000000
      authType: azure-cli
  sp:
    type: databricks:ServicePrincipal
    properties:
      applicationId: 00000000-0000-0000-0000-000000000000
    options:
      # BUG FIX: reference the provider resource declared above, not the
      # non-existent ${databricks.azure_account}.
      provider: ${azureAccount}
Create ServicePrincipal Resource
new ServicePrincipal(name: string, args?: ServicePrincipalArgs, opts?: CustomResourceOptions);
@overload
def ServicePrincipal(resource_name: str,
opts: Optional[ResourceOptions] = None,
active: Optional[bool] = None,
allow_cluster_create: Optional[bool] = None,
allow_instance_pool_create: Optional[bool] = None,
application_id: Optional[str] = None,
databricks_sql_access: Optional[bool] = None,
display_name: Optional[str] = None,
external_id: Optional[str] = None,
force: Optional[bool] = None,
force_delete_home_dir: Optional[bool] = None,
force_delete_repos: Optional[bool] = None,
home: Optional[str] = None,
repos: Optional[str] = None,
workspace_access: Optional[bool] = None)
@overload
def ServicePrincipal(resource_name: str,
args: Optional[ServicePrincipalArgs] = None,
opts: Optional[ResourceOptions] = None)
func NewServicePrincipal(ctx *Context, name string, args *ServicePrincipalArgs, opts ...ResourceOption) (*ServicePrincipal, error)
public ServicePrincipal(string name, ServicePrincipalArgs? args = null, CustomResourceOptions? opts = null)
public ServicePrincipal(String name, ServicePrincipalArgs args)
public ServicePrincipal(String name, ServicePrincipalArgs args, CustomResourceOptions options)
type: databricks:ServicePrincipal
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args ServicePrincipalArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args ServicePrincipalArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args ServicePrincipalArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args ServicePrincipalArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args ServicePrincipalArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
ServicePrincipal Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
The ServicePrincipal resource accepts the following input properties:
- Active bool
Either service principal is active or not. True by default, but can be set to false in case of service principal deactivation with preserving service principal assets.
- AllowClusterCreate bool
Allow the service principal to have cluster create privileges. Defaults to false. More fine grained permissions could be assigned with databricks.Permissions and cluster_id argument. Everyone without allow_cluster_create argument set, but with permission to use Cluster Policy would be able to create clusters, but within the boundaries of that specific policy.
- AllowInstancePoolCreate bool
Allow the service principal to have instance pool create privileges. Defaults to false. More fine grained permissions could be assigned with databricks.Permissions and instance_pool_id argument.
- ApplicationId string
This is the Azure Application ID of the given Azure service principal and will be their form of access and identity. On other clouds than Azure this value is auto-generated.
- DatabricksSqlAccess bool
This is a field to allow the group to have access to Databricks SQL feature through databricks_sql_endpoint.
- DisplayName string
This is an alias for the service principal and can be the full name of the service principal.
- ExternalId string
ID of the service principal in an external identity provider.
- Force bool
- ForceDeleteHomeDir bool
This flag determines whether the service principal's home directory is deleted when the user is deleted. It will have no impact when in the accounts SCIM API. False by default.
- ForceDeleteRepos bool
This flag determines whether the service principal's repo directory is deleted when the user is deleted. It will have no impact when in the accounts SCIM API. False by default.
- Home string
Home folder of the service principal, e.g. /Users/00000000-0000-0000-0000-000000000000.
- Repos string
Personal Repos location of the service principal, e.g. /Repos/00000000-0000-0000-0000-000000000000.
- WorkspaceAccess bool
This is a field to allow the group to have access to Databricks Workspace.
- Active bool
Either service principal is active or not. True by default, but can be set to false in case of service principal deactivation with preserving service principal assets.
- Allow
Cluster boolCreate Allow the service principal to have cluster create privileges. Defaults to false. More fine grained permissions could be assigned with databricks.Permissions and
cluster_id
argument. Everyone withoutallow_cluster_create
argument set, but with permission to use Cluster Policy would be able to create clusters, but within the boundaries of that specific policy.- Allow
Instance boolPool Create Allow the service principal to have instance pool create privileges. Defaults to false. More fine grained permissions could be assigned with databricks.Permissions and instance_pool_id argument.
- Application
Id string This is the Azure Application ID of the given Azure service principal and will be their form of access and identity. On other clouds than Azure this value is auto-generated.
- Databricks
Sql boolAccess This is a field to allow the group to have access to Databricks SQL feature through databricks_sql_endpoint.
- Display
Name string This is an alias for the service principal and can be the full name of the service principal.
- External
Id string ID of the service principal in an external identity provider.
- Force bool
- Force
Delete boolHome Dir This flag determines whether the service principal's home directory is deleted when the user is deleted. It will have no impact when in the accounts SCIM API. False by default.
- Force
Delete boolRepos This flag determines whether the service principal's repo directory is deleted when the user is deleted. It will have no impact when in the accounts SCIM API. False by default.
- Home string
Home folder of the service principal, e.g.
/Users/00000000-0000-0000-0000-000000000000
.- Repos string
Personal Repos location of the service principal, e.g.
/Repos/00000000-0000-0000-0000-000000000000
.- Workspace
Access bool This is a field to allow the group to have access to Databricks Workspace.
- active Boolean
Either service principal is active or not. True by default, but can be set to false in case of service principal deactivation with preserving service principal assets.
- allow
Cluster BooleanCreate Allow the service principal to have cluster create privileges. Defaults to false. More fine grained permissions could be assigned with databricks.Permissions and
cluster_id
argument. Everyone withoutallow_cluster_create
argument set, but with permission to use Cluster Policy would be able to create clusters, but within the boundaries of that specific policy.- allow
Instance BooleanPool Create Allow the service principal to have instance pool create privileges. Defaults to false. More fine grained permissions could be assigned with databricks.Permissions and instance_pool_id argument.
- application
Id String This is the Azure Application ID of the given Azure service principal and will be their form of access and identity. On other clouds than Azure this value is auto-generated.
- databricks
Sql BooleanAccess This is a field to allow the group to have access to Databricks SQL feature through databricks_sql_endpoint.
- display
Name String This is an alias for the service principal and can be the full name of the service principal.
- external
Id String ID of the service principal in an external identity provider.
- force Boolean
- force
Delete BooleanHome Dir This flag determines whether the service principal's home directory is deleted when the user is deleted. It will have no impact when in the accounts SCIM API. False by default.
- force
Delete BooleanRepos This flag determines whether the service principal's repo directory is deleted when the user is deleted. It will have no impact when in the accounts SCIM API. False by default.
- home String
Home folder of the service principal, e.g.
/Users/00000000-0000-0000-0000-000000000000
.- repos String
Personal Repos location of the service principal, e.g.
/Repos/00000000-0000-0000-0000-000000000000
.- workspace
Access Boolean This is a field to allow the group to have access to Databricks Workspace.
- active boolean
Either service principal is active or not. True by default, but can be set to false in case of service principal deactivation with preserving service principal assets.
- allow
Cluster booleanCreate Allow the service principal to have cluster create privileges. Defaults to false. More fine grained permissions could be assigned with databricks.Permissions and
cluster_id
argument. Everyone withoutallow_cluster_create
argument set, but with permission to use Cluster Policy would be able to create clusters, but within the boundaries of that specific policy.- allow
Instance booleanPool Create Allow the service principal to have instance pool create privileges. Defaults to false. More fine grained permissions could be assigned with databricks.Permissions and instance_pool_id argument.
- application
Id string This is the Azure Application ID of the given Azure service principal and will be their form of access and identity. On other clouds than Azure this value is auto-generated.
- databricks
Sql booleanAccess This is a field to allow the group to have access to Databricks SQL feature through databricks_sql_endpoint.
- display
Name string This is an alias for the service principal and can be the full name of the service principal.
- external
Id string ID of the service principal in an external identity provider.
- force boolean
- force
Delete booleanHome Dir This flag determines whether the service principal's home directory is deleted when the user is deleted. It will have no impact when in the accounts SCIM API. False by default.
- force
Delete booleanRepos This flag determines whether the service principal's repo directory is deleted when the user is deleted. It will have no impact when in the accounts SCIM API. False by default.
- home string
Home folder of the service principal, e.g.
/Users/00000000-0000-0000-0000-000000000000
.- repos string
Personal Repos location of the service principal, e.g.
/Repos/00000000-0000-0000-0000-000000000000
.- workspace
Access boolean This is a field to allow the group to have access to Databricks Workspace.
- active bool
Either service principal is active or not. True by default, but can be set to false in case of service principal deactivation with preserving service principal assets.
- allow_
cluster_ boolcreate Allow the service principal to have cluster create privileges. Defaults to false. More fine grained permissions could be assigned with databricks.Permissions and
cluster_id
argument. Everyone withoutallow_cluster_create
argument set, but with permission to use Cluster Policy would be able to create clusters, but within the boundaries of that specific policy.- allow_
instance_ boolpool_ create Allow the service principal to have instance pool create privileges. Defaults to false. More fine grained permissions could be assigned with databricks.Permissions and instance_pool_id argument.
- application_
id str This is the Azure Application ID of the given Azure service principal and will be their form of access and identity. On other clouds than Azure this value is auto-generated.
- databricks_
sql_ boolaccess This is a field to allow the group to have access to Databricks SQL feature through databricks_sql_endpoint.
- display_
name str This is an alias for the service principal and can be the full name of the service principal.
- external_
id str ID of the service principal in an external identity provider.
- force bool
- force_
delete_ boolhome_ dir This flag determines whether the service principal's home directory is deleted when the user is deleted. It will have no impact when in the accounts SCIM API. False by default.
- force_
delete_ boolrepos This flag determines whether the service principal's repo directory is deleted when the user is deleted. It will have no impact when in the accounts SCIM API. False by default.
- home str
Home folder of the service principal, e.g.
/Users/00000000-0000-0000-0000-000000000000
.- repos str
Personal Repos location of the service principal, e.g.
/Repos/00000000-0000-0000-0000-000000000000
.- workspace_
access bool This is a field to allow the group to have access to Databricks Workspace.
- active Boolean
Either service principal is active or not. True by default, but can be set to false in case of service principal deactivation with preserving service principal assets.
- allow
Cluster BooleanCreate Allow the service principal to have cluster create privileges. Defaults to false. More fine grained permissions could be assigned with databricks.Permissions and
cluster_id
argument. Everyone withoutallow_cluster_create
argument set, but with permission to use Cluster Policy would be able to create clusters, but within the boundaries of that specific policy.- allow
Instance BooleanPool Create Allow the service principal to have instance pool create privileges. Defaults to false. More fine grained permissions could be assigned with databricks.Permissions and instance_pool_id argument.
- application
Id String This is the Azure Application ID of the given Azure service principal and will be their form of access and identity. On other clouds than Azure this value is auto-generated.
- databricks
Sql BooleanAccess This is a field to allow the group to have access to Databricks SQL feature through databricks_sql_endpoint.
- display
Name String This is an alias for the service principal and can be the full name of the service principal.
- external
Id String ID of the service principal in an external identity provider.
- force Boolean
- force
Delete BooleanHome Dir This flag determines whether the service principal's home directory is deleted when the user is deleted. It will have no impact when in the accounts SCIM API. False by default.
- force
Delete BooleanRepos This flag determines whether the service principal's repo directory is deleted when the user is deleted. It will have no impact when in the accounts SCIM API. False by default.
- home String
Home folder of the service principal, e.g.
/Users/00000000-0000-0000-0000-000000000000
.- repos String
Personal Repos location of the service principal, e.g.
/Repos/00000000-0000-0000-0000-000000000000
.- workspace
Access Boolean This is a field to allow the group to have access to Databricks Workspace.
Outputs
All input properties are implicitly available as output properties. Additionally, the ServicePrincipal resource produces the following output properties:
- Id string
The provider-assigned unique ID for this managed resource.
- Id string
The provider-assigned unique ID for this managed resource.
- id String
The provider-assigned unique ID for this managed resource.
- id string
The provider-assigned unique ID for this managed resource.
- id str
The provider-assigned unique ID for this managed resource.
- id String
The provider-assigned unique ID for this managed resource.
Look up Existing ServicePrincipal Resource
Get an existing ServicePrincipal resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: ServicePrincipalState, opts?: CustomResourceOptions): ServicePrincipal
@staticmethod
def get(resource_name: str,
id: str,
opts: Optional[ResourceOptions] = None,
active: Optional[bool] = None,
allow_cluster_create: Optional[bool] = None,
allow_instance_pool_create: Optional[bool] = None,
application_id: Optional[str] = None,
databricks_sql_access: Optional[bool] = None,
display_name: Optional[str] = None,
external_id: Optional[str] = None,
force: Optional[bool] = None,
force_delete_home_dir: Optional[bool] = None,
force_delete_repos: Optional[bool] = None,
home: Optional[str] = None,
repos: Optional[str] = None,
workspace_access: Optional[bool] = None) -> ServicePrincipal
func GetServicePrincipal(ctx *Context, name string, id IDInput, state *ServicePrincipalState, opts ...ResourceOption) (*ServicePrincipal, error)
public static ServicePrincipal Get(string name, Input<string> id, ServicePrincipalState? state, CustomResourceOptions? opts = null)
public static ServicePrincipal get(String name, Output<String> id, ServicePrincipalState state, CustomResourceOptions options)
Resource lookup is not supported in YAML
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- Active bool
Whether the service principal is active or not. True by default, but can be set to false in case of service principal deactivation with preserving service principal assets.
- Allow
Cluster Create bool Allow the service principal to have cluster create privileges. Defaults to false. More fine grained permissions could be assigned with databricks.Permissions and
cluster_id
argument. Everyone without allow_cluster_create
argument set, but with permission to use Cluster Policy would be able to create clusters, but within the boundaries of that specific policy.
- Allow
Instance Pool Create bool Allow the service principal to have instance pool create privileges. Defaults to false. More fine grained permissions could be assigned with databricks.Permissions and instance_pool_id argument.
- Application
Id string This is the Azure Application ID of the given Azure service principal and will be their form of access and identity. On other clouds than Azure this value is auto-generated.
- Databricks
Sql Access bool This is a field to allow the group to have access to Databricks SQL feature through databricks_sql_endpoint.
- Display
Name string This is an alias for the service principal and can be the full name of the service principal.
- External
Id string ID of the service principal in an external identity provider.
- Force bool
- Force
Delete Home Dir bool This flag determines whether the service principal's home directory is deleted when the user is deleted. It will have no impact when in the accounts SCIM API. False by default.
- Force
Delete Repos bool This flag determines whether the service principal's repo directory is deleted when the user is deleted. It will have no impact when in the accounts SCIM API. False by default.
- Home string
Home folder of the service principal, e.g. /Users/00000000-0000-0000-0000-000000000000.
- Repos string
Personal Repos location of the service principal, e.g. /Repos/00000000-0000-0000-0000-000000000000.
- Workspace
Access bool This is a field to allow the group to have access to Databricks Workspace.
- Active bool
Whether the service principal is active or not. True by default, but can be set to false in case of service principal deactivation with preserving service principal assets.
- Allow
Cluster Create bool Allow the service principal to have cluster create privileges. Defaults to false. More fine grained permissions could be assigned with databricks.Permissions and
cluster_id
argument. Everyone without allow_cluster_create
argument set, but with permission to use Cluster Policy would be able to create clusters, but within the boundaries of that specific policy.
- Allow
Instance Pool Create bool Allow the service principal to have instance pool create privileges. Defaults to false. More fine grained permissions could be assigned with databricks.Permissions and instance_pool_id argument.
- Application
Id string This is the Azure Application ID of the given Azure service principal and will be their form of access and identity. On other clouds than Azure this value is auto-generated.
- Databricks
Sql Access bool This is a field to allow the group to have access to Databricks SQL feature through databricks_sql_endpoint.
- Display
Name string This is an alias for the service principal and can be the full name of the service principal.
- External
Id string ID of the service principal in an external identity provider.
- Force bool
- Force
Delete Home Dir bool This flag determines whether the service principal's home directory is deleted when the user is deleted. It will have no impact when in the accounts SCIM API. False by default.
- Force
Delete Repos bool This flag determines whether the service principal's repo directory is deleted when the user is deleted. It will have no impact when in the accounts SCIM API. False by default.
- Home string
Home folder of the service principal, e.g. /Users/00000000-0000-0000-0000-000000000000.
- Repos string
Personal Repos location of the service principal, e.g. /Repos/00000000-0000-0000-0000-000000000000.
- Workspace
Access bool This is a field to allow the group to have access to Databricks Workspace.
- active Boolean
Whether the service principal is active or not. True by default, but can be set to false in case of service principal deactivation with preserving service principal assets.
- allow
Cluster Create Boolean Allow the service principal to have cluster create privileges. Defaults to false. More fine grained permissions could be assigned with databricks.Permissions and
cluster_id
argument. Everyone without allow_cluster_create
argument set, but with permission to use Cluster Policy would be able to create clusters, but within the boundaries of that specific policy.
- allow
Instance Pool Create Boolean Allow the service principal to have instance pool create privileges. Defaults to false. More fine grained permissions could be assigned with databricks.Permissions and instance_pool_id argument.
- application
Id String This is the Azure Application ID of the given Azure service principal and will be their form of access and identity. On other clouds than Azure this value is auto-generated.
- databricks
Sql Access Boolean This is a field to allow the group to have access to Databricks SQL feature through databricks_sql_endpoint.
- display
Name String This is an alias for the service principal and can be the full name of the service principal.
- external
Id String ID of the service principal in an external identity provider.
- force Boolean
- force
Delete Home Dir Boolean This flag determines whether the service principal's home directory is deleted when the user is deleted. It will have no impact when in the accounts SCIM API. False by default.
- force
Delete Repos Boolean This flag determines whether the service principal's repo directory is deleted when the user is deleted. It will have no impact when in the accounts SCIM API. False by default.
- home String
Home folder of the service principal, e.g. /Users/00000000-0000-0000-0000-000000000000.
- repos String
Personal Repos location of the service principal, e.g. /Repos/00000000-0000-0000-0000-000000000000.
- workspace
Access Boolean This is a field to allow the group to have access to Databricks Workspace.
- active boolean
Whether the service principal is active or not. True by default, but can be set to false in case of service principal deactivation with preserving service principal assets.
- allow
Cluster Create boolean Allow the service principal to have cluster create privileges. Defaults to false. More fine grained permissions could be assigned with databricks.Permissions and
cluster_id
argument. Everyone without allow_cluster_create
argument set, but with permission to use Cluster Policy would be able to create clusters, but within the boundaries of that specific policy.
- allow
Instance Pool Create boolean Allow the service principal to have instance pool create privileges. Defaults to false. More fine grained permissions could be assigned with databricks.Permissions and instance_pool_id argument.
- application
Id string This is the Azure Application ID of the given Azure service principal and will be their form of access and identity. On other clouds than Azure this value is auto-generated.
- databricks
Sql Access boolean This is a field to allow the group to have access to Databricks SQL feature through databricks_sql_endpoint.
- display
Name string This is an alias for the service principal and can be the full name of the service principal.
- external
Id string ID of the service principal in an external identity provider.
- force boolean
- force
Delete Home Dir boolean This flag determines whether the service principal's home directory is deleted when the user is deleted. It will have no impact when in the accounts SCIM API. False by default.
- force
Delete Repos boolean This flag determines whether the service principal's repo directory is deleted when the user is deleted. It will have no impact when in the accounts SCIM API. False by default.
- home string
Home folder of the service principal, e.g. /Users/00000000-0000-0000-0000-000000000000.
- repos string
Personal Repos location of the service principal, e.g. /Repos/00000000-0000-0000-0000-000000000000.
- workspace
Access boolean This is a field to allow the group to have access to Databricks Workspace.
- active bool
Whether the service principal is active or not. True by default, but can be set to false in case of service principal deactivation with preserving service principal assets.
- allow_
cluster_ create bool Allow the service principal to have cluster create privileges. Defaults to false. More fine grained permissions could be assigned with databricks.Permissions and
cluster_id
argument. Everyone without allow_cluster_create
argument set, but with permission to use Cluster Policy would be able to create clusters, but within the boundaries of that specific policy.
- allow_
instance_ pool_ create bool Allow the service principal to have instance pool create privileges. Defaults to false. More fine grained permissions could be assigned with databricks.Permissions and instance_pool_id argument.
- application_
id str This is the Azure Application ID of the given Azure service principal and will be their form of access and identity. On other clouds than Azure this value is auto-generated.
- databricks_
sql_ access bool This is a field to allow the group to have access to Databricks SQL feature through databricks_sql_endpoint.
- display_
name str This is an alias for the service principal and can be the full name of the service principal.
- external_
id str ID of the service principal in an external identity provider.
- force bool
- force_
delete_ home_ dir bool This flag determines whether the service principal's home directory is deleted when the user is deleted. It will have no impact when in the accounts SCIM API. False by default.
- force_
delete_ repos bool This flag determines whether the service principal's repo directory is deleted when the user is deleted. It will have no impact when in the accounts SCIM API. False by default.
- home str
Home folder of the service principal, e.g. /Users/00000000-0000-0000-0000-000000000000.
- repos str
Personal Repos location of the service principal, e.g. /Repos/00000000-0000-0000-0000-000000000000.
- workspace_
access bool This is a field to allow the group to have access to Databricks Workspace.
- active Boolean
Whether the service principal is active or not. True by default, but can be set to false in case of service principal deactivation with preserving service principal assets.
- allow
Cluster Create Boolean Allow the service principal to have cluster create privileges. Defaults to false. More fine grained permissions could be assigned with databricks.Permissions and
cluster_id
argument. Everyone without allow_cluster_create
argument set, but with permission to use Cluster Policy would be able to create clusters, but within the boundaries of that specific policy.
- allow
Instance Pool Create Boolean Allow the service principal to have instance pool create privileges. Defaults to false. More fine grained permissions could be assigned with databricks.Permissions and instance_pool_id argument.
- application
Id String This is the Azure Application ID of the given Azure service principal and will be their form of access and identity. On other clouds than Azure this value is auto-generated.
- databricks
Sql Access Boolean This is a field to allow the group to have access to Databricks SQL feature through databricks_sql_endpoint.
- display
Name String This is an alias for the service principal and can be the full name of the service principal.
- external
Id String ID of the service principal in an external identity provider.
- force Boolean
- force
Delete Home Dir Boolean This flag determines whether the service principal's home directory is deleted when the user is deleted. It will have no impact when in the accounts SCIM API. False by default.
- force
Delete Repos Boolean This flag determines whether the service principal's repo directory is deleted when the user is deleted. It will have no impact when in the accounts SCIM API. False by default.
- home String
Home folder of the service principal, e.g. /Users/00000000-0000-0000-0000-000000000000.
- repos String
Personal Repos location of the service principal, e.g. /Repos/00000000-0000-0000-0000-000000000000.
- workspace
Access Boolean This is a field to allow the group to have access to Databricks Workspace.
Import
The resource scim service principal can be imported using its id, for example 2345678901234567.
To get the service principal ID, call the Get service principals API. bash
$ pulumi import databricks:index/servicePrincipal:ServicePrincipal me <service-principal-id>
Package Details
- Repository
- databricks pulumi/pulumi-databricks
- License
- Apache-2.0
- Notes
This Pulumi package is based on the
databricks
Terraform Provider.