databricks logo
Databricks v1.14.0, May 23, 2023

databricks.SqlEndpoint

Explore with Pulumi AI

This resource is used to manage Databricks SQL warehouses. To create SQL warehouses you must have databricks_sql_access on your databricks.Group or databricks_user.

Access control

  • databricks.Permissions can control which groups or individual users are granted the Can Use or Can Manage permission on SQL warehouses.
  • databricks_sql_access on databricks.Group or databricks_user.

The following resources are often used in the same context:

  • End to end workspace management guide.
  • databricks.InstanceProfile to manage AWS EC2 instance profiles that users can launch databricks.Cluster and access data, like databricks_mount.
  • databricks.SqlDashboard to manage Databricks SQL Dashboards.
  • databricks.SqlGlobalConfig to configure the security policy, databricks_instance_profile, and data access properties for all databricks.SqlEndpoint of workspace.
  • databricks.SqlPermissions to manage data object access control lists in Databricks workspaces for things like tables, views, databases, and more.

Example Usage

using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Databricks = Pulumi.Databricks;

return await Deployment.RunAsync(() => 
{
    // Look up the user whose credentials the provider is configured with.
    // (Unused here, but commonly referenced by follow-up resources such as permissions.)
    var me = Databricks.GetCurrentUser.Invoke();

    // Provision a SQL warehouse: "Small" cluster size, capped at a single
    // cluster, and tagged with one custom key/value tag.
    var @this = new Databricks.SqlEndpoint("this", new()
    {
        ClusterSize = "Small",
        MaxNumClusters = 1,
        Tags = new Databricks.Inputs.SqlEndpointTagsArgs
        {
            CustomTags = new[]
            {
                new Databricks.Inputs.SqlEndpointTagsCustomTagArgs
                {
                    Key = "City",
                    Value = "Amsterdam",
                },
            },
        },
    });

});
package main

import (
	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Look up the user whose credentials the provider is configured with.
		// The result is discarded here; it is commonly used by follow-up
		// resources such as permissions.
		_, err := databricks.GetCurrentUser(ctx, nil, nil)
		if err != nil {
			return err
		}
		// Provision a SQL warehouse: "Small" cluster size, capped at a
		// single cluster, and tagged with one custom key/value tag.
		_, err = databricks.NewSqlEndpoint(ctx, "this", &databricks.SqlEndpointArgs{
			ClusterSize:    pulumi.String("Small"),
			MaxNumClusters: pulumi.Int(1),
			Tags: &databricks.SqlEndpointTagsArgs{
				CustomTags: databricks.SqlEndpointTagsCustomTagArray{
					&databricks.SqlEndpointTagsCustomTagArgs{
						Key:   pulumi.String("City"),
						Value: pulumi.String("Amsterdam"),
					},
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.databricks.DatabricksFunctions;
import com.pulumi.databricks.SqlEndpoint;
import com.pulumi.databricks.SqlEndpointArgs;
import com.pulumi.databricks.inputs.SqlEndpointTagsArgs;
// Fix: SqlEndpointTagsCustomTagArgs is referenced below but was missing
// from the imports, so the example did not compile.
import com.pulumi.databricks.inputs.SqlEndpointTagsCustomTagArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        // Look up the user whose credentials the provider is configured with.
        // (Unused here, but commonly referenced by follow-up resources such
        // as permissions.)
        final var me = DatabricksFunctions.getCurrentUser();

        // Provision a SQL warehouse: "Small" cluster size, capped at a
        // single cluster, and tagged with one custom key/value tag.
        var this_ = new SqlEndpoint("this", SqlEndpointArgs.builder()        
            .clusterSize("Small")
            .maxNumClusters(1)
            .tags(SqlEndpointTagsArgs.builder()
                .customTags(SqlEndpointTagsCustomTagArgs.builder()
                    .key("City")
                    .value("Amsterdam")
                    .build())
                .build())
            .build());

    }
}
import pulumi
import pulumi_databricks as databricks

# Look up the user whose credentials the provider is configured with.
# (Unused here, but commonly referenced by follow-up resources such as
# permissions.)
me = databricks.get_current_user()
# Provision a SQL warehouse: "Small" cluster size, capped at a single
# cluster, and tagged with one custom key/value tag.
this = databricks.SqlEndpoint("this",
    cluster_size="Small",
    max_num_clusters=1,
    tags=databricks.SqlEndpointTagsArgs(
        custom_tags=[databricks.SqlEndpointTagsCustomTagArgs(
            key="City",
            value="Amsterdam",
        )],
    ))
import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

// Look up the user whose credentials the provider is configured with.
// (Unused here, but commonly referenced by follow-up resources such as
// permissions.)
const me = databricks.getCurrentUser({});
// Provision a SQL warehouse: "Small" cluster size, capped at a single
// cluster, and tagged with one custom key/value tag.
const _this = new databricks.SqlEndpoint("this", {
    clusterSize: "Small",
    maxNumClusters: 1,
    tags: {
        customTags: [{
            key: "City",
            value: "Amsterdam",
        }],
    },
});
resources:
  # Provision a SQL warehouse: "Small" cluster size, capped at a single
  # cluster, and tagged with one custom key/value tag.
  this:
    type: databricks:SqlEndpoint
    properties:
      clusterSize: Small
      maxNumClusters: 1
      tags:
        customTags:
          - key: City
            value: Amsterdam
variables:
  # Look up the user whose credentials the provider is configured with.
  # (Unused here, but commonly referenced by follow-up resources such as
  # permissions.)
  me:
    fn::invoke:
      Function: databricks:getCurrentUser
      Arguments: {}

Create SqlEndpoint Resource

new SqlEndpoint(name: string, args: SqlEndpointArgs, opts?: CustomResourceOptions);
@overload
def SqlEndpoint(resource_name: str,
                opts: Optional[ResourceOptions] = None,
                auto_stop_mins: Optional[int] = None,
                channel: Optional[SqlEndpointChannelArgs] = None,
                cluster_size: Optional[str] = None,
                data_source_id: Optional[str] = None,
                enable_photon: Optional[bool] = None,
                enable_serverless_compute: Optional[bool] = None,
                instance_profile_arn: Optional[str] = None,
                jdbc_url: Optional[str] = None,
                max_num_clusters: Optional[int] = None,
                min_num_clusters: Optional[int] = None,
                name: Optional[str] = None,
                num_clusters: Optional[int] = None,
                odbc_params: Optional[SqlEndpointOdbcParamsArgs] = None,
                spot_instance_policy: Optional[str] = None,
                state: Optional[str] = None,
                tags: Optional[SqlEndpointTagsArgs] = None,
                warehouse_type: Optional[str] = None)
@overload
def SqlEndpoint(resource_name: str,
                args: SqlEndpointArgs,
                opts: Optional[ResourceOptions] = None)
func NewSqlEndpoint(ctx *Context, name string, args SqlEndpointArgs, opts ...ResourceOption) (*SqlEndpoint, error)
public SqlEndpoint(string name, SqlEndpointArgs args, CustomResourceOptions? opts = null)
public SqlEndpoint(String name, SqlEndpointArgs args)
public SqlEndpoint(String name, SqlEndpointArgs args, CustomResourceOptions options)
type: databricks:SqlEndpoint
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.

name string
The unique name of the resource.
args SqlEndpointArgs
The arguments to resource properties.
opts CustomResourceOptions
Bag of options to control resource's behavior.
resource_name str
The unique name of the resource.
args SqlEndpointArgs
The arguments to resource properties.
opts ResourceOptions
Bag of options to control resource's behavior.
ctx Context
Context object for the current deployment.
name string
The unique name of the resource.
args SqlEndpointArgs
The arguments to resource properties.
opts ResourceOption
Bag of options to control resource's behavior.
name string
The unique name of the resource.
args SqlEndpointArgs
The arguments to resource properties.
opts CustomResourceOptions
Bag of options to control resource's behavior.
name String
The unique name of the resource.
args SqlEndpointArgs
The arguments to resource properties.
options CustomResourceOptions
Bag of options to control resource's behavior.

SqlEndpoint Resource Properties

To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

Inputs

The SqlEndpoint resource accepts the following input properties:

ClusterSize string

The size of the clusters allocated to the endpoint: "2X-Small", "X-Small", "Small", "Medium", "Large", "X-Large", "2X-Large", "3X-Large", "4X-Large".

AutoStopMins int

Time in minutes until an idle SQL warehouse terminates all clusters and stops. This field is optional. The default is 120, set to 0 to disable the auto stop.

Channel SqlEndpointChannelArgs

block, consisting of following fields:

DataSourceId string

ID of the data source for this endpoint. This is used to bind a Databricks SQL query to an endpoint.

EnablePhoton bool

Whether to enable Photon. This field is optional and is enabled by default.

EnableServerlessCompute bool

Whether this SQL warehouse is a serverless endpoint. See below for details about the default values. To avoid ambiguity, especially for organizations with many workspaces, Databricks recommends that you always set this field explicitly.

  • For AWS, If omitted, the default is false for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between September 1, 2022 and April 30, 2023, the default remains the previous behavior which is default to true if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. If your account needs updated terms of use, workspace admins are prompted in the Databricks SQL UI. A workspace must meet the requirements and might require an update to its instance profile role to add a trust relationship.

  • For Azure, If omitted, the default is false for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between November 1, 2022 and May 19, 2023, the default remains the previous behavior which is default to true if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. A workspace must meet the requirements and might require an update to its Azure storage firewall.

InstanceProfileArn string
JdbcUrl string

JDBC connection string.

MaxNumClusters int

Maximum number of clusters available when a SQL warehouse is running. This field is required. If multi-cluster load balancing is not enabled, this defaults to 1.

MinNumClusters int

Minimum number of clusters available when a SQL warehouse is running. The default is 1.

Name string

Name of the SQL warehouse. Must be unique. (The release-channel values CHANNEL_NAME_PREVIEW and CHANNEL_NAME_CURRENT belong to the channel block's name field, not to this property.)

NumClusters int
OdbcParams SqlEndpointOdbcParamsArgs

ODBC connection params: odbc_params.hostname, odbc_params.path, odbc_params.protocol, and odbc_params.port.

SpotInstancePolicy string

The spot policy to use for allocating instances to clusters: COST_OPTIMIZED or RELIABILITY_OPTIMIZED. This field is optional. Default is COST_OPTIMIZED.

State string
Tags SqlEndpointTagsArgs

Databricks tags all endpoint resources with these tags.

WarehouseType string

SQL warehouse type; see the Databricks SQL warehouse documentation for AWS or Azure. Set to PRO or CLASSIC. If the field enable_serverless_compute has the value true either explicitly or through the default logic (see that field above for details), the default is PRO, which is required for serverless SQL warehouses. Otherwise, the default is CLASSIC.

ClusterSize string

The size of the clusters allocated to the endpoint: "2X-Small", "X-Small", "Small", "Medium", "Large", "X-Large", "2X-Large", "3X-Large", "4X-Large".

AutoStopMins int

Time in minutes until an idle SQL warehouse terminates all clusters and stops. This field is optional. The default is 120, set to 0 to disable the auto stop.

Channel SqlEndpointChannelArgs

block, consisting of following fields:

DataSourceId string

ID of the data source for this endpoint. This is used to bind a Databricks SQL query to an endpoint.

EnablePhoton bool

Whether to enable Photon. This field is optional and is enabled by default.

EnableServerlessCompute bool

Whether this SQL warehouse is a serverless endpoint. See below for details about the default values. To avoid ambiguity, especially for organizations with many workspaces, Databricks recommends that you always set this field explicitly.

  • For AWS, If omitted, the default is false for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between September 1, 2022 and April 30, 2023, the default remains the previous behavior which is default to true if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. If your account needs updated terms of use, workspace admins are prompted in the Databricks SQL UI. A workspace must meet the requirements and might require an update to its instance profile role to add a trust relationship.

  • For Azure, If omitted, the default is false for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between November 1, 2022 and May 19, 2023, the default remains the previous behavior which is default to true if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. A workspace must meet the requirements and might require an update to its Azure storage firewall.

InstanceProfileArn string
JdbcUrl string

JDBC connection string.

MaxNumClusters int

Maximum number of clusters available when a SQL warehouse is running. This field is required. If multi-cluster load balancing is not enabled, this defaults to 1.

MinNumClusters int

Minimum number of clusters available when a SQL warehouse is running. The default is 1.

Name string

Name of the SQL warehouse. Must be unique. (The release-channel values CHANNEL_NAME_PREVIEW and CHANNEL_NAME_CURRENT belong to the channel block's name field, not to this property.)

NumClusters int
OdbcParams SqlEndpointOdbcParamsArgs

ODBC connection params: odbc_params.hostname, odbc_params.path, odbc_params.protocol, and odbc_params.port.

SpotInstancePolicy string

The spot policy to use for allocating instances to clusters: COST_OPTIMIZED or RELIABILITY_OPTIMIZED. This field is optional. Default is COST_OPTIMIZED.

State string
Tags SqlEndpointTagsArgs

Databricks tags all endpoint resources with these tags.

WarehouseType string

SQL warehouse type; see the Databricks SQL warehouse documentation for AWS or Azure. Set to PRO or CLASSIC. If the field enable_serverless_compute has the value true either explicitly or through the default logic (see that field above for details), the default is PRO, which is required for serverless SQL warehouses. Otherwise, the default is CLASSIC.

clusterSize String

The size of the clusters allocated to the endpoint: "2X-Small", "X-Small", "Small", "Medium", "Large", "X-Large", "2X-Large", "3X-Large", "4X-Large".

autoStopMins Integer

Time in minutes until an idle SQL warehouse terminates all clusters and stops. This field is optional. The default is 120, set to 0 to disable the auto stop.

channel SqlEndpointChannelArgs

block, consisting of following fields:

dataSourceId String

ID of the data source for this endpoint. This is used to bind a Databricks SQL query to an endpoint.

enablePhoton Boolean

Whether to enable Photon. This field is optional and is enabled by default.

enableServerlessCompute Boolean

Whether this SQL warehouse is a serverless endpoint. See below for details about the default values. To avoid ambiguity, especially for organizations with many workspaces, Databricks recommends that you always set this field explicitly.

  • For AWS, If omitted, the default is false for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between September 1, 2022 and April 30, 2023, the default remains the previous behavior which is default to true if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. If your account needs updated terms of use, workspace admins are prompted in the Databricks SQL UI. A workspace must meet the requirements and might require an update to its instance profile role to add a trust relationship.

  • For Azure, If omitted, the default is false for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between November 1, 2022 and May 19, 2023, the default remains the previous behavior which is default to true if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. A workspace must meet the requirements and might require an update to its Azure storage firewall.

instanceProfileArn String
jdbcUrl String

JDBC connection string.

maxNumClusters Integer

Maximum number of clusters available when a SQL warehouse is running. This field is required. If multi-cluster load balancing is not enabled, this defaults to 1.

minNumClusters Integer

Minimum number of clusters available when a SQL warehouse is running. The default is 1.

name String

Name of the SQL warehouse. Must be unique. (The release-channel values CHANNEL_NAME_PREVIEW and CHANNEL_NAME_CURRENT belong to the channel block's name field, not to this property.)

numClusters Integer
odbcParams SqlEndpointOdbcParamsArgs

ODBC connection params: odbc_params.hostname, odbc_params.path, odbc_params.protocol, and odbc_params.port.

spotInstancePolicy String

The spot policy to use for allocating instances to clusters: COST_OPTIMIZED or RELIABILITY_OPTIMIZED. This field is optional. Default is COST_OPTIMIZED.

state String
tags SqlEndpointTagsArgs

Databricks tags all endpoint resources with these tags.

warehouseType String

SQL warehouse type; see the Databricks SQL warehouse documentation for AWS or Azure. Set to PRO or CLASSIC. If the field enable_serverless_compute has the value true either explicitly or through the default logic (see that field above for details), the default is PRO, which is required for serverless SQL warehouses. Otherwise, the default is CLASSIC.

clusterSize string

The size of the clusters allocated to the endpoint: "2X-Small", "X-Small", "Small", "Medium", "Large", "X-Large", "2X-Large", "3X-Large", "4X-Large".

autoStopMins number

Time in minutes until an idle SQL warehouse terminates all clusters and stops. This field is optional. The default is 120, set to 0 to disable the auto stop.

channel SqlEndpointChannelArgs

block, consisting of following fields:

dataSourceId string

ID of the data source for this endpoint. This is used to bind a Databricks SQL query to an endpoint.

enablePhoton boolean

Whether to enable Photon. This field is optional and is enabled by default.

enableServerlessCompute boolean

Whether this SQL warehouse is a serverless endpoint. See below for details about the default values. To avoid ambiguity, especially for organizations with many workspaces, Databricks recommends that you always set this field explicitly.

  • For AWS, If omitted, the default is false for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between September 1, 2022 and April 30, 2023, the default remains the previous behavior which is default to true if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. If your account needs updated terms of use, workspace admins are prompted in the Databricks SQL UI. A workspace must meet the requirements and might require an update to its instance profile role to add a trust relationship.

  • For Azure, If omitted, the default is false for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between November 1, 2022 and May 19, 2023, the default remains the previous behavior which is default to true if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. A workspace must meet the requirements and might require an update to its Azure storage firewall.

instanceProfileArn string
jdbcUrl string

JDBC connection string.

maxNumClusters number

Maximum number of clusters available when a SQL warehouse is running. This field is required. If multi-cluster load balancing is not enabled, this defaults to 1.

minNumClusters number

Minimum number of clusters available when a SQL warehouse is running. The default is 1.

name string

Name of the SQL warehouse. Must be unique. (The release-channel values CHANNEL_NAME_PREVIEW and CHANNEL_NAME_CURRENT belong to the channel block's name field, not to this property.)

numClusters number
odbcParams SqlEndpointOdbcParamsArgs

ODBC connection params: odbc_params.hostname, odbc_params.path, odbc_params.protocol, and odbc_params.port.

spotInstancePolicy string

The spot policy to use for allocating instances to clusters: COST_OPTIMIZED or RELIABILITY_OPTIMIZED. This field is optional. Default is COST_OPTIMIZED.

state string
tags SqlEndpointTagsArgs

Databricks tags all endpoint resources with these tags.

warehouseType string

SQL warehouse type; see the Databricks SQL warehouse documentation for AWS or Azure. Set to PRO or CLASSIC. If the field enable_serverless_compute has the value true either explicitly or through the default logic (see that field above for details), the default is PRO, which is required for serverless SQL warehouses. Otherwise, the default is CLASSIC.

cluster_size str

The size of the clusters allocated to the endpoint: "2X-Small", "X-Small", "Small", "Medium", "Large", "X-Large", "2X-Large", "3X-Large", "4X-Large".

auto_stop_mins int

Time in minutes until an idle SQL warehouse terminates all clusters and stops. This field is optional. The default is 120, set to 0 to disable the auto stop.

channel SqlEndpointChannelArgs

block, consisting of following fields:

data_source_id str

ID of the data source for this endpoint. This is used to bind a Databricks SQL query to an endpoint.

enable_photon bool

Whether to enable Photon. This field is optional and is enabled by default.

enable_serverless_compute bool

Whether this SQL warehouse is a serverless endpoint. See below for details about the default values. To avoid ambiguity, especially for organizations with many workspaces, Databricks recommends that you always set this field explicitly.

  • For AWS, If omitted, the default is false for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between September 1, 2022 and April 30, 2023, the default remains the previous behavior which is default to true if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. If your account needs updated terms of use, workspace admins are prompted in the Databricks SQL UI. A workspace must meet the requirements and might require an update to its instance profile role to add a trust relationship.

  • For Azure, If omitted, the default is false for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between November 1, 2022 and May 19, 2023, the default remains the previous behavior which is default to true if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. A workspace must meet the requirements and might require an update to its Azure storage firewall.

instance_profile_arn str
jdbc_url str

JDBC connection string.

max_num_clusters int

Maximum number of clusters available when a SQL warehouse is running. This field is required. If multi-cluster load balancing is not enabled, this defaults to 1.

min_num_clusters int

Minimum number of clusters available when a SQL warehouse is running. The default is 1.

name str

Name of the SQL warehouse. Must be unique. (The release-channel values CHANNEL_NAME_PREVIEW and CHANNEL_NAME_CURRENT belong to the channel block's name field, not to this property.)

num_clusters int
odbc_params SqlEndpointOdbcParamsArgs

ODBC connection params: odbc_params.hostname, odbc_params.path, odbc_params.protocol, and odbc_params.port.

spot_instance_policy str

The spot policy to use for allocating instances to clusters: COST_OPTIMIZED or RELIABILITY_OPTIMIZED. This field is optional. Default is COST_OPTIMIZED.

state str
tags SqlEndpointTagsArgs

Databricks tags all endpoint resources with these tags.

warehouse_type str

SQL warehouse type; see the Databricks SQL warehouse documentation for AWS or Azure. Set to PRO or CLASSIC. If the field enable_serverless_compute has the value true either explicitly or through the default logic (see that field above for details), the default is PRO, which is required for serverless SQL warehouses. Otherwise, the default is CLASSIC.

clusterSize String

The size of the clusters allocated to the endpoint: "2X-Small", "X-Small", "Small", "Medium", "Large", "X-Large", "2X-Large", "3X-Large", "4X-Large".

autoStopMins Number

Time in minutes until an idle SQL warehouse terminates all clusters and stops. This field is optional. The default is 120, set to 0 to disable the auto stop.

channel Property Map

block, consisting of following fields:

dataSourceId String

ID of the data source for this endpoint. This is used to bind a Databricks SQL query to an endpoint.

enablePhoton Boolean

Whether to enable Photon. This field is optional and is enabled by default.

enableServerlessCompute Boolean

Whether this SQL warehouse is a serverless endpoint. See below for details about the default values. To avoid ambiguity, especially for organizations with many workspaces, Databricks recommends that you always set this field explicitly.

  • For AWS, If omitted, the default is false for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between September 1, 2022 and April 30, 2023, the default remains the previous behavior which is default to true if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. If your account needs updated terms of use, workspace admins are prompted in the Databricks SQL UI. A workspace must meet the requirements and might require an update to its instance profile role to add a trust relationship.

  • For Azure, If omitted, the default is false for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between November 1, 2022 and May 19, 2023, the default remains the previous behavior which is default to true if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. A workspace must meet the requirements and might require an update to its Azure storage firewall.

instanceProfileArn String
jdbcUrl String

JDBC connection string.

maxNumClusters Number

Maximum number of clusters available when a SQL warehouse is running. This field is required. If multi-cluster load balancing is not enabled, this defaults to 1.

minNumClusters Number

Minimum number of clusters available when a SQL warehouse is running. The default is 1.

name String

Name of the SQL warehouse. Must be unique. (The release-channel values CHANNEL_NAME_PREVIEW and CHANNEL_NAME_CURRENT belong to the channel block's name field, not to this property.)

numClusters Number
odbcParams Property Map

ODBC connection params: odbc_params.hostname, odbc_params.path, odbc_params.protocol, and odbc_params.port.

spotInstancePolicy String

The spot policy to use for allocating instances to clusters: COST_OPTIMIZED or RELIABILITY_OPTIMIZED. This field is optional. Default is COST_OPTIMIZED.

state String
tags Property Map

Databricks tags all endpoint resources with these tags.

warehouseType String

SQL warehouse type; see the Databricks SQL warehouse documentation for AWS or Azure. Set to PRO or CLASSIC. If the field enable_serverless_compute has the value true either explicitly or through the default logic (see that field above for details), the default is PRO, which is required for serverless SQL warehouses. Otherwise, the default is CLASSIC.

Outputs

All input properties are implicitly available as output properties. Additionally, the SqlEndpoint resource produces the following output properties:

Id string

The provider-assigned unique ID for this managed resource.

Id string

The provider-assigned unique ID for this managed resource.

id String

The provider-assigned unique ID for this managed resource.

id string

The provider-assigned unique ID for this managed resource.

id str

The provider-assigned unique ID for this managed resource.

id String

The provider-assigned unique ID for this managed resource.

Look up Existing SqlEndpoint Resource

Get an existing SqlEndpoint resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

public static get(name: string, id: Input<ID>, state?: SqlEndpointState, opts?: CustomResourceOptions): SqlEndpoint
@staticmethod
def get(resource_name: str,
        id: str,
        opts: Optional[ResourceOptions] = None,
        auto_stop_mins: Optional[int] = None,
        channel: Optional[SqlEndpointChannelArgs] = None,
        cluster_size: Optional[str] = None,
        data_source_id: Optional[str] = None,
        enable_photon: Optional[bool] = None,
        enable_serverless_compute: Optional[bool] = None,
        instance_profile_arn: Optional[str] = None,
        jdbc_url: Optional[str] = None,
        max_num_clusters: Optional[int] = None,
        min_num_clusters: Optional[int] = None,
        name: Optional[str] = None,
        num_clusters: Optional[int] = None,
        odbc_params: Optional[SqlEndpointOdbcParamsArgs] = None,
        spot_instance_policy: Optional[str] = None,
        state: Optional[str] = None,
        tags: Optional[SqlEndpointTagsArgs] = None,
        warehouse_type: Optional[str] = None) -> SqlEndpoint
func GetSqlEndpoint(ctx *Context, name string, id IDInput, state *SqlEndpointState, opts ...ResourceOption) (*SqlEndpoint, error)
public static SqlEndpoint Get(string name, Input<string> id, SqlEndpointState? state, CustomResourceOptions? opts = null)
public static SqlEndpoint get(String name, Output<String> id, SqlEndpointState state, CustomResourceOptions options)
Resource lookup is not supported in YAML
name
The unique name of the resulting resource.
id
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
resource_name
The unique name of the resulting resource.
id
The unique provider ID of the resource to lookup.
name
The unique name of the resulting resource.
id
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
name
The unique name of the resulting resource.
id
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
name
The unique name of the resulting resource.
id
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
The following state arguments are supported:
AutoStopMins int

Time in minutes until an idle SQL warehouse terminates all clusters and stops. This field is optional. The default is 120, set to 0 to disable the auto stop.

Channel SqlEndpointChannelArgs

block, consisting of the following fields:

ClusterSize string

The size of the clusters allocated to the endpoint: "2X-Small", "X-Small", "Small", "Medium", "Large", "X-Large", "2X-Large", "3X-Large", "4X-Large".

DataSourceId string

ID of the data source for this endpoint. This is used to bind a Databricks SQL query to an endpoint.

EnablePhoton bool

Whether to enable Photon. This field is optional and is enabled by default.

EnableServerlessCompute bool

Whether this SQL warehouse is a serverless endpoint. See below for details about the default values. To avoid ambiguity, especially for organizations with many workspaces, Databricks recommends that you always set this field explicitly.

  • For AWS, if omitted, the default is false for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between September 1, 2022 and April 30, 2023, the default remains the previous behavior, which is to default to true if the workspace is enabled for serverless and meets the requirements for serverless SQL warehouses. If your account needs updated terms of use, workspace admins are prompted in the Databricks SQL UI. A workspace must meet the requirements and might require an update to its instance profile role to add a trust relationship.

  • For Azure, if omitted, the default is false for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between November 1, 2022 and May 19, 2023, the default remains the previous behavior, which is to default to true if the workspace is enabled for serverless and meets the requirements for serverless SQL warehouses. A workspace must meet the requirements and might require an update to its Azure storage firewall.

InstanceProfileArn string
JdbcUrl string

JDBC connection string.

MaxNumClusters int

Maximum number of clusters available when a SQL warehouse is running. This field is required. If multi-cluster load balancing is not enabled, this defaults to 1.

MinNumClusters int

Minimum number of clusters available when a SQL warehouse is running. The default is 1.

Name string

Name of the SQL warehouse. Must be unique.

NumClusters int
OdbcParams SqlEndpointOdbcParamsArgs

ODBC connection params: odbc_params.hostname, odbc_params.path, odbc_params.protocol, and odbc_params.port.

SpotInstancePolicy string

The spot policy to use for allocating instances to clusters: COST_OPTIMIZED or RELIABILITY_OPTIMIZED. This field is optional. Default is COST_OPTIMIZED.

State string
Tags SqlEndpointTagsArgs

Databricks tags all endpoint resources with these tags.

WarehouseType string

SQL warehouse type. See the SQL warehouse documentation for AWS or Azure. Set to PRO or CLASSIC. If the field enable_serverless_compute has the value true either explicitly or through the default logic (see that field above for details), the default is PRO, which is required for serverless SQL warehouses. Otherwise, the default is CLASSIC.

AutoStopMins int

Time in minutes until an idle SQL warehouse terminates all clusters and stops. This field is optional. The default is 120, set to 0 to disable the auto stop.

Channel SqlEndpointChannelArgs

block, consisting of the following fields:

ClusterSize string

The size of the clusters allocated to the endpoint: "2X-Small", "X-Small", "Small", "Medium", "Large", "X-Large", "2X-Large", "3X-Large", "4X-Large".

DataSourceId string

ID of the data source for this endpoint. This is used to bind a Databricks SQL query to an endpoint.

EnablePhoton bool

Whether to enable Photon. This field is optional and is enabled by default.

EnableServerlessCompute bool

Whether this SQL warehouse is a serverless endpoint. See below for details about the default values. To avoid ambiguity, especially for organizations with many workspaces, Databricks recommends that you always set this field explicitly.

  • For AWS, if omitted, the default is false for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between September 1, 2022 and April 30, 2023, the default remains the previous behavior, which is to default to true if the workspace is enabled for serverless and meets the requirements for serverless SQL warehouses. If your account needs updated terms of use, workspace admins are prompted in the Databricks SQL UI. A workspace must meet the requirements and might require an update to its instance profile role to add a trust relationship.

  • For Azure, if omitted, the default is false for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between November 1, 2022 and May 19, 2023, the default remains the previous behavior, which is to default to true if the workspace is enabled for serverless and meets the requirements for serverless SQL warehouses. A workspace must meet the requirements and might require an update to its Azure storage firewall.

InstanceProfileArn string
JdbcUrl string

JDBC connection string.

MaxNumClusters int

Maximum number of clusters available when a SQL warehouse is running. This field is required. If multi-cluster load balancing is not enabled, this defaults to 1.

MinNumClusters int

Minimum number of clusters available when a SQL warehouse is running. The default is 1.

Name string

Name of the SQL warehouse. Must be unique.

NumClusters int
OdbcParams SqlEndpointOdbcParamsArgs

ODBC connection params: odbc_params.hostname, odbc_params.path, odbc_params.protocol, and odbc_params.port.

SpotInstancePolicy string

The spot policy to use for allocating instances to clusters: COST_OPTIMIZED or RELIABILITY_OPTIMIZED. This field is optional. Default is COST_OPTIMIZED.

State string
Tags SqlEndpointTagsArgs

Databricks tags all endpoint resources with these tags.

WarehouseType string

SQL warehouse type. See the SQL warehouse documentation for AWS or Azure. Set to PRO or CLASSIC. If the field enable_serverless_compute has the value true either explicitly or through the default logic (see that field above for details), the default is PRO, which is required for serverless SQL warehouses. Otherwise, the default is CLASSIC.

autoStopMins Integer

Time in minutes until an idle SQL warehouse terminates all clusters and stops. This field is optional. The default is 120, set to 0 to disable the auto stop.

channel SqlEndpointChannelArgs

block, consisting of the following fields:

clusterSize String

The size of the clusters allocated to the endpoint: "2X-Small", "X-Small", "Small", "Medium", "Large", "X-Large", "2X-Large", "3X-Large", "4X-Large".

dataSourceId String

ID of the data source for this endpoint. This is used to bind a Databricks SQL query to an endpoint.

enablePhoton Boolean

Whether to enable Photon. This field is optional and is enabled by default.

enableServerlessCompute Boolean

Whether this SQL warehouse is a serverless endpoint. See below for details about the default values. To avoid ambiguity, especially for organizations with many workspaces, Databricks recommends that you always set this field explicitly.

  • For AWS, if omitted, the default is false for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between September 1, 2022 and April 30, 2023, the default remains the previous behavior, which is to default to true if the workspace is enabled for serverless and meets the requirements for serverless SQL warehouses. If your account needs updated terms of use, workspace admins are prompted in the Databricks SQL UI. A workspace must meet the requirements and might require an update to its instance profile role to add a trust relationship.

  • For Azure, if omitted, the default is false for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between November 1, 2022 and May 19, 2023, the default remains the previous behavior, which is to default to true if the workspace is enabled for serverless and meets the requirements for serverless SQL warehouses. A workspace must meet the requirements and might require an update to its Azure storage firewall.

instanceProfileArn String
jdbcUrl String

JDBC connection string.

maxNumClusters Integer

Maximum number of clusters available when a SQL warehouse is running. This field is required. If multi-cluster load balancing is not enabled, this defaults to 1.

minNumClusters Integer

Minimum number of clusters available when a SQL warehouse is running. The default is 1.

name String

Name of the SQL warehouse. Must be unique.

numClusters Integer
odbcParams SqlEndpointOdbcParamsArgs

ODBC connection params: odbc_params.hostname, odbc_params.path, odbc_params.protocol, and odbc_params.port.

spotInstancePolicy String

The spot policy to use for allocating instances to clusters: COST_OPTIMIZED or RELIABILITY_OPTIMIZED. This field is optional. Default is COST_OPTIMIZED.

state String
tags SqlEndpointTagsArgs

Databricks tags all endpoint resources with these tags.

warehouseType String

SQL warehouse type. See the SQL warehouse documentation for AWS or Azure. Set to PRO or CLASSIC. If the field enable_serverless_compute has the value true either explicitly or through the default logic (see that field above for details), the default is PRO, which is required for serverless SQL warehouses. Otherwise, the default is CLASSIC.

autoStopMins number

Time in minutes until an idle SQL warehouse terminates all clusters and stops. This field is optional. The default is 120, set to 0 to disable the auto stop.

channel SqlEndpointChannelArgs

block, consisting of the following fields:

clusterSize string

The size of the clusters allocated to the endpoint: "2X-Small", "X-Small", "Small", "Medium", "Large", "X-Large", "2X-Large", "3X-Large", "4X-Large".

dataSourceId string

ID of the data source for this endpoint. This is used to bind a Databricks SQL query to an endpoint.

enablePhoton boolean

Whether to enable Photon. This field is optional and is enabled by default.

enableServerlessCompute boolean

Whether this SQL warehouse is a serverless endpoint. See below for details about the default values. To avoid ambiguity, especially for organizations with many workspaces, Databricks recommends that you always set this field explicitly.

  • For AWS, if omitted, the default is false for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between September 1, 2022 and April 30, 2023, the default remains the previous behavior, which is to default to true if the workspace is enabled for serverless and meets the requirements for serverless SQL warehouses. If your account needs updated terms of use, workspace admins are prompted in the Databricks SQL UI. A workspace must meet the requirements and might require an update to its instance profile role to add a trust relationship.

  • For Azure, if omitted, the default is false for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between November 1, 2022 and May 19, 2023, the default remains the previous behavior, which is to default to true if the workspace is enabled for serverless and meets the requirements for serverless SQL warehouses. A workspace must meet the requirements and might require an update to its Azure storage firewall.

instanceProfileArn string
jdbcUrl string

JDBC connection string.

maxNumClusters number

Maximum number of clusters available when a SQL warehouse is running. This field is required. If multi-cluster load balancing is not enabled, this defaults to 1.

minNumClusters number

Minimum number of clusters available when a SQL warehouse is running. The default is 1.

name string

Name of the SQL warehouse. Must be unique.

numClusters number
odbcParams SqlEndpointOdbcParamsArgs

ODBC connection params: odbc_params.hostname, odbc_params.path, odbc_params.protocol, and odbc_params.port.

spotInstancePolicy string

The spot policy to use for allocating instances to clusters: COST_OPTIMIZED or RELIABILITY_OPTIMIZED. This field is optional. Default is COST_OPTIMIZED.

state string
tags SqlEndpointTagsArgs

Databricks tags all endpoint resources with these tags.

warehouseType string

SQL warehouse type. See the SQL warehouse documentation for AWS or Azure. Set to PRO or CLASSIC. If the field enable_serverless_compute has the value true either explicitly or through the default logic (see that field above for details), the default is PRO, which is required for serverless SQL warehouses. Otherwise, the default is CLASSIC.

auto_stop_mins int

Time in minutes until an idle SQL warehouse terminates all clusters and stops. This field is optional. The default is 120, set to 0 to disable the auto stop.

channel SqlEndpointChannelArgs

block, consisting of the following fields:

cluster_size str

The size of the clusters allocated to the endpoint: "2X-Small", "X-Small", "Small", "Medium", "Large", "X-Large", "2X-Large", "3X-Large", "4X-Large".

data_source_id str

ID of the data source for this endpoint. This is used to bind a Databricks SQL query to an endpoint.

enable_photon bool

Whether to enable Photon. This field is optional and is enabled by default.

enable_serverless_compute bool

Whether this SQL warehouse is a serverless endpoint. See below for details about the default values. To avoid ambiguity, especially for organizations with many workspaces, Databricks recommends that you always set this field explicitly.

  • For AWS, if omitted, the default is false for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between September 1, 2022 and April 30, 2023, the default remains the previous behavior, which is to default to true if the workspace is enabled for serverless and meets the requirements for serverless SQL warehouses. If your account needs updated terms of use, workspace admins are prompted in the Databricks SQL UI. A workspace must meet the requirements and might require an update to its instance profile role to add a trust relationship.

  • For Azure, if omitted, the default is false for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between November 1, 2022 and May 19, 2023, the default remains the previous behavior, which is to default to true if the workspace is enabled for serverless and meets the requirements for serverless SQL warehouses. A workspace must meet the requirements and might require an update to its Azure storage firewall.

instance_profile_arn str
jdbc_url str

JDBC connection string.

max_num_clusters int

Maximum number of clusters available when a SQL warehouse is running. This field is required. If multi-cluster load balancing is not enabled, this defaults to 1.

min_num_clusters int

Minimum number of clusters available when a SQL warehouse is running. The default is 1.

name str

Name of the SQL warehouse. Must be unique.

num_clusters int
odbc_params SqlEndpointOdbcParamsArgs

ODBC connection params: odbc_params.hostname, odbc_params.path, odbc_params.protocol, and odbc_params.port.

spot_instance_policy str

The spot policy to use for allocating instances to clusters: COST_OPTIMIZED or RELIABILITY_OPTIMIZED. This field is optional. Default is COST_OPTIMIZED.

state str
tags SqlEndpointTagsArgs

Databricks tags all endpoint resources with these tags.

warehouse_type str

SQL warehouse type. See the SQL warehouse documentation for AWS or Azure. Set to PRO or CLASSIC. If the field enable_serverless_compute has the value true either explicitly or through the default logic (see that field above for details), the default is PRO, which is required for serverless SQL warehouses. Otherwise, the default is CLASSIC.

autoStopMins Number

Time in minutes until an idle SQL warehouse terminates all clusters and stops. This field is optional. The default is 120, set to 0 to disable the auto stop.

channel Property Map

block, consisting of the following fields:

clusterSize String

The size of the clusters allocated to the endpoint: "2X-Small", "X-Small", "Small", "Medium", "Large", "X-Large", "2X-Large", "3X-Large", "4X-Large".

dataSourceId String

ID of the data source for this endpoint. This is used to bind a Databricks SQL query to an endpoint.

enablePhoton Boolean

Whether to enable Photon. This field is optional and is enabled by default.

enableServerlessCompute Boolean

Whether this SQL warehouse is a serverless endpoint. See below for details about the default values. To avoid ambiguity, especially for organizations with many workspaces, Databricks recommends that you always set this field explicitly.

  • For AWS, if omitted, the default is false for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between September 1, 2022 and April 30, 2023, the default remains the previous behavior, which is to default to true if the workspace is enabled for serverless and meets the requirements for serverless SQL warehouses. If your account needs updated terms of use, workspace admins are prompted in the Databricks SQL UI. A workspace must meet the requirements and might require an update to its instance profile role to add a trust relationship.

  • For Azure, if omitted, the default is false for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between November 1, 2022 and May 19, 2023, the default remains the previous behavior, which is to default to true if the workspace is enabled for serverless and meets the requirements for serverless SQL warehouses. A workspace must meet the requirements and might require an update to its Azure storage firewall.

instanceProfileArn String
jdbcUrl String

JDBC connection string.

maxNumClusters Number

Maximum number of clusters available when a SQL warehouse is running. This field is required. If multi-cluster load balancing is not enabled, this defaults to 1.

minNumClusters Number

Minimum number of clusters available when a SQL warehouse is running. The default is 1.

name String

Name of the SQL warehouse. Must be unique.

numClusters Number
odbcParams Property Map

ODBC connection params: odbc_params.hostname, odbc_params.path, odbc_params.protocol, and odbc_params.port.

spotInstancePolicy String

The spot policy to use for allocating instances to clusters: COST_OPTIMIZED or RELIABILITY_OPTIMIZED. This field is optional. Default is COST_OPTIMIZED.

state String
tags Property Map

Databricks tags all endpoint resources with these tags.

warehouseType String

SQL warehouse type. See the SQL warehouse documentation for AWS or Azure. Set to PRO or CLASSIC. If the field enable_serverless_compute has the value true either explicitly or through the default logic (see that field above for details), the default is PRO, which is required for serverless SQL warehouses. Otherwise, the default is CLASSIC.

Supporting Types

SqlEndpointChannel

Name string

Name of the Databricks SQL release channel. Possible values are: CHANNEL_NAME_PREVIEW and CHANNEL_NAME_CURRENT. Default is CHANNEL_NAME_CURRENT.

Name string

Name of the Databricks SQL release channel. Possible values are: CHANNEL_NAME_PREVIEW and CHANNEL_NAME_CURRENT. Default is CHANNEL_NAME_CURRENT.

name String

Name of the Databricks SQL release channel. Possible values are: CHANNEL_NAME_PREVIEW and CHANNEL_NAME_CURRENT. Default is CHANNEL_NAME_CURRENT.

name string

Name of the Databricks SQL release channel. Possible values are: CHANNEL_NAME_PREVIEW and CHANNEL_NAME_CURRENT. Default is CHANNEL_NAME_CURRENT.

name str

Name of the Databricks SQL release channel. Possible values are: CHANNEL_NAME_PREVIEW and CHANNEL_NAME_CURRENT. Default is CHANNEL_NAME_CURRENT.

name String

Name of the Databricks SQL release channel. Possible values are: CHANNEL_NAME_PREVIEW and CHANNEL_NAME_CURRENT. Default is CHANNEL_NAME_CURRENT.

SqlEndpointOdbcParams

Path string
Port int
Protocol string
Host string
Hostname string
Path string
Port int
Protocol string
Host string
Hostname string
path String
port Integer
protocol String
host String
hostname String
path string
port number
protocol string
host string
hostname string
path str
port int
protocol str
host str
hostname str
path String
port Number
protocol String
host String
hostname String

SqlEndpointTags

SqlEndpointTagsCustomTag

Key string
Value string
Key string
Value string
key String
value String
key string
value string
key str
value str
key String
value String

Import

You can import a databricks_sql_endpoint resource with an ID like the following:

 $ pulumi import databricks:index/sqlEndpoint:SqlEndpoint this <endpoint-id>

Package Details

Repository
databricks pulumi/pulumi-databricks
License
Apache-2.0
Notes

This Pulumi package is based on the databricks Terraform Provider.