databricks.SqlEndpoint
Explore with Pulumi AI
This resource is used to manage Databricks SQL warehouses. To create SQL warehouses you must have databricks_sql_access
on your databricks.Group or databricks_user.
Access control
- databricks.Permissions can control which groups or individual users are granted the Can Use or Can Manage permission on SQL warehouses.
- databricks_sql_access on databricks.Group or databricks_user.
Related resources
The following resources are often used in the same context:
- End to end workspace management guide.
- databricks.InstanceProfile to manage AWS EC2 instance profiles with which users can launch databricks.Cluster resources and access data, such as databricks_mount.
- databricks.SqlDashboard to manage Databricks SQL Dashboards.
- databricks.SqlGlobalConfig to configure the security policy, databricks_instance_profile, and data access properties for all databricks.SqlEndpoint of workspace.
- databricks.SqlPermissions to manage data object access control lists in Databricks workspaces for things like tables, views, databases, and more.
Example Usage
// Example: provision a Databricks SQL warehouse ("Small", single cluster)
// tagged with a custom "City" tag.
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Databricks = Pulumi.Databricks;
return await Deployment.RunAsync(() =>
{
// Invoke the current-user data source; commonly used to derive unique,
// per-user resource names (the result is not otherwise used here).
var me = Databricks.GetCurrentUser.Invoke();
// `this` is a C# keyword, hence the verbatim identifier @this.
var @this = new Databricks.SqlEndpoint("this", new()
{
ClusterSize = "Small",
// Single cluster: multi-cluster load balancing is not needed here.
MaxNumClusters = 1,
// Databricks applies these custom tags to all resources the warehouse creates.
Tags = new Databricks.Inputs.SqlEndpointTagsArgs
{
CustomTags = new[]
{
new Databricks.Inputs.SqlEndpointTagsCustomTagArgs
{
Key = "City",
Value = "Amsterdam",
},
},
},
});
});
// Example: provision a Databricks SQL warehouse ("Small", single cluster)
// tagged with a custom "City" tag.
package main
import (
"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
// Invoke the current-user data source; commonly used to derive unique
// resource names (the result is discarded in this minimal example).
_, err := databricks.GetCurrentUser(ctx, nil, nil)
if err != nil {
return err
}
// Create the warehouse; the resource handle is discarded because
// nothing downstream references it in this example.
_, err = databricks.NewSqlEndpoint(ctx, "this", &databricks.SqlEndpointArgs{
ClusterSize: pulumi.String("Small"),
// Single cluster: multi-cluster load balancing is not needed here.
MaxNumClusters: pulumi.Int(1),
// Databricks applies these custom tags to all resources the warehouse creates.
Tags: &databricks.SqlEndpointTagsArgs{
CustomTags: databricks.SqlEndpointTagsCustomTagArray{
&databricks.SqlEndpointTagsCustomTagArgs{
Key: pulumi.String("City"),
Value: pulumi.String("Amsterdam"),
},
},
},
})
if err != nil {
return err
}
return nil
})
}
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.databricks.DatabricksFunctions;
import com.pulumi.databricks.SqlEndpoint;
import com.pulumi.databricks.SqlEndpointArgs;
import com.pulumi.databricks.inputs.SqlEndpointTagsArgs;
// Fix: this type is used below but was previously never imported,
// so the example did not compile.
import com.pulumi.databricks.inputs.SqlEndpointTagsCustomTagArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

/**
 * Example: provision a Databricks SQL warehouse ("Small", single cluster)
 * tagged with a custom "City" tag.
 */
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        // Invoke the current-user data source; commonly used to derive unique
        // resource names (the result is not otherwise used here).
        final var me = DatabricksFunctions.getCurrentUser();

        // `this` is a Java keyword, hence the local name this_.
        var this_ = new SqlEndpoint("this", SqlEndpointArgs.builder()
            .clusterSize("Small")
            // Single cluster: multi-cluster load balancing is not needed here.
            .maxNumClusters(1)
            // Databricks applies these custom tags to all resources the
            // warehouse creates.
            .tags(SqlEndpointTagsArgs.builder()
                .customTags(SqlEndpointTagsCustomTagArgs.builder()
                    .key("City")
                    .value("Amsterdam")
                    .build())
                .build())
            .build());
    }
}
import pulumi
import pulumi_databricks as databricks

# Identity of the currently authenticated user; commonly used to derive
# unique resource names (not otherwise used in this minimal example).
me = databricks.get_current_user()

# A single custom tag that Databricks applies to all resources the
# warehouse creates.
city_tag = databricks.SqlEndpointTagsCustomTagArgs(
    key="City",
    value="Amsterdam",
)

# A "Small" warehouse limited to one cluster (no multi-cluster balancing).
this = databricks.SqlEndpoint(
    "this",
    cluster_size="Small",
    max_num_clusters=1,
    tags=databricks.SqlEndpointTagsArgs(custom_tags=[city_tag]),
)
import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

// Identity of the currently authenticated user; commonly used to derive
// unique resource names (not otherwise used in this minimal example).
const me = databricks.getCurrentUser({});

// Custom tags that Databricks applies to all resources the warehouse creates.
const warehouseTags = {
    customTags: [{ key: "City", value: "Amsterdam" }],
};

// `this` is a reserved word, hence the leading underscore.
// A "Small" warehouse limited to one cluster (no multi-cluster balancing).
const _this = new databricks.SqlEndpoint("this", {
    clusterSize: "Small",
    maxNumClusters: 1,
    tags: warehouseTags,
});
# Example: provision a Databricks SQL warehouse ("Small", single cluster)
# tagged with a custom "City" tag.
# Fix: restored the nesting that was flattened in the published listing —
# YAML indentation is semantic, so the flat form was not valid.
resources:
  this:
    type: databricks:SqlEndpoint
    properties:
      clusterSize: Small
      # Single cluster: multi-cluster load balancing is not needed here.
      maxNumClusters: 1
      # Databricks applies these custom tags to all resources the warehouse creates.
      tags:
        customTags:
          - key: City
            value: Amsterdam
variables:
  # Identity of the currently authenticated user; commonly used to derive
  # unique resource names (not otherwise used in this minimal example).
  me:
    fn::invoke:
      Function: databricks:getCurrentUser
      Arguments: {}
Create SqlEndpoint Resource
new SqlEndpoint(name: string, args: SqlEndpointArgs, opts?: CustomResourceOptions);
@overload
def SqlEndpoint(resource_name: str,
opts: Optional[ResourceOptions] = None,
auto_stop_mins: Optional[int] = None,
channel: Optional[SqlEndpointChannelArgs] = None,
cluster_size: Optional[str] = None,
data_source_id: Optional[str] = None,
enable_photon: Optional[bool] = None,
enable_serverless_compute: Optional[bool] = None,
instance_profile_arn: Optional[str] = None,
jdbc_url: Optional[str] = None,
max_num_clusters: Optional[int] = None,
min_num_clusters: Optional[int] = None,
name: Optional[str] = None,
num_clusters: Optional[int] = None,
odbc_params: Optional[SqlEndpointOdbcParamsArgs] = None,
spot_instance_policy: Optional[str] = None,
state: Optional[str] = None,
tags: Optional[SqlEndpointTagsArgs] = None,
warehouse_type: Optional[str] = None)
@overload
def SqlEndpoint(resource_name: str,
args: SqlEndpointArgs,
opts: Optional[ResourceOptions] = None)
func NewSqlEndpoint(ctx *Context, name string, args SqlEndpointArgs, opts ...ResourceOption) (*SqlEndpoint, error)
public SqlEndpoint(string name, SqlEndpointArgs args, CustomResourceOptions? opts = null)
public SqlEndpoint(String name, SqlEndpointArgs args)
public SqlEndpoint(String name, SqlEndpointArgs args, CustomResourceOptions options)
type: databricks:SqlEndpoint
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args SqlEndpointArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args SqlEndpointArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args SqlEndpointArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args SqlEndpointArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args SqlEndpointArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
SqlEndpoint Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
The SqlEndpoint resource accepts the following input properties:
- Cluster Size (string)
  The size of the clusters allocated to the endpoint: "2X-Small", "X-Small", "Small", "Medium", "Large", "X-Large", "2X-Large", "3X-Large", "4X-Large".
- Auto Stop Mins (int)
  Time in minutes until an idle SQL warehouse terminates all clusters and stops. This field is optional. The default is 120; set to 0 to disable the auto stop.
- Channel (SqlEndpointChannelArgs)
  Block, consisting of the following fields:
- Data Source Id (string)
  ID of the data source for this endpoint. This is used to bind a Databricks SQL query to an endpoint.
- Enable Photon (bool)
  Whether to enable Photon. This field is optional and is enabled by default.
- Enable Serverless Compute (bool)
  Whether this SQL warehouse is a serverless endpoint. See below for details about the default values. To avoid ambiguity, especially for organizations with many workspaces, Databricks recommends that you always set this field explicitly.
  For AWS: if omitted, the default is false for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between September 1, 2022 and April 30, 2023, the default remains the previous behavior, which defaults to true if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. If your account needs updated terms of use, workspace admins are prompted in the Databricks SQL UI. A workspace must meet the requirements and might require an update to its instance profile role to add a trust relationship.
  For Azure: if omitted, the default is false for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between November 1, 2022 and May 19, 2023, the default remains the previous behavior, which defaults to true if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. A workspace must meet the requirements and might require an update to its Azure storage firewall.
- Instance Profile Arn (string)
- Jdbc Url (string)
  JDBC connection string.
- Max Num Clusters (int)
  Maximum number of clusters available when a SQL warehouse is running. This field is required. If multi-cluster load balancing is not enabled, this defaults to 1.
- Min Num Clusters (int)
  Minimum number of clusters available when a SQL warehouse is running. The default is 1.
- Name (string)
  Name of the SQL warehouse. Must be unique.
- Num Clusters (int)
- Odbc Params (SqlEndpointOdbcParamsArgs)
  ODBC connection params: odbc_params.hostname, odbc_params.path, odbc_params.protocol, and odbc_params.port.
- Spot Instance Policy (string)
  The spot policy to use for allocating instances to clusters: COST_OPTIMIZED or RELIABILITY_OPTIMIZED. This field is optional. Default is COST_OPTIMIZED.
- State (string)
- Tags (SqlEndpointTagsArgs)
  Databricks tags all endpoint resources with these tags.
- Warehouse Type (string)
  SQL warehouse type. See for AWS or Azure. Set to PRO or CLASSIC. If the field enable_serverless_compute has the value true either explicitly or through the default logic (see that field above for details), the default is PRO, which is required for serverless SQL warehouses. Otherwise, the default is CLASSIC.
- Cluster
Size string The size of the clusters allocated to the endpoint: "2X-Small", "X-Small", "Small", "Medium", "Large", "X-Large", "2X-Large", "3X-Large", "4X-Large".
- Auto
Stop intMins Time in minutes until an idle SQL warehouse terminates all clusters and stops. This field is optional. The default is 120, set to 0 to disable the auto stop.
- Channel
Sql
Endpoint Channel Args block, consisting of following fields:
- Data
Source stringId ID of the data source for this endpoint. This is used to bind an Databricks SQL query to an endpoint.
- Enable
Photon bool Whether to enable Photon. This field is optional and is enabled by default.
- Enable
Serverless boolCompute Whether this SQL warehouse is a serverless endpoint. See below for details about the default values. To avoid ambiguity, especially for organizations with many workspaces, Databricks recommends that you always set this field explicitly.
For AWS, If omitted, the default is
false
for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between September 1, 2022 and April 30, 2023, the default remains the previous behavior which is default totrue
if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. If your account needs updated terms of use, workspace admins are prompted in the Databricks SQL UI. A workspace must meet the requirements and might require an update to its instance profile role to add a trust relationship.For Azure, If omitted, the default is
false
for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between November 1, 2022 and May 19, 2023, the default remains the previous behavior which is default totrue
if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. A workspace must meet the requirements and might require an update to its Azure storage firewall.
- Instance
Profile stringArn - Jdbc
Url string JDBC connection string.
- Max
Num intClusters Maximum number of clusters available when a SQL warehouse is running. This field is required. If multi-cluster load balancing is not enabled, this is default to
1
.- Min
Num intClusters Minimum number of clusters available when a SQL warehouse is running. The default is
1
.- Name string
Name of the Databricks SQL release channel. Possible values are:
CHANNEL_NAME_PREVIEW
andCHANNEL_NAME_CURRENT
. Default isCHANNEL_NAME_CURRENT
.- Num
Clusters int - Odbc
Params SqlEndpoint Odbc Params Args ODBC connection params:
odbc_params.hostname
,odbc_params.path
,odbc_params.protocol
, andodbc_params.port
.- Spot
Instance stringPolicy The spot policy to use for allocating instances to clusters:
COST_OPTIMIZED
orRELIABILITY_OPTIMIZED
. This field is optional. Default isCOST_OPTIMIZED
.- State string
- Sql
Endpoint Tags Args Databricks tags all endpoint resources with these tags.
- Warehouse
Type string SQL warehouse type. See for AWS or Azure. Set to
PRO
orCLASSIC
. If the fieldenable_serverless_compute
has the valuetrue
either explicitly or through the default logic (see that field above for details), the default isPRO
, which is required for serverless SQL warehouses. Otherwise, the default isCLASSIC
.
- cluster
Size String The size of the clusters allocated to the endpoint: "2X-Small", "X-Small", "Small", "Medium", "Large", "X-Large", "2X-Large", "3X-Large", "4X-Large".
- auto
Stop IntegerMins Time in minutes until an idle SQL warehouse terminates all clusters and stops. This field is optional. The default is 120, set to 0 to disable the auto stop.
- channel
Sql
Endpoint Channel Args block, consisting of following fields:
- data
Source StringId ID of the data source for this endpoint. This is used to bind an Databricks SQL query to an endpoint.
- enable
Photon Boolean Whether to enable Photon. This field is optional and is enabled by default.
- enable
Serverless BooleanCompute Whether this SQL warehouse is a serverless endpoint. See below for details about the default values. To avoid ambiguity, especially for organizations with many workspaces, Databricks recommends that you always set this field explicitly.
For AWS, If omitted, the default is
false
for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between September 1, 2022 and April 30, 2023, the default remains the previous behavior which is default totrue
if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. If your account needs updated terms of use, workspace admins are prompted in the Databricks SQL UI. A workspace must meet the requirements and might require an update to its instance profile role to add a trust relationship.For Azure, If omitted, the default is
false
for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between November 1, 2022 and May 19, 2023, the default remains the previous behavior which is default totrue
if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. A workspace must meet the requirements and might require an update to its Azure storage firewall.
- instance
Profile StringArn - jdbc
Url String JDBC connection string.
- max
Num IntegerClusters Maximum number of clusters available when a SQL warehouse is running. This field is required. If multi-cluster load balancing is not enabled, this is default to
1
.- min
Num IntegerClusters Minimum number of clusters available when a SQL warehouse is running. The default is
1
.- name String
Name of the Databricks SQL release channel. Possible values are:
CHANNEL_NAME_PREVIEW
andCHANNEL_NAME_CURRENT
. Default isCHANNEL_NAME_CURRENT
.- num
Clusters Integer - odbc
Params SqlEndpoint Odbc Params Args ODBC connection params:
odbc_params.hostname
,odbc_params.path
,odbc_params.protocol
, andodbc_params.port
.- spot
Instance StringPolicy The spot policy to use for allocating instances to clusters:
COST_OPTIMIZED
orRELIABILITY_OPTIMIZED
. This field is optional. Default isCOST_OPTIMIZED
.- state String
- Sql
Endpoint Tags Args Databricks tags all endpoint resources with these tags.
- warehouse
Type String SQL warehouse type. See for AWS or Azure. Set to
PRO
orCLASSIC
. If the fieldenable_serverless_compute
has the valuetrue
either explicitly or through the default logic (see that field above for details), the default isPRO
, which is required for serverless SQL warehouses. Otherwise, the default isCLASSIC
.
- cluster
Size string The size of the clusters allocated to the endpoint: "2X-Small", "X-Small", "Small", "Medium", "Large", "X-Large", "2X-Large", "3X-Large", "4X-Large".
- auto
Stop numberMins Time in minutes until an idle SQL warehouse terminates all clusters and stops. This field is optional. The default is 120, set to 0 to disable the auto stop.
- channel
Sql
Endpoint Channel Args block, consisting of following fields:
- data
Source stringId ID of the data source for this endpoint. This is used to bind an Databricks SQL query to an endpoint.
- enable
Photon boolean Whether to enable Photon. This field is optional and is enabled by default.
- enable
Serverless booleanCompute Whether this SQL warehouse is a serverless endpoint. See below for details about the default values. To avoid ambiguity, especially for organizations with many workspaces, Databricks recommends that you always set this field explicitly.
For AWS, If omitted, the default is
false
for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between September 1, 2022 and April 30, 2023, the default remains the previous behavior which is default totrue
if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. If your account needs updated terms of use, workspace admins are prompted in the Databricks SQL UI. A workspace must meet the requirements and might require an update to its instance profile role to add a trust relationship.For Azure, If omitted, the default is
false
for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between November 1, 2022 and May 19, 2023, the default remains the previous behavior which is default totrue
if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. A workspace must meet the requirements and might require an update to its Azure storage firewall.
- instance
Profile stringArn - jdbc
Url string JDBC connection string.
- max
Num numberClusters Maximum number of clusters available when a SQL warehouse is running. This field is required. If multi-cluster load balancing is not enabled, this is default to
1
.- min
Num numberClusters Minimum number of clusters available when a SQL warehouse is running. The default is
1
.- name string
Name of the Databricks SQL release channel. Possible values are:
CHANNEL_NAME_PREVIEW
andCHANNEL_NAME_CURRENT
. Default isCHANNEL_NAME_CURRENT
.- num
Clusters number - odbc
Params SqlEndpoint Odbc Params Args ODBC connection params:
odbc_params.hostname
,odbc_params.path
,odbc_params.protocol
, andodbc_params.port
.- spot
Instance stringPolicy The spot policy to use for allocating instances to clusters:
COST_OPTIMIZED
orRELIABILITY_OPTIMIZED
. This field is optional. Default isCOST_OPTIMIZED
.- state string
- Sql
Endpoint Tags Args Databricks tags all endpoint resources with these tags.
- warehouse
Type string SQL warehouse type. See for AWS or Azure. Set to
PRO
orCLASSIC
. If the fieldenable_serverless_compute
has the valuetrue
either explicitly or through the default logic (see that field above for details), the default isPRO
, which is required for serverless SQL warehouses. Otherwise, the default isCLASSIC
.
- cluster_
size str The size of the clusters allocated to the endpoint: "2X-Small", "X-Small", "Small", "Medium", "Large", "X-Large", "2X-Large", "3X-Large", "4X-Large".
- auto_
stop_ intmins Time in minutes until an idle SQL warehouse terminates all clusters and stops. This field is optional. The default is 120, set to 0 to disable the auto stop.
- channel
Sql
Endpoint Channel Args block, consisting of following fields:
- data_
source_ strid ID of the data source for this endpoint. This is used to bind an Databricks SQL query to an endpoint.
- enable_
photon bool Whether to enable Photon. This field is optional and is enabled by default.
- enable_
serverless_ boolcompute Whether this SQL warehouse is a serverless endpoint. See below for details about the default values. To avoid ambiguity, especially for organizations with many workspaces, Databricks recommends that you always set this field explicitly.
For AWS, If omitted, the default is
false
for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between September 1, 2022 and April 30, 2023, the default remains the previous behavior which is default totrue
if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. If your account needs updated terms of use, workspace admins are prompted in the Databricks SQL UI. A workspace must meet the requirements and might require an update to its instance profile role to add a trust relationship.For Azure, If omitted, the default is
false
for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between November 1, 2022 and May 19, 2023, the default remains the previous behavior which is default totrue
if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. A workspace must meet the requirements and might require an update to its Azure storage firewall.
- instance_
profile_ strarn - jdbc_
url str JDBC connection string.
- max_
num_ intclusters Maximum number of clusters available when a SQL warehouse is running. This field is required. If multi-cluster load balancing is not enabled, this is default to
1
.- min_
num_ intclusters Minimum number of clusters available when a SQL warehouse is running. The default is
1
.- name str
Name of the Databricks SQL release channel. Possible values are:
CHANNEL_NAME_PREVIEW
andCHANNEL_NAME_CURRENT
. Default isCHANNEL_NAME_CURRENT
.- num_
clusters int - odbc_
params SqlEndpoint Odbc Params Args ODBC connection params:
odbc_params.hostname
,odbc_params.path
,odbc_params.protocol
, andodbc_params.port
.- spot_
instance_ strpolicy The spot policy to use for allocating instances to clusters:
COST_OPTIMIZED
orRELIABILITY_OPTIMIZED
. This field is optional. Default isCOST_OPTIMIZED
.- state str
- Sql
Endpoint Tags Args Databricks tags all endpoint resources with these tags.
- warehouse_
type str SQL warehouse type. See for AWS or Azure. Set to
PRO
orCLASSIC
. If the fieldenable_serverless_compute
has the valuetrue
either explicitly or through the default logic (see that field above for details), the default isPRO
, which is required for serverless SQL warehouses. Otherwise, the default isCLASSIC
.
- cluster
Size String The size of the clusters allocated to the endpoint: "2X-Small", "X-Small", "Small", "Medium", "Large", "X-Large", "2X-Large", "3X-Large", "4X-Large".
- auto
Stop NumberMins Time in minutes until an idle SQL warehouse terminates all clusters and stops. This field is optional. The default is 120, set to 0 to disable the auto stop.
- channel Property Map
block, consisting of following fields:
- data
Source StringId ID of the data source for this endpoint. This is used to bind an Databricks SQL query to an endpoint.
- enable
Photon Boolean Whether to enable Photon. This field is optional and is enabled by default.
- enable
Serverless BooleanCompute Whether this SQL warehouse is a serverless endpoint. See below for details about the default values. To avoid ambiguity, especially for organizations with many workspaces, Databricks recommends that you always set this field explicitly.
For AWS, If omitted, the default is
false
for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between September 1, 2022 and April 30, 2023, the default remains the previous behavior which is default totrue
if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. If your account needs updated terms of use, workspace admins are prompted in the Databricks SQL UI. A workspace must meet the requirements and might require an update to its instance profile role to add a trust relationship.For Azure, If omitted, the default is
false
for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between November 1, 2022 and May 19, 2023, the default remains the previous behavior which is default totrue
if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. A workspace must meet the requirements and might require an update to its Azure storage firewall.
- instance
Profile StringArn - jdbc
Url String JDBC connection string.
- max
Num NumberClusters Maximum number of clusters available when a SQL warehouse is running. This field is required. If multi-cluster load balancing is not enabled, this is default to
1
.- min
Num NumberClusters Minimum number of clusters available when a SQL warehouse is running. The default is
1
.- name String
Name of the Databricks SQL release channel. Possible values are:
CHANNEL_NAME_PREVIEW
andCHANNEL_NAME_CURRENT
. Default isCHANNEL_NAME_CURRENT
.- num
Clusters Number - odbc
Params Property Map ODBC connection params:
odbc_params.hostname
,odbc_params.path
,odbc_params.protocol
, andodbc_params.port
.- spot
Instance StringPolicy The spot policy to use for allocating instances to clusters:
COST_OPTIMIZED
orRELIABILITY_OPTIMIZED
. This field is optional. Default isCOST_OPTIMIZED
.- state String
- tags (Property Map)
  Databricks tags all endpoint resources with these tags.
- warehouse
Type String SQL warehouse type. See for AWS or Azure. Set to
PRO
orCLASSIC
. If the fieldenable_serverless_compute
has the valuetrue
either explicitly or through the default logic (see that field above for details), the default isPRO
, which is required for serverless SQL warehouses. Otherwise, the default isCLASSIC
.
Outputs
All input properties are implicitly available as output properties. Additionally, the SqlEndpoint resource produces the following output properties:
- Id string
The provider-assigned unique ID for this managed resource.
- Id string
The provider-assigned unique ID for this managed resource.
- id String
The provider-assigned unique ID for this managed resource.
- id string
The provider-assigned unique ID for this managed resource.
- id str
The provider-assigned unique ID for this managed resource.
- id String
The provider-assigned unique ID for this managed resource.
Look up Existing SqlEndpoint Resource
Get an existing SqlEndpoint resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: SqlEndpointState, opts?: CustomResourceOptions): SqlEndpoint
@staticmethod
def get(resource_name: str,
id: str,
opts: Optional[ResourceOptions] = None,
auto_stop_mins: Optional[int] = None,
channel: Optional[SqlEndpointChannelArgs] = None,
cluster_size: Optional[str] = None,
data_source_id: Optional[str] = None,
enable_photon: Optional[bool] = None,
enable_serverless_compute: Optional[bool] = None,
instance_profile_arn: Optional[str] = None,
jdbc_url: Optional[str] = None,
max_num_clusters: Optional[int] = None,
min_num_clusters: Optional[int] = None,
name: Optional[str] = None,
num_clusters: Optional[int] = None,
odbc_params: Optional[SqlEndpointOdbcParamsArgs] = None,
spot_instance_policy: Optional[str] = None,
state: Optional[str] = None,
tags: Optional[SqlEndpointTagsArgs] = None,
warehouse_type: Optional[str] = None) -> SqlEndpoint
func GetSqlEndpoint(ctx *Context, name string, id IDInput, state *SqlEndpointState, opts ...ResourceOption) (*SqlEndpoint, error)
public static SqlEndpoint Get(string name, Input<string> id, SqlEndpointState? state, CustomResourceOptions? opts = null)
public static SqlEndpoint get(String name, Output<String> id, SqlEndpointState state, CustomResourceOptions options)
Resource lookup is not supported in YAML
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- Auto
Stop intMins Time in minutes until an idle SQL warehouse terminates all clusters and stops. This field is optional. The default is 120, set to 0 to disable the auto stop.
- Channel
Sql
Endpoint Channel Args block, consisting of following fields:
- Cluster
Size string The size of the clusters allocated to the endpoint: "2X-Small", "X-Small", "Small", "Medium", "Large", "X-Large", "2X-Large", "3X-Large", "4X-Large".
- Data
Source stringId ID of the data source for this endpoint. This is used to bind a Databricks SQL query to an endpoint.
- Enable
Photon bool Whether to enable Photon. This field is optional and is enabled by default.
- Enable
Serverless boolCompute Whether this SQL warehouse is a serverless endpoint. See below for details about the default values. To avoid ambiguity, especially for organizations with many workspaces, Databricks recommends that you always set this field explicitly.
For AWS, if omitted, the default is false for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between September 1, 2022 and April 30, 2023, the default remains the previous behavior, which defaults to true if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. If your account needs updated terms of use, workspace admins are prompted in the Databricks SQL UI. A workspace must meet the requirements and might require an update to its instance profile role to add a trust relationship.
For Azure, if omitted, the default is false for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between November 1, 2022 and May 19, 2023, the default remains the previous behavior, which defaults to true if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. A workspace must meet the requirements and might require an update to its Azure storage firewall.
- Instance
Profile stringArn - Jdbc
Url string JDBC connection string.
- Max
Num intClusters Maximum number of clusters available when a SQL warehouse is running. This field is required. If multi-cluster load balancing is not enabled, this defaults to
1
.- Min
Num intClusters Minimum number of clusters available when a SQL warehouse is running. The default is
1
.- Name string
Name of the Databricks SQL release channel. Possible values are:
CHANNEL_NAME_PREVIEW
andCHANNEL_NAME_CURRENT
. Default isCHANNEL_NAME_CURRENT
.- Num
Clusters int - Odbc
Params SqlEndpoint Odbc Params Args ODBC connection params:
odbc_params.hostname
,odbc_params.path
,odbc_params.protocol
, andodbc_params.port
.- Spot
Instance stringPolicy The spot policy to use for allocating instances to clusters:
COST_OPTIMIZED
orRELIABILITY_OPTIMIZED
. This field is optional. Default isCOST_OPTIMIZED
.- State string
- Sql
Endpoint Tags Args Databricks tags all endpoint resources with these tags.
- Warehouse
Type string SQL warehouse type. See the Databricks documentation for AWS or Azure. Set to PRO or CLASSIC. If the field enable_serverless_compute has the value true either explicitly or through the default logic (see that field above for details), the default is PRO, which is required for serverless SQL warehouses. Otherwise, the default is CLASSIC.
- Auto
Stop intMins Time in minutes until an idle SQL warehouse terminates all clusters and stops. This field is optional. The default is 120, set to 0 to disable the auto stop.
- Channel
Sql
Endpoint Channel Args block, consisting of following fields:
- Cluster
Size string The size of the clusters allocated to the endpoint: "2X-Small", "X-Small", "Small", "Medium", "Large", "X-Large", "2X-Large", "3X-Large", "4X-Large".
- Data
Source stringId ID of the data source for this endpoint. This is used to bind a Databricks SQL query to an endpoint.
- Enable
Photon bool Whether to enable Photon. This field is optional and is enabled by default.
- Enable
Serverless boolCompute Whether this SQL warehouse is a serverless endpoint. See below for details about the default values. To avoid ambiguity, especially for organizations with many workspaces, Databricks recommends that you always set this field explicitly.
For AWS, if omitted, the default is
false
for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between September 1, 2022 and April 30, 2023, the default remains the previous behavior which is default totrue
if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. If your account needs updated terms of use, workspace admins are prompted in the Databricks SQL UI. A workspace must meet the requirements and might require an update to its instance profile role to add a trust relationship.For Azure, If omitted, the default is
false
for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between November 1, 2022 and May 19, 2023, the default remains the previous behavior which is default totrue
if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. A workspace must meet the requirements and might require an update to its Azure storage firewall.
- Instance
Profile stringArn - Jdbc
Url string JDBC connection string.
- Max
Num intClusters Maximum number of clusters available when a SQL warehouse is running. This field is required. If multi-cluster load balancing is not enabled, this defaults to
1
.- Min
Num intClusters Minimum number of clusters available when a SQL warehouse is running. The default is
1
.- Name string
Name of the Databricks SQL release channel. Possible values are:
CHANNEL_NAME_PREVIEW
andCHANNEL_NAME_CURRENT
. Default isCHANNEL_NAME_CURRENT
.- Num
Clusters int - Odbc
Params SqlEndpoint Odbc Params Args ODBC connection params:
odbc_params.hostname
,odbc_params.path
,odbc_params.protocol
, andodbc_params.port
.- Spot
Instance stringPolicy The spot policy to use for allocating instances to clusters:
COST_OPTIMIZED
orRELIABILITY_OPTIMIZED
. This field is optional. Default isCOST_OPTIMIZED
.- State string
- Sql
Endpoint Tags Args Databricks tags all endpoint resources with these tags.
- Warehouse
Type string SQL warehouse type. See the Databricks documentation for AWS or Azure. Set to
PRO
orCLASSIC
. If the fieldenable_serverless_compute
has the valuetrue
either explicitly or through the default logic (see that field above for details), the default isPRO
, which is required for serverless SQL warehouses. Otherwise, the default isCLASSIC
.
- auto
Stop IntegerMins Time in minutes until an idle SQL warehouse terminates all clusters and stops. This field is optional. The default is 120, set to 0 to disable the auto stop.
- channel
Sql
Endpoint Channel Args block, consisting of following fields:
- cluster
Size String The size of the clusters allocated to the endpoint: "2X-Small", "X-Small", "Small", "Medium", "Large", "X-Large", "2X-Large", "3X-Large", "4X-Large".
- data
Source StringId ID of the data source for this endpoint. This is used to bind a Databricks SQL query to an endpoint.
- enable
Photon Boolean Whether to enable Photon. This field is optional and is enabled by default.
- enable
Serverless BooleanCompute Whether this SQL warehouse is a serverless endpoint. See below for details about the default values. To avoid ambiguity, especially for organizations with many workspaces, Databricks recommends that you always set this field explicitly.
For AWS, if omitted, the default is
false
for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between September 1, 2022 and April 30, 2023, the default remains the previous behavior which is default totrue
if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. If your account needs updated terms of use, workspace admins are prompted in the Databricks SQL UI. A workspace must meet the requirements and might require an update to its instance profile role to add a trust relationship.For Azure, If omitted, the default is
false
for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between November 1, 2022 and May 19, 2023, the default remains the previous behavior which is default totrue
if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. A workspace must meet the requirements and might require an update to its Azure storage firewall.
- instance
Profile StringArn - jdbc
Url String JDBC connection string.
- max
Num IntegerClusters Maximum number of clusters available when a SQL warehouse is running. This field is required. If multi-cluster load balancing is not enabled, this defaults to
1
.- min
Num IntegerClusters Minimum number of clusters available when a SQL warehouse is running. The default is
1
.- name String
Name of the Databricks SQL release channel. Possible values are:
CHANNEL_NAME_PREVIEW
andCHANNEL_NAME_CURRENT
. Default isCHANNEL_NAME_CURRENT
.- num
Clusters Integer - odbc
Params SqlEndpoint Odbc Params Args ODBC connection params:
odbc_params.hostname
,odbc_params.path
,odbc_params.protocol
, andodbc_params.port
.- spot
Instance StringPolicy The spot policy to use for allocating instances to clusters:
COST_OPTIMIZED
orRELIABILITY_OPTIMIZED
. This field is optional. Default isCOST_OPTIMIZED
.- state String
- Sql
Endpoint Tags Args Databricks tags all endpoint resources with these tags.
- warehouse
Type String SQL warehouse type. See the Databricks documentation for AWS or Azure. Set to
PRO
orCLASSIC
. If the fieldenable_serverless_compute
has the valuetrue
either explicitly or through the default logic (see that field above for details), the default isPRO
, which is required for serverless SQL warehouses. Otherwise, the default isCLASSIC
.
- auto
Stop numberMins Time in minutes until an idle SQL warehouse terminates all clusters and stops. This field is optional. The default is 120, set to 0 to disable the auto stop.
- channel
Sql
Endpoint Channel Args block, consisting of following fields:
- cluster
Size string The size of the clusters allocated to the endpoint: "2X-Small", "X-Small", "Small", "Medium", "Large", "X-Large", "2X-Large", "3X-Large", "4X-Large".
- data
Source stringId ID of the data source for this endpoint. This is used to bind a Databricks SQL query to an endpoint.
- enable
Photon boolean Whether to enable Photon. This field is optional and is enabled by default.
- enable
Serverless booleanCompute Whether this SQL warehouse is a serverless endpoint. See below for details about the default values. To avoid ambiguity, especially for organizations with many workspaces, Databricks recommends that you always set this field explicitly.
For AWS, if omitted, the default is
false
for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between September 1, 2022 and April 30, 2023, the default remains the previous behavior which is default totrue
if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. If your account needs updated terms of use, workspace admins are prompted in the Databricks SQL UI. A workspace must meet the requirements and might require an update to its instance profile role to add a trust relationship.For Azure, If omitted, the default is
false
for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between November 1, 2022 and May 19, 2023, the default remains the previous behavior which is default totrue
if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. A workspace must meet the requirements and might require an update to its Azure storage firewall.
- instance
Profile stringArn - jdbc
Url string JDBC connection string.
- max
Num numberClusters Maximum number of clusters available when a SQL warehouse is running. This field is required. If multi-cluster load balancing is not enabled, this defaults to
1
.- min
Num numberClusters Minimum number of clusters available when a SQL warehouse is running. The default is
1
.- name string
Name of the Databricks SQL release channel. Possible values are:
CHANNEL_NAME_PREVIEW
andCHANNEL_NAME_CURRENT
. Default isCHANNEL_NAME_CURRENT
.- num
Clusters number - odbc
Params SqlEndpoint Odbc Params Args ODBC connection params:
odbc_params.hostname
,odbc_params.path
,odbc_params.protocol
, andodbc_params.port
.- spot
Instance stringPolicy The spot policy to use for allocating instances to clusters:
COST_OPTIMIZED
orRELIABILITY_OPTIMIZED
. This field is optional. Default isCOST_OPTIMIZED
.- state string
- Sql
Endpoint Tags Args Databricks tags all endpoint resources with these tags.
- warehouse
Type string SQL warehouse type. See the Databricks documentation for AWS or Azure. Set to
PRO
orCLASSIC
. If the fieldenable_serverless_compute
has the valuetrue
either explicitly or through the default logic (see that field above for details), the default isPRO
, which is required for serverless SQL warehouses. Otherwise, the default isCLASSIC
.
- auto_
stop_ intmins Time in minutes until an idle SQL warehouse terminates all clusters and stops. This field is optional. The default is 120, set to 0 to disable the auto stop.
- channel
Sql
Endpoint Channel Args block, consisting of following fields:
- cluster_
size str The size of the clusters allocated to the endpoint: "2X-Small", "X-Small", "Small", "Medium", "Large", "X-Large", "2X-Large", "3X-Large", "4X-Large".
- data_
source_ strid ID of the data source for this endpoint. This is used to bind a Databricks SQL query to an endpoint.
- enable_
photon bool Whether to enable Photon. This field is optional and is enabled by default.
- enable_
serverless_ boolcompute Whether this SQL warehouse is a serverless endpoint. See below for details about the default values. To avoid ambiguity, especially for organizations with many workspaces, Databricks recommends that you always set this field explicitly.
For AWS, if omitted, the default is
false
for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between September 1, 2022 and April 30, 2023, the default remains the previous behavior which is default totrue
if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. If your account needs updated terms of use, workspace admins are prompted in the Databricks SQL UI. A workspace must meet the requirements and might require an update to its instance profile role to add a trust relationship.For Azure, If omitted, the default is
false
for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between November 1, 2022 and May 19, 2023, the default remains the previous behavior which is default totrue
if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. A workspace must meet the requirements and might require an update to its Azure storage firewall.
- instance_
profile_ strarn - jdbc_
url str JDBC connection string.
- max_
num_ intclusters Maximum number of clusters available when a SQL warehouse is running. This field is required. If multi-cluster load balancing is not enabled, this defaults to
1
.- min_
num_ intclusters Minimum number of clusters available when a SQL warehouse is running. The default is
1
.- name str
Name of the Databricks SQL release channel. Possible values are:
CHANNEL_NAME_PREVIEW
andCHANNEL_NAME_CURRENT
. Default isCHANNEL_NAME_CURRENT
.- num_
clusters int - odbc_
params SqlEndpoint Odbc Params Args ODBC connection params:
odbc_params.hostname
,odbc_params.path
,odbc_params.protocol
, andodbc_params.port
.- spot_
instance_ strpolicy The spot policy to use for allocating instances to clusters:
COST_OPTIMIZED
orRELIABILITY_OPTIMIZED
. This field is optional. Default isCOST_OPTIMIZED
.- state str
- Sql
Endpoint Tags Args Databricks tags all endpoint resources with these tags.
- warehouse_
type str SQL warehouse type. See the Databricks documentation for AWS or Azure. Set to
PRO
orCLASSIC
. If the fieldenable_serverless_compute
has the valuetrue
either explicitly or through the default logic (see that field above for details), the default isPRO
, which is required for serverless SQL warehouses. Otherwise, the default isCLASSIC
.
- auto
Stop NumberMins Time in minutes until an idle SQL warehouse terminates all clusters and stops. This field is optional. The default is 120, set to 0 to disable the auto stop.
- channel Property Map
block, consisting of following fields:
- cluster
Size String The size of the clusters allocated to the endpoint: "2X-Small", "X-Small", "Small", "Medium", "Large", "X-Large", "2X-Large", "3X-Large", "4X-Large".
- data
Source StringId ID of the data source for this endpoint. This is used to bind a Databricks SQL query to an endpoint.
- enable
Photon Boolean Whether to enable Photon. This field is optional and is enabled by default.
- enable
Serverless BooleanCompute Whether this SQL warehouse is a serverless endpoint. See below for details about the default values. To avoid ambiguity, especially for organizations with many workspaces, Databricks recommends that you always set this field explicitly.
For AWS, if omitted, the default is
false
for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between September 1, 2022 and April 30, 2023, the default remains the previous behavior which is default totrue
if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. If your account needs updated terms of use, workspace admins are prompted in the Databricks SQL UI. A workspace must meet the requirements and might require an update to its instance profile role to add a trust relationship.For Azure, If omitted, the default is
false
for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between November 1, 2022 and May 19, 2023, the default remains the previous behavior which is default totrue
if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. A workspace must meet the requirements and might require an update to its Azure storage firewall.
- instance
Profile StringArn - jdbc
Url String JDBC connection string.
- max
Num NumberClusters Maximum number of clusters available when a SQL warehouse is running. This field is required. If multi-cluster load balancing is not enabled, this defaults to
1
.- min
Num NumberClusters Minimum number of clusters available when a SQL warehouse is running. The default is
1
.- name String
Name of the Databricks SQL release channel. Possible values are:
CHANNEL_NAME_PREVIEW
andCHANNEL_NAME_CURRENT
. Default isCHANNEL_NAME_CURRENT
.- num
Clusters Number - odbc
Params Property Map ODBC connection params:
odbc_params.hostname
,odbc_params.path
,odbc_params.protocol
, andodbc_params.port
.- spot
Instance StringPolicy The spot policy to use for allocating instances to clusters:
COST_OPTIMIZED
orRELIABILITY_OPTIMIZED
. This field is optional. Default isCOST_OPTIMIZED
.- state String
- Property Map
Databricks tags all endpoint resources with these tags.
- warehouse
Type String SQL warehouse type. See the Databricks documentation for AWS or Azure. Set to
PRO
orCLASSIC
. If the fieldenable_serverless_compute
has the valuetrue
either explicitly or through the default logic (see that field above for details), the default isPRO
, which is required for serverless SQL warehouses. Otherwise, the default isCLASSIC
.
Supporting Types
SqlEndpointChannel
- Name string
Name of the Databricks SQL release channel. Possible values are:
CHANNEL_NAME_PREVIEW
andCHANNEL_NAME_CURRENT
. Default isCHANNEL_NAME_CURRENT
.
- Name string
Name of the Databricks SQL release channel. Possible values are:
CHANNEL_NAME_PREVIEW
andCHANNEL_NAME_CURRENT
. Default isCHANNEL_NAME_CURRENT
.
- name String
Name of the Databricks SQL release channel. Possible values are:
CHANNEL_NAME_PREVIEW
andCHANNEL_NAME_CURRENT
. Default isCHANNEL_NAME_CURRENT
.
- name string
Name of the Databricks SQL release channel. Possible values are:
CHANNEL_NAME_PREVIEW
andCHANNEL_NAME_CURRENT
. Default isCHANNEL_NAME_CURRENT
.
- name str
Name of the Databricks SQL release channel. Possible values are:
CHANNEL_NAME_PREVIEW
andCHANNEL_NAME_CURRENT
. Default isCHANNEL_NAME_CURRENT
.
- name String
Name of the Databricks SQL release channel. Possible values are:
CHANNEL_NAME_PREVIEW
andCHANNEL_NAME_CURRENT
. Default isCHANNEL_NAME_CURRENT
.
SqlEndpointOdbcParams
SqlEndpointTags
SqlEndpointTagsCustomTag
Import
You can import a databricks_sql_endpoint
resource with an ID like the following:
$ pulumi import databricks:index/sqlEndpoint:SqlEndpoint this <endpoint-id>
Package Details
- Repository
- databricks pulumi/pulumi-databricks
- License
- Apache-2.0
- Notes
This Pulumi package is based on the
databricks
Terraform Provider.