Databricks v1.14.0, May 23, 2023

databricks.InstancePool


This resource allows you to manage instance pools, which reduce cluster start and auto-scaling times by maintaining a set of idle, ready-to-use cloud instances. When a cluster attached to a pool needs an instance, it first attempts to allocate one of the pool’s idle instances. If the pool has no idle instances, it expands by allocating a new instance from the instance provider in order to accommodate the cluster’s request. When a cluster releases an instance, it returns to the pool and is free for another cluster to use. Only clusters attached to a pool can use that pool’s idle instances.

Note Be aware that different cloud service providers use different node_type_id values, disk_specs, and potentially other configurations.

Access Control

  • databricks.Group and databricks.User can control which groups or individual users can create instance pools.
  • databricks.Permissions can control which groups or individual users can Manage or Attach to individual instance pools (see the sketch below).
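
A minimal TypeScript sketch of the second point, assuming the databricks.Permissions resource is pointed at the pool via instancePoolId (the pool, node type, and group names here are illustrative):

import * as databricks from "@pulumi/databricks";

const pool = new databricks.InstancePool("pool", {
    instancePoolName: "Shared Pool",
    nodeTypeId: "i3.xlarge", // assumed AWS node type
    minIdleInstances: 0,
    maxCapacity: 10,
    idleInstanceAutoterminationMinutes: 10,
});

// Let a group attach clusters to the pool without managing it.
const poolUsage = new databricks.Permissions("poolUsage", {
    instancePoolId: pool.id,
    accessControls: [{
        groupName: "data-engineers",      // hypothetical group
        permissionLevel: "CAN_ATTACH_TO", // use CAN_MANAGE for full control
    }],
});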

Example Usage

using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Databricks = Pulumi.Databricks;

return await Deployment.RunAsync(() => 
{
    var smallest = Databricks.GetNodeType.Invoke();

    var smallestNodes = new Databricks.InstancePool("smallestNodes", new()
    {
        InstancePoolName = "Smallest Nodes",
        MinIdleInstances = 0,
        MaxCapacity = 300,
        NodeTypeId = smallest.Apply(getNodeTypeResult => getNodeTypeResult.Id),
        AwsAttributes = new Databricks.Inputs.InstancePoolAwsAttributesArgs
        {
            Availability = "ON_DEMAND",
            ZoneId = "us-east-1a",
            SpotBidPricePercent = 100,
        },
        IdleInstanceAutoterminationMinutes = 10,
        DiskSpec = new Databricks.Inputs.InstancePoolDiskSpecArgs
        {
            DiskType = new Databricks.Inputs.InstancePoolDiskSpecDiskTypeArgs
            {
                EbsVolumeType = "GENERAL_PURPOSE_SSD",
            },
            DiskSize = 80,
            DiskCount = 1,
        },
    });

});
package main

import (
	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		smallest, err := databricks.GetNodeType(ctx, nil, nil)
		if err != nil {
			return err
		}
		_, err = databricks.NewInstancePool(ctx, "smallestNodes", &databricks.InstancePoolArgs{
			InstancePoolName: pulumi.String("Smallest Nodes"),
			MinIdleInstances: pulumi.Int(0),
			MaxCapacity:      pulumi.Int(300),
			NodeTypeId:       pulumi.String(smallest.Id),
			AwsAttributes: &databricks.InstancePoolAwsAttributesArgs{
				Availability:        pulumi.String("ON_DEMAND"),
				ZoneId:              pulumi.String("us-east-1a"),
				SpotBidPricePercent: pulumi.Int(100),
			},
			IdleInstanceAutoterminationMinutes: pulumi.Int(10),
			DiskSpec: &databricks.InstancePoolDiskSpecArgs{
				DiskType: &databricks.InstancePoolDiskSpecDiskTypeArgs{
					EbsVolumeType: pulumi.String("GENERAL_PURPOSE_SSD"),
				},
				DiskSize:  pulumi.Int(80),
				DiskCount: pulumi.Int(1),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.databricks.DatabricksFunctions;
import com.pulumi.databricks.inputs.GetNodeTypeArgs;
import com.pulumi.databricks.InstancePool;
import com.pulumi.databricks.InstancePoolArgs;
import com.pulumi.databricks.inputs.InstancePoolAwsAttributesArgs;
import com.pulumi.databricks.inputs.InstancePoolDiskSpecArgs;
import com.pulumi.databricks.inputs.InstancePoolDiskSpecDiskTypeArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        final var smallest = DatabricksFunctions.getNodeType();

        var smallestNodes = new InstancePool("smallestNodes", InstancePoolArgs.builder()        
            .instancePoolName("Smallest Nodes")
            .minIdleInstances(0)
            .maxCapacity(300)
            .nodeTypeId(smallest.applyValue(getNodeTypeResult -> getNodeTypeResult.id()))
            .awsAttributes(InstancePoolAwsAttributesArgs.builder()
                .availability("ON_DEMAND")
                .zoneId("us-east-1a")
                .spotBidPricePercent(100)
                .build())
            .idleInstanceAutoterminationMinutes(10)
            .diskSpec(InstancePoolDiskSpecArgs.builder()
                .diskType(InstancePoolDiskSpecDiskTypeArgs.builder()
                    .ebsVolumeType("GENERAL_PURPOSE_SSD")
                    .build())
                .diskSize(80)
                .diskCount(1)
                .build())
            .build());

    }
}
import pulumi
import pulumi_databricks as databricks

smallest = databricks.get_node_type()
smallest_nodes = databricks.InstancePool("smallestNodes",
    instance_pool_name="Smallest Nodes",
    min_idle_instances=0,
    max_capacity=300,
    node_type_id=smallest.id,
    aws_attributes=databricks.InstancePoolAwsAttributesArgs(
        availability="ON_DEMAND",
        zone_id="us-east-1a",
        spot_bid_price_percent=100,
    ),
    idle_instance_autotermination_minutes=10,
    disk_spec=databricks.InstancePoolDiskSpecArgs(
        disk_type=databricks.InstancePoolDiskSpecDiskTypeArgs(
            ebs_volume_type="GENERAL_PURPOSE_SSD",
        ),
        disk_size=80,
        disk_count=1,
    ))
import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

const smallest = databricks.getNodeType({});
const smallestNodes = new databricks.InstancePool("smallestNodes", {
    instancePoolName: "Smallest Nodes",
    minIdleInstances: 0,
    maxCapacity: 300,
    nodeTypeId: smallest.then(smallest => smallest.id),
    awsAttributes: {
        availability: "ON_DEMAND",
        zoneId: "us-east-1a",
        spotBidPricePercent: 100,
    },
    idleInstanceAutoterminationMinutes: 10,
    diskSpec: {
        diskType: {
            ebsVolumeType: "GENERAL_PURPOSE_SSD",
        },
        diskSize: 80,
        diskCount: 1,
    },
});
resources:
  smallestNodes:
    type: databricks:InstancePool
    properties:
      instancePoolName: Smallest Nodes
      minIdleInstances: 0
      maxCapacity: 300
      nodeTypeId: ${smallest.id}
      awsAttributes:
        availability: ON_DEMAND
        zoneId: us-east-1a
        spotBidPricePercent: 100
      idleInstanceAutoterminationMinutes: 10
      diskSpec:
        diskType:
          ebsVolumeType: GENERAL_PURPOSE_SSD
        diskSize: 80
        diskCount: 1
variables:
  smallest:
    fn::invoke:
      Function: databricks:getNodeType
      Arguments: {}
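
As a follow-on sketch (not part of the generated examples above), a cluster draws from the pool by referencing its ID. This assumes the TypeScript smallestNodes pool from above and the databricks.getSparkVersion data source to pick a runtime:

const latestLts = databricks.getSparkVersion({ longTermSupport: true });

const pooledCluster = new databricks.Cluster("pooledCluster", {
    clusterName: "Pooled Cluster", // illustrative name
    sparkVersion: latestLts.then(v => v.id),
    // Inherit the pool's node type; do not set nodeTypeId alongside a pool.
    instancePoolId: smallestNodes.id,
    numWorkers: 1,
    autoterminationMinutes: 20,
});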

Create InstancePool Resource

new InstancePool(name: string, args: InstancePoolArgs, opts?: CustomResourceOptions);
@overload
def InstancePool(resource_name: str,
                 opts: Optional[ResourceOptions] = None,
                 aws_attributes: Optional[InstancePoolAwsAttributesArgs] = None,
                 azure_attributes: Optional[InstancePoolAzureAttributesArgs] = None,
                 custom_tags: Optional[Mapping[str, Any]] = None,
                 disk_spec: Optional[InstancePoolDiskSpecArgs] = None,
                 enable_elastic_disk: Optional[bool] = None,
                 gcp_attributes: Optional[InstancePoolGcpAttributesArgs] = None,
                 idle_instance_autotermination_minutes: Optional[int] = None,
                 instance_pool_fleet_attributes: Optional[InstancePoolInstancePoolFleetAttributesArgs] = None,
                 instance_pool_id: Optional[str] = None,
                 instance_pool_name: Optional[str] = None,
                 max_capacity: Optional[int] = None,
                 min_idle_instances: Optional[int] = None,
                 node_type_id: Optional[str] = None,
                 preloaded_docker_images: Optional[Sequence[InstancePoolPreloadedDockerImageArgs]] = None,
                 preloaded_spark_versions: Optional[Sequence[str]] = None)
@overload
def InstancePool(resource_name: str,
                 args: InstancePoolArgs,
                 opts: Optional[ResourceOptions] = None)
func NewInstancePool(ctx *Context, name string, args InstancePoolArgs, opts ...ResourceOption) (*InstancePool, error)
public InstancePool(string name, InstancePoolArgs args, CustomResourceOptions? opts = null)
public InstancePool(String name, InstancePoolArgs args)
public InstancePool(String name, InstancePoolArgs args, CustomResourceOptions options)
type: databricks:InstancePool
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.

name string
The unique name of the resource.
args InstancePoolArgs
The arguments to resource properties.
opts CustomResourceOptions
Bag of options to control resource's behavior.
resource_name str
The unique name of the resource.
args InstancePoolArgs
The arguments to resource properties.
opts ResourceOptions
Bag of options to control resource's behavior.
ctx Context
Context object for the current deployment.
name string
The unique name of the resource.
args InstancePoolArgs
The arguments to resource properties.
opts ResourceOption
Bag of options to control resource's behavior.
name string
The unique name of the resource.
args InstancePoolArgs
The arguments to resource properties.
opts CustomResourceOptions
Bag of options to control resource's behavior.
name String
The unique name of the resource.
args InstancePoolArgs
The arguments to resource properties.
options CustomResourceOptions
Bag of options to control resource's behavior.

InstancePool Resource Properties

To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

Inputs

The InstancePool resource accepts the following input properties:

IdleInstanceAutoterminationMinutes int

(Integer) The number of minutes that idle instances in excess of the min_idle_instances are maintained by the pool before being terminated. If not specified, excess idle instances are terminated automatically after a default timeout period. If specified, the time must be between 0 and 10000 minutes. If you specify 0, excess idle instances are removed as soon as possible.

InstancePoolName string

(String) The name of the instance pool. This is required for create and edit operations. It must be unique, non-empty, and less than 100 characters.

AwsAttributes InstancePoolAwsAttributesArgs
AzureAttributes InstancePoolAzureAttributesArgs
CustomTags Dictionary<string, object>

(Map) Additional tags for instance pool resources. Databricks tags all pool resources (e.g. AWS & Azure instances and Disk volumes). The tags of the instance pool will propagate to the clusters using the pool (see the official documentation). Attempting to set the same tags in both cluster and instance pool will raise an error. Databricks allows at most 43 custom tags.

DiskSpec InstancePoolDiskSpecArgs
EnableElasticDisk bool

(Bool) Autoscaling Local Storage: when enabled, the instances in the pool dynamically acquire additional disk space when they are running low on disk space.

GcpAttributes InstancePoolGcpAttributesArgs
InstancePoolFleetAttributes InstancePoolInstancePoolFleetAttributesArgs
InstancePoolId string
MaxCapacity int

(Integer) The maximum number of instances the pool can contain, including both idle instances and ones in use by clusters. Once the maximum capacity is reached, you cannot create new clusters from the pool and existing clusters cannot autoscale up until some instances are made idle in the pool via cluster termination or down-scaling. There is no default limit, but as a best practice, this should be set based on anticipated usage.

MinIdleInstances int

(Integer) The minimum number of idle instances maintained by the pool. This is in addition to any instances in use by active clusters.

NodeTypeId string

(String) The node type for the instances in the pool. All clusters attached to the pool inherit this node type and the pool’s idle instances are allocated based on this type. You can retrieve a list of available node types by using the List Node Types API call.

PreloadedDockerImages List<InstancePoolPreloadedDockerImageArgs>
PreloadedSparkVersions List<string>

(List) A list with at most one runtime version the pool installs on each instance. Pool clusters that use a preloaded runtime version start faster as they do not have to wait for the image to download. You can retrieve them via databricks.getSparkVersion data source or via Runtime Versions API call.

IdleInstanceAutoterminationMinutes int

(Integer) The number of minutes that idle instances in excess of the min_idle_instances are maintained by the pool before being terminated. If not specified, excess idle instances are terminated automatically after a default timeout period. If specified, the time must be between 0 and 10000 minutes. If you specify 0, excess idle instances are removed as soon as possible.

InstancePoolName string

(String) The name of the instance pool. This is required for create and edit operations. It must be unique, non-empty, and less than 100 characters.

AwsAttributes InstancePoolAwsAttributesArgs
AzureAttributes InstancePoolAzureAttributesArgs
CustomTags map[string]interface{}

(Map) Additional tags for instance pool resources. Databricks tags all pool resources (e.g. AWS & Azure instances and Disk volumes). The tags of the instance pool will propagate to the clusters using the pool (see the official documentation). Attempting to set the same tags in both cluster and instance pool will raise an error. Databricks allows at most 43 custom tags.

DiskSpec InstancePoolDiskSpecArgs
EnableElasticDisk bool

(Bool) Autoscaling Local Storage: when enabled, the instances in the pool dynamically acquire additional disk space when they are running low on disk space.

GcpAttributes InstancePoolGcpAttributesArgs
InstancePoolFleetAttributes InstancePoolInstancePoolFleetAttributesArgs
InstancePoolId string
MaxCapacity int

(Integer) The maximum number of instances the pool can contain, including both idle instances and ones in use by clusters. Once the maximum capacity is reached, you cannot create new clusters from the pool and existing clusters cannot autoscale up until some instances are made idle in the pool via cluster termination or down-scaling. There is no default limit, but as a best practice, this should be set based on anticipated usage.

MinIdleInstances int

(Integer) The minimum number of idle instances maintained by the pool. This is in addition to any instances in use by active clusters.

NodeTypeId string

(String) The node type for the instances in the pool. All clusters attached to the pool inherit this node type and the pool’s idle instances are allocated based on this type. You can retrieve a list of available node types by using the List Node Types API call.

PreloadedDockerImages []InstancePoolPreloadedDockerImageArgs
PreloadedSparkVersions []string

(List) A list with at most one runtime version the pool installs on each instance. Pool clusters that use a preloaded runtime version start faster as they do not have to wait for the image to download. You can retrieve them via databricks.getSparkVersion data source or via Runtime Versions API call.

idleInstanceAutoterminationMinutes Integer

(Integer) The number of minutes that idle instances in excess of the min_idle_instances are maintained by the pool before being terminated. If not specified, excess idle instances are terminated automatically after a default timeout period. If specified, the time must be between 0 and 10000 minutes. If you specify 0, excess idle instances are removed as soon as possible.

instancePoolName String

(String) The name of the instance pool. This is required for create and edit operations. It must be unique, non-empty, and less than 100 characters.

awsAttributes InstancePoolAwsAttributesArgs
azureAttributes InstancePoolAzureAttributesArgs
customTags Map<String,Object>

(Map) Additional tags for instance pool resources. Databricks tags all pool resources (e.g. AWS & Azure instances and Disk volumes). The tags of the instance pool will propagate to the clusters using the pool (see the official documentation). Attempting to set the same tags in both cluster and instance pool will raise an error. Databricks allows at most 43 custom tags.

diskSpec InstancePoolDiskSpecArgs
enableElasticDisk Boolean

(Bool) Autoscaling Local Storage: when enabled, the instances in the pool dynamically acquire additional disk space when they are running low on disk space.

gcpAttributes InstancePoolGcpAttributesArgs
instancePoolFleetAttributes InstancePoolInstancePoolFleetAttributesArgs
instancePoolId String
maxCapacity Integer

(Integer) The maximum number of instances the pool can contain, including both idle instances and ones in use by clusters. Once the maximum capacity is reached, you cannot create new clusters from the pool and existing clusters cannot autoscale up until some instances are made idle in the pool via cluster termination or down-scaling. There is no default limit, but as a best practice, this should be set based on anticipated usage.

minIdleInstances Integer

(Integer) The minimum number of idle instances maintained by the pool. This is in addition to any instances in use by active clusters.

nodeTypeId String

(String) The node type for the instances in the pool. All clusters attached to the pool inherit this node type and the pool’s idle instances are allocated based on this type. You can retrieve a list of available node types by using the List Node Types API call.

preloadedDockerImages List<InstancePoolPreloadedDockerImageArgs>
preloadedSparkVersions List<String>

(List) A list with at most one runtime version the pool installs on each instance. Pool clusters that use a preloaded runtime version start faster as they do not have to wait for the image to download. You can retrieve them via databricks.getSparkVersion data source or via Runtime Versions API call.

idleInstanceAutoterminationMinutes number

(Integer) The number of minutes that idle instances in excess of the min_idle_instances are maintained by the pool before being terminated. If not specified, excess idle instances are terminated automatically after a default timeout period. If specified, the time must be between 0 and 10000 minutes. If you specify 0, excess idle instances are removed as soon as possible.

instancePoolName string

(String) The name of the instance pool. This is required for create and edit operations. It must be unique, non-empty, and less than 100 characters.

awsAttributes InstancePoolAwsAttributesArgs
azureAttributes InstancePoolAzureAttributesArgs
customTags {[key: string]: any}

(Map) Additional tags for instance pool resources. Databricks tags all pool resources (e.g. AWS & Azure instances and Disk volumes). The tags of the instance pool will propagate to the clusters using the pool (see the official documentation). Attempting to set the same tags in both cluster and instance pool will raise an error. Databricks allows at most 43 custom tags.

diskSpec InstancePoolDiskSpecArgs
enableElasticDisk boolean

(Bool) Autoscaling Local Storage: when enabled, the instances in the pool dynamically acquire additional disk space when they are running low on disk space.

gcpAttributes InstancePoolGcpAttributesArgs
instancePoolFleetAttributes InstancePoolInstancePoolFleetAttributesArgs
instancePoolId string
maxCapacity number

(Integer) The maximum number of instances the pool can contain, including both idle instances and ones in use by clusters. Once the maximum capacity is reached, you cannot create new clusters from the pool and existing clusters cannot autoscale up until some instances are made idle in the pool via cluster termination or down-scaling. There is no default limit, but as a best practice, this should be set based on anticipated usage.

minIdleInstances number

(Integer) The minimum number of idle instances maintained by the pool. This is in addition to any instances in use by active clusters.

nodeTypeId string

(String) The node type for the instances in the pool. All clusters attached to the pool inherit this node type and the pool’s idle instances are allocated based on this type. You can retrieve a list of available node types by using the List Node Types API call.

preloadedDockerImages InstancePoolPreloadedDockerImageArgs[]
preloadedSparkVersions string[]

(List) A list with at most one runtime version the pool installs on each instance. Pool clusters that use a preloaded runtime version start faster as they do not have to wait for the image to download. You can retrieve them via databricks.getSparkVersion data source or via Runtime Versions API call.

idle_instance_autotermination_minutes int

(Integer) The number of minutes that idle instances in excess of the min_idle_instances are maintained by the pool before being terminated. If not specified, excess idle instances are terminated automatically after a default timeout period. If specified, the time must be between 0 and 10000 minutes. If you specify 0, excess idle instances are removed as soon as possible.

instance_pool_name str

(String) The name of the instance pool. This is required for create and edit operations. It must be unique, non-empty, and less than 100 characters.

aws_attributes InstancePoolAwsAttributesArgs
azure_attributes InstancePoolAzureAttributesArgs
custom_tags Mapping[str, Any]

(Map) Additional tags for instance pool resources. Databricks tags all pool resources (e.g. AWS & Azure instances and Disk volumes). The tags of the instance pool will propagate to the clusters using the pool (see the official documentation). Attempting to set the same tags in both cluster and instance pool will raise an error. Databricks allows at most 43 custom tags.

disk_spec InstancePoolDiskSpecArgs
enable_elastic_disk bool

(Bool) Autoscaling Local Storage: when enabled, the instances in the pool dynamically acquire additional disk space when they are running low on disk space.

gcp_attributes InstancePoolGcpAttributesArgs
instance_pool_fleet_attributes InstancePoolInstancePoolFleetAttributesArgs
instance_pool_id str
max_capacity int

(Integer) The maximum number of instances the pool can contain, including both idle instances and ones in use by clusters. Once the maximum capacity is reached, you cannot create new clusters from the pool and existing clusters cannot autoscale up until some instances are made idle in the pool via cluster termination or down-scaling. There is no default limit, but as a best practice, this should be set based on anticipated usage.

min_idle_instances int

(Integer) The minimum number of idle instances maintained by the pool. This is in addition to any instances in use by active clusters.

node_type_id str

(String) The node type for the instances in the pool. All clusters attached to the pool inherit this node type and the pool’s idle instances are allocated based on this type. You can retrieve a list of available node types by using the List Node Types API call.

preloaded_docker_images Sequence[InstancePoolPreloadedDockerImageArgs]
preloaded_spark_versions Sequence[str]

(List) A list with at most one runtime version the pool installs on each instance. Pool clusters that use a preloaded runtime version start faster as they do not have to wait for the image to download. You can retrieve them via databricks.getSparkVersion data source or via Runtime Versions API call.

idleInstanceAutoterminationMinutes Number

(Integer) The number of minutes that idle instances in excess of the min_idle_instances are maintained by the pool before being terminated. If not specified, excess idle instances are terminated automatically after a default timeout period. If specified, the time must be between 0 and 10000 minutes. If you specify 0, excess idle instances are removed as soon as possible.

instancePoolName String

(String) The name of the instance pool. This is required for create and edit operations. It must be unique, non-empty, and less than 100 characters.

awsAttributes Property Map
azureAttributes Property Map
customTags Map<Any>

(Map) Additional tags for instance pool resources. Databricks tags all pool resources (e.g. AWS & Azure instances and Disk volumes). The tags of the instance pool will propagate to the clusters using the pool (see the official documentation). Attempting to set the same tags in both cluster and instance pool will raise an error. Databricks allows at most 43 custom tags.

diskSpec Property Map
enableElasticDisk Boolean

(Bool) Autoscaling Local Storage: when enabled, the instances in the pool dynamically acquire additional disk space when they are running low on disk space.

gcpAttributes Property Map
instancePoolFleetAttributes Property Map
instancePoolId String
maxCapacity Number

(Integer) The maximum number of instances the pool can contain, including both idle instances and ones in use by clusters. Once the maximum capacity is reached, you cannot create new clusters from the pool and existing clusters cannot autoscale up until some instances are made idle in the pool via cluster termination or down-scaling. There is no default limit, but as a best practice, this should be set based on anticipated usage.

minIdleInstances Number

(Integer) The minimum number of idle instances maintained by the pool. This is in addition to any instances in use by active clusters.

nodeTypeId String

(String) The node type for the instances in the pool. All clusters attached to the pool inherit this node type and the pool’s idle instances are allocated based on this type. You can retrieve a list of available node types by using the List Node Types API call.

preloadedDockerImages List<Property Map>
preloadedSparkVersions List<String>

(List) A list with at most one runtime version the pool installs on each instance. Pool clusters that use a preloaded runtime version start faster as they do not have to wait for the image to download. You can retrieve them via databricks.getSparkVersion data source or via Runtime Versions API call.
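
A hedged TypeScript sketch of preloading a runtime, assuming an AWS workspace (pool name and node type are illustrative); clusters created from the pool then skip the runtime image download:

const ltsRuntime = databricks.getSparkVersion({ longTermSupport: true });

const preloadedPool = new databricks.InstancePool("preloadedPool", {
    instancePoolName: "Preloaded LTS Pool",
    nodeTypeId: "i3.xlarge", // assumed node type
    minIdleInstances: 1,
    maxCapacity: 50,
    idleInstanceAutoterminationMinutes: 15,
    // At most one runtime version may be preloaded per pool.
    preloadedSparkVersions: [ltsRuntime.then(v => v.id)],
});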

Outputs

All input properties are implicitly available as output properties. Additionally, the InstancePool resource produces the following output properties:

Id string

The provider-assigned unique ID for this managed resource.

Id string

The provider-assigned unique ID for this managed resource.

id String

The provider-assigned unique ID for this managed resource.

id string

The provider-assigned unique ID for this managed resource.

id str

The provider-assigned unique ID for this managed resource.

id String

The provider-assigned unique ID for this managed resource.

Look up Existing InstancePool Resource

Get an existing InstancePool resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

public static get(name: string, id: Input<ID>, state?: InstancePoolState, opts?: CustomResourceOptions): InstancePool
@staticmethod
def get(resource_name: str,
        id: str,
        opts: Optional[ResourceOptions] = None,
        aws_attributes: Optional[InstancePoolAwsAttributesArgs] = None,
        azure_attributes: Optional[InstancePoolAzureAttributesArgs] = None,
        custom_tags: Optional[Mapping[str, Any]] = None,
        disk_spec: Optional[InstancePoolDiskSpecArgs] = None,
        enable_elastic_disk: Optional[bool] = None,
        gcp_attributes: Optional[InstancePoolGcpAttributesArgs] = None,
        idle_instance_autotermination_minutes: Optional[int] = None,
        instance_pool_fleet_attributes: Optional[InstancePoolInstancePoolFleetAttributesArgs] = None,
        instance_pool_id: Optional[str] = None,
        instance_pool_name: Optional[str] = None,
        max_capacity: Optional[int] = None,
        min_idle_instances: Optional[int] = None,
        node_type_id: Optional[str] = None,
        preloaded_docker_images: Optional[Sequence[InstancePoolPreloadedDockerImageArgs]] = None,
        preloaded_spark_versions: Optional[Sequence[str]] = None) -> InstancePool
func GetInstancePool(ctx *Context, name string, id IDInput, state *InstancePoolState, opts ...ResourceOption) (*InstancePool, error)
public static InstancePool Get(string name, Input<string> id, InstancePoolState? state, CustomResourceOptions? opts = null)
public static InstancePool get(String name, Output<String> id, InstancePoolState state, CustomResourceOptions options)
Resource lookup is not supported in YAML
name
The unique name of the resulting resource.
id
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
resource_name
The unique name of the resulting resource.
id
The unique provider ID of the resource to lookup.
name
The unique name of the resulting resource.
id
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
name
The unique name of the resulting resource.
id
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
name
The unique name of the resulting resource.
id
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
The following state arguments are supported:
AwsAttributes InstancePoolAwsAttributesArgs
AzureAttributes InstancePoolAzureAttributesArgs
CustomTags Dictionary<string, object>

(Map) Additional tags for instance pool resources. Databricks tags all pool resources (e.g. AWS & Azure instances and Disk volumes). The tags of the instance pool will propagate to the clusters using the pool (see the official documentation). Attempting to set the same tags in both cluster and instance pool will raise an error. Databricks allows at most 43 custom tags.

DiskSpec InstancePoolDiskSpecArgs
EnableElasticDisk bool

(Bool) Autoscaling Local Storage: when enabled, the instances in the pool dynamically acquire additional disk space when they are running low on disk space.

GcpAttributes InstancePoolGcpAttributesArgs
IdleInstanceAutoterminationMinutes int

(Integer) The number of minutes that idle instances in excess of the min_idle_instances are maintained by the pool before being terminated. If not specified, excess idle instances are terminated automatically after a default timeout period. If specified, the time must be between 0 and 10000 minutes. If you specify 0, excess idle instances are removed as soon as possible.

InstancePoolFleetAttributes InstancePoolInstancePoolFleetAttributesArgs
InstancePoolId string
InstancePoolName string

(String) The name of the instance pool. This is required for create and edit operations. It must be unique, non-empty, and less than 100 characters.

MaxCapacity int

(Integer) The maximum number of instances the pool can contain, including both idle instances and ones in use by clusters. Once the maximum capacity is reached, you cannot create new clusters from the pool and existing clusters cannot autoscale up until some instances are made idle in the pool via cluster termination or down-scaling. There is no default limit, but as a best practice, this should be set based on anticipated usage.

MinIdleInstances int

(Integer) The minimum number of idle instances maintained by the pool. This is in addition to any instances in use by active clusters.

NodeTypeId string

(String) The node type for the instances in the pool. All clusters attached to the pool inherit this node type and the pool’s idle instances are allocated based on this type. You can retrieve a list of available node types by using the List Node Types API call.

PreloadedDockerImages List<InstancePoolPreloadedDockerImageArgs>
PreloadedSparkVersions List<string>

(List) A list with at most one runtime version the pool installs on each instance. Pool clusters that use a preloaded runtime version start faster as they do not have to wait for the image to download. You can retrieve them via databricks.getSparkVersion data source or via Runtime Versions API call.

AwsAttributes InstancePoolAwsAttributesArgs
AzureAttributes InstancePoolAzureAttributesArgs
CustomTags map[string]interface{}

(Map) Additional tags for instance pool resources. Databricks tags all pool resources (e.g. AWS & Azure instances and Disk volumes). The tags of the instance pool will propagate to the clusters using the pool (see the official documentation). Attempting to set the same tags in both cluster and instance pool will raise an error. Databricks allows at most 43 custom tags.

DiskSpec InstancePoolDiskSpecArgs
EnableElasticDisk bool

(Bool) Autoscaling Local Storage: when enabled, the instances in the pool dynamically acquire additional disk space when they are running low on disk space.

GcpAttributes InstancePoolGcpAttributesArgs
IdleInstanceAutoterminationMinutes int

(Integer) The number of minutes that idle instances in excess of the min_idle_instances are maintained by the pool before being terminated. If not specified, excess idle instances are terminated automatically after a default timeout period. If specified, the time must be between 0 and 10000 minutes. If you specify 0, excess idle instances are removed as soon as possible.

InstancePoolFleetAttributes InstancePoolInstancePoolFleetAttributesArgs
InstancePoolId string
InstancePoolName string

(String) The name of the instance pool. This is required for create and edit operations. It must be unique, non-empty, and less than 100 characters.

MaxCapacity int

(Integer) The maximum number of instances the pool can contain, including both idle instances and ones in use by clusters. Once the maximum capacity is reached, you cannot create new clusters from the pool and existing clusters cannot autoscale up until some instances are made idle in the pool via cluster termination or down-scaling. There is no default limit, but as a best practice, this should be set based on anticipated usage.

MinIdleInstances int

(Integer) The minimum number of idle instances maintained by the pool. This is in addition to any instances in use by active clusters.

NodeTypeId string

(String) The node type for the instances in the pool. All clusters attached to the pool inherit this node type and the pool’s idle instances are allocated based on this type. You can retrieve a list of available node types by using the List Node Types API call.

PreloadedDockerImages []InstancePoolPreloadedDockerImageArgs
PreloadedSparkVersions []string

(List) A list with at most one runtime version the pool installs on each instance. Pool clusters that use a preloaded runtime version start faster as they do not have to wait for the image to download. You can retrieve them via databricks.getSparkVersion data source or via Runtime Versions API call.

awsAttributes InstancePoolAwsAttributesArgs
azureAttributes InstancePoolAzureAttributesArgs
customTags Map<String,Object>

(Map) Additional tags for instance pool resources. Databricks tags all pool resources (e.g. AWS & Azure instances and Disk volumes). The tags of the instance pool will propagate to the clusters using the pool (see the official documentation). Attempting to set the same tags in both cluster and instance pool will raise an error. Databricks allows at most 43 custom tags.

diskSpec InstancePoolDiskSpecArgs
enableElasticDisk Boolean

(Bool) Autoscaling Local Storage: when enabled, the instances in the pool dynamically acquire additional disk space when they are running low on disk space.

gcpAttributes InstancePoolGcpAttributesArgs
idleInstanceAutoterminationMinutes Integer

(Integer) The number of minutes that idle instances in excess of the min_idle_instances are maintained by the pool before being terminated. If not specified, excess idle instances are terminated automatically after a default timeout period. If specified, the time must be between 0 and 10000 minutes. If you specify 0, excess idle instances are removed as soon as possible.

instancePoolFleetAttributes InstancePoolInstancePoolFleetAttributesArgs
instancePoolId String
instancePoolName String

(String) The name of the instance pool. This is required for create and edit operations. It must be unique, non-empty, and less than 100 characters.

maxCapacity Integer

(Integer) The maximum number of instances the pool can contain, including both idle instances and ones in use by clusters. Once the maximum capacity is reached, you cannot create new clusters from the pool and existing clusters cannot autoscale up until some instances are made idle in the pool via cluster termination or down-scaling. There is no default limit, but as a best practice, this should be set based on anticipated usage.

minIdleInstances Integer

(Integer) The minimum number of idle instances maintained by the pool. This is in addition to any instances in use by active clusters.

nodeTypeId String

(String) The node type for the instances in the pool. All clusters attached to the pool inherit this node type and the pool’s idle instances are allocated based on this type. You can retrieve a list of available node types by using the List Node Types API call.

preloadedDockerImages List<InstancePoolPreloadedDockerImageArgs>
preloadedSparkVersions List<String>

(List) A list with at most one runtime version the pool installs on each instance. Pool clusters that use a preloaded runtime version start faster as they do not have to wait for the image to download. You can retrieve them via databricks.getSparkVersion data source or via Runtime Versions API call.

awsAttributes InstancePoolAwsAttributesArgs
azureAttributes InstancePoolAzureAttributesArgs
customTags {[key: string]: any}

(Map) Additional tags for instance pool resources. Databricks tags all pool resources (e.g. AWS & Azure instances and Disk volumes). The tags of the instance pool will propagate to the clusters using the pool (see the official documentation). Attempting to set the same tags in both cluster and instance pool will raise an error. Databricks allows at most 43 custom tags.

diskSpec InstancePoolDiskSpecArgs
enableElasticDisk boolean

(Bool) Autoscaling Local Storage: when enabled, the instances in the pool dynamically acquire additional disk space when they are running low on disk space.

gcpAttributes InstancePoolGcpAttributesArgs
idleInstanceAutoterminationMinutes number

(Integer) The number of minutes that idle instances in excess of the min_idle_instances are maintained by the pool before being terminated. If not specified, excess idle instances are terminated automatically after a default timeout period. If specified, the time must be between 0 and 10000 minutes. If you specify 0, excess idle instances are removed as soon as possible.

instancePoolFleetAttributes InstancePoolInstancePoolFleetAttributesArgs
instancePoolId string
instancePoolName string

(String) The name of the instance pool. This is required for create and edit operations. It must be unique, non-empty, and less than 100 characters.

maxCapacity number

(Integer) The maximum number of instances the pool can contain, including both idle instances and ones in use by clusters. Once the maximum capacity is reached, you cannot create new clusters from the pool and existing clusters cannot autoscale up until some instances are made idle in the pool via cluster termination or down-scaling. There is no default limit, but as a best practice, this should be set based on anticipated usage.

minIdleInstances number

(Integer) The minimum number of idle instances maintained by the pool. This is in addition to any instances in use by active clusters.

nodeTypeId string

(String) The node type for the instances in the pool. All clusters attached to the pool inherit this node type and the pool’s idle instances are allocated based on this type. You can retrieve a list of available node types by using the List Node Types API call.

preloadedDockerImages InstancePoolPreloadedDockerImageArgs[]
preloadedSparkVersions string[]

(List) A list with at most one runtime version the pool installs on each instance. Pool clusters that use a preloaded runtime version start faster as they do not have to wait for the image to download. You can retrieve them via databricks.getSparkVersion data source or via Runtime Versions API call.

aws_attributes InstancePoolAwsAttributesArgs
azure_attributes InstancePoolAzureAttributesArgs
custom_tags Mapping[str, Any]

(Map) Additional tags for instance pool resources. Databricks tags all pool resources (e.g. AWS & Azure instances and Disk volumes). The tags of the instance pool will propagate to the clusters using the pool (see the official documentation). Attempting to set the same tags in both cluster and instance pool will raise an error. Databricks allows at most 43 custom tags.

disk_spec InstancePoolDiskSpecArgs
enable_elastic_disk bool

(Bool) Autoscaling Local Storage: when enabled, the instances in the pool dynamically acquire additional disk space when they are running low on disk space.

gcp_attributes InstancePoolGcpAttributesArgs
idle_instance_autotermination_minutes int

(Integer) The number of minutes that idle instances in excess of the min_idle_instances are maintained by the pool before being terminated. If not specified, excess idle instances are terminated automatically after a default timeout period. If specified, the time must be between 0 and 10000 minutes. If you specify 0, excess idle instances are removed as soon as possible.

instance_pool_fleet_attributes InstancePoolInstancePoolFleetAttributesArgs
instance_pool_id str
instance_pool_name str

(String) The name of the instance pool. This is required for create and edit operations. It must be unique, non-empty, and less than 100 characters.

max_capacity int

(Integer) The maximum number of instances the pool can contain, including both idle instances and ones in use by clusters. Once the maximum capacity is reached, you cannot create new clusters from the pool and existing clusters cannot autoscale up until some instances are made idle in the pool via cluster termination or down-scaling. There is no default limit, but as a best practice, this should be set based on anticipated usage.

min_idle_instances int

(Integer) The minimum number of idle instances maintained by the pool. This is in addition to any instances in use by active clusters.

node_type_id str

(String) The node type for the instances in the pool. All clusters attached to the pool inherit this node type and the pool’s idle instances are allocated based on this type. You can retrieve a list of available node types by using the List Node Types API call.

preloaded_docker_images Sequence[InstancePoolPreloadedDockerImageArgs]
preloaded_spark_versions Sequence[str]

(List) A list with at most one runtime version the pool installs on each instance. Pool clusters that use a preloaded runtime version start faster as they do not have to wait for the image to download. You can retrieve them via databricks.getSparkVersion data source or via Runtime Versions API call.

awsAttributes Property Map
azureAttributes Property Map
customTags Map<Any>

(Map) Additional tags for instance pool resources. Databricks tags all pool resources (e.g. AWS & Azure instances and Disk volumes). The tags of the instance pool will propagate to the clusters using the pool (see the official documentation). Attempting to set the same tags in both cluster and instance pool will raise an error. Databricks allows at most 43 custom tags.

diskSpec Property Map
enableElasticDisk Boolean

(Bool) Autoscaling Local Storage: when enabled, the instances in the pool dynamically acquire additional disk space when they are running low on disk space.

gcpAttributes Property Map
idleInstanceAutoterminationMinutes Number

(Integer) The number of minutes that idle instances in excess of the min_idle_instances are maintained by the pool before being terminated. If not specified, excess idle instances are terminated automatically after a default timeout period. If specified, the time must be between 0 and 10000 minutes. If you specify 0, excess idle instances are removed as soon as possible.

instancePoolFleetAttributes Property Map
instancePoolId String
instancePoolName String

(String) The name of the instance pool. This is required for create and edit operations. It must be unique, non-empty, and less than 100 characters.

maxCapacity Number

(Integer) The maximum number of instances the pool can contain, including both idle instances and ones in use by clusters. Once the maximum capacity is reached, you cannot create new clusters from the pool and existing clusters cannot autoscale up until some instances are made idle in the pool via cluster termination or down-scaling. There is no default limit, but as a best practice, this should be set based on anticipated usage.

minIdleInstances Number

(Integer) The minimum number of idle instances maintained by the pool. This is in addition to any instances in use by active clusters.

nodeTypeId String

(String) The node type for the instances in the pool. All clusters attached to the pool inherit this node type and the pool’s idle instances are allocated based on this type. You can retrieve a list of available node types by using the List Node Types API call.

preloadedDockerImages List<Property Map>
preloadedSparkVersions List<String>

(List) A list with at most one runtime version the pool installs on each instance. Pool clusters that use a preloaded runtime version start faster as they do not have to wait for the image to download. You can retrieve them via databricks.getSparkVersion data source or via Runtime Versions API call.

Supporting Types

InstancePoolAwsAttributes

Availability string

(String) Availability type used for all instances in the pool. Only ON_DEMAND and SPOT are supported.

SpotBidPricePercent int

(Integer) The max price for AWS spot instances, as a percentage of the corresponding instance type’s on-demand price. For example, if this field is set to 50, and the instance pool needs a new i3.xlarge spot instance, then the max price is half of the price of on-demand i3.xlarge instances. Similarly, if this field is set to 200, the max price is twice the price of on-demand i3.xlarge instances. If not specified, the default value is 100. When spot instances are requested for this instance pool, only spot instances whose max price percentage matches this field are considered. For safety, this field cannot be greater than 10000.

ZoneId string

(String) Identifier for the availability zone/datacenter in which the instance pool resides. This string is of the form "us-west-2a". The provided availability zone must be in the same region as the Databricks deployment. For example, "us-west-2a" is not a valid zone ID if the Databricks deployment resides in the "us-east-1" region. If not specified, a default zone is used. You can find the list of available zones as well as the default value by using the List Zones API.

Availability string

(String) Availability type used for all instances in the pool. Only ON_DEMAND and SPOT are supported.

SpotBidPricePercent int

(Integer) The max price for AWS spot instances, as a percentage of the corresponding instance type’s on-demand price. For example, if this field is set to 50, and the instance pool needs a new i3.xlarge spot instance, then the max price is half of the price of on-demand i3.xlarge instances. Similarly, if this field is set to 200, the max price is twice the price of on-demand i3.xlarge instances. If not specified, the default value is 100. When spot instances are requested for this instance pool, only spot instances whose max price percentage matches this field are considered. For safety, this field cannot be greater than 10000.

ZoneId string

(String) Identifier for the availability zone/datacenter in which the instance pool resides. This string is of the form "us-west-2a". The provided availability zone must be in the same region as the Databricks deployment. For example, "us-west-2a" is not a valid zone ID if the Databricks deployment resides in the "us-east-1" region. If not specified, a default zone is used. You can find the list of available zones as well as the default value by using the List Zones API.

availability String

(String) Availability type used for all instances in the pool. Only ON_DEMAND and SPOT are supported.

spotBidPricePercent Integer

(Integer) The max price for AWS spot instances, as a percentage of the corresponding instance type’s on-demand price. For example, if this field is set to 50, and the instance pool needs a new i3.xlarge spot instance, then the max price is half of the price of on-demand i3.xlarge instances. Similarly, if this field is set to 200, the max price is twice the price of on-demand i3.xlarge instances. If not specified, the default value is 100. When spot instances are requested for this instance pool, only spot instances whose max price percentage matches this field are considered. For safety, this field cannot be greater than 10000.

zoneId String

(String) Identifier for the availability zone/datacenter in which the instance pool resides. This string is of the form "us-west-2a". The provided availability zone must be in the same region as the Databricks deployment. For example, "us-west-2a" is not a valid zone ID if the Databricks deployment resides in the "us-east-1" region. If not specified, a default zone is used. You can find the list of available zones as well as the default value by using the List Zones API.

availability string

(String) Availability type used for all instances in the pool. Only ON_DEMAND and SPOT are supported.

spotBidPricePercent number

(Integer) The max price for AWS spot instances, as a percentage of the corresponding instance type’s on-demand price. For example, if this field is set to 50, and the instance pool needs a new i3.xlarge spot instance, then the max price is half of the price of on-demand i3.xlarge instances. Similarly, if this field is set to 200, the max price is twice the price of on-demand i3.xlarge instances. If not specified, the default value is 100. When spot instances are requested for this instance pool, only spot instances whose max price percentage matches this field are considered. For safety, this field cannot be greater than 10000.

zoneId string

(String) Identifier for the availability zone/datacenter in which the instance pool resides. This string is of the form "us-west-2a". The provided availability zone must be in the same region as the Databricks deployment. For example, "us-west-2a" is not a valid zone ID if the Databricks deployment resides in the "us-east-1" region. If not specified, a default zone is used. You can find the list of available zones as well as the default value by using the List Zones API.

availability str

(String) Availability type used for all instances in the pool. Only ON_DEMAND and SPOT are supported.

spot_bid_price_percent int

(Integer) The max price for AWS spot instances, as a percentage of the corresponding instance type’s on-demand price. For example, if this field is set to 50, and the instance pool needs a new i3.xlarge spot instance, then the max price is half of the price of on-demand i3.xlarge instances. Similarly, if this field is set to 200, the max price is twice the price of on-demand i3.xlarge instances. If not specified, the default value is 100. When spot instances are requested for this instance pool, only spot instances whose max price percentage matches this field are considered. For safety, this field cannot be greater than 10000.

zone_id str

(String) Identifier for the availability zone/datacenter in which the instance pool resides. This string is of the form "us-west-2a". The provided availability zone must be in the same region as the Databricks deployment. For example, "us-west-2a" is not a valid zone ID if the Databricks deployment resides in the "us-east-1" region. If not specified, a default zone is used. You can find the list of available zones as well as the default value by using the List Zones API.

availability String

(String) Availability type used for all instances in the pool. Only ON_DEMAND and SPOT are supported.

spotBidPricePercent Number

(Integer) The max price for AWS spot instances, as a percentage of the corresponding instance type’s on-demand price. For example, if this field is set to 50, and the instance pool needs a new i3.xlarge spot instance, then the max price is half of the price of on-demand i3.xlarge instances. Similarly, if this field is set to 200, the max price is twice the price of on-demand i3.xlarge instances. If not specified, the default value is 100. When spot instances are requested for this instance pool, only spot instances whose max price percentage matches this field are considered. For safety, this field cannot be greater than 10000.

zoneId String

(String) Identifier for the availability zone/datacenter in which the instance pool resides. This string is of the form "us-west-2a". The provided availability zone must be in the same region as the Databricks deployment. For example, "us-west-2a" is not a valid zone ID if the Databricks deployment resides in the "us-east-1" region. If not specified, a default zone is used. You can find the list of available zones as well as the default value by using the List Zones API.
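
A hedged TypeScript variant of the main example, switching the pool to SPOT capacity (names are illustrative; 100 is also the default bid percentage):

const spotPool = new databricks.InstancePool("spotPool", {
    instancePoolName: "Spot Pool",
    nodeTypeId: "i3.xlarge", // assumed node type
    minIdleInstances: 0,
    maxCapacity: 100,
    idleInstanceAutoterminationMinutes: 10,
    awsAttributes: {
        availability: "SPOT",
        zoneId: "us-east-1a",
        spotBidPricePercent: 100, // bid up to the on-demand price
    },
});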

InstancePoolAzureAttributes

Availability string

Availability type used for all nodes. Valid values are SPOT_AZURE and ON_DEMAND_AZURE; default: ON_DEMAND_AZURE.

SpotBidMaxPrice double

The max price for Azure spot instances. Use -1 to specify the lowest price.

Availability string

Availability type used for all nodes. Valid values are SPOT_AZURE and ON_DEMAND_AZURE; default: ON_DEMAND_AZURE.

SpotBidMaxPrice float64

The max price for Azure spot instances. Use -1 to specify the lowest price.

availability String

Availability type used for all nodes. Valid values are SPOT_AZURE and ON_DEMAND_AZURE; default: ON_DEMAND_AZURE.

spotBidMaxPrice Double

The max price for Azure spot instances. Use -1 to specify the lowest price.

availability string

Availability type used for all nodes. Valid values are SPOT_AZURE and ON_DEMAND_AZURE; default: ON_DEMAND_AZURE.

spotBidMaxPrice number

The max price for Azure spot instances. Use -1 to specify the lowest price.

availability str

Availability type used for all nodes. Valid values are SPOT_AZURE and ON_DEMAND_AZURE; default: ON_DEMAND_AZURE.

spot_bid_max_price float

The max price for Azure spot instances. Use -1 to specify the lowest price.

availability String

Availability type used for all nodes. Valid values are SPOT_AZURE and ON_DEMAND_AZURE; default: ON_DEMAND_AZURE.

spotBidMaxPrice Number

The max price for Azure spot instances. Use -1 to specify the lowest price.
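
A minimal TypeScript sketch for Azure, assuming SPOT_AZURE is the desired availability (pool name and node type are illustrative):

const azureSpotPool = new databricks.InstancePool("azureSpotPool", {
    instancePoolName: "Azure Spot Pool",
    nodeTypeId: "Standard_DS3_v2", // assumed Azure node type
    minIdleInstances: 0,
    maxCapacity: 50,
    idleInstanceAutoterminationMinutes: 10,
    azureAttributes: {
        availability: "SPOT_AZURE",
        spotBidMaxPrice: -1, // take the lowest price, with no bid cap
    },
});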

InstancePoolDiskSpec

DiskCount int

(Integer) The number of disks to attach to each instance. This feature is only enabled for supported node types. Users can choose up to the limit of the disks supported by the node type. For node types with no local disk, at least one disk needs to be specified.

DiskSize int

(Integer) The size of each disk (in GiB) to attach.

DiskType InstancePoolDiskSpecDiskType
DiskCount int

(Integer) The number of disks to attach to each instance. This feature is only enabled for supported node types. Users can choose up to the limit of the disks supported by the node type. For node types with no local disk, at least one disk needs to be specified.

DiskSize int

(Integer) The size of each disk (in GiB) to attach.

DiskType InstancePoolDiskSpecDiskType
diskCount Integer

(Integer) The number of disks to attach to each instance. This feature is only enabled for supported node types. Users can choose up to the limit of the disks supported by the node type. For node types with no local disk, at least one disk needs to be specified.

diskSize Integer

(Integer) The size of each disk (in GiB) to attach.

diskType InstancePoolDiskSpecDiskType
diskCount number

(Integer) The number of disks to attach to each instance. This feature is only enabled for supported node types. Users can choose up to the limit of the disks supported by the node type. For node types with no local disk, at least one disk needs to be specified.

diskSize number

(Integer) The size of each disk (in GiB) to attach.

diskType InstancePoolDiskSpecDiskType
disk_count int

(Integer) The number of disks to attach to each instance. This feature is only enabled for supported node types. Users can choose up to the limit of the disks supported by the node type. For node types with no local disk, at least one disk needs to be specified.

disk_size int

(Integer) The size of each disk (in GiB) to attach.

disk_type InstancePoolDiskSpecDiskType
diskCount Number

(Integer) The number of disks to attach to each instance. This feature is only enabled for supported node types. Users can choose up to the limit of the disks supported by the node type. For node types with no local disk, at least one disk needs to be specified.

diskSize Number

(Integer) The size of each disk (in GiB) to attach.

diskType Property Map

InstancePoolDiskSpecDiskType

InstancePoolGcpAttributes

InstancePoolInstancePoolFleetAttributes

InstancePoolInstancePoolFleetAttributesFleetOnDemandOption

InstancePoolInstancePoolFleetAttributesFleetSpotOption

InstancePoolInstancePoolFleetAttributesLaunchTemplateOverride

InstancePoolPreloadedDockerImage

InstancePoolPreloadedDockerImageBasicAuth

Password string
Username string
Password string
Username string
password String
username String
password string
username string
password String
username String
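
A hedged TypeScript sketch of preloading a custom container image from a private registry, assuming the preloaded image block takes a url plus basicAuth (the registry URL is illustrative, and the credentials come from Pulumi config so secrets stay out of source):

import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

const cfg = new pulumi.Config();

const dockerPool = new databricks.InstancePool("dockerPool", {
    instancePoolName: "Docker Pool",
    nodeTypeId: "i3.xlarge", // assumed node type
    minIdleInstances: 0,
    maxCapacity: 20,
    idleInstanceAutoterminationMinutes: 10,
    preloadedDockerImages: [{
        url: "myregistry.example.com/databricks/custom:latest", // hypothetical image
        basicAuth: {
            username: cfg.require("registryUser"),
            password: cfg.requireSecret("registryPassword"),
        },
    }],
});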

Import

The instance pool resource can be imported using its id:

 $ pulumi import databricks:index/instancePool:InstancePool this <instance-pool-id>

Package Details

Repository
databricks pulumi/pulumi-databricks
License
Apache-2.0
Notes

This Pulumi package is based on the databricks Terraform Provider.