databricks.InstancePool

Databricks v1.36.0 published on Friday, Apr 19, 2024 by Pulumi

    This resource allows you to manage instance pools, which reduce cluster start and auto-scaling times by maintaining a set of idle, ready-to-use cloud instances. When a cluster attached to a pool needs an instance, it first attempts to allocate one of the pool’s idle instances. If the pool has no idle instances, it expands by allocating a new instance from the instance provider in order to accommodate the cluster’s request. When a cluster releases an instance, it returns to the pool and is free for another cluster to use. Only clusters attached to a pool can use that pool’s idle instances.

    Note It is important to know that different cloud service providers use different node_type_id, disk_specs, and potentially other configuration options.

    Example Usage

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";
    
    const smallest = databricks.getNodeType({});
    const smallestNodes = new databricks.InstancePool("smallest_nodes", {
        instancePoolName: "Smallest Nodes",
        minIdleInstances: 0,
        maxCapacity: 300,
        nodeTypeId: smallest.then(smallest => smallest.id),
        awsAttributes: {
            availability: "ON_DEMAND",
            zoneId: "us-east-1a",
            spotBidPricePercent: 100,
        },
        idleInstanceAutoterminationMinutes: 10,
        diskSpec: {
            diskType: {
                ebsVolumeType: "GENERAL_PURPOSE_SSD",
            },
            diskSize: 80,
            diskCount: 1,
        },
    });
    
    import pulumi
    import pulumi_databricks as databricks
    
    smallest = databricks.get_node_type()
    smallest_nodes = databricks.InstancePool("smallest_nodes",
        instance_pool_name="Smallest Nodes",
        min_idle_instances=0,
        max_capacity=300,
        node_type_id=smallest.id,
        aws_attributes=databricks.InstancePoolAwsAttributesArgs(
            availability="ON_DEMAND",
            zone_id="us-east-1a",
            spot_bid_price_percent=100,
        ),
        idle_instance_autotermination_minutes=10,
        disk_spec=databricks.InstancePoolDiskSpecArgs(
            disk_type=databricks.InstancePoolDiskSpecDiskTypeArgs(
                ebs_volume_type="GENERAL_PURPOSE_SSD",
            ),
            disk_size=80,
            disk_count=1,
        ))
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		smallest, err := databricks.GetNodeType(ctx, nil, nil)
    		if err != nil {
    			return err
    		}
    		_, err = databricks.NewInstancePool(ctx, "smallest_nodes", &databricks.InstancePoolArgs{
    			InstancePoolName: pulumi.String("Smallest Nodes"),
    			MinIdleInstances: pulumi.Int(0),
    			MaxCapacity:      pulumi.Int(300),
    			NodeTypeId:       pulumi.String(smallest.Id),
    			AwsAttributes: &databricks.InstancePoolAwsAttributesArgs{
    				Availability:        pulumi.String("ON_DEMAND"),
    				ZoneId:              pulumi.String("us-east-1a"),
    				SpotBidPricePercent: pulumi.Int(100),
    			},
    			IdleInstanceAutoterminationMinutes: pulumi.Int(10),
    			DiskSpec: &databricks.InstancePoolDiskSpecArgs{
    				DiskType: &databricks.InstancePoolDiskSpecDiskTypeArgs{
    					EbsVolumeType: pulumi.String("GENERAL_PURPOSE_SSD"),
    				},
    				DiskSize:  pulumi.Int(80),
    				DiskCount: pulumi.Int(1),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Databricks = Pulumi.Databricks;
    
    return await Deployment.RunAsync(() => 
    {
        var smallest = Databricks.GetNodeType.Invoke();
    
        var smallestNodes = new Databricks.InstancePool("smallest_nodes", new()
        {
            InstancePoolName = "Smallest Nodes",
            MinIdleInstances = 0,
            MaxCapacity = 300,
            NodeTypeId = smallest.Apply(getNodeTypeResult => getNodeTypeResult.Id),
            AwsAttributes = new Databricks.Inputs.InstancePoolAwsAttributesArgs
            {
                Availability = "ON_DEMAND",
                ZoneId = "us-east-1a",
                SpotBidPricePercent = 100,
            },
            IdleInstanceAutoterminationMinutes = 10,
            DiskSpec = new Databricks.Inputs.InstancePoolDiskSpecArgs
            {
                DiskType = new Databricks.Inputs.InstancePoolDiskSpecDiskTypeArgs
                {
                    EbsVolumeType = "GENERAL_PURPOSE_SSD",
                },
                DiskSize = 80,
                DiskCount = 1,
            },
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.databricks.DatabricksFunctions;
    import com.pulumi.databricks.inputs.GetNodeTypeArgs;
    import com.pulumi.databricks.InstancePool;
    import com.pulumi.databricks.InstancePoolArgs;
    import com.pulumi.databricks.inputs.InstancePoolAwsAttributesArgs;
    import com.pulumi.databricks.inputs.InstancePoolDiskSpecArgs;
    import com.pulumi.databricks.inputs.InstancePoolDiskSpecDiskTypeArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            final var smallest = DatabricksFunctions.getNodeType();
    
            var smallestNodes = new InstancePool("smallestNodes", InstancePoolArgs.builder()        
                .instancePoolName("Smallest Nodes")
                .minIdleInstances(0)
                .maxCapacity(300)
                .nodeTypeId(smallest.applyValue(getNodeTypeResult -> getNodeTypeResult.id()))
                .awsAttributes(InstancePoolAwsAttributesArgs.builder()
                    .availability("ON_DEMAND")
                    .zoneId("us-east-1a")
                    .spotBidPricePercent("100")
                    .build())
                .idleInstanceAutoterminationMinutes(10)
                .diskSpec(InstancePoolDiskSpecArgs.builder()
                    .diskType(InstancePoolDiskSpecDiskTypeArgs.builder()
                        .ebsVolumeType("GENERAL_PURPOSE_SSD")
                        .build())
                    .diskSize(80)
                    .diskCount(1)
                    .build())
                .build());
    
        }
    }
    
    resources:
      smallestNodes:
        type: databricks:InstancePool
        name: smallest_nodes
        properties:
          instancePoolName: Smallest Nodes
          minIdleInstances: 0
          maxCapacity: 300
          nodeTypeId: ${smallest.id}
          awsAttributes:
            availability: ON_DEMAND
            zoneId: us-east-1a
            spotBidPricePercent: 100
          idleInstanceAutoterminationMinutes: 10
          diskSpec:
            diskType:
              ebsVolumeType: GENERAL_PURPOSE_SSD
            diskSize: 80
            diskCount: 1
    variables:
      smallest:
        fn::invoke:
          Function: databricks:getNodeType
          Arguments: {}
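
    Per the note above, node types and disk specifications differ between clouds. An equivalent pool on Azure would swap the AWS-specific blocks for their Azure counterparts; the following is a minimal TypeScript sketch, with illustrative availability and volume-type values.

    import * as databricks from "@pulumi/databricks";

    // On Azure, getNodeType resolves to an Azure VM size, and the disk spec
    // uses azureDiskVolumeType instead of ebsVolumeType.
    const smallestAzure = databricks.getNodeType({});
    const azurePool = new databricks.InstancePool("azure_pool", {
        instancePoolName: "Smallest Azure Nodes",
        minIdleInstances: 0,
        maxCapacity: 300,
        nodeTypeId: smallestAzure.then(t => t.id),
        azureAttributes: {
            availability: "ON_DEMAND_AZURE", // SPOT_AZURE is another common value
        },
        idleInstanceAutoterminationMinutes: 10,
        diskSpec: {
            diskType: {
                azureDiskVolumeType: "PREMIUM_LRS",
            },
            diskSize: 80,
            diskCount: 1,
        },
    });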
    

    Access Control

    • databricks.Group and databricks.User can control which groups or individual users can create instance pools.
    • databricks.Permissions can control which groups or individual users can Manage or Attach to individual instance pools.
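
    For example, granting a group the ability to attach clusters to a pool could look like the following minimal TypeScript sketch, assuming the smallestNodes pool from the example usage above and a hypothetical "Data Engineering" group.

    import * as databricks from "@pulumi/databricks";

    // Hypothetical group that should be allowed to use the pool.
    const dataEng = new databricks.Group("data_eng", {displayName: "Data Engineering"});

    // Grant the group CAN_ATTACH_TO on the instance pool created above.
    const poolUsage = new databricks.Permissions("pool_usage", {
        instancePoolId: smallestNodes.id,
        accessControls: [{
            groupName: dataEng.displayName,
            permissionLevel: "CAN_ATTACH_TO",
        }],
    });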

    Create InstancePool Resource

    Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.

    Constructor syntax

    new InstancePool(name: string, args: InstancePoolArgs, opts?: CustomResourceOptions);
    @overload
    def InstancePool(resource_name: str,
                     args: InstancePoolArgs,
                     opts: Optional[ResourceOptions] = None)
    
    @overload
    def InstancePool(resource_name: str,
                     opts: Optional[ResourceOptions] = None,
                     instance_pool_name: Optional[str] = None,
                     idle_instance_autotermination_minutes: Optional[int] = None,
                     instance_pool_id: Optional[str] = None,
                     disk_spec: Optional[InstancePoolDiskSpecArgs] = None,
                     enable_elastic_disk: Optional[bool] = None,
                     gcp_attributes: Optional[InstancePoolGcpAttributesArgs] = None,
                     azure_attributes: Optional[InstancePoolAzureAttributesArgs] = None,
                     instance_pool_fleet_attributes: Optional[InstancePoolInstancePoolFleetAttributesArgs] = None,
                     custom_tags: Optional[Mapping[str, Any]] = None,
                     aws_attributes: Optional[InstancePoolAwsAttributesArgs] = None,
                     max_capacity: Optional[int] = None,
                     min_idle_instances: Optional[int] = None,
                     node_type_id: Optional[str] = None,
                     preloaded_docker_images: Optional[Sequence[InstancePoolPreloadedDockerImageArgs]] = None,
                     preloaded_spark_versions: Optional[Sequence[str]] = None)
    func NewInstancePool(ctx *Context, name string, args InstancePoolArgs, opts ...ResourceOption) (*InstancePool, error)
    public InstancePool(string name, InstancePoolArgs args, CustomResourceOptions? opts = null)
    public InstancePool(String name, InstancePoolArgs args)
    public InstancePool(String name, InstancePoolArgs args, CustomResourceOptions options)
    
    type: databricks:InstancePool
    properties: # The arguments to resource properties.
    options: # Bag of options to control resource's behavior.
    
    

    Parameters

    name string
    The unique name of the resource.
    args InstancePoolArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    resource_name str
    The unique name of the resource.
    args InstancePoolArgs
    The arguments to resource properties.
    opts ResourceOptions
    Bag of options to control resource's behavior.
    ctx Context
    Context object for the current deployment.
    name string
    The unique name of the resource.
    args InstancePoolArgs
    The arguments to resource properties.
    opts ResourceOption
    Bag of options to control resource's behavior.
    name string
    The unique name of the resource.
    args InstancePoolArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    name String
    The unique name of the resource.
    args InstancePoolArgs
    The arguments to resource properties.
    options CustomResourceOptions
    Bag of options to control resource's behavior.

    Example

    The following reference example uses placeholder values for all input properties.

    var instancePoolResource = new Databricks.InstancePool("instancePoolResource", new()
    {
        InstancePoolName = "string",
        IdleInstanceAutoterminationMinutes = 0,
        InstancePoolId = "string",
        DiskSpec = new Databricks.Inputs.InstancePoolDiskSpecArgs
        {
            DiskCount = 0,
            DiskSize = 0,
            DiskType = new Databricks.Inputs.InstancePoolDiskSpecDiskTypeArgs
            {
                AzureDiskVolumeType = "string",
                EbsVolumeType = "string",
            },
        },
        EnableElasticDisk = false,
        GcpAttributes = new Databricks.Inputs.InstancePoolGcpAttributesArgs
        {
            GcpAvailability = "string",
            LocalSsdCount = 0,
            ZoneId = "string",
        },
        AzureAttributes = new Databricks.Inputs.InstancePoolAzureAttributesArgs
        {
            Availability = "string",
            SpotBidMaxPrice = 0,
        },
        InstancePoolFleetAttributes = new Databricks.Inputs.InstancePoolInstancePoolFleetAttributesArgs
        {
            LaunchTemplateOverrides = new[]
            {
                new Databricks.Inputs.InstancePoolInstancePoolFleetAttributesLaunchTemplateOverrideArgs
                {
                    AvailabilityZone = "string",
                    InstanceType = "string",
                },
            },
            FleetOnDemandOption = new Databricks.Inputs.InstancePoolInstancePoolFleetAttributesFleetOnDemandOptionArgs
            {
                AllocationStrategy = "string",
                InstancePoolsToUseCount = 0,
            },
            FleetSpotOption = new Databricks.Inputs.InstancePoolInstancePoolFleetAttributesFleetSpotOptionArgs
            {
                AllocationStrategy = "string",
                InstancePoolsToUseCount = 0,
            },
        },
        CustomTags = 
        {
            { "string", "any" },
        },
        AwsAttributes = new Databricks.Inputs.InstancePoolAwsAttributesArgs
        {
            Availability = "string",
            SpotBidPricePercent = 0,
            ZoneId = "string",
        },
        MaxCapacity = 0,
        MinIdleInstances = 0,
        NodeTypeId = "string",
        PreloadedDockerImages = new[]
        {
            new Databricks.Inputs.InstancePoolPreloadedDockerImageArgs
            {
                Url = "string",
                BasicAuth = new Databricks.Inputs.InstancePoolPreloadedDockerImageBasicAuthArgs
                {
                    Password = "string",
                    Username = "string",
                },
            },
        },
        PreloadedSparkVersions = new[]
        {
            "string",
        },
    });
    
    example, err := databricks.NewInstancePool(ctx, "instancePoolResource", &databricks.InstancePoolArgs{
    	InstancePoolName:                   pulumi.String("string"),
    	IdleInstanceAutoterminationMinutes: pulumi.Int(0),
    	InstancePoolId:                     pulumi.String("string"),
    	DiskSpec: &databricks.InstancePoolDiskSpecArgs{
    		DiskCount: pulumi.Int(0),
    		DiskSize:  pulumi.Int(0),
    		DiskType: &databricks.InstancePoolDiskSpecDiskTypeArgs{
    			AzureDiskVolumeType: pulumi.String("string"),
    			EbsVolumeType:       pulumi.String("string"),
    		},
    	},
    	EnableElasticDisk: pulumi.Bool(false),
    	GcpAttributes: &databricks.InstancePoolGcpAttributesArgs{
    		GcpAvailability: pulumi.String("string"),
    		LocalSsdCount:   pulumi.Int(0),
    		ZoneId:          pulumi.String("string"),
    	},
    	AzureAttributes: &databricks.InstancePoolAzureAttributesArgs{
    		Availability:    pulumi.String("string"),
    		SpotBidMaxPrice: pulumi.Float64(0),
    	},
    	InstancePoolFleetAttributes: &databricks.InstancePoolInstancePoolFleetAttributesArgs{
    		LaunchTemplateOverrides: databricks.InstancePoolInstancePoolFleetAttributesLaunchTemplateOverrideArray{
    			&databricks.InstancePoolInstancePoolFleetAttributesLaunchTemplateOverrideArgs{
    				AvailabilityZone: pulumi.String("string"),
    				InstanceType:     pulumi.String("string"),
    			},
    		},
    		FleetOnDemandOption: &databricks.InstancePoolInstancePoolFleetAttributesFleetOnDemandOptionArgs{
    			AllocationStrategy:      pulumi.String("string"),
    			InstancePoolsToUseCount: pulumi.Int(0),
    		},
    		FleetSpotOption: &databricks.InstancePoolInstancePoolFleetAttributesFleetSpotOptionArgs{
    			AllocationStrategy:      pulumi.String("string"),
    			InstancePoolsToUseCount: pulumi.Int(0),
    		},
    	},
    	CustomTags: pulumi.Map{
    		"string": pulumi.Any("any"),
    	},
    	AwsAttributes: &databricks.InstancePoolAwsAttributesArgs{
    		Availability:        pulumi.String("string"),
    		SpotBidPricePercent: pulumi.Int(0),
    		ZoneId:              pulumi.String("string"),
    	},
    	MaxCapacity:      pulumi.Int(0),
    	MinIdleInstances: pulumi.Int(0),
    	NodeTypeId:       pulumi.String("string"),
    	PreloadedDockerImages: databricks.InstancePoolPreloadedDockerImageArray{
    		&databricks.InstancePoolPreloadedDockerImageArgs{
    			Url: pulumi.String("string"),
    			BasicAuth: &databricks.InstancePoolPreloadedDockerImageBasicAuthArgs{
    				Password: pulumi.String("string"),
    				Username: pulumi.String("string"),
    			},
    		},
    	},
    	PreloadedSparkVersions: pulumi.StringArray{
    		pulumi.String("string"),
    	},
    })
    
    var instancePoolResource = new InstancePool("instancePoolResource", InstancePoolArgs.builder()        
        .instancePoolName("string")
        .idleInstanceAutoterminationMinutes(0)
        .instancePoolId("string")
        .diskSpec(InstancePoolDiskSpecArgs.builder()
            .diskCount(0)
            .diskSize(0)
            .diskType(InstancePoolDiskSpecDiskTypeArgs.builder()
                .azureDiskVolumeType("string")
                .ebsVolumeType("string")
                .build())
            .build())
        .enableElasticDisk(false)
        .gcpAttributes(InstancePoolGcpAttributesArgs.builder()
            .gcpAvailability("string")
            .localSsdCount(0)
            .zoneId("string")
            .build())
        .azureAttributes(InstancePoolAzureAttributesArgs.builder()
            .availability("string")
            .spotBidMaxPrice(0)
            .build())
        .instancePoolFleetAttributes(InstancePoolInstancePoolFleetAttributesArgs.builder()
            .launchTemplateOverrides(InstancePoolInstancePoolFleetAttributesLaunchTemplateOverrideArgs.builder()
                .availabilityZone("string")
                .instanceType("string")
                .build())
            .fleetOnDemandOption(InstancePoolInstancePoolFleetAttributesFleetOnDemandOptionArgs.builder()
                .allocationStrategy("string")
                .instancePoolsToUseCount(0)
                .build())
            .fleetSpotOption(InstancePoolInstancePoolFleetAttributesFleetSpotOptionArgs.builder()
                .allocationStrategy("string")
                .instancePoolsToUseCount(0)
                .build())
            .build())
        .customTags(Map.of("string", "any"))
        .awsAttributes(InstancePoolAwsAttributesArgs.builder()
            .availability("string")
            .spotBidPricePercent(0)
            .zoneId("string")
            .build())
        .maxCapacity(0)
        .minIdleInstances(0)
        .nodeTypeId("string")
        .preloadedDockerImages(InstancePoolPreloadedDockerImageArgs.builder()
            .url("string")
            .basicAuth(InstancePoolPreloadedDockerImageBasicAuthArgs.builder()
                .password("string")
                .username("string")
                .build())
            .build())
        .preloadedSparkVersions("string")
        .build());
    
    instance_pool_resource = databricks.InstancePool("instancePoolResource",
        instance_pool_name="string",
        idle_instance_autotermination_minutes=0,
        instance_pool_id="string",
        disk_spec=databricks.InstancePoolDiskSpecArgs(
            disk_count=0,
            disk_size=0,
            disk_type=databricks.InstancePoolDiskSpecDiskTypeArgs(
                azure_disk_volume_type="string",
                ebs_volume_type="string",
            ),
        ),
        enable_elastic_disk=False,
        gcp_attributes=databricks.InstancePoolGcpAttributesArgs(
            gcp_availability="string",
            local_ssd_count=0,
            zone_id="string",
        ),
        azure_attributes=databricks.InstancePoolAzureAttributesArgs(
            availability="string",
            spot_bid_max_price=0,
        ),
        instance_pool_fleet_attributes=databricks.InstancePoolInstancePoolFleetAttributesArgs(
            launch_template_overrides=[databricks.InstancePoolInstancePoolFleetAttributesLaunchTemplateOverrideArgs(
                availability_zone="string",
                instance_type="string",
            )],
            fleet_on_demand_option=databricks.InstancePoolInstancePoolFleetAttributesFleetOnDemandOptionArgs(
                allocation_strategy="string",
                instance_pools_to_use_count=0,
            ),
            fleet_spot_option=databricks.InstancePoolInstancePoolFleetAttributesFleetSpotOptionArgs(
                allocation_strategy="string",
                instance_pools_to_use_count=0,
            ),
        ),
        custom_tags={
            "string": "any",
        },
        aws_attributes=databricks.InstancePoolAwsAttributesArgs(
            availability="string",
            spot_bid_price_percent=0,
            zone_id="string",
        ),
        max_capacity=0,
        min_idle_instances=0,
        node_type_id="string",
        preloaded_docker_images=[databricks.InstancePoolPreloadedDockerImageArgs(
            url="string",
            basic_auth=databricks.InstancePoolPreloadedDockerImageBasicAuthArgs(
                password="string",
                username="string",
            ),
        )],
        preloaded_spark_versions=["string"])
    
    const instancePoolResource = new databricks.InstancePool("instancePoolResource", {
        instancePoolName: "string",
        idleInstanceAutoterminationMinutes: 0,
        instancePoolId: "string",
        diskSpec: {
            diskCount: 0,
            diskSize: 0,
            diskType: {
                azureDiskVolumeType: "string",
                ebsVolumeType: "string",
            },
        },
        enableElasticDisk: false,
        gcpAttributes: {
            gcpAvailability: "string",
            localSsdCount: 0,
            zoneId: "string",
        },
        azureAttributes: {
            availability: "string",
            spotBidMaxPrice: 0,
        },
        instancePoolFleetAttributes: {
            launchTemplateOverrides: [{
                availabilityZone: "string",
                instanceType: "string",
            }],
            fleetOnDemandOption: {
                allocationStrategy: "string",
                instancePoolsToUseCount: 0,
            },
            fleetSpotOption: {
                allocationStrategy: "string",
                instancePoolsToUseCount: 0,
            },
        },
        customTags: {
            string: "any",
        },
        awsAttributes: {
            availability: "string",
            spotBidPricePercent: 0,
            zoneId: "string",
        },
        maxCapacity: 0,
        minIdleInstances: 0,
        nodeTypeId: "string",
        preloadedDockerImages: [{
            url: "string",
            basicAuth: {
                password: "string",
                username: "string",
            },
        }],
        preloadedSparkVersions: ["string"],
    });
    
    type: databricks:InstancePool
    properties:
        awsAttributes:
            availability: string
            spotBidPricePercent: 0
            zoneId: string
        azureAttributes:
            availability: string
            spotBidMaxPrice: 0
        customTags:
            string: any
        diskSpec:
            diskCount: 0
            diskSize: 0
            diskType:
                azureDiskVolumeType: string
                ebsVolumeType: string
        enableElasticDisk: false
        gcpAttributes:
            gcpAvailability: string
            localSsdCount: 0
            zoneId: string
        idleInstanceAutoterminationMinutes: 0
        instancePoolFleetAttributes:
            fleetOnDemandOption:
                allocationStrategy: string
                instancePoolsToUseCount: 0
            fleetSpotOption:
                allocationStrategy: string
                instancePoolsToUseCount: 0
            launchTemplateOverrides:
                - availabilityZone: string
                  instanceType: string
        instancePoolId: string
        instancePoolName: string
        maxCapacity: 0
        minIdleInstances: 0
        nodeTypeId: string
        preloadedDockerImages:
            - basicAuth:
                password: string
                username: string
              url: string
        preloadedSparkVersions:
            - string
    

    InstancePool Resource Properties

    To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

    Inputs

    The InstancePool resource accepts the following input properties:

    IdleInstanceAutoterminationMinutes int
    (Integer) The number of minutes that idle instances in excess of the min_idle_instances are maintained by the pool before being terminated. If not specified, excess idle instances are terminated automatically after a default timeout period. If specified, the time must be between 0 and 10000 minutes. If you specify 0, excess idle instances are removed as soon as possible.
    InstancePoolName string
    (String) The name of the instance pool. This is required for create and edit operations. It must be unique, non-empty, and less than 100 characters.
    AwsAttributes InstancePoolAwsAttributes
    AzureAttributes InstancePoolAzureAttributes
    CustomTags Dictionary<string, object>
    (Map) Additional tags for instance pool resources. Databricks tags all pool resources (e.g. AWS & Azure instances and Disk volumes). The tags of the instance pool will propagate to the clusters using the pool (see the official documentation). Attempting to set the same tags in both cluster and instance pool will raise an error. Databricks allows at most 43 custom tags.
    DiskSpec InstancePoolDiskSpec
    EnableElasticDisk bool
    (Bool) Autoscaling Local Storage: when enabled, the instances in the pool dynamically acquire additional disk space when they are running low on disk space.
    GcpAttributes InstancePoolGcpAttributes
    InstancePoolFleetAttributes InstancePoolInstancePoolFleetAttributes
    InstancePoolId string
    MaxCapacity int
    (Integer) The maximum number of instances the pool can contain, including both idle instances and ones in use by clusters. Once the maximum capacity is reached, you cannot create new clusters from the pool and existing clusters cannot autoscale up until some instances are made idle in the pool via cluster termination or down-scaling. There is no default limit, but as a best practice, this should be set based on anticipated usage.
    MinIdleInstances int
    (Integer) The minimum number of idle instances maintained by the pool. This is in addition to any instances in use by active clusters.
    NodeTypeId string
    (String) The node type for the instances in the pool. All clusters attached to the pool inherit this node type and the pool’s idle instances are allocated based on this type. You can retrieve a list of available node types by using the List Node Types API call.
    PreloadedDockerImages List<InstancePoolPreloadedDockerImage>
    PreloadedSparkVersions List<string>
    (List) A list with at most one runtime version the pool installs on each instance. Pool clusters that use a preloaded runtime version start faster as they do not have to wait for the image to download. You can retrieve them via databricks.getSparkVersion data source or via Runtime Versions API call.
    IdleInstanceAutoterminationMinutes int
    (Integer) The number of minutes that idle instances in excess of the min_idle_instances are maintained by the pool before being terminated. If not specified, excess idle instances are terminated automatically after a default timeout period. If specified, the time must be between 0 and 10000 minutes. If you specify 0, excess idle instances are removed as soon as possible.
    InstancePoolName string
    (String) The name of the instance pool. This is required for create and edit operations. It must be unique, non-empty, and less than 100 characters.
    AwsAttributes InstancePoolAwsAttributesArgs
    AzureAttributes InstancePoolAzureAttributesArgs
    CustomTags map[string]interface{}
    (Map) Additional tags for instance pool resources. Databricks tags all pool resources (e.g. AWS & Azure instances and Disk volumes). The tags of the instance pool will propagate to the clusters using the pool (see the official documentation). Attempting to set the same tags in both cluster and instance pool will raise an error. Databricks allows at most 43 custom tags.
    DiskSpec InstancePoolDiskSpecArgs
    EnableElasticDisk bool
    (Bool) Autoscaling Local Storage: when enabled, the instances in the pool dynamically acquire additional disk space when they are running low on disk space.
    GcpAttributes InstancePoolGcpAttributesArgs
    InstancePoolFleetAttributes InstancePoolInstancePoolFleetAttributesArgs
    InstancePoolId string
    MaxCapacity int
    (Integer) The maximum number of instances the pool can contain, including both idle instances and ones in use by clusters. Once the maximum capacity is reached, you cannot create new clusters from the pool and existing clusters cannot autoscale up until some instances are made idle in the pool via cluster termination or down-scaling. There is no default limit, but as a best practice, this should be set based on anticipated usage.
    MinIdleInstances int
    (Integer) The minimum number of idle instances maintained by the pool. This is in addition to any instances in use by active clusters.
    NodeTypeId string
    (String) The node type for the instances in the pool. All clusters attached to the pool inherit this node type and the pool’s idle instances are allocated based on this type. You can retrieve a list of available node types by using the List Node Types API call.
    PreloadedDockerImages []InstancePoolPreloadedDockerImageArgs
    PreloadedSparkVersions []string
    (List) A list with at most one runtime version the pool installs on each instance. Pool clusters that use a preloaded runtime version start faster as they do not have to wait for the image to download. You can retrieve them via databricks.getSparkVersion data source or via Runtime Versions API call.
    idleInstanceAutoterminationMinutes Integer
    (Integer) The number of minutes that idle instances in excess of the min_idle_instances are maintained by the pool before being terminated. If not specified, excess idle instances are terminated automatically after a default timeout period. If specified, the time must be between 0 and 10000 minutes. If you specify 0, excess idle instances are removed as soon as possible.
    instancePoolName String
    (String) The name of the instance pool. This is required for create and edit operations. It must be unique, non-empty, and less than 100 characters.
    awsAttributes InstancePoolAwsAttributes
    azureAttributes InstancePoolAzureAttributes
    customTags Map<String,Object>
    (Map) Additional tags for instance pool resources. Databricks tags all pool resources (e.g. AWS & Azure instances and Disk volumes). The tags of the instance pool will propagate to the clusters using the pool (see the official documentation). Attempting to set the same tags in both cluster and instance pool will raise an error. Databricks allows at most 43 custom tags.
    diskSpec InstancePoolDiskSpec
    enableElasticDisk Boolean
    (Bool) Autoscaling Local Storage: when enabled, the instances in the pool dynamically acquire additional disk space when they are running low on disk space.
    gcpAttributes InstancePoolGcpAttributes
    instancePoolFleetAttributes InstancePoolInstancePoolFleetAttributes
    instancePoolId String
    maxCapacity Integer
    (Integer) The maximum number of instances the pool can contain, including both idle instances and ones in use by clusters. Once the maximum capacity is reached, you cannot create new clusters from the pool and existing clusters cannot autoscale up until some instances are made idle in the pool via cluster termination or down-scaling. There is no default limit, but as a best practice, this should be set based on anticipated usage.
    minIdleInstances Integer
    (Integer) The minimum number of idle instances maintained by the pool. This is in addition to any instances in use by active clusters.
    nodeTypeId String
    (String) The node type for the instances in the pool. All clusters attached to the pool inherit this node type and the pool’s idle instances are allocated based on this type. You can retrieve a list of available node types by using the List Node Types API call.
    preloadedDockerImages List<InstancePoolPreloadedDockerImage>
    preloadedSparkVersions List<String>
    (List) A list with at most one runtime version the pool installs on each instance. Pool clusters that use a preloaded runtime version start faster as they do not have to wait for the image to download. You can retrieve them via databricks.getSparkVersion data source or via Runtime Versions API call.
    idleInstanceAutoterminationMinutes number
    (Integer) The number of minutes that idle instances in excess of the min_idle_instances are maintained by the pool before being terminated. If not specified, excess idle instances are terminated automatically after a default timeout period. If specified, the time must be between 0 and 10000 minutes. If you specify 0, excess idle instances are removed as soon as possible.
    instancePoolName string
    (String) The name of the instance pool. This is required for create and edit operations. It must be unique, non-empty, and less than 100 characters.
    awsAttributes InstancePoolAwsAttributes
    azureAttributes InstancePoolAzureAttributes
    customTags {[key: string]: any}
    (Map) Additional tags for instance pool resources. Databricks tags all pool resources (e.g. AWS & Azure instances and Disk volumes). The tags of the instance pool will propagate to the clusters using the pool (see the official documentation). Attempting to set the same tags in both cluster and instance pool will raise an error. Databricks allows at most 43 custom tags.
    diskSpec InstancePoolDiskSpec
    enableElasticDisk boolean
    (Bool) Autoscaling Local Storage: when enabled, the instances in the pool dynamically acquire additional disk space when they are running low on disk space.
    gcpAttributes InstancePoolGcpAttributes
    instancePoolFleetAttributes InstancePoolInstancePoolFleetAttributes
    instancePoolId string
    maxCapacity number
    (Integer) The maximum number of instances the pool can contain, including both idle instances and ones in use by clusters. Once the maximum capacity is reached, you cannot create new clusters from the pool and existing clusters cannot autoscale up until some instances are made idle in the pool via cluster termination or down-scaling. There is no default limit, but as a best practice, this should be set based on anticipated usage.
    minIdleInstances number
    (Integer) The minimum number of idle instances maintained by the pool. This is in addition to any instances in use by active clusters.
    nodeTypeId string
    (String) The node type for the instances in the pool. All clusters attached to the pool inherit this node type and the pool’s idle instances are allocated based on this type. You can retrieve a list of available node types by using the List Node Types API call.
    preloadedDockerImages InstancePoolPreloadedDockerImage[]
    preloadedSparkVersions string[]
    (List) A list with at most one runtime version the pool installs on each instance. Pool clusters that use a preloaded runtime version start faster as they do not have to wait for the image to download. You can retrieve them via databricks.getSparkVersion data source or via Runtime Versions API call.
    idle_instance_autotermination_minutes int
    (Integer) The number of minutes that idle instances in excess of the min_idle_instances are maintained by the pool before being terminated. If not specified, excess idle instances are terminated automatically after a default timeout period. If specified, the time must be between 0 and 10000 minutes. If you specify 0, excess idle instances are removed as soon as possible.
    instance_pool_name str
    (String) The name of the instance pool. This is required for create and edit operations. It must be unique, non-empty, and less than 100 characters.
    aws_attributes InstancePoolAwsAttributesArgs
    azure_attributes InstancePoolAzureAttributesArgs
    custom_tags Mapping[str, Any]
    (Map) Additional tags for instance pool resources. Databricks tags all pool resources (e.g. AWS & Azure instances and Disk volumes). The tags of the instance pool will propagate to the clusters using the pool (see the official documentation). Attempting to set the same tags in both cluster and instance pool will raise an error. Databricks allows at most 43 custom tags.
    disk_spec InstancePoolDiskSpecArgs
    enable_elastic_disk bool
    (Bool) Autoscaling Local Storage: when enabled, the instances in the pool dynamically acquire additional disk space when they are running low on disk space.
    gcp_attributes InstancePoolGcpAttributesArgs
    instance_pool_fleet_attributes InstancePoolInstancePoolFleetAttributesArgs
    instance_pool_id str
    max_capacity int
    (Integer) The maximum number of instances the pool can contain, including both idle instances and ones in use by clusters. Once the maximum capacity is reached, you cannot create new clusters from the pool and existing clusters cannot autoscale up until some instances are made idle in the pool via cluster termination or down-scaling. There is no default limit, but as a best practice, this should be set based on anticipated usage.
    min_idle_instances int
    (Integer) The minimum number of idle instances maintained by the pool. This is in addition to any instances in use by active clusters.
    node_type_id str
    (String) The node type for the instances in the pool. All clusters attached to the pool inherit this node type and the pool’s idle instances are allocated based on this type. You can retrieve a list of available node types by using the List Node Types API call.
    preloaded_docker_images Sequence[InstancePoolPreloadedDockerImageArgs]
    preloaded_spark_versions Sequence[str]
    (List) A list with at most one runtime version the pool installs on each instance. Pool clusters that use a preloaded runtime version start faster as they do not have to wait for the image to download. You can retrieve them via databricks.getSparkVersion data source or via Runtime Versions API call.
    idleInstanceAutoterminationMinutes Number
    (Integer) The number of minutes that idle instances in excess of the min_idle_instances are maintained by the pool before being terminated. If not specified, excess idle instances are terminated automatically after a default timeout period. If specified, the time must be between 0 and 10000 minutes. If you specify 0, excess idle instances are removed as soon as possible.
    instancePoolName String
    (String) The name of the instance pool. This is required for create and edit operations. It must be unique, non-empty, and less than 100 characters.
    awsAttributes Property Map
    azureAttributes Property Map
    customTags Map<Any>
    (Map) Additional tags for instance pool resources. Databricks tags all pool resources (e.g. AWS & Azure instances and Disk volumes). The tags of the instance pool will propagate to the clusters using the pool (see the official documentation). Attempting to set the same tags in both cluster and instance pool will raise an error. Databricks allows at most 43 custom tags.
    diskSpec Property Map
    enableElasticDisk Boolean
    (Bool) Autoscaling Local Storage: when enabled, the instances in the pool dynamically acquire additional disk space when they are running low on disk space.
    gcpAttributes Property Map
    instancePoolFleetAttributes Property Map
    instancePoolId String
    maxCapacity Number
    (Integer) The maximum number of instances the pool can contain, including both idle instances and ones in use by clusters. Once the maximum capacity is reached, you cannot create new clusters from the pool and existing clusters cannot autoscale up until some instances are made idle in the pool via cluster termination or down-scaling. There is no default limit, but as a best practice, this should be set based on anticipated usage.
    minIdleInstances Number
    (Integer) The minimum number of idle instances maintained by the pool. This is in addition to any instances in use by active clusters.
    nodeTypeId String
    (String) The node type for the instances in the pool. All clusters attached to the pool inherit this node type and the pool’s idle instances are allocated based on this type. You can retrieve a list of available node types by using the List Node Types API call.
    preloadedDockerImages List<Property Map>
    preloadedSparkVersions List<String>
    (List) A list with at most one runtime version the pool installs on each instance. Pool clusters that use a preloaded runtime version start faster as they do not have to wait for the image to download. You can retrieve them via databricks.getSparkVersion data source or via Runtime Versions API call.
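
    As the descriptions above note, preloaded_spark_versions can be populated from the databricks.getSparkVersion data source, and clusters attach to a pool through instance_pool_id, inheriting the pool’s node type. A minimal TypeScript sketch follows; the node type, tag, and worker count are illustrative.

    import * as databricks from "@pulumi/databricks";

    // Latest long-term-support runtime, preloaded on the pool so attached
    // clusters start faster.
    const ltsRuntime = databricks.getSparkVersion({longTermSupport: true});

    const taggedPool = new databricks.InstancePool("tagged_pool", {
        instancePoolName: "Tagged Pool",
        minIdleInstances: 1,
        maxCapacity: 50,
        nodeTypeId: "i3.xlarge", // illustrative AWS node type
        idleInstanceAutoterminationMinutes: 15,
        preloadedSparkVersions: [ltsRuntime.then(v => v.id)],
        customTags: {
            team: "data-platform", // propagates to clusters using the pool
        },
    });

    // A cluster attached to the pool inherits its node type, so node_type_id
    // is omitted here.
    const poolBacked = new databricks.Cluster("pool_backed", {
        clusterName: "pool-backed",
        sparkVersion: ltsRuntime.then(v => v.id),
        instancePoolId: taggedPool.id,
        numWorkers: 2,
    });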

    Outputs

    All input properties are implicitly available as output properties. Additionally, the InstancePool resource produces the following output properties:

    Id string
    The provider-assigned unique ID for this managed resource.
    Id string
    The provider-assigned unique ID for this managed resource.
    id String
    The provider-assigned unique ID for this managed resource.
    id string
    The provider-assigned unique ID for this managed resource.
    id str
    The provider-assigned unique ID for this managed resource.
    id String
    The provider-assigned unique ID for this managed resource.
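
    For example, the pool from the example usage above can export both an echoed input and the provider-assigned ID; a minimal TypeScript sketch, assuming the smallestNodes resource defined earlier.

    // Inputs are echoed back as outputs; id is assigned by the provider.
    export const poolName = smallestNodes.instancePoolName;
    export const poolId = smallestNodes.id;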

    Look up Existing InstancePool Resource

    Get an existing InstancePool resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

    public static get(name: string, id: Input<ID>, state?: InstancePoolState, opts?: CustomResourceOptions): InstancePool
    @staticmethod
    def get(resource_name: str,
            id: str,
            opts: Optional[ResourceOptions] = None,
            aws_attributes: Optional[InstancePoolAwsAttributesArgs] = None,
            azure_attributes: Optional[InstancePoolAzureAttributesArgs] = None,
            custom_tags: Optional[Mapping[str, Any]] = None,
            disk_spec: Optional[InstancePoolDiskSpecArgs] = None,
            enable_elastic_disk: Optional[bool] = None,
            gcp_attributes: Optional[InstancePoolGcpAttributesArgs] = None,
            idle_instance_autotermination_minutes: Optional[int] = None,
            instance_pool_fleet_attributes: Optional[InstancePoolInstancePoolFleetAttributesArgs] = None,
            instance_pool_id: Optional[str] = None,
            instance_pool_name: Optional[str] = None,
            max_capacity: Optional[int] = None,
            min_idle_instances: Optional[int] = None,
            node_type_id: Optional[str] = None,
            preloaded_docker_images: Optional[Sequence[InstancePoolPreloadedDockerImageArgs]] = None,
            preloaded_spark_versions: Optional[Sequence[str]] = None) -> InstancePool
    func GetInstancePool(ctx *Context, name string, id IDInput, state *InstancePoolState, opts ...ResourceOption) (*InstancePool, error)
    public static InstancePool Get(string name, Input<string> id, InstancePoolState? state, CustomResourceOptions? opts = null)
    public static InstancePool get(String name, Output<String> id, InstancePoolState state, CustomResourceOptions options)
    Resource lookup is not supported in YAML
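
    For example, an existing pool can be adopted into a TypeScript program by its instance pool ID; a minimal sketch (the ID value is illustrative).

    import * as databricks from "@pulumi/databricks";

    // Look up an existing pool by the instance pool ID shown in the workspace UI.
    const existing = databricks.InstancePool.get("existing-pool", "1234-567890-reef123-pool-abcdef12");

    // The looked-up resource exposes the same outputs as a created one.
    export const existingPoolName = existing.instancePoolName;
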
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    resource_name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    The following state arguments are supported:
    AwsAttributes InstancePoolAwsAttributes
    AzureAttributes InstancePoolAzureAttributes
    CustomTags Dictionary<string, object>
    (Map) Additional tags for instance pool resources. Databricks tags all pool resources (e.g. AWS & Azure instances and Disk volumes). The tags of the instance pool will propagate to the clusters using the pool (see the official documentation). Attempting to set the same tags in both cluster and instance pool will raise an error. Databricks allows at most 43 custom tags.
    DiskSpec InstancePoolDiskSpec
    EnableElasticDisk bool
    (Bool) Autoscaling Local Storage: when enabled, the instances in the pool dynamically acquire additional disk space when they are running low on disk space.
    GcpAttributes InstancePoolGcpAttributes
    IdleInstanceAutoterminationMinutes int
    (Integer) The number of minutes that idle instances in excess of the min_idle_instances are maintained by the pool before being terminated. If not specified, excess idle instances are terminated automatically after a default timeout period. If specified, the time must be between 0 and 10000 minutes. If you specify 0, excess idle instances are removed as soon as possible.
    InstancePoolFleetAttributes InstancePoolInstancePoolFleetAttributes
    InstancePoolId string
    InstancePoolName string
    (String) The name of the instance pool. This is required for create and edit operations. It must be unique, non-empty, and less than 100 characters.
    MaxCapacity int
    (Integer) The maximum number of instances the pool can contain, including both idle instances and ones in use by clusters. Once the maximum capacity is reached, you cannot create new clusters from the pool and existing clusters cannot autoscale up until some instances are made idle in the pool via cluster termination or down-scaling. There is no default limit, but as a best practice, this should be set based on anticipated usage.
    MinIdleInstances int
    (Integer) The minimum number of idle instances maintained by the pool. This is in addition to any instances in use by active clusters.
    NodeTypeId string
    (String) The node type for the instances in the pool. All clusters attached to the pool inherit this node type and the pool’s idle instances are allocated based on this type. You can retrieve a list of available node types by using the List Node Types API call.
    PreloadedDockerImages List<InstancePoolPreloadedDockerImage>
    PreloadedSparkVersions List<string>
    (List) A list with at most one runtime version the pool installs on each instance. Pool clusters that use a preloaded runtime version start faster as they do not have to wait for the image to download. You can retrieve them via databricks.getSparkVersion data source or via Runtime Versions API call.
    AwsAttributes InstancePoolAwsAttributesArgs
    AzureAttributes InstancePoolAzureAttributesArgs
    CustomTags map[string]interface{}
    (Map) Additional tags for instance pool resources. Databricks tags all pool resources (e.g. AWS & Azure instances and Disk volumes). The tags of the instance pool will propagate to the clusters using the pool (see the official documentation). Attempting to set the same tags in both cluster and instance pool will raise an error. Databricks allows at most 43 custom tags.
    DiskSpec InstancePoolDiskSpecArgs
    EnableElasticDisk bool
    (Bool) Autoscaling Local Storage: when enabled, the instances in the pool dynamically acquire additional disk space when they are running low on disk space.
    GcpAttributes InstancePoolGcpAttributesArgs
    IdleInstanceAutoterminationMinutes int
    (Integer) The number of minutes that idle instances in excess of the min_idle_instances are maintained by the pool before being terminated. If not specified, excess idle instances are terminated automatically after a default timeout period. If specified, the time must be between 0 and 10000 minutes. If you specify 0, excess idle instances are removed as soon as possible.
    InstancePoolFleetAttributes InstancePoolInstancePoolFleetAttributesArgs
    InstancePoolId string
    InstancePoolName string
    (String) The name of the instance pool. This is required for create and edit operations. It must be unique, non-empty, and less than 100 characters.
    MaxCapacity int
    (Integer) The maximum number of instances the pool can contain, including both idle instances and ones in use by clusters. Once the maximum capacity is reached, you cannot create new clusters from the pool and existing clusters cannot autoscale up until some instances are made idle in the pool via cluster termination or down-scaling. There is no default limit, but as a best practice, this should be set based on anticipated usage.
    MinIdleInstances int
    (Integer) The minimum number of idle instances maintained by the pool. This is in addition to any instances in use by active clusters.
    NodeTypeId string
    (String) The node type for the instances in the pool. All clusters attached to the pool inherit this node type and the pool’s idle instances are allocated based on this type. You can retrieve a list of available node types by using the List Node Types API call.
    PreloadedDockerImages []InstancePoolPreloadedDockerImageArgs
    PreloadedSparkVersions []string
    (List) A list with at most one runtime version the pool installs on each instance. Pool clusters that use a preloaded runtime version start faster as they do not have to wait for the image to download. You can retrieve them via databricks.getSparkVersion data source or via Runtime Versions API call.
    awsAttributes InstancePoolAwsAttributes
    azureAttributes InstancePoolAzureAttributes
    customTags Map<String,Object>
    (Map) Additional tags for instance pool resources. Databricks tags all pool resources (e.g. AWS & Azure instances and Disk volumes). The tags of the instance pool will propagate to the clusters using the pool (see the official documentation). Attempting to set the same tags in both cluster and instance pool will raise an error. Databricks allows at most 43 custom tags.
    diskSpec InstancePoolDiskSpec
    enableElasticDisk Boolean
    (Bool) Autoscaling Local Storage: when enabled, the instances in the pool dynamically acquire additional disk space when they are running low on disk space.
    gcpAttributes InstancePoolGcpAttributes
    idleInstanceAutoterminationMinutes Integer
    (Integer) The number of minutes that idle instances in excess of the min_idle_instances are maintained by the pool before being terminated. If not specified, excess idle instances are terminated automatically after a default timeout period. If specified, the time must be between 0 and 10000 minutes. If you specify 0, excess idle instances are removed as soon as possible.
    instancePoolFleetAttributes InstancePoolInstancePoolFleetAttributes
    instancePoolId String
    instancePoolName String
    (String) The name of the instance pool. This is required for create and edit operations. It must be unique, non-empty, and less than 100 characters.
    maxCapacity Integer
    (Integer) The maximum number of instances the pool can contain, including both idle instances and ones in use by clusters. Once the maximum capacity is reached, you cannot create new clusters from the pool and existing clusters cannot autoscale up until some instances are made idle in the pool via cluster termination or down-scaling. There is no default limit, but as a best practice, this should be set based on anticipated usage.
    minIdleInstances Integer
    (Integer) The minimum number of idle instances maintained by the pool. This is in addition to any instances in use by active clusters.
    nodeTypeId String
    (String) The node type for the instances in the pool. All clusters attached to the pool inherit this node type and the pool’s idle instances are allocated based on this type. You can retrieve a list of available node types by using the List Node Types API call.
    preloadedDockerImages List<InstancePoolPreloadedDockerImage>
    preloadedSparkVersions List<String>
    (List) A list with at most one runtime version the pool installs on each instance. Pool clusters that use a preloaded runtime version start faster as they do not have to wait for the image to download. You can retrieve them via databricks.getSparkVersion data source or via Runtime Versions API call.
    awsAttributes InstancePoolAwsAttributes
    azureAttributes InstancePoolAzureAttributes
    customTags {[key: string]: any}
    (Map) Additional tags for instance pool resources. Databricks tags all pool resources (e.g. AWS & Azure instances and Disk volumes). The tags of the instance pool will propagate to the clusters using the pool (see the official documentation). Attempting to set the same tags in both cluster and instance pool will raise an error. Databricks allows at most 43 custom tags.
    diskSpec InstancePoolDiskSpec
    enableElasticDisk boolean
    (Bool) Autoscaling Local Storage: when enabled, the instances in the pool dynamically acquire additional disk space when they are running low on disk space.
    gcpAttributes InstancePoolGcpAttributes
    idleInstanceAutoterminationMinutes number
    (Integer) The number of minutes that idle instances in excess of the min_idle_instances are maintained by the pool before being terminated. If not specified, excess idle instances are terminated automatically after a default timeout period. If specified, the time must be between 0 and 10000 minutes. If you specify 0, excess idle instances are removed as soon as possible.
    instancePoolFleetAttributes InstancePoolInstancePoolFleetAttributes
    instancePoolId string
    instancePoolName string
    (String) The name of the instance pool. This is required for create and edit operations. It must be unique, non-empty, and less than 100 characters.
    maxCapacity number
    (Integer) The maximum number of instances the pool can contain, including both idle instances and ones in use by clusters. Once the maximum capacity is reached, you cannot create new clusters from the pool and existing clusters cannot autoscale up until some instances are made idle in the pool via cluster termination or down-scaling. There is no default limit, but as a best practice, this should be set based on anticipated usage.
    minIdleInstances number
    (Integer) The minimum number of idle instances maintained by the pool. This is in addition to any instances in use by active clusters.
    nodeTypeId string
    (String) The node type for the instances in the pool. All clusters attached to the pool inherit this node type and the pool’s idle instances are allocated based on this type. You can retrieve a list of available node types by using the List Node Types API call.
    preloadedDockerImages InstancePoolPreloadedDockerImage[]
    preloadedSparkVersions string[]
    (List) A list with at most one runtime version the pool installs on each instance. Pool clusters that use a preloaded runtime version start faster as they do not have to wait for the image to download. You can retrieve them via databricks.getSparkVersion data source or via Runtime Versions API call.
    aws_attributes InstancePoolAwsAttributesArgs
    azure_attributes InstancePoolAzureAttributesArgs
    custom_tags Mapping[str, Any]
    (Map) Additional tags for instance pool resources. Databricks tags all pool resources (e.g. AWS & Azure instances and Disk volumes). The tags of the instance pool will propagate to the clusters using the pool (see the official documentation). Attempting to set the same tags in both cluster and instance pool will raise an error. Databricks allows at most 43 custom tags.
    disk_spec InstancePoolDiskSpecArgs
    enable_elastic_disk bool
    (Bool) Autoscaling Local Storage: when enabled, the instances in the pool dynamically acquire additional disk space when they are running low on disk space.
    gcp_attributes InstancePoolGcpAttributesArgs
    idle_instance_autotermination_minutes int
    (Integer) The number of minutes that idle instances in excess of the min_idle_instances are maintained by the pool before being terminated. If not specified, excess idle instances are terminated automatically after a default timeout period. If specified, the time must be between 0 and 10000 minutes. If you specify 0, excess idle instances are removed as soon as possible.
    instance_pool_fleet_attributes InstancePoolInstancePoolFleetAttributesArgs
    instance_pool_id str
    instance_pool_name str
    (String) The name of the instance pool. This is required for create and edit operations. It must be unique, non-empty, and less than 100 characters.
    max_capacity int
    (Integer) The maximum number of instances the pool can contain, including both idle instances and ones in use by clusters. Once the maximum capacity is reached, you cannot create new clusters from the pool and existing clusters cannot autoscale up until some instances are made idle in the pool via cluster termination or down-scaling. There is no default limit, but as a best practice, this should be set based on anticipated usage.
    min_idle_instances int
    (Integer) The minimum number of idle instances maintained by the pool. This is in addition to any instances in use by active clusters.
    node_type_id str
    (String) The node type for the instances in the pool. All clusters attached to the pool inherit this node type and the pool’s idle instances are allocated based on this type. You can retrieve a list of available node types by using the List Node Types API call.
    preloaded_docker_images Sequence[InstancePoolPreloadedDockerImageArgs]
    preloaded_spark_versions Sequence[str]
    (List) A list with at most one runtime version the pool installs on each instance. Pool clusters that use a preloaded runtime version start faster as they do not have to wait for the image to download. You can retrieve them via databricks.getSparkVersion data source or via Runtime Versions API call.
    awsAttributes Property Map
    azureAttributes Property Map
    customTags Map<Any>
    (Map) Additional tags for instance pool resources. Databricks tags all pool resources (e.g. AWS & Azure instances and Disk volumes). The tags of the instance pool will propagate to the clusters using the pool (see the official documentation). Attempting to set the same tags in both cluster and instance pool will raise an error. Databricks allows at most 43 custom tags.
    diskSpec Property Map
    enableElasticDisk Boolean
    (Bool) Autoscaling Local Storage: when enabled, the instances in the pool dynamically acquire additional disk space when they are running low on disk space.
    gcpAttributes Property Map
    idleInstanceAutoterminationMinutes Number
    (Integer) The number of minutes that idle instances in excess of the min_idle_instances are maintained by the pool before being terminated. If not specified, excess idle instances are terminated automatically after a default timeout period. If specified, the time must be between 0 and 10000 minutes. If you specify 0, excess idle instances are removed as soon as possible.
    instancePoolFleetAttributes Property Map
    instancePoolId String
    instancePoolName String
    (String) The name of the instance pool. This is required for create and edit operations. It must be unique, non-empty, and less than 100 characters.
    maxCapacity Number
    (Integer) The maximum number of instances the pool can contain, including both idle instances and ones in use by clusters. Once the maximum capacity is reached, you cannot create new clusters from the pool and existing clusters cannot autoscale up until some instances are made idle in the pool via cluster termination or down-scaling. There is no default limit, but as a best practice, this should be set based on anticipated usage.
    minIdleInstances Number
    (Integer) The minimum number of idle instances maintained by the pool. This is in addition to any instances in use by active clusters.
    nodeTypeId String
    (String) The node type for the instances in the pool. All clusters attached to the pool inherit this node type and the pool’s idle instances are allocated based on this type. You can retrieve a list of available node types by using the List Node Types API call.
    preloadedDockerImages List<Property Map>
    preloadedSparkVersions List<String>
    (List) A list with at most one runtime version the pool installs on each instance. Pool clusters that use a preloaded runtime version start faster as they do not have to wait for the image to download. You can retrieve them via databricks.getSparkVersion data source or via Runtime Versions API call.
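
    A minimal TypeScript sketch that exercises several of these inputs together, preloading a single runtime version so pool-backed clusters skip the image download. The local-disk and long-term-support filters and the capacity numbers are illustrative assumptions, not recommendations:

    import * as databricks from "@pulumi/databricks";

    // Assumed lookups: a local-disk node type and the latest LTS runtime version.
    const nodeType = databricks.getNodeType({localDisk: true});
    const ltsRuntime = databricks.getSparkVersion({longTermSupport: true});

    const preloadedPool = new databricks.InstancePool("preloaded_pool", {
        instancePoolName: "Preloaded LTS Pool",
        nodeTypeId: nodeType.then(t => t.id),
        minIdleInstances: 1,                      // keep one warm instance ready
        maxCapacity: 50,                          // hard cap on idle + in-use instances
        idleInstanceAutoterminationMinutes: 15,   // reclaim excess idle capacity after 15 minutes
        preloadedSparkVersions: [ltsRuntime.then(v => v.id)],  // at most one runtime version
    });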

    Supporting Types

    InstancePoolAwsAttributes, InstancePoolAwsAttributesArgs

    Availability string
    (String) Availability type used for all instances in the pool. Only ON_DEMAND and SPOT are supported.
    SpotBidPricePercent int
    (Integer) The max price for AWS spot instances, as a percentage of the corresponding instance type’s on-demand price. For example, if this field is set to 50, and the instance pool needs a new i3.xlarge spot instance, then the max price is half of the price of on-demand i3.xlarge instances. Similarly, if this field is set to 200, the max price is twice the price of on-demand i3.xlarge instances. If not specified, the default value is 100. When spot instances are requested for this instance pool, only spot instances whose max price percentage matches this field are considered. For safety, this field cannot be greater than 10000.
    ZoneId string
    (String) Identifier for the availability zone/datacenter in which the instance pool resides. This string is of a form like "us-west-2a". The provided availability zone must be in the same region as the Databricks deployment. For example, "us-west-2a" is not a valid zone ID if the Databricks deployment resides in the "us-east-1" region. If not specified, a default zone is used. You can find the list of available zones as well as the default value by using the List Zones API.
    Availability string
    (String) Availability type used for all instances in the pool. Only ON_DEMAND and SPOT are supported.
    SpotBidPricePercent int
    (Integer) The max price for AWS spot instances, as a percentage of the corresponding instance type’s on-demand price. For example, if this field is set to 50, and the instance pool needs a new i3.xlarge spot instance, then the max price is half of the price of on-demand i3.xlarge instances. Similarly, if this field is set to 200, the max price is twice the price of on-demand i3.xlarge instances. If not specified, the default value is 100. When spot instances are requested for this instance pool, only spot instances whose max price percentage matches this field are considered. For safety, this field cannot be greater than 10000.
    ZoneId string
    (String) Identifier for the availability zone/datacenter in which the instance pool resides. This string is of a form like "us-west-2a". The provided availability zone must be in the same region as the Databricks deployment. For example, "us-west-2a" is not a valid zone ID if the Databricks deployment resides in the "us-east-1" region. If not specified, a default zone is used. You can find the list of available zones as well as the default value by using the List Zones API.
    availability String
    (String) Availability type used for all instances in the pool. Only ON_DEMAND and SPOT are supported.
    spotBidPricePercent Integer
    (Integer) The max price for AWS spot instances, as a percentage of the corresponding instance type’s on-demand price. For example, if this field is set to 50, and the instance pool needs a new i3.xlarge spot instance, then the max price is half of the price of on-demand i3.xlarge instances. Similarly, if this field is set to 200, the max price is twice the price of on-demand i3.xlarge instances. If not specified, the default value is 100. When spot instances are requested for this instance pool, only spot instances whose max price percentage matches this field are considered. For safety, this field cannot be greater than 10000.
    zoneId String
    (String) Identifier for the availability zone/datacenter in which the instance pool resides. This string is of a form like "us-west-2a". The provided availability zone must be in the same region as the Databricks deployment. For example, "us-west-2a" is not a valid zone ID if the Databricks deployment resides in the "us-east-1" region. If not specified, a default zone is used. You can find the list of available zones as well as the default value by using the List Zones API.
    availability string
    (String) Availability type used for all instances in the pool. Only ON_DEMAND and SPOT are supported.
    spotBidPricePercent number
    (Integer) The max price for AWS spot instances, as a percentage of the corresponding instance type’s on-demand price. For example, if this field is set to 50, and the instance pool needs a new i3.xlarge spot instance, then the max price is half of the price of on-demand i3.xlarge instances. Similarly, if this field is set to 200, the max price is twice the price of on-demand i3.xlarge instances. If not specified, the default value is 100. When spot instances are requested for this instance pool, only spot instances whose max price percentage matches this field are considered. For safety, this field cannot be greater than 10000.
    zoneId string
    (String) Identifier for the availability zone/datacenter in which the instance pool resides. This string is of a form like "us-west-2a". The provided availability zone must be in the same region as the Databricks deployment. For example, "us-west-2a" is not a valid zone ID if the Databricks deployment resides in the "us-east-1" region. If not specified, a default zone is used. You can find the list of available zones as well as the default value by using the List Zones API.
    availability str
    (String) Availability type used for all instances in the pool. Only ON_DEMAND and SPOT are supported.
    spot_bid_price_percent int
    (Integer) The max price for AWS spot instances, as a percentage of the corresponding instance type’s on-demand price. For example, if this field is set to 50, and the instance pool needs a new i3.xlarge spot instance, then the max price is half of the price of on-demand i3.xlarge instances. Similarly, if this field is set to 200, the max price is twice the price of on-demand i3.xlarge instances. If not specified, the default value is 100. When spot instances are requested for this instance pool, only spot instances whose max price percentage matches this field are considered. For safety, this field cannot be greater than 10000.
    zone_id str
    (String) Identifier for the availability zone/datacenter in which the instance pool resides. This string is of a form like "us-west-2a". The provided availability zone must be in the same region as the Databricks deployment. For example, "us-west-2a" is not a valid zone ID if the Databricks deployment resides in the "us-east-1" region. If not specified, a default zone is used. You can find the list of available zones as well as the default value by using the List Zones API.
    availability String
    (String) Availability type used for all instances in the pool. Only ON_DEMAND and SPOT are supported.
    spotBidPricePercent Number
    (Integer) The max price for AWS spot instances, as a percentage of the corresponding instance type’s on-demand price. For example, if this field is set to 50, and the instance pool needs a new i3.xlarge spot instance, then the max price is half of the price of on-demand i3.xlarge instances. Similarly, if this field is set to 200, the max price is twice the price of on-demand i3.xlarge instances. If not specified, the default value is 100. When spot instances are requested for this instance pool, only spot instances whose max price percentage matches this field are considered. For safety, this field cannot be greater than 10000.
    zoneId String
    (String) Identifier for the availability zone/datacenter in which the instance pool resides. This string is of a form like "us-west-2a". The provided availability zone must be in the same region as the Databricks deployment. For example, "us-west-2a" is not a valid zone ID if the Databricks deployment resides in the "us-east-1" region. If not specified, a default zone is used. You can find the list of available zones as well as the default value by using the List Zones API.
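
    As a sketch, a spot-backed pool pinned to a single zone might set these attributes as follows; the node type, bid percentage, and zone are illustrative assumptions:

    import * as databricks from "@pulumi/databricks";

    // Illustrative: SPOT capacity bid at 80% of the on-demand price, pinned to us-west-2a.
    const spotPool = new databricks.InstancePool("spot_pool", {
        instancePoolName: "Spot Pool",
        nodeTypeId: "i3.xlarge",
        minIdleInstances: 0,
        maxCapacity: 100,
        idleInstanceAutoterminationMinutes: 10,
        awsAttributes: {
            availability: "SPOT",
            spotBidPricePercent: 80,
            zoneId: "us-west-2a",
        },
    });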

    InstancePoolAzureAttributes, InstancePoolAzureAttributesArgs

    Availability string
    Availability type used for all nodes. Valid values are SPOT_AZURE and ON_DEMAND_AZURE.
    SpotBidMaxPrice double
    The max price for Azure spot instances. Use -1 to specify the lowest price.
    Availability string
    Availability type used for all nodes. Valid values are SPOT_AZURE and ON_DEMAND_AZURE.
    SpotBidMaxPrice float64
    The max price for Azure spot instances. Use -1 to specify the lowest price.
    availability String
    Availability type used for all nodes. Valid values are SPOT_AZURE and ON_DEMAND_AZURE.
    spotBidMaxPrice Double
    The max price for Azure spot instances. Use -1 to specify the lowest price.
    availability string
    Availability type used for all nodes. Valid values are SPOT_AZURE and ON_DEMAND_AZURE.
    spotBidMaxPrice number
    The max price for Azure spot instances. Use -1 to specify the lowest price.
    availability str
    Availability type used for all nodes. Valid values are SPOT_AZURE and ON_DEMAND_AZURE.
    spot_bid_max_price float
    The max price for Azure spot instances. Use -1 to specify the lowest price.
    availability String
    Availability type used for all nodes. Valid values are SPOT_AZURE and ON_DEMAND_AZURE.
    spotBidMaxPrice Number
    The max price for Azure spot instances. Use -1 to specify the lowest price.
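
    A sketch of an Azure spot pool that bids up to the current market price; the node type and capacity values are illustrative assumptions:

    import * as databricks from "@pulumi/databricks";

    // Illustrative: Azure spot capacity, bidding up to the current spot market price (-1).
    const azureSpotPool = new databricks.InstancePool("azure_spot_pool", {
        instancePoolName: "Azure Spot Pool",
        nodeTypeId: "Standard_DS3_v2",
        minIdleInstances: 0,
        maxCapacity: 50,
        idleInstanceAutoterminationMinutes: 10,
        azureAttributes: {
            availability: "SPOT_AZURE",
            spotBidMaxPrice: -1,
        },
    });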

    InstancePoolDiskSpec, InstancePoolDiskSpecArgs

    DiskCount int
    (Integer) The number of disks to attach to each instance. This feature is only enabled for supported node types. Users can choose up to the limit of the disks supported by the node type. For node types with no local disk, at least one disk needs to be specified.
    DiskSize int
    (Integer) The size of each disk (in GiB) to attach.
    DiskType InstancePoolDiskSpecDiskType
    DiskCount int
    (Integer) The number of disks to attach to each instance. This feature is only enabled for supported node types. Users can choose up to the limit of the disks supported by the node type. For node types with no local disk, at least one disk needs to be specified.
    DiskSize int
    (Integer) The size of each disk (in GiB) to attach.
    DiskType InstancePoolDiskSpecDiskType
    diskCount Integer
    (Integer) The number of disks to attach to each instance. This feature is only enabled for supported node types. Users can choose up to the limit of the disks supported by the node type. For node types with no local disk, at least one disk needs to be specified.
    diskSize Integer
    (Integer) The size of each disk (in GiB) to attach.
    diskType InstancePoolDiskSpecDiskType
    diskCount number
    (Integer) The number of disks to attach to each instance. This feature is only enabled for supported node types. Users can choose up to the limit of the disks supported by the node type. For node types with no local disk, at least one disk needs to be specified.
    diskSize number
    (Integer) The size of each disk (in GiB) to attach.
    diskType InstancePoolDiskSpecDiskType
    disk_count int
    (Integer) The number of disks to attach to each instance. This feature is only enabled for supported node types. Users can choose up to the limit of the disks supported by the node type. For node types with no local disk, at least one disk needs to be specified.
    disk_size int
    (Integer) The size of each disk (in GiB) to attach.
    disk_type InstancePoolDiskSpecDiskType
    diskCount Number
    (Integer) The number of disks to attach to each instance. This feature is only enabled for supported node types. Users can choose up to the limit of the disks supported by the node type. For node types with no local disk, at least one disk needs to be specified.
    diskSize Number
    (Integer) The size of each disk (in GiB) to attach.
    diskType Property Map

    InstancePoolDiskSpecDiskType, InstancePoolDiskSpecDiskTypeArgs

    InstancePoolGcpAttributes, InstancePoolGcpAttributesArgs

    GcpAvailability string
    Availability type used for all nodes. Valid values are PREEMPTIBLE_GCP, PREEMPTIBLE_WITH_FALLBACK_GCP and ON_DEMAND_GCP, default: ON_DEMAND_GCP.
    LocalSsdCount int
    Number of local SSD disks (each is 375GB in size) that will be attached to each node of the cluster.
    ZoneId string
    Identifier for the availability zone/datacenter in which the cluster resides. This string will be of a form like us-central1-a. The provided availability zone must be in the same region as the Databricks workspace.
    GcpAvailability string
    Availability type used for all nodes. Valid values are PREEMPTIBLE_GCP, PREEMPTIBLE_WITH_FALLBACK_GCP and ON_DEMAND_GCP, default: ON_DEMAND_GCP.
    LocalSsdCount int
    Number of local SSD disks (each is 375GB in size) that will be attached to each node of the cluster.
    ZoneId string
    Identifier for the availability zone/datacenter in which the cluster resides. This string will be of a form like us-central1-a. The provided availability zone must be in the same region as the Databricks workspace.
    gcpAvailability String
    Availability type used for all nodes. Valid values are PREEMPTIBLE_GCP, PREEMPTIBLE_WITH_FALLBACK_GCP and ON_DEMAND_GCP, default: ON_DEMAND_GCP.
    localSsdCount Integer
    Number of local SSD disks (each is 375GB in size) that will be attached to each node of the cluster.
    zoneId String
    Identifier for the availability zone/datacenter in which the cluster resides. This string will be of a form like us-central1-a. The provided availability zone must be in the same region as the Databricks workspace.
    gcpAvailability string
    Availability type used for all nodes. Valid values are PREEMPTIBLE_GCP, PREEMPTIBLE_WITH_FALLBACK_GCP and ON_DEMAND_GCP, default: ON_DEMAND_GCP.
    localSsdCount number
    Number of local SSD disks (each is 375GB in size) that will be attached to each node of the cluster.
    zoneId string
    Identifier for the availability zone/datacenter in which the cluster resides. This string will be of a form like us-central1-a. The provided availability zone must be in the same region as the Databricks workspace.
    gcp_availability str
    Availability type used for all nodes. Valid values are PREEMPTIBLE_GCP, PREEMPTIBLE_WITH_FALLBACK_GCP and ON_DEMAND_GCP, default: ON_DEMAND_GCP.
    local_ssd_count int
    Number of local SSD disks (each is 375GB in size) that will be attached to each node of the cluster.
    zone_id str
    Identifier for the availability zone/datacenter in which the cluster resides. This string will be of a form like us-central1-a. The provided availability zone must be in the same region as the Databricks workspace.
    gcpAvailability String
    Availability type used for all nodes. Valid values are PREEMPTIBLE_GCP, PREEMPTIBLE_WITH_FALLBACK_GCP and ON_DEMAND_GCP, default: ON_DEMAND_GCP.
    localSsdCount Number
    Number of local SSD disks (each is 375GB in size) that will be attached to each node of the cluster.
    zoneId String
    Identifier for the availability zone/datacenter in which the cluster resides. This string will be of a form like us-central1-a. The provided availability zone must be in the same region as the Databricks workspace.
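
    A sketch of a GCP pool using preemptible nodes with on-demand fallback; the node type, zone, and SSD count are illustrative assumptions:

    import * as databricks from "@pulumi/databricks";

    // Illustrative: preemptible capacity with fallback to on-demand, one 375GB local SSD per node.
    const gcpPool = new databricks.InstancePool("gcp_pool", {
        instancePoolName: "GCP Preemptible Pool",
        nodeTypeId: "n2-standard-4",
        minIdleInstances: 0,
        maxCapacity: 50,
        idleInstanceAutoterminationMinutes: 10,
        gcpAttributes: {
            gcpAvailability: "PREEMPTIBLE_WITH_FALLBACK_GCP",
            localSsdCount: 1,
            zoneId: "us-central1-a",
        },
    });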

    InstancePoolInstancePoolFleetAttributes, InstancePoolInstancePoolFleetAttributesArgs

    InstancePoolInstancePoolFleetAttributesFleetOnDemandOption, InstancePoolInstancePoolFleetAttributesFleetOnDemandOptionArgs

    InstancePoolInstancePoolFleetAttributesFleetSpotOption, InstancePoolInstancePoolFleetAttributesFleetSpotOptionArgs

    InstancePoolInstancePoolFleetAttributesLaunchTemplateOverride, InstancePoolInstancePoolFleetAttributesLaunchTemplateOverrideArgs

    InstancePoolPreloadedDockerImage, InstancePoolPreloadedDockerImageArgs

    Url string
    URL for the Docker image
    BasicAuth InstancePoolPreloadedDockerImageBasicAuth

    basic_auth.username and basic_auth.password for Docker repository. Docker registry credentials are encrypted when they are stored in Databricks internal storage and when they are passed to a registry upon fetching Docker images at cluster launch. However, other authenticated and authorized API users of this workspace can access the username and password.

    Example usage with azurerm_container_registry, which you can adapt to your specific use case:

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";
    import * as docker from "@pulumi/docker";
    

    const _this = new docker.index.RegistryImage("this", {
        build: [{}],
        name: `${thisAzurermContainerRegistry.loginServer}/sample:latest`,
    });
    const thisInstancePool = new databricks.InstancePool("this", {preloadedDockerImages: [{
        url: _this.name,
        basicAuth: {
            username: thisAzurermContainerRegistry.adminUsername,
            password: thisAzurermContainerRegistry.adminPassword,
        },
    }]});

    import pulumi
    import pulumi_databricks as databricks
    import pulumi_docker as docker
    
    this = docker.index.RegistryImage("this",
        build=[{}],
        name=f"{this_azurerm_container_registry.login_server}/sample:latest")
    this_instance_pool = databricks.InstancePool("this", preloaded_docker_images=[databricks.InstancePoolPreloadedDockerImageArgs(
        url=this["name"],
        basic_auth=databricks.InstancePoolPreloadedDockerImageBasicAuthArgs(
            username=this_azurerm_container_registry["adminUsername"],
            password=this_azurerm_container_registry["adminPassword"],
        ),
    )])
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Databricks = Pulumi.Databricks;
    using Docker = Pulumi.Docker;
    
    return await Deployment.RunAsync(() => 
    {
        var @this = new Docker.Index.RegistryImage("this", new()
        {
            Build = new[]
            {
                null,
            },
            Name = $"{thisAzurermContainerRegistry.LoginServer}/sample:latest",
        });
    
        var thisInstancePool = new Databricks.InstancePool("this", new()
        {
            PreloadedDockerImages = new[]
            {
                new Databricks.Inputs.InstancePoolPreloadedDockerImageArgs
                {
                    Url = @this.Name,
                    BasicAuth = new Databricks.Inputs.InstancePoolPreloadedDockerImageBasicAuthArgs
                    {
                        Username = thisAzurermContainerRegistry.AdminUsername,
                        Password = thisAzurermContainerRegistry.AdminPassword,
                    },
                },
            },
        });
    
    });
    
    package main
    
    import (
    	"fmt"
    
    	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
    	"github.com/pulumi/pulumi-docker/sdk/v4/go/docker"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		this, err := docker.NewRegistryImage(ctx, "this", &docker.RegistryImageArgs{
    			Build: []map[string]interface{}{
    				nil,
    			},
    			Name: fmt.Sprintf("%v/sample:latest", thisAzurermContainerRegistry.LoginServer),
    		})
    		if err != nil {
    			return err
    		}
    		_, err = databricks.NewInstancePool(ctx, "this", &databricks.InstancePoolArgs{
    			PreloadedDockerImages: databricks.InstancePoolPreloadedDockerImageArray{
    				&databricks.InstancePoolPreloadedDockerImageArgs{
    					Url: this.Name,
    					BasicAuth: &databricks.InstancePoolPreloadedDockerImageBasicAuthArgs{
    						Username: pulumi.Any(thisAzurermContainerRegistry.AdminUsername),
    						Password: pulumi.Any(thisAzurermContainerRegistry.AdminPassword),
    					},
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.docker.RegistryImage;
    import com.pulumi.docker.RegistryImageArgs;
    import com.pulumi.databricks.InstancePool;
    import com.pulumi.databricks.InstancePoolArgs;
    import com.pulumi.databricks.inputs.InstancePoolPreloadedDockerImageArgs;
    import com.pulumi.databricks.inputs.InstancePoolPreloadedDockerImageBasicAuthArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var this_ = new RegistryImage("this", RegistryImageArgs.builder()        
                .build()
                .name(String.format("%s/sample:latest", thisAzurermContainerRegistry.loginServer()))
                .build());
    
            var thisInstancePool = new InstancePool("thisInstancePool", InstancePoolArgs.builder()        
                .preloadedDockerImages(InstancePoolPreloadedDockerImageArgs.builder()
                    .url(this_.name())
                    .basicAuth(InstancePoolPreloadedDockerImageBasicAuthArgs.builder()
                        .username(thisAzurermContainerRegistry.adminUsername())
                        .password(thisAzurermContainerRegistry.adminPassword())
                        .build())
                    .build())
                .build());
    
        }
    }
    
    resources:
      this:
        type: docker:registryImage
        properties:
          build:
            - {}
          name: ${thisAzurermContainerRegistry.loginServer}/sample:latest
      thisInstancePool:
        type: databricks:InstancePool
        name: this
        properties:
          preloadedDockerImages:
            - url: ${this.name}
              basicAuth:
                username: ${thisAzurermContainerRegistry.adminUsername}
                password: ${thisAzurermContainerRegistry.adminPassword}
    
    Url string
    URL for the Docker image
    BasicAuth InstancePoolPreloadedDockerImageBasicAuth

    basic_auth.username and basic_auth.password for Docker repository. Docker registry credentials are encrypted when they are stored in Databricks internal storage and when they are passed to a registry upon fetching Docker images at cluster launch. However, other authenticated and authorized API users of this workspace can access the username and password.

    Example usage with azurerm_container_registry, which you can adapt to your specific use case:

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";
    import * as docker from "@pulumi/docker";
    

    const _this = new docker.index.RegistryImage("this", {
        build: [{}],
        name: `${thisAzurermContainerRegistry.loginServer}/sample:latest`,
    });
    const thisInstancePool = new databricks.InstancePool("this", {preloadedDockerImages: [{
        url: _this.name,
        basicAuth: {
            username: thisAzurermContainerRegistry.adminUsername,
            password: thisAzurermContainerRegistry.adminPassword,
        },
    }]});

    import pulumi
    import pulumi_databricks as databricks
    import pulumi_docker as docker
    
    this = docker.index.RegistryImage("this",
        build=[{}],
        name=f"{this_azurerm_container_registry.login_server}/sample:latest")
    this_instance_pool = databricks.InstancePool("this", preloaded_docker_images=[databricks.InstancePoolPreloadedDockerImageArgs(
        url=this["name"],
        basic_auth=databricks.InstancePoolPreloadedDockerImageBasicAuthArgs(
            username=this_azurerm_container_registry["adminUsername"],
            password=this_azurerm_container_registry["adminPassword"],
        ),
    )])
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Databricks = Pulumi.Databricks;
    using Docker = Pulumi.Docker;
    
    return await Deployment.RunAsync(() => 
    {
        var @this = new Docker.Index.RegistryImage("this", new()
        {
            Build = new[]
            {
                null,
            },
            Name = $"{thisAzurermContainerRegistry.LoginServer}/sample:latest",
        });
    
        var thisInstancePool = new Databricks.InstancePool("this", new()
        {
            PreloadedDockerImages = new[]
            {
                new Databricks.Inputs.InstancePoolPreloadedDockerImageArgs
                {
                    Url = @this.Name,
                    BasicAuth = new Databricks.Inputs.InstancePoolPreloadedDockerImageBasicAuthArgs
                    {
                        Username = thisAzurermContainerRegistry.AdminUsername,
                        Password = thisAzurermContainerRegistry.AdminPassword,
                    },
                },
            },
        });
    
    });
    
    package main
    
    import (
    	"fmt"
    
    	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
    	"github.com/pulumi/pulumi-docker/sdk/v4/go/docker"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		this, err := docker.NewRegistryImage(ctx, "this", &docker.RegistryImageArgs{
    			Build: []map[string]interface{}{
    				nil,
    			},
    			Name: fmt.Sprintf("%v/sample:latest", thisAzurermContainerRegistry.LoginServer),
    		})
    		if err != nil {
    			return err
    		}
    		_, err = databricks.NewInstancePool(ctx, "this", &databricks.InstancePoolArgs{
    			PreloadedDockerImages: databricks.InstancePoolPreloadedDockerImageArray{
    				&databricks.InstancePoolPreloadedDockerImageArgs{
    					Url: this.Name,
    					BasicAuth: &databricks.InstancePoolPreloadedDockerImageBasicAuthArgs{
    						Username: pulumi.Any(thisAzurermContainerRegistry.AdminUsername),
    						Password: pulumi.Any(thisAzurermContainerRegistry.AdminPassword),
    					},
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.docker.RegistryImage;
    import com.pulumi.docker.RegistryImageArgs;
    import com.pulumi.databricks.InstancePool;
    import com.pulumi.databricks.InstancePoolArgs;
    import com.pulumi.databricks.inputs.InstancePoolPreloadedDockerImageArgs;
    import com.pulumi.databricks.inputs.InstancePoolPreloadedDockerImageBasicAuthArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var this_ = new RegistryImage("this", RegistryImageArgs.builder()        
                .build()
                .name(String.format("%s/sample:latest", thisAzurermContainerRegistry.loginServer()))
                .build());
    
            var thisInstancePool = new InstancePool("thisInstancePool", InstancePoolArgs.builder()        
                .preloadedDockerImages(InstancePoolPreloadedDockerImageArgs.builder()
                    .url(this_.name())
                    .basicAuth(InstancePoolPreloadedDockerImageBasicAuthArgs.builder()
                        .username(thisAzurermContainerRegistry.adminUsername())
                        .password(thisAzurermContainerRegistry.adminPassword())
                        .build())
                    .build())
                .build());
    
        }
    }
    
    resources:
      this:
        type: docker:registryImage
        properties:
          build:
            - {}
          name: ${thisAzurermContainerRegistry.loginServer}/sample:latest
      thisInstancePool:
        type: databricks:InstancePool
        name: this
        properties:
          preloadedDockerImages:
            - url: ${this.name}
              basicAuth:
                username: ${thisAzurermContainerRegistry.adminUsername}
                password: ${thisAzurermContainerRegistry.adminPassword}
    
    url String
    URL for the Docker image
    basicAuth InstancePoolPreloadedDockerImageBasicAuth

    basic_auth.username and basic_auth.password for Docker repository. Docker registry credentials are encrypted when they are stored in Databricks internal storage and when they are passed to a registry upon fetching Docker images at cluster launch. However, other authenticated and authorized API users of this workspace can access the username and password.

    Example usage with azurerm_container_registry, which you can adapt to your specific use case:

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";
    import * as docker from "@pulumi/docker";
    

    const _this = new docker.index.RegistryImage("this", {
        build: [{}],
        name: `${thisAzurermContainerRegistry.loginServer}/sample:latest`,
    });
    const thisInstancePool = new databricks.InstancePool("this", {preloadedDockerImages: [{
        url: _this.name,
        basicAuth: {
            username: thisAzurermContainerRegistry.adminUsername,
            password: thisAzurermContainerRegistry.adminPassword,
        },
    }]});

    import pulumi
    import pulumi_databricks as databricks
    import pulumi_docker as docker
    
    this = docker.index.RegistryImage("this",
        build=[{}],
        name=f"{this_azurerm_container_registry.login_server}/sample:latest")
    this_instance_pool = databricks.InstancePool("this", preloaded_docker_images=[databricks.InstancePoolPreloadedDockerImageArgs(
        url=this["name"],
        basic_auth=databricks.InstancePoolPreloadedDockerImageBasicAuthArgs(
            username=this_azurerm_container_registry["adminUsername"],
            password=this_azurerm_container_registry["adminPassword"],
        ),
    )])
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Databricks = Pulumi.Databricks;
    using Docker = Pulumi.Docker;
    
    return await Deployment.RunAsync(() => 
    {
        var @this = new Docker.Index.RegistryImage("this", new()
        {
            Build = new[]
            {
                null,
            },
            Name = $"{thisAzurermContainerRegistry.LoginServer}/sample:latest",
        });
    
        var thisInstancePool = new Databricks.InstancePool("this", new()
        {
            PreloadedDockerImages = new[]
            {
                new Databricks.Inputs.InstancePoolPreloadedDockerImageArgs
                {
                    Url = @this.Name,
                    BasicAuth = new Databricks.Inputs.InstancePoolPreloadedDockerImageBasicAuthArgs
                    {
                        Username = thisAzurermContainerRegistry.AdminUsername,
                        Password = thisAzurermContainerRegistry.AdminPassword,
                    },
                },
            },
        });
    
    });
    
    package main
    
    import (
    	"fmt"
    
    	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
    	"github.com/pulumi/pulumi-docker/sdk/v4/go/docker"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		this, err := docker.NewRegistryImage(ctx, "this", &docker.RegistryImageArgs{
    			Build: []map[string]interface{}{
    				nil,
    			},
    			Name: fmt.Sprintf("%v/sample:latest", thisAzurermContainerRegistry.LoginServer),
    		})
    		if err != nil {
    			return err
    		}
    		_, err = databricks.NewInstancePool(ctx, "this", &databricks.InstancePoolArgs{
    			PreloadedDockerImages: databricks.InstancePoolPreloadedDockerImageArray{
    				&databricks.InstancePoolPreloadedDockerImageArgs{
    					Url: this.Name,
    					BasicAuth: &databricks.InstancePoolPreloadedDockerImageBasicAuthArgs{
    						Username: pulumi.Any(thisAzurermContainerRegistry.AdminUsername),
    						Password: pulumi.Any(thisAzurermContainerRegistry.AdminPassword),
    					},
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.docker.RegistryImage;
    import com.pulumi.docker.RegistryImageArgs;
    import com.pulumi.databricks.InstancePool;
    import com.pulumi.databricks.InstancePoolArgs;
    import com.pulumi.databricks.inputs.InstancePoolPreloadedDockerImageArgs;
    import com.pulumi.databricks.inputs.InstancePoolPreloadedDockerImageBasicAuthArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var this_ = new RegistryImage("this", RegistryImageArgs.builder()        
                .build()
                .name(String.format("%s/sample:latest", thisAzurermContainerRegistry.loginServer()))
                .build());
    
            var thisInstancePool = new InstancePool("thisInstancePool", InstancePoolArgs.builder()        
                .preloadedDockerImages(InstancePoolPreloadedDockerImageArgs.builder()
                    .url(this_.name())
                    .basicAuth(InstancePoolPreloadedDockerImageBasicAuthArgs.builder()
                        .username(thisAzurermContainerRegistry.adminUsername())
                        .password(thisAzurermContainerRegistry.adminPassword())
                        .build())
                    .build())
                .build());
    
        }
    }
    
    resources:
      this:
        type: docker:registryImage
        properties:
          build:
            - {}
          name: ${thisAzurermContainerRegistry.loginServer}/sample:latest
      thisInstancePool:
        type: databricks:InstancePool
        name: this
        properties:
          preloadedDockerImages:
            - url: ${this.name}
              basicAuth:
                username: ${thisAzurermContainerRegistry.adminUsername}
                password: ${thisAzurermContainerRegistry.adminPassword}
    
    url string
    URL for the Docker image
    basicAuth InstancePoolPreloadedDockerImageBasicAuth

    basic_auth.username and basic_auth.password for Docker repository. Docker registry credentials are encrypted when they are stored in Databricks internal storage and when they are passed to a registry upon fetching Docker images at cluster launch. However, other authenticated and authorized API users of this workspace can access the username and password.

    Example usage with azurerm_container_registry, which you can adapt to your specific use case:

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";
    import * as docker from "@pulumi/docker";
    

    const _this = new docker.index.RegistryImage("this", {
        build: [{}],
        name: `${thisAzurermContainerRegistry.loginServer}/sample:latest`,
    });
    const thisInstancePool = new databricks.InstancePool("this", {preloadedDockerImages: [{
        url: _this.name,
        basicAuth: {
            username: thisAzurermContainerRegistry.adminUsername,
            password: thisAzurermContainerRegistry.adminPassword,
        },
    }]});

    import pulumi
    import pulumi_databricks as databricks
    import pulumi_docker as docker
    
    this = docker.index.RegistryImage("this",
        build=[{}],
        name=f"{this_azurerm_container_registry.login_server}/sample:latest")
    this_instance_pool = databricks.InstancePool("this", preloaded_docker_images=[databricks.InstancePoolPreloadedDockerImageArgs(
        url=this["name"],
        basic_auth=databricks.InstancePoolPreloadedDockerImageBasicAuthArgs(
            username=this_azurerm_container_registry["adminUsername"],
            password=this_azurerm_container_registry["adminPassword"],
        ),
    )])
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Databricks = Pulumi.Databricks;
    using Docker = Pulumi.Docker;
    
    return await Deployment.RunAsync(() => 
    {
        var @this = new Docker.Index.RegistryImage("this", new()
        {
            Build = new[]
            {
                null,
            },
            Name = $"{thisAzurermContainerRegistry.LoginServer}/sample:latest",
        });
    
        var thisInstancePool = new Databricks.InstancePool("this", new()
        {
            PreloadedDockerImages = new[]
            {
                new Databricks.Inputs.InstancePoolPreloadedDockerImageArgs
                {
                    Url = @this.Name,
                    BasicAuth = new Databricks.Inputs.InstancePoolPreloadedDockerImageBasicAuthArgs
                    {
                        Username = thisAzurermContainerRegistry.AdminUsername,
                        Password = thisAzurermContainerRegistry.AdminPassword,
                    },
                },
            },
        });
    
    });
    
    package main
    
    import (
    	"fmt"
    
    	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
    	"github.com/pulumi/pulumi-docker/sdk/v4/go/docker"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		this, err := docker.NewRegistryImage(ctx, "this", &docker.RegistryImageArgs{
    			Build: []map[string]interface{}{
    				nil,
    			},
    			Name: fmt.Sprintf("%v/sample:latest", thisAzurermContainerRegistry.LoginServer),
    		})
    		if err != nil {
    			return err
    		}
    		_, err = databricks.NewInstancePool(ctx, "this", &databricks.InstancePoolArgs{
    			PreloadedDockerImages: databricks.InstancePoolPreloadedDockerImageArray{
    				&databricks.InstancePoolPreloadedDockerImageArgs{
    					Url: this.Name,
    					BasicAuth: &databricks.InstancePoolPreloadedDockerImageBasicAuthArgs{
    						Username: pulumi.Any(thisAzurermContainerRegistry.AdminUsername),
    						Password: pulumi.Any(thisAzurermContainerRegistry.AdminPassword),
    					},
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.docker.RegistryImage;
    import com.pulumi.docker.RegistryImageArgs;
    import com.pulumi.databricks.InstancePool;
    import com.pulumi.databricks.InstancePoolArgs;
    import com.pulumi.databricks.inputs.InstancePoolPreloadedDockerImageArgs;
    import com.pulumi.databricks.inputs.InstancePoolPreloadedDockerImageBasicAuthArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var this_ = new RegistryImage("this", RegistryImageArgs.builder()        
                .build()
                .name(String.format("%s/sample:latest", thisAzurermContainerRegistry.loginServer()))
                .build());
    
            var thisInstancePool = new InstancePool("thisInstancePool", InstancePoolArgs.builder()        
                .preloadedDockerImages(InstancePoolPreloadedDockerImageArgs.builder()
                    .url(this_.name())
                    .basicAuth(InstancePoolPreloadedDockerImageBasicAuthArgs.builder()
                        .username(thisAzurermContainerRegistry.adminUsername())
                        .password(thisAzurermContainerRegistry.adminPassword())
                        .build())
                    .build())
                .build());
    
        }
    }
    
    resources:
      this:
        type: docker:registryImage
        properties:
          build:
            - {}
          name: ${thisAzurermContainerRegistry.loginServer}/sample:latest
      thisInstancePool:
        type: databricks:InstancePool
        name: this
        properties:
          preloadedDockerImages:
            - url: ${this.name}
              basicAuth:
                username: ${thisAzurermContainerRegistry.adminUsername}
                password: ${thisAzurermContainerRegistry.adminPassword}
    
    url str
    URL for the Docker image
    basic_auth InstancePoolPreloadedDockerImageBasicAuth

    basic_auth.username and basic_auth.password for Docker repository. Docker registry credentials are encrypted when they are stored in Databricks internal storage and when they are passed to a registry upon fetching Docker images at cluster launch. However, other authenticated and authorized API users of this workspace can access the username and password.

    Example usage with azurerm_container_registry, which you can adapt to your specific use case:

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";
    import * as docker from "@pulumi/docker";
    

    const _this = new docker.index.RegistryImage("this", {
        build: [{}],
        name: `${thisAzurermContainerRegistry.loginServer}/sample:latest`,
    });
    const thisInstancePool = new databricks.InstancePool("this", {preloadedDockerImages: [{
        url: _this.name,
        basicAuth: {
            username: thisAzurermContainerRegistry.adminUsername,
            password: thisAzurermContainerRegistry.adminPassword,
        },
    }]});

    import pulumi
    import pulumi_databricks as databricks
    import pulumi_docker as docker
    
    this = docker.index.RegistryImage("this",
        build=[{}],
        name=f"{this_azurerm_container_registry.login_server}/sample:latest")
    this_instance_pool = databricks.InstancePool("this", preloaded_docker_images=[databricks.InstancePoolPreloadedDockerImageArgs(
        url=this["name"],
        basic_auth=databricks.InstancePoolPreloadedDockerImageBasicAuthArgs(
            username=this_azurerm_container_registry["adminUsername"],
            password=this_azurerm_container_registry["adminPassword"],
        ),
    )])
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Databricks = Pulumi.Databricks;
    using Docker = Pulumi.Docker;
    
    return await Deployment.RunAsync(() => 
    {
        var @this = new Docker.Index.RegistryImage("this", new()
        {
            Build = new[]
            {
                null,
            },
            Name = $"{thisAzurermContainerRegistry.LoginServer}/sample:latest",
        });
    
        var thisInstancePool = new Databricks.InstancePool("this", new()
        {
            PreloadedDockerImages = new[]
            {
                new Databricks.Inputs.InstancePoolPreloadedDockerImageArgs
                {
                    Url = @this.Name,
                    BasicAuth = new Databricks.Inputs.InstancePoolPreloadedDockerImageBasicAuthArgs
                    {
                        Username = thisAzurermContainerRegistry.AdminUsername,
                        Password = thisAzurermContainerRegistry.AdminPassword,
                    },
                },
            },
        });
    
    });
    
    package main
    
    import (
    	"fmt"
    
    	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
    	"github.com/pulumi/pulumi-docker/sdk/v4/go/docker"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		this, err := docker.NewRegistryImage(ctx, "this", &docker.RegistryImageArgs{
    			Build: []map[string]interface{}{
    				nil,
    			},
    			Name: fmt.Sprintf("%v/sample:latest", thisAzurermContainerRegistry.LoginServer),
    		})
    		if err != nil {
    			return err
    		}
    		_, err = databricks.NewInstancePool(ctx, "this", &databricks.InstancePoolArgs{
    			PreloadedDockerImages: databricks.InstancePoolPreloadedDockerImageArray{
    				&databricks.InstancePoolPreloadedDockerImageArgs{
    					Url: this.Name,
    					BasicAuth: &databricks.InstancePoolPreloadedDockerImageBasicAuthArgs{
    						Username: pulumi.Any(thisAzurermContainerRegistry.AdminUsername),
    						Password: pulumi.Any(thisAzurermContainerRegistry.AdminPassword),
    					},
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.docker.RegistryImage;
    import com.pulumi.docker.RegistryImageArgs;
    import com.pulumi.databricks.InstancePool;
    import com.pulumi.databricks.InstancePoolArgs;
    import com.pulumi.databricks.inputs.InstancePoolPreloadedDockerImageArgs;
    import com.pulumi.databricks.inputs.InstancePoolPreloadedDockerImageBasicAuthArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var this_ = new RegistryImage("this", RegistryImageArgs.builder()        
                .build()
                .name(String.format("%s/sample:latest", thisAzurermContainerRegistry.loginServer()))
                .build());
    
            var thisInstancePool = new InstancePool("thisInstancePool", InstancePoolArgs.builder()        
                .preloadedDockerImages(InstancePoolPreloadedDockerImageArgs.builder()
                    .url(this_.name())
                    .basicAuth(InstancePoolPreloadedDockerImageBasicAuthArgs.builder()
                        .username(thisAzurermContainerRegistry.adminUsername())
                        .password(thisAzurermContainerRegistry.adminPassword())
                        .build())
                    .build())
                .build());
    
        }
    }
    
    resources:
      this:
        type: docker:registryImage
        properties:
          build:
            - {}
          name: ${thisAzurermContainerRegistry.loginServer}/sample:latest
      thisInstancePool:
        type: databricks:InstancePool
        name: this
        properties:
          preloadedDockerImages:
            - url: ${this.name}
              basicAuth:
                username: ${thisAzurermContainerRegistry.adminUsername}
                password: ${thisAzurermContainerRegistry.adminPassword}
    
    url String
    URL for the Docker image
    basicAuth Property Map

    basic_auth.username and basic_auth.password for Docker repository. Docker registry credentials are encrypted when they are stored in Databricks internal storage and when they are passed to a registry upon fetching Docker images at cluster launch. However, other authenticated and authorized API users of this workspace can access the username and password.

    Example usage with azurerm_container_registry, which you can adapt to your specific use case:

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";
    import * as docker from "@pulumi/docker";
    

    const _this = new docker.index.RegistryImage("this", {
        build: [{}],
        name: `${thisAzurermContainerRegistry.loginServer}/sample:latest`,
    });
    const thisInstancePool = new databricks.InstancePool("this", {preloadedDockerImages: [{
        url: _this.name,
        basicAuth: {
            username: thisAzurermContainerRegistry.adminUsername,
            password: thisAzurermContainerRegistry.adminPassword,
        },
    }]});

    import pulumi
    import pulumi_databricks as databricks
    import pulumi_docker as docker
    
    this = docker.index.RegistryImage("this",
        build=[{}],
        name=f"{this_azurerm_container_registry.login_server}/sample:latest")
    this_instance_pool = databricks.InstancePool("this", preloaded_docker_images=[databricks.InstancePoolPreloadedDockerImageArgs(
        url=this["name"],
        basic_auth=databricks.InstancePoolPreloadedDockerImageBasicAuthArgs(
            username=this_azurerm_container_registry["adminUsername"],
            password=this_azurerm_container_registry["adminPassword"],
        ),
    )])
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Databricks = Pulumi.Databricks;
    using Docker = Pulumi.Docker;
    
    return await Deployment.RunAsync(() => 
    {
        var @this = new Docker.Index.RegistryImage("this", new()
        {
            Build = new[]
            {
                null,
            },
            Name = $"{thisAzurermContainerRegistry.LoginServer}/sample:latest",
        });
    
        var thisInstancePool = new Databricks.InstancePool("this", new()
        {
            PreloadedDockerImages = new[]
            {
                new Databricks.Inputs.InstancePoolPreloadedDockerImageArgs
                {
                    Url = @this.Name,
                    BasicAuth = new Databricks.Inputs.InstancePoolPreloadedDockerImageBasicAuthArgs
                    {
                        Username = thisAzurermContainerRegistry.AdminUsername,
                        Password = thisAzurermContainerRegistry.AdminPassword,
                    },
                },
            },
        });
    
    });
    
    package main
    
    import (
    	"fmt"
    
    	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
    	"github.com/pulumi/pulumi-docker/sdk/v4/go/docker"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		this, err := docker.NewRegistryImage(ctx, "this", &docker.RegistryImageArgs{
    			Build: []map[string]interface{}{
    				nil,
    			},
    			Name: fmt.Sprintf("%v/sample:latest", thisAzurermContainerRegistry.LoginServer),
    		})
    		if err != nil {
    			return err
    		}
    		_, err = databricks.NewInstancePool(ctx, "this", &databricks.InstancePoolArgs{
    			PreloadedDockerImages: databricks.InstancePoolPreloadedDockerImageArray{
    				&databricks.InstancePoolPreloadedDockerImageArgs{
    					Url: this.Name,
    					BasicAuth: &databricks.InstancePoolPreloadedDockerImageBasicAuthArgs{
    						Username: pulumi.Any(thisAzurermContainerRegistry.AdminUsername),
    						Password: pulumi.Any(thisAzurermContainerRegistry.AdminPassword),
    					},
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.docker.RegistryImage;
    import com.pulumi.docker.RegistryImageArgs;
    import com.pulumi.databricks.InstancePool;
    import com.pulumi.databricks.InstancePoolArgs;
    import com.pulumi.databricks.inputs.InstancePoolPreloadedDockerImageArgs;
    import com.pulumi.databricks.inputs.InstancePoolPreloadedDockerImageBasicAuthArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var this_ = new RegistryImage("this", RegistryImageArgs.builder()        
                .build()
                .name(String.format("%s/sample:latest", thisAzurermContainerRegistry.loginServer()))
                .build());
    
            var thisInstancePool = new InstancePool("thisInstancePool", InstancePoolArgs.builder()        
                .preloadedDockerImages(InstancePoolPreloadedDockerImageArgs.builder()
                    .url(this_.name())
                    .basicAuth(InstancePoolPreloadedDockerImageBasicAuthArgs.builder()
                        .username(thisAzurermContainerRegistry.adminUsername())
                        .password(thisAzurermContainerRegistry.adminPassword())
                        .build())
                    .build())
                .build());
    
        }
    }
    
    resources:
      this:
        type: docker:registryImage
        properties:
          build:
            - {}
          name: ${thisAzurermContainerRegistry.loginServer}/sample:latest
      thisInstancePool:
        type: databricks:InstancePool
        name: this
        properties:
          preloadedDockerImages:
            - url: ${this.name}
              basicAuth:
                username: ${thisAzurermContainerRegistry.adminUsername}
                password: ${thisAzurermContainerRegistry.adminPassword}
    

    InstancePoolPreloadedDockerImageBasicAuth, InstancePoolPreloadedDockerImageBasicAuthArgs

    Password string
    Username string
    Password string
    Username string
    password String
    username String
    password string
    username string
    password String
    username String

    Import

    The instance pool resource can be imported using its ID:

    bash

    $ pulumi import databricks:index/instancePool:InstancePool this <instance-pool-id>
    

    To learn more about importing existing cloud resources, see Importing resources.
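
    If you only need to reference an existing pool from another resource rather than manage it, one option is the resource's static get method. A sketch, assuming a databricks.Cluster attached via its instancePoolId input and a runtime version available in your workspace:

    import * as databricks from "@pulumi/databricks";

    // Look up an existing pool by its ID (placeholder shown; substitute your pool's ID).
    const existingPool = databricks.InstancePool.get("existing", "<instance-pool-id>");

    // Attach an autoscaling cluster to the pool; the Spark version here is an assumption.
    const pooledCluster = new databricks.Cluster("pooled_cluster", {
        clusterName: "pooled-cluster",
        sparkVersion: "14.3.x-scala2.12",
        instancePoolId: existingPool.id,
        autoscale: {
            minWorkers: 1,
            maxWorkers: 4,
        },
    });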

    Package Details

    Repository
    databricks pulumi/pulumi-databricks
    License
    Apache-2.0
    Notes
    This Pulumi package is based on the databricks Terraform Provider.