published on Monday, Mar 9, 2026 by Pulumi
Use databricks.Pipeline to deploy Delta Live Tables.
Related Resources
The following resources are often used in the same context:
- End-to-end workspace management guide.
- databricks.Cluster to create Databricks Clusters.
- databricks.Job to manage Databricks Jobs that run non-interactive code in a databricks_cluster (a job that triggers this pipeline is sketched after this list).
- databricks.Notebook to manage Databricks Notebooks.
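The following sketch (Python) shows one common way these resources fit together: a Delta Live Tables pipeline plus a databricks.Job whose single task triggers it. It assumes the job's multi-task syntax (tasks with a pipeline_task block) available in recent provider versions; resource names and values are illustrative.
import pulumi_databricks as databricks

# A minimal DLT pipeline (abbreviated; see the full example below).
pipeline = databricks.Pipeline("example",
    storage="/test/first-pipeline",
    filters=databricks.PipelineFiltersArgs(
        includes=["com.databricks.include"],
    ))

# A job whose only task runs the pipeline above.
trigger_job = databricks.Job("dlt-trigger",
    tasks=[databricks.JobTaskArgs(
        task_key="run-pipeline",
        pipeline_task=databricks.JobTaskPipelineTaskArgs(
            pipeline_id=pipeline.id,  # reference the pipeline created above
        ),
    )])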
Example Usage
using Pulumi;
using Databricks = Pulumi.Databricks;
class MyStack : Stack
{
public MyStack()
{
var dltDemo = new Databricks.Notebook("dltDemo", new Databricks.NotebookArgs
{
});
//...
var @this = new Databricks.Pipeline("this", new Databricks.PipelineArgs
{
Storage = "/test/first-pipeline",
Configuration =
{
{ "key1", "value1" },
{ "key2", "value2" },
},
Clusters =
{
new Databricks.Inputs.PipelineClusterArgs
{
Label = "default",
NumWorkers = 2,
CustomTags =
{
{ "cluster_type", "default" },
},
},
new Databricks.Inputs.PipelineClusterArgs
{
Label = "maintenance",
NumWorkers = 1,
CustomTags =
{
{ "cluster_type", "maintenance" },
},
},
},
Libraries =
{
new Databricks.Inputs.PipelineLibraryArgs
{
Notebook = new Databricks.Inputs.PipelineLibraryNotebookArgs
{
Path = dltDemo.Id,
},
},
},
Filters = new Databricks.Inputs.PipelineFiltersArgs
{
Includes =
{
"com.databricks.include",
},
Excludes =
{
"com.databricks.exclude",
},
},
Continuous = false,
});
}
}
package main

import (
	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		dltDemo, err := databricks.NewNotebook(ctx, "dltDemo", nil)
		if err != nil {
			return err
		}
		_, err = databricks.NewPipeline(ctx, "this", &databricks.PipelineArgs{
			Storage: pulumi.String("/test/first-pipeline"),
			Configuration: pulumi.Map{
				"key1": pulumi.Any("value1"),
				"key2": pulumi.Any("value2"),
			},
			Clusters: databricks.PipelineClusterArray{
				&databricks.PipelineClusterArgs{
					Label:      pulumi.String("default"),
					NumWorkers: pulumi.Int(2),
					CustomTags: pulumi.Map{
						"cluster_type": pulumi.Any("default"),
					},
				},
				&databricks.PipelineClusterArgs{
					Label:      pulumi.String("maintenance"),
					NumWorkers: pulumi.Int(1),
					CustomTags: pulumi.Map{
						"cluster_type": pulumi.Any("maintenance"),
					},
				},
			},
			Libraries: databricks.PipelineLibraryArray{
				&databricks.PipelineLibraryArgs{
					Notebook: &databricks.PipelineLibraryNotebookArgs{
						Path: dltDemo.ID(),
					},
				},
			},
			Filters: &databricks.PipelineFiltersArgs{
				Includes: pulumi.StringArray{
					pulumi.String("com.databricks.include"),
				},
				Excludes: pulumi.StringArray{
					pulumi.String("com.databricks.exclude"),
				},
			},
			Continuous: pulumi.Bool(false),
		})
		if err != nil {
			return err
		}
		return nil
	})
}
Example coming soon!
import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";
const dltDemo = new databricks.Notebook("dltDemo", {});
//...
const _this = new databricks.Pipeline("this", {
storage: "/test/first-pipeline",
configuration: {
key1: "value1",
key2: "value2",
},
clusters: [
{
label: "default",
numWorkers: 2,
customTags: {
cluster_type: "default",
},
},
{
label: "maintenance",
numWorkers: 1,
customTags: {
cluster_type: "maintenance",
},
},
],
libraries: [{
notebook: {
path: dltDemo.id,
},
}],
filters: {
includes: ["com.databricks.include"],
excludes: ["com.databricks.exclude"],
},
continuous: false,
});
import pulumi
import pulumi_databricks as databricks
dlt_demo = databricks.Notebook("dltDemo")
#...
this = databricks.Pipeline("this",
storage="/test/first-pipeline",
configuration={
"key1": "value1",
"key2": "value2",
},
clusters=[
databricks.PipelineClusterArgs(
label="default",
num_workers=2,
custom_tags={
"cluster_type": "default",
},
),
databricks.PipelineClusterArgs(
label="maintenance",
num_workers=1,
custom_tags={
"cluster_type": "maintenance",
},
),
],
libraries=[databricks.PipelineLibraryArgs(
notebook=databricks.PipelineLibraryNotebookArgs(
path=dlt_demo.id,
),
)],
filters=databricks.PipelineFiltersArgs(
includes=["com.databricks.include"],
excludes=["com.databricks.exclude"],
),
continuous=False)
Example coming soon!
Create Pipeline Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new Pipeline(name: string, args: PipelineArgs, opts?: CustomResourceOptions);
@overload
def Pipeline(resource_name: str,
args: PipelineArgs,
opts: Optional[ResourceOptions] = None)
@overload
def Pipeline(resource_name: str,
opts: Optional[ResourceOptions] = None,
filters: Optional[PipelineFiltersArgs] = None,
allow_duplicate_names: Optional[bool] = None,
clusters: Optional[Sequence[PipelineClusterArgs]] = None,
configuration: Optional[Mapping[str, Any]] = None,
continuous: Optional[bool] = None,
libraries: Optional[Sequence[PipelineLibraryArgs]] = None,
name: Optional[str] = None,
storage: Optional[str] = None,
target: Optional[str] = None)
func NewPipeline(ctx *Context, name string, args PipelineArgs, opts ...ResourceOption) (*Pipeline, error)
public Pipeline(string name, PipelineArgs args, CustomResourceOptions? opts = null)
public Pipeline(String name, PipelineArgs args)
public Pipeline(String name, PipelineArgs args, CustomResourceOptions options)
type: databricks:Pipeline
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args PipelineArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args PipelineArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args PipelineArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args PipelineArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args PipelineArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
Constructor example
The following reference example uses placeholder values for all input properties.
var pipelineResource = new Databricks.Pipeline("pipelineResource", new()
{
Filters = new Databricks.Inputs.PipelineFiltersArgs
{
Excludes = new[]
{
"string",
},
Includes = new[]
{
"string",
},
},
AllowDuplicateNames = false,
Clusters = new[]
{
new Databricks.Inputs.PipelineClusterArgs
{
Autoscale = new Databricks.Inputs.PipelineClusterAutoscaleArgs
{
MaxWorkers = 0,
MinWorkers = 0,
},
AwsAttributes = new Databricks.Inputs.PipelineClusterAwsAttributesArgs
{
InstanceProfileArn = "string",
ZoneId = "string",
},
ClusterLogConf = new Databricks.Inputs.PipelineClusterClusterLogConfArgs
{
Dbfs = new Databricks.Inputs.PipelineClusterClusterLogConfDbfsArgs
{
Destination = "string",
},
S3 = new Databricks.Inputs.PipelineClusterClusterLogConfS3Args
{
Destination = "string",
CannedAcl = "string",
EnableEncryption = false,
EncryptionType = "string",
Endpoint = "string",
KmsKey = "string",
Region = "string",
},
},
CustomTags =
{
{ "string", "any" },
},
DriverNodeTypeId = "string",
InitScripts = new[]
{
new Databricks.Inputs.PipelineClusterInitScriptArgs
{
Dbfs = new Databricks.Inputs.PipelineClusterInitScriptDbfsArgs
{
Destination = "string",
},
File = new Databricks.Inputs.PipelineClusterInitScriptFileArgs
{
Destination = "string",
},
S3 = new Databricks.Inputs.PipelineClusterInitScriptS3Args
{
Destination = "string",
CannedAcl = "string",
EnableEncryption = false,
EncryptionType = "string",
Endpoint = "string",
KmsKey = "string",
Region = "string",
},
},
},
InstancePoolId = "string",
Label = "string",
NodeTypeId = "string",
NumWorkers = 0,
SparkConf =
{
{ "string", "any" },
},
SparkEnvVars =
{
{ "string", "any" },
},
SshPublicKeys = new[]
{
"string",
},
},
},
Configuration =
{
{ "string", "any" },
},
Continuous = false,
Libraries = new[]
{
new Databricks.Inputs.PipelineLibraryArgs
{
Jar = "string",
Maven = new Databricks.Inputs.PipelineLibraryMavenArgs
{
Coordinates = "string",
Exclusions = new[]
{
"string",
},
Repo = "string",
},
Notebook = new Databricks.Inputs.PipelineLibraryNotebookArgs
{
Path = "string",
},
Whl = "string",
},
},
Name = "string",
Storage = "string",
Target = "string",
});
example, err := databricks.NewPipeline(ctx, "pipelineResource", &databricks.PipelineArgs{
Filters: &databricks.PipelineFiltersArgs{
Excludes: pulumi.StringArray{
pulumi.String("string"),
},
Includes: pulumi.StringArray{
pulumi.String("string"),
},
},
AllowDuplicateNames: pulumi.Bool(false),
Clusters: databricks.PipelineClusterArray{
&databricks.PipelineClusterArgs{
Autoscale: &databricks.PipelineClusterAutoscaleArgs{
MaxWorkers: pulumi.Int(0),
MinWorkers: pulumi.Int(0),
},
AwsAttributes: &databricks.PipelineClusterAwsAttributesArgs{
InstanceProfileArn: pulumi.String("string"),
ZoneId: pulumi.String("string"),
},
ClusterLogConf: &databricks.PipelineClusterClusterLogConfArgs{
Dbfs: &databricks.PipelineClusterClusterLogConfDbfsArgs{
Destination: pulumi.String("string"),
},
S3: &databricks.PipelineClusterClusterLogConfS3Args{
Destination: pulumi.String("string"),
CannedAcl: pulumi.String("string"),
EnableEncryption: pulumi.Bool(false),
EncryptionType: pulumi.String("string"),
Endpoint: pulumi.String("string"),
KmsKey: pulumi.String("string"),
Region: pulumi.String("string"),
},
},
CustomTags: pulumi.Map{
"string": pulumi.Any("any"),
},
DriverNodeTypeId: pulumi.String("string"),
InitScripts: databricks.PipelineClusterInitScriptArray{
&databricks.PipelineClusterInitScriptArgs{
Dbfs: &databricks.PipelineClusterInitScriptDbfsArgs{
Destination: pulumi.String("string"),
},
File: &databricks.PipelineClusterInitScriptFileArgs{
Destination: pulumi.String("string"),
},
S3: &databricks.PipelineClusterInitScriptS3Args{
Destination: pulumi.String("string"),
CannedAcl: pulumi.String("string"),
EnableEncryption: pulumi.Bool(false),
EncryptionType: pulumi.String("string"),
Endpoint: pulumi.String("string"),
KmsKey: pulumi.String("string"),
Region: pulumi.String("string"),
},
},
},
InstancePoolId: pulumi.String("string"),
Label: pulumi.String("string"),
NodeTypeId: pulumi.String("string"),
NumWorkers: pulumi.Int(0),
SparkConf: pulumi.Map{
"string": pulumi.Any("any"),
},
SparkEnvVars: pulumi.Map{
"string": pulumi.Any("any"),
},
SshPublicKeys: pulumi.StringArray{
pulumi.String("string"),
},
},
},
Configuration: pulumi.Map{
"string": pulumi.Any("any"),
},
Continuous: pulumi.Bool(false),
Libraries: databricks.PipelineLibraryArray{
&databricks.PipelineLibraryArgs{
Jar: pulumi.String("string"),
Maven: &databricks.PipelineLibraryMavenArgs{
Coordinates: pulumi.String("string"),
Exclusions: pulumi.StringArray{
pulumi.String("string"),
},
Repo: pulumi.String("string"),
},
Notebook: &databricks.PipelineLibraryNotebookArgs{
Path: pulumi.String("string"),
},
Whl: pulumi.String("string"),
},
},
Name: pulumi.String("string"),
Storage: pulumi.String("string"),
Target: pulumi.String("string"),
})
var pipelineResource = new Pipeline("pipelineResource", PipelineArgs.builder()
.filters(PipelineFiltersArgs.builder()
.excludes("string")
.includes("string")
.build())
.allowDuplicateNames(false)
.clusters(PipelineClusterArgs.builder()
.autoscale(PipelineClusterAutoscaleArgs.builder()
.maxWorkers(0)
.minWorkers(0)
.build())
.awsAttributes(PipelineClusterAwsAttributesArgs.builder()
.instanceProfileArn("string")
.zoneId("string")
.build())
.clusterLogConf(PipelineClusterClusterLogConfArgs.builder()
.dbfs(PipelineClusterClusterLogConfDbfsArgs.builder()
.destination("string")
.build())
.s3(PipelineClusterClusterLogConfS3Args.builder()
.destination("string")
.cannedAcl("string")
.enableEncryption(false)
.encryptionType("string")
.endpoint("string")
.kmsKey("string")
.region("string")
.build())
.build())
.customTags(Map.of("string", "any"))
.driverNodeTypeId("string")
.initScripts(PipelineClusterInitScriptArgs.builder()
.dbfs(PipelineClusterInitScriptDbfsArgs.builder()
.destination("string")
.build())
.file(PipelineClusterInitScriptFileArgs.builder()
.destination("string")
.build())
.s3(PipelineClusterInitScriptS3Args.builder()
.destination("string")
.cannedAcl("string")
.enableEncryption(false)
.encryptionType("string")
.endpoint("string")
.kmsKey("string")
.region("string")
.build())
.build())
.instancePoolId("string")
.label("string")
.nodeTypeId("string")
.numWorkers(0)
.sparkConf(Map.of("string", "any"))
.sparkEnvVars(Map.of("string", "any"))
.sshPublicKeys("string")
.build())
.configuration(Map.of("string", "any"))
.continuous(false)
.libraries(PipelineLibraryArgs.builder()
.jar("string")
.maven(PipelineLibraryMavenArgs.builder()
.coordinates("string")
.exclusions("string")
.repo("string")
.build())
.notebook(PipelineLibraryNotebookArgs.builder()
.path("string")
.build())
.whl("string")
.build())
.name("string")
.storage("string")
.target("string")
.build());
pipeline_resource = databricks.Pipeline("pipelineResource",
filters={
"excludes": ["string"],
"includes": ["string"],
},
allow_duplicate_names=False,
clusters=[{
"autoscale": {
"max_workers": 0,
"min_workers": 0,
},
"aws_attributes": {
"instance_profile_arn": "string",
"zone_id": "string",
},
"cluster_log_conf": {
"dbfs": {
"destination": "string",
},
"s3": {
"destination": "string",
"canned_acl": "string",
"enable_encryption": False,
"encryption_type": "string",
"endpoint": "string",
"kms_key": "string",
"region": "string",
},
},
"custom_tags": {
"string": "any",
},
"driver_node_type_id": "string",
"init_scripts": [{
"dbfs": {
"destination": "string",
},
"file": {
"destination": "string",
},
"s3": {
"destination": "string",
"canned_acl": "string",
"enable_encryption": False,
"encryption_type": "string",
"endpoint": "string",
"kms_key": "string",
"region": "string",
},
}],
"instance_pool_id": "string",
"label": "string",
"node_type_id": "string",
"num_workers": 0,
"spark_conf": {
"string": "any",
},
"spark_env_vars": {
"string": "any",
},
"ssh_public_keys": ["string"],
}],
configuration={
"string": "any",
},
continuous=False,
libraries=[{
"jar": "string",
"maven": {
"coordinates": "string",
"exclusions": ["string"],
"repo": "string",
},
"notebook": {
"path": "string",
},
"whl": "string",
}],
name="string",
storage="string",
target="string")
const pipelineResource = new databricks.Pipeline("pipelineResource", {
filters: {
excludes: ["string"],
includes: ["string"],
},
allowDuplicateNames: false,
clusters: [{
autoscale: {
maxWorkers: 0,
minWorkers: 0,
},
awsAttributes: {
instanceProfileArn: "string",
zoneId: "string",
},
clusterLogConf: {
dbfs: {
destination: "string",
},
s3: {
destination: "string",
cannedAcl: "string",
enableEncryption: false,
encryptionType: "string",
endpoint: "string",
kmsKey: "string",
region: "string",
},
},
customTags: {
string: "any",
},
driverNodeTypeId: "string",
initScripts: [{
dbfs: {
destination: "string",
},
file: {
destination: "string",
},
s3: {
destination: "string",
cannedAcl: "string",
enableEncryption: false,
encryptionType: "string",
endpoint: "string",
kmsKey: "string",
region: "string",
},
}],
instancePoolId: "string",
label: "string",
nodeTypeId: "string",
numWorkers: 0,
sparkConf: {
string: "any",
},
sparkEnvVars: {
string: "any",
},
sshPublicKeys: ["string"],
}],
configuration: {
string: "any",
},
continuous: false,
libraries: [{
jar: "string",
maven: {
coordinates: "string",
exclusions: ["string"],
repo: "string",
},
notebook: {
path: "string",
},
whl: "string",
}],
name: "string",
storage: "string",
target: "string",
});
type: databricks:Pipeline
properties:
allowDuplicateNames: false
clusters:
- autoscale:
maxWorkers: 0
minWorkers: 0
awsAttributes:
instanceProfileArn: string
zoneId: string
clusterLogConf:
dbfs:
destination: string
s3:
cannedAcl: string
destination: string
enableEncryption: false
encryptionType: string
endpoint: string
kmsKey: string
region: string
customTags:
string: any
driverNodeTypeId: string
initScripts:
- dbfs:
destination: string
file:
destination: string
s3:
cannedAcl: string
destination: string
enableEncryption: false
encryptionType: string
endpoint: string
kmsKey: string
region: string
instancePoolId: string
label: string
nodeTypeId: string
numWorkers: 0
sparkConf:
string: any
sparkEnvVars:
string: any
sshPublicKeys:
- string
configuration:
string: any
continuous: false
filters:
excludes:
- string
includes:
- string
libraries:
- jar: string
maven:
coordinates: string
exclusions:
- string
repo: string
notebook:
path: string
whl: string
name: string
storage: string
target: string
Pipeline Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
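For example, the filters input can be given either as an argument class or as an equivalent dictionary literal (a small sketch; resource names are illustrative):
import pulumi_databricks as databricks

# Typed argument class:
p1 = databricks.Pipeline("p1",
    storage="/test/first-pipeline",
    filters=databricks.PipelineFiltersArgs(includes=["com.databricks.include"]))

# Equivalent dictionary literal with the same keys:
p2 = databricks.Pipeline("p2",
    storage="/test/first-pipeline",
    filters={"includes": ["com.databricks.include"]})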
The Pipeline resource accepts the following input properties:
- Filters PipelineFilters
- AllowDuplicateNames bool
- Clusters List<PipelineCluster> - blocks - Clusters to run the pipeline. If none is specified, pipelines will automatically select a default cluster configuration for the pipeline.
- Configuration Dictionary<string, object> - An optional list of values to apply to the entire pipeline. Elements must be formatted as key:value pairs.
- Continuous bool - A flag indicating whether to run the pipeline continuously. The default value is false.
- Libraries List<PipelineLibrary> - blocks - Specifies pipeline code and required artifacts. Syntax resembles library configuration block with the addition of a special notebook type of library that should have path attribute.
- Name string - A user-friendly name for this pipeline. The name can be used to identify pipeline jobs in the UI.
- Storage string - A location on DBFS or cloud storage where output data and metadata required for pipeline execution are stored. By default, tables are stored in a subdirectory of this location.
- Target string - The name of a database for persisting pipeline output data. Configuring the target setting allows you to view and query the pipeline output data from the Databricks UI.
- Filters PipelineFiltersArgs
- AllowDuplicateNames bool
- Clusters []PipelineClusterArgs - blocks - Clusters to run the pipeline. If none is specified, pipelines will automatically select a default cluster configuration for the pipeline.
- Configuration map[string]interface{} - An optional list of values to apply to the entire pipeline. Elements must be formatted as key:value pairs.
- Continuous bool - A flag indicating whether to run the pipeline continuously. The default value is false.
- Libraries []PipelineLibraryArgs - blocks - Specifies pipeline code and required artifacts. Syntax resembles library configuration block with the addition of a special notebook type of library that should have path attribute.
- Name string - A user-friendly name for this pipeline. The name can be used to identify pipeline jobs in the UI.
- Storage string - A location on DBFS or cloud storage where output data and metadata required for pipeline execution are stored. By default, tables are stored in a subdirectory of this location.
- Target string - The name of a database for persisting pipeline output data. Configuring the target setting allows you to view and query the pipeline output data from the Databricks UI.
- filters PipelineFilters
- allowDuplicateNames Boolean
- clusters List<PipelineCluster> - blocks - Clusters to run the pipeline. If none is specified, pipelines will automatically select a default cluster configuration for the pipeline.
- configuration Map<String,Object> - An optional list of values to apply to the entire pipeline. Elements must be formatted as key:value pairs.
- continuous Boolean - A flag indicating whether to run the pipeline continuously. The default value is false.
- libraries List<PipelineLibrary> - blocks - Specifies pipeline code and required artifacts. Syntax resembles library configuration block with the addition of a special notebook type of library that should have path attribute.
- name String - A user-friendly name for this pipeline. The name can be used to identify pipeline jobs in the UI.
- storage String - A location on DBFS or cloud storage where output data and metadata required for pipeline execution are stored. By default, tables are stored in a subdirectory of this location.
- target String - The name of a database for persisting pipeline output data. Configuring the target setting allows you to view and query the pipeline output data from the Databricks UI.
- filters PipelineFilters
- allowDuplicateNames boolean
- clusters PipelineCluster[] - blocks - Clusters to run the pipeline. If none is specified, pipelines will automatically select a default cluster configuration for the pipeline.
- configuration {[key: string]: any} - An optional list of values to apply to the entire pipeline. Elements must be formatted as key:value pairs.
- continuous boolean - A flag indicating whether to run the pipeline continuously. The default value is false.
- libraries PipelineLibrary[] - blocks - Specifies pipeline code and required artifacts. Syntax resembles library configuration block with the addition of a special notebook type of library that should have path attribute.
- name string - A user-friendly name for this pipeline. The name can be used to identify pipeline jobs in the UI.
- storage string - A location on DBFS or cloud storage where output data and metadata required for pipeline execution are stored. By default, tables are stored in a subdirectory of this location.
- target string - The name of a database for persisting pipeline output data. Configuring the target setting allows you to view and query the pipeline output data from the Databricks UI.
- filters PipelineFiltersArgs
- allow_duplicate_names bool
- clusters Sequence[PipelineClusterArgs] - blocks - Clusters to run the pipeline. If none is specified, pipelines will automatically select a default cluster configuration for the pipeline.
- configuration Mapping[str, Any] - An optional list of values to apply to the entire pipeline. Elements must be formatted as key:value pairs.
- continuous bool - A flag indicating whether to run the pipeline continuously. The default value is false.
- libraries Sequence[PipelineLibraryArgs] - blocks - Specifies pipeline code and required artifacts. Syntax resembles library configuration block with the addition of a special notebook type of library that should have path attribute.
- name str - A user-friendly name for this pipeline. The name can be used to identify pipeline jobs in the UI.
- storage str - A location on DBFS or cloud storage where output data and metadata required for pipeline execution are stored. By default, tables are stored in a subdirectory of this location.
- target str - The name of a database for persisting pipeline output data. Configuring the target setting allows you to view and query the pipeline output data from the Databricks UI.
- filters Property Map
- allowDuplicateNames Boolean
- clusters List<Property Map> - blocks - Clusters to run the pipeline. If none is specified, pipelines will automatically select a default cluster configuration for the pipeline.
- configuration Map<Any> - An optional list of values to apply to the entire pipeline. Elements must be formatted as key:value pairs.
- continuous Boolean - A flag indicating whether to run the pipeline continuously. The default value is false.
- libraries List<Property Map> - blocks - Specifies pipeline code and required artifacts. Syntax resembles library configuration block with the addition of a special notebook type of library that should have path attribute.
- name String - A user-friendly name for this pipeline. The name can be used to identify pipeline jobs in the UI.
- storage String - A location on DBFS or cloud storage where output data and metadata required for pipeline execution are stored. By default, tables are stored in a subdirectory of this location.
- target String - The name of a database for persisting pipeline output data. Configuring the target setting allows you to view and query the pipeline output data from the Databricks UI.
Outputs
All input properties are implicitly available as output properties. Additionally, the Pipeline resource produces the following output properties:
- Id string - The provider-assigned unique ID for this managed resource.
- Url string
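For example, the provider-assigned id and the url can be exposed as stack outputs once the pipeline is created (a Python sketch; names and values are illustrative):
import pulumi
import pulumi_databricks as databricks

pipeline = databricks.Pipeline("example",
    storage="/test/first-pipeline",
    filters=databricks.PipelineFiltersArgs(includes=["com.databricks.include"]))

# Expose the resource's output properties as stack outputs.
pulumi.export("pipeline_id", pipeline.id)
pulumi.export("pipeline_url", pipeline.url)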
Look up Existing Pipeline Resource
Get an existing Pipeline resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: PipelineState, opts?: CustomResourceOptions): Pipeline
@staticmethod
def get(resource_name: str,
id: str,
opts: Optional[ResourceOptions] = None,
allow_duplicate_names: Optional[bool] = None,
clusters: Optional[Sequence[PipelineClusterArgs]] = None,
configuration: Optional[Mapping[str, Any]] = None,
continuous: Optional[bool] = None,
filters: Optional[PipelineFiltersArgs] = None,
libraries: Optional[Sequence[PipelineLibraryArgs]] = None,
name: Optional[str] = None,
storage: Optional[str] = None,
target: Optional[str] = None,
url: Optional[str] = None) -> Pipeline
func GetPipeline(ctx *Context, name string, id IDInput, state *PipelineState, opts ...ResourceOption) (*Pipeline, error)
public static Pipeline Get(string name, Input<string> id, PipelineState? state, CustomResourceOptions? opts = null)
public static Pipeline get(String name, Output<String> id, PipelineState state, CustomResourceOptions options)
resources:
  _:
    type: databricks:Pipeline
    get:
      id: ${id}
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- AllowDuplicateNames bool
- Clusters List<PipelineCluster> - blocks - Clusters to run the pipeline. If none is specified, pipelines will automatically select a default cluster configuration for the pipeline.
- Configuration Dictionary<string, object> - An optional list of values to apply to the entire pipeline. Elements must be formatted as key:value pairs.
- Continuous bool - A flag indicating whether to run the pipeline continuously. The default value is false.
- Filters PipelineFilters
- Libraries List<PipelineLibrary> - blocks - Specifies pipeline code and required artifacts. Syntax resembles library configuration block with the addition of a special notebook type of library that should have path attribute.
- Name string - A user-friendly name for this pipeline. The name can be used to identify pipeline jobs in the UI.
- Storage string - A location on DBFS or cloud storage where output data and metadata required for pipeline execution are stored. By default, tables are stored in a subdirectory of this location.
- Target string - The name of a database for persisting pipeline output data. Configuring the target setting allows you to view and query the pipeline output data from the Databricks UI.
- Url string
- AllowDuplicateNames bool
- Clusters []PipelineClusterArgs - blocks - Clusters to run the pipeline. If none is specified, pipelines will automatically select a default cluster configuration for the pipeline.
- Configuration map[string]interface{} - An optional list of values to apply to the entire pipeline. Elements must be formatted as key:value pairs.
- Continuous bool - A flag indicating whether to run the pipeline continuously. The default value is false.
- Filters PipelineFiltersArgs
- Libraries []PipelineLibraryArgs - blocks - Specifies pipeline code and required artifacts. Syntax resembles library configuration block with the addition of a special notebook type of library that should have path attribute.
- Name string - A user-friendly name for this pipeline. The name can be used to identify pipeline jobs in the UI.
- Storage string - A location on DBFS or cloud storage where output data and metadata required for pipeline execution are stored. By default, tables are stored in a subdirectory of this location.
- Target string - The name of a database for persisting pipeline output data. Configuring the target setting allows you to view and query the pipeline output data from the Databricks UI.
- Url string
- allowDuplicateNames Boolean
- clusters List<PipelineCluster> - blocks - Clusters to run the pipeline. If none is specified, pipelines will automatically select a default cluster configuration for the pipeline.
- configuration Map<String,Object> - An optional list of values to apply to the entire pipeline. Elements must be formatted as key:value pairs.
- continuous Boolean - A flag indicating whether to run the pipeline continuously. The default value is false.
- filters PipelineFilters
- libraries List<PipelineLibrary> - blocks - Specifies pipeline code and required artifacts. Syntax resembles library configuration block with the addition of a special notebook type of library that should have path attribute.
- name String - A user-friendly name for this pipeline. The name can be used to identify pipeline jobs in the UI.
- storage String - A location on DBFS or cloud storage where output data and metadata required for pipeline execution are stored. By default, tables are stored in a subdirectory of this location.
- target String - The name of a database for persisting pipeline output data. Configuring the target setting allows you to view and query the pipeline output data from the Databricks UI.
- url String
- allowDuplicateNames boolean
- clusters PipelineCluster[] - blocks - Clusters to run the pipeline. If none is specified, pipelines will automatically select a default cluster configuration for the pipeline.
- configuration {[key: string]: any} - An optional list of values to apply to the entire pipeline. Elements must be formatted as key:value pairs.
- continuous boolean - A flag indicating whether to run the pipeline continuously. The default value is false.
- filters PipelineFilters
- libraries PipelineLibrary[] - blocks - Specifies pipeline code and required artifacts. Syntax resembles library configuration block with the addition of a special notebook type of library that should have path attribute.
- name string - A user-friendly name for this pipeline. The name can be used to identify pipeline jobs in the UI.
- storage string - A location on DBFS or cloud storage where output data and metadata required for pipeline execution are stored. By default, tables are stored in a subdirectory of this location.
- target string - The name of a database for persisting pipeline output data. Configuring the target setting allows you to view and query the pipeline output data from the Databricks UI.
- url string
- allow_duplicate_names bool
- clusters Sequence[PipelineClusterArgs] - blocks - Clusters to run the pipeline. If none is specified, pipelines will automatically select a default cluster configuration for the pipeline.
- configuration Mapping[str, Any] - An optional list of values to apply to the entire pipeline. Elements must be formatted as key:value pairs.
- continuous bool - A flag indicating whether to run the pipeline continuously. The default value is false.
- filters PipelineFiltersArgs
- libraries Sequence[PipelineLibraryArgs] - blocks - Specifies pipeline code and required artifacts. Syntax resembles library configuration block with the addition of a special notebook type of library that should have path attribute.
- name str - A user-friendly name for this pipeline. The name can be used to identify pipeline jobs in the UI.
- storage str - A location on DBFS or cloud storage where output data and metadata required for pipeline execution are stored. By default, tables are stored in a subdirectory of this location.
- target str - The name of a database for persisting pipeline output data. Configuring the target setting allows you to view and query the pipeline output data from the Databricks UI.
- url str
- allowDuplicateNames Boolean
- clusters List<Property Map> - blocks - Clusters to run the pipeline. If none is specified, pipelines will automatically select a default cluster configuration for the pipeline.
- configuration Map<Any> - An optional list of values to apply to the entire pipeline. Elements must be formatted as key:value pairs.
- continuous Boolean - A flag indicating whether to run the pipeline continuously. The default value is false.
- filters Property Map
- libraries List<Property Map> - blocks - Specifies pipeline code and required artifacts. Syntax resembles library configuration block with the addition of a special notebook type of library that should have path attribute.
- name String - A user-friendly name for this pipeline. The name can be used to identify pipeline jobs in the UI.
- storage String - A location on DBFS or cloud storage where output data and metadata required for pipeline execution are stored. By default, tables are stored in a subdirectory of this location.
- target String - The name of a database for persisting pipeline output data. Configuring the target setting allows you to view and query the pipeline output data from the Databricks UI.
- url String
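For example, an existing pipeline can be looked up by its ID in Python and its state read (a sketch; the configuration key holding the ID is hypothetical):
import pulumi
import pulumi_databricks as databricks

config = pulumi.Config()
pipeline_id = config.require("pipelineId")  # an existing pipeline's ID, supplied via config

# Look up the existing resource; no new pipeline is created.
existing = databricks.Pipeline.get("existing", pipeline_id)
pulumi.export("existing_pipeline_url", existing.url)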
Supporting Types
PipelineCluster, PipelineClusterArgs
- Autoscale PipelineClusterAutoscale
- AwsAttributes PipelineClusterAwsAttributes
- ClusterLogConf PipelineClusterClusterLogConf
- CustomTags Dictionary<string, object>
- DriverNodeTypeId string
- InitScripts List<PipelineClusterInitScript>
- InstancePoolId string
- Label string
- NodeTypeId string
- NumWorkers int
- SparkConf Dictionary<string, object>
- SparkEnvVars Dictionary<string, object>
- SshPublicKeys List<string>
- Autoscale PipelineClusterAutoscale
- AwsAttributes PipelineClusterAwsAttributes
- ClusterLogConf PipelineClusterClusterLogConf
- CustomTags map[string]interface{}
- DriverNodeTypeId string
- InitScripts []PipelineClusterInitScript
- InstancePoolId string
- Label string
- NodeTypeId string
- NumWorkers int
- SparkConf map[string]interface{}
- SparkEnvVars map[string]interface{}
- SshPublicKeys []string
- autoscale PipelineClusterAutoscale
- awsAttributes PipelineClusterAwsAttributes
- clusterLogConf PipelineClusterClusterLogConf
- customTags Map<String,Object>
- driverNodeTypeId String
- initScripts List<PipelineClusterInitScript>
- instancePoolId String
- label String
- nodeTypeId String
- numWorkers Integer
- sparkConf Map<String,Object>
- sparkEnvVars Map<String,Object>
- sshPublicKeys List<String>
- autoscale PipelineClusterAutoscale
- awsAttributes PipelineClusterAwsAttributes
- clusterLogConf PipelineClusterClusterLogConf
- customTags {[key: string]: any}
- driverNodeTypeId string
- initScripts PipelineClusterInitScript[]
- instancePoolId string
- label string
- nodeTypeId string
- numWorkers number
- sparkConf {[key: string]: any}
- sparkEnvVars {[key: string]: any}
- sshPublicKeys string[]
- autoscale PipelineClusterAutoscale
- aws_attributes PipelineClusterAwsAttributes
- cluster_log_conf PipelineClusterClusterLogConf
- custom_tags Mapping[str, Any]
- driver_node_type_id str
- init_scripts Sequence[PipelineClusterInitScript]
- instance_pool_id str
- label str
- node_type_id str
- num_workers int
- spark_conf Mapping[str, Any]
- spark_env_vars Mapping[str, Any]
- ssh_public_keys Sequence[str]
- autoscale Property Map
- awsAttributes Property Map
- clusterLogConf Property Map
- customTags Map<Any>
- driverNodeTypeId String
- initScripts List<Property Map>
- instancePoolId String
- label String
- nodeTypeId String
- numWorkers Number
- sparkConf Map<Any>
- sparkEnvVars Map<Any>
- sshPublicKeys List<String>
PipelineClusterAutoscale, PipelineClusterAutoscaleArgs
- MaxWorkers int
- MinWorkers int
- MaxWorkers int
- MinWorkers int
- maxWorkers Integer
- minWorkers Integer
- maxWorkers number
- minWorkers number
- max_workers int
- min_workers int
- maxWorkers Number
- minWorkers Number
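For example, a pipeline cluster can use autoscale instead of a fixed numWorkers (a Python sketch; the label, storage path, and worker counts are illustrative):
import pulumi_databricks as databricks

autoscaling = databricks.Pipeline("autoscaling",
    storage="/test/autoscaling-pipeline",
    filters=databricks.PipelineFiltersArgs(includes=["com.databricks.include"]),
    clusters=[databricks.PipelineClusterArgs(
        label="default",
        # Let Databricks scale the pipeline cluster between 1 and 5 workers.
        autoscale=databricks.PipelineClusterAutoscaleArgs(
            min_workers=1,
            max_workers=5,
        ),
    )])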
PipelineClusterAwsAttributes, PipelineClusterAwsAttributesArgs
- InstanceProfileArn string
- ZoneId string
- InstanceProfileArn string
- ZoneId string
- instanceProfileArn String
- zoneId String
- instanceProfileArn string
- zoneId string
- instance_profile_arn str
- zone_id str
- instanceProfileArn String
- zoneId String
PipelineClusterClusterLogConf, PipelineClusterClusterLogConfArgs
- Dbfs PipelineClusterClusterLogConfDbfs
- S3 PipelineClusterClusterLogConfS3
PipelineClusterClusterLogConfDbfs, PipelineClusterClusterLogConfDbfsArgs
- Destination string
- Destination string
- destination String
- destination string
- destination str
- destination String
PipelineClusterClusterLogConfS3, PipelineClusterClusterLogConfS3Args
- Destination string
- CannedAcl string
- EnableEncryption bool
- EncryptionType string
- Endpoint string
- KmsKey string
- Region string
- Destination string
- CannedAcl string
- EnableEncryption bool
- EncryptionType string
- Endpoint string
- KmsKey string
- Region string
- destination String
- cannedAcl String
- enableEncryption Boolean
- encryptionType String
- endpoint String
- kmsKey String
- region String
- destination string
- cannedAcl string
- enableEncryption boolean
- encryptionType string
- endpoint string
- kmsKey string
- region string
- destination str
- canned_acl str
- enable_encryption bool
- encryption_type str
- endpoint str
- kms_key str
- region str
- destination String
- cannedAcl String
- enableEncryption Boolean
- encryptionType String
- endpoint String
- kmsKey String
- region String
PipelineClusterInitScript, PipelineClusterInitScriptArgs
- Dbfs PipelineClusterInitScriptDbfs
- File PipelineClusterInitScriptFile
- S3 PipelineClusterInitScriptS3
PipelineClusterInitScriptDbfs, PipelineClusterInitScriptDbfsArgs
- Destination string
- Destination string
- destination String
- destination string
- destination str
- destination String
PipelineClusterInitScriptFile, PipelineClusterInitScriptFileArgs
- Destination string
- Destination string
- destination String
- destination string
- destination str
- destination String
PipelineClusterInitScriptS3, PipelineClusterInitScriptS3Args
- Destination string
- CannedAcl string
- EnableEncryption bool
- EncryptionType string
- Endpoint string
- KmsKey string
- Region string
- Destination string
- CannedAcl string
- EnableEncryption bool
- EncryptionType string
- Endpoint string
- KmsKey string
- Region string
- destination String
- cannedAcl String
- enableEncryption Boolean
- encryptionType String
- endpoint String
- kmsKey String
- region String
- destination string
- cannedAcl string
- enableEncryption boolean
- encryptionType string
- endpoint string
- kmsKey string
- region string
- destination str
- canned_acl str
- enable_encryption bool
- encryption_type str
- endpoint str
- kms_key str
- region str
- destination String
- cannedAcl String
- enableEncryption Boolean
- encryptionType String
- endpoint String
- kmsKey String
- region String
PipelineFilters, PipelineFiltersArgs
- Excludes List<string>
- Includes List<string>
PipelineLibrary, PipelineLibraryArgs
- Jar string
- Maven PipelineLibraryMaven
- Notebook PipelineLibraryNotebook
- Whl string
- Jar string
- Maven PipelineLibraryMaven
- Notebook PipelineLibraryNotebook
- Whl string
- jar String
- maven PipelineLibraryMaven
- notebook PipelineLibraryNotebook
- whl String
- jar string
- maven PipelineLibraryMaven
- notebook PipelineLibraryNotebook
- whl string
- jar String
- maven Property Map
- notebook Property Map
- whl String
PipelineLibraryMaven, PipelineLibraryMavenArgs
- Coordinates string
- Exclusions List<string>
- Repo string
- Coordinates string
- Exclusions []string
- Repo string
- coordinates String
- exclusions List<String>
- repo String
- coordinates string
- exclusions string[]
- repo string
- coordinates str
- exclusions Sequence[str]
- repo str
- coordinates String
- exclusions List<String>
- repo String
PipelineLibraryNotebook, PipelineLibraryNotebookArgs
- Path string
- Path string
- path String
- path string
- path str
- path String
Import
The pipeline resource can be imported using the ID of the pipeline:
$ pulumi import databricks:index/pipeline:Pipeline this <pipeline-id>
To learn more about importing existing cloud resources, see Importing resources.
Package Details
- Repository
- databricks pulumi/pulumi-databricks
- License
- Apache-2.0
- Notes
- This Pulumi package is based on the
databricks Terraform Provider.