Databricks v1.14.0, May 23, 2023

databricks.Pipeline


Use databricks.Pipeline to deploy Delta Live Tables.


Example Usage

C#

using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Databricks = Pulumi.Databricks;

return await Deployment.RunAsync(() => 
{
    var dltDemoNotebook = new Databricks.Notebook("dltDemoNotebook");

    //...
    var dltDemoRepo = new Databricks.Repo("dltDemoRepo");

    //...
    var @this = new Databricks.Pipeline("this", new()
    {
        Storage = "/test/first-pipeline",
        Configuration = 
        {
            { "key1", "value1" },
            { "key2", "value2" },
        },
        Clusters = new[]
        {
            new Databricks.Inputs.PipelineClusterArgs
            {
                Label = "default",
                NumWorkers = 2,
                CustomTags = 
                {
                    { "cluster_type", "default" },
                },
            },
            new Databricks.Inputs.PipelineClusterArgs
            {
                Label = "maintenance",
                NumWorkers = 1,
                CustomTags = 
                {
                    { "cluster_type", "maintenance" },
                },
            },
        },
        Libraries = new[]
        {
            new Databricks.Inputs.PipelineLibraryArgs
            {
                Notebook = new Databricks.Inputs.PipelineLibraryNotebookArgs
                {
                    Path = dltDemoNotebook.Id,
                },
            },
            new Databricks.Inputs.PipelineLibraryArgs
            {
                File = new Databricks.Inputs.PipelineLibraryFileArgs
                {
                    Path = dltDemoRepo.Path.Apply(path => $"{path}/pipeline.sql"),
                },
            },
        },
        Continuous = false,
        Notifications = new[]
        {
            new Databricks.Inputs.PipelineNotificationArgs
            {
                EmailRecipients = new[]
                {
                    "user@domain.com",
                    "user1@domain.com",
                },
                Alerts = new[]
                {
                    "on-update-failure",
                    "on-update-fatal-failure",
                    "on-update-success",
                    "on-flow-failure",
                },
            },
        },
    });

});

Go

package main

import (
	"fmt"

	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		dltDemoNotebook, err := databricks.NewNotebook(ctx, "dltDemoNotebook", nil)
		if err != nil {
			return err
		}
		dltDemoRepo, err := databricks.NewRepo(ctx, "dltDemoRepo", nil)
		if err != nil {
			return err
		}
		_, err = databricks.NewPipeline(ctx, "this", &databricks.PipelineArgs{
			Storage: pulumi.String("/test/first-pipeline"),
			Configuration: pulumi.AnyMap{
				"key1": pulumi.Any("value1"),
				"key2": pulumi.Any("value2"),
			},
			Clusters: databricks.PipelineClusterArray{
				&databricks.PipelineClusterArgs{
					Label:      pulumi.String("default"),
					NumWorkers: pulumi.Int(2),
					CustomTags: pulumi.AnyMap{
						"cluster_type": pulumi.Any("default"),
					},
				},
				&databricks.PipelineClusterArgs{
					Label:      pulumi.String("maintenance"),
					NumWorkers: pulumi.Int(1),
					CustomTags: pulumi.AnyMap{
						"cluster_type": pulumi.Any("maintenance"),
					},
				},
			},
			Libraries: databricks.PipelineLibraryArray{
				&databricks.PipelineLibraryArgs{
					Notebook: &databricks.PipelineLibraryNotebookArgs{
						Path: dltDemoNotebook.ID(),
					},
				},
				&databricks.PipelineLibraryArgs{
					File: &databricks.PipelineLibraryFileArgs{
						Path: dltDemoRepo.Path.ApplyT(func(path string) (string, error) {
							return fmt.Sprintf("%v/pipeline.sql", path), nil
						}).(pulumi.StringOutput),
					},
				},
			},
			Continuous: pulumi.Bool(false),
			Notifications: databricks.PipelineNotificationArray{
				&databricks.PipelineNotificationArgs{
					EmailRecipients: pulumi.StringArray{
						pulumi.String("user@domain.com"),
						pulumi.String("user1@domain.com"),
					},
					Alerts: pulumi.StringArray{
						pulumi.String("on-update-failure"),
						pulumi.String("on-update-fatal-failure"),
						pulumi.String("on-update-success"),
						pulumi.String("on-flow-failure"),
					},
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}

Java

package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.databricks.Notebook;
import com.pulumi.databricks.Repo;
import com.pulumi.databricks.Pipeline;
import com.pulumi.databricks.PipelineArgs;
import com.pulumi.databricks.inputs.PipelineClusterArgs;
import com.pulumi.databricks.inputs.PipelineLibraryArgs;
import com.pulumi.databricks.inputs.PipelineLibraryNotebookArgs;
import com.pulumi.databricks.inputs.PipelineLibraryFileArgs;
import com.pulumi.databricks.inputs.PipelineNotificationArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var dltDemoNotebook = new Notebook("dltDemoNotebook");

        var dltDemoRepo = new Repo("dltDemoRepo");

        var this_ = new Pipeline("this", PipelineArgs.builder()        
            .storage("/test/first-pipeline")
            .configuration(Map.ofEntries(
                Map.entry("key1", "value1"),
                Map.entry("key2", "value2")
            ))
            .clusters(            
                PipelineClusterArgs.builder()
                    .label("default")
                    .numWorkers(2)
                    .customTags(Map.of("cluster_type", "default"))
                    .build(),
                PipelineClusterArgs.builder()
                    .label("maintenance")
                    .numWorkers(1)
                    .customTags(Map.of("cluster_type", "maintenance"))
                    .build())
            .libraries(            
                PipelineLibraryArgs.builder()
                    .notebook(PipelineLibraryNotebookArgs.builder()
                        .path(dltDemoNotebook.id())
                        .build())
                    .build(),
                PipelineLibraryArgs.builder()
                    .file(PipelineLibraryFileArgs.builder()
                        .path(dltDemoRepo.path().applyValue(path -> String.format("%s/pipeline.sql", path)))
                        .build())
                    .build())
            .continuous(false)
            .notifications(PipelineNotificationArgs.builder()
                .emailRecipients(                
                    "user@domain.com",
                    "user1@domain.com")
                .alerts(                
                    "on-update-failure",
                    "on-update-fatal-failure",
                    "on-update-success",
                    "on-flow-failure")
                .build())
            .build());

    }
}

Python

import pulumi
import pulumi_databricks as databricks

dlt_demo_notebook = databricks.Notebook("dltDemoNotebook")
#...
dlt_demo_repo = databricks.Repo("dltDemoRepo")
#...
this = databricks.Pipeline("this",
    storage="/test/first-pipeline",
    configuration={
        "key1": "value1",
        "key2": "value2",
    },
    clusters=[
        databricks.PipelineClusterArgs(
            label="default",
            num_workers=2,
            custom_tags={
                "cluster_type": "default",
            },
        ),
        databricks.PipelineClusterArgs(
            label="maintenance",
            num_workers=1,
            custom_tags={
                "cluster_type": "maintenance",
            },
        ),
    ],
    libraries=[
        databricks.PipelineLibraryArgs(
            notebook=databricks.PipelineLibraryNotebookArgs(
                path=dlt_demo_notebook.id,
            ),
        ),
        databricks.PipelineLibraryArgs(
            file=databricks.PipelineLibraryFileArgs(
                path=dlt_demo_repo.path.apply(lambda path: f"{path}/pipeline.sql"),
            ),
        ),
    ],
    continuous=False,
    notifications=[databricks.PipelineNotificationArgs(
        email_recipients=[
            "user@domain.com",
            "user1@domain.com",
        ],
        alerts=[
            "on-update-failure",
            "on-update-fatal-failure",
            "on-update-success",
            "on-flow-failure",
        ],
    )])

TypeScript

import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

const dltDemoNotebook = new databricks.Notebook("dltDemoNotebook", {});
//...
const dltDemoRepo = new databricks.Repo("dltDemoRepo", {});
//...
const _this = new databricks.Pipeline("this", {
    storage: "/test/first-pipeline",
    configuration: {
        key1: "value1",
        key2: "value2",
    },
    clusters: [
        {
            label: "default",
            numWorkers: 2,
            customTags: {
                cluster_type: "default",
            },
        },
        {
            label: "maintenance",
            numWorkers: 1,
            customTags: {
                cluster_type: "maintenance",
            },
        },
    ],
    libraries: [
        {
            notebook: {
                path: dltDemoNotebook.id,
            },
        },
        {
            file: {
                path: pulumi.interpolate`${dltDemoRepo.path}/pipeline.sql`,
            },
        },
    ],
    continuous: false,
    notifications: [{
        emailRecipients: [
            "user@domain.com",
            "user1@domain.com",
        ],
        alerts: [
            "on-update-failure",
            "on-update-fatal-failure",
            "on-update-success",
            "on-flow-failure",
        ],
    }],
});

YAML

resources:
  dltDemoNotebook:
    type: databricks:Notebook
  dltDemoRepo:
    type: databricks:Repo
  this:
    type: databricks:Pipeline
    properties:
      storage: /test/first-pipeline
      configuration:
        key1: value1
        key2: value2
      clusters:
        - label: default
          numWorkers: 2
          customTags:
            cluster_type: default
        - label: maintenance
          numWorkers: 1
          customTags:
            cluster_type: maintenance
      libraries:
        - notebook:
            path: ${dltDemoNotebook.id}
        - file:
            path: ${dltDemoRepo.path}/pipeline.sql
      continuous: false
      notifications:
        - emailRecipients:
            - user@domain.com
            - user1@domain.com
          alerts:
            - on-update-failure
            - on-update-fatal-failure
            - on-update-success
            - on-flow-failure

Create Pipeline Resource

new Pipeline(name: string, args?: PipelineArgs, opts?: CustomResourceOptions);
@overload
def Pipeline(resource_name: str,
             opts: Optional[ResourceOptions] = None,
             allow_duplicate_names: Optional[bool] = None,
             catalog: Optional[str] = None,
             channel: Optional[str] = None,
             clusters: Optional[Sequence[PipelineClusterArgs]] = None,
             configuration: Optional[Mapping[str, Any]] = None,
             continuous: Optional[bool] = None,
             development: Optional[bool] = None,
             edition: Optional[str] = None,
             filters: Optional[PipelineFiltersArgs] = None,
             libraries: Optional[Sequence[PipelineLibraryArgs]] = None,
             name: Optional[str] = None,
             notifications: Optional[Sequence[PipelineNotificationArgs]] = None,
             photon: Optional[bool] = None,
             serverless: Optional[bool] = None,
             storage: Optional[str] = None,
             target: Optional[str] = None)
@overload
def Pipeline(resource_name: str,
             args: Optional[PipelineArgs] = None,
             opts: Optional[ResourceOptions] = None)
func NewPipeline(ctx *Context, name string, args *PipelineArgs, opts ...ResourceOption) (*Pipeline, error)
public Pipeline(string name, PipelineArgs? args = null, CustomResourceOptions? opts = null)
public Pipeline(String name, PipelineArgs args)
public Pipeline(String name, PipelineArgs args, CustomResourceOptions options)
type: databricks:Pipeline
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.

name string
The unique name of the resource.
args PipelineArgs
The arguments to resource properties.
opts CustomResourceOptions
Bag of options to control resource's behavior.
resource_name str
The unique name of the resource.
args PipelineArgs
The arguments to resource properties.
opts ResourceOptions
Bag of options to control resource's behavior.
ctx Context
Context object for the current deployment.
name string
The unique name of the resource.
args PipelineArgs
The arguments to resource properties.
opts ResourceOption
Bag of options to control resource's behavior.
name string
The unique name of the resource.
args PipelineArgs
The arguments to resource properties.
opts CustomResourceOptions
Bag of options to control resource's behavior.
name String
The unique name of the resource.
args PipelineArgs
The arguments to resource properties.
options CustomResourceOptions
Bag of options to control resource's behavior.

Pipeline Resource Properties

To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

Inputs

The Pipeline resource accepts the following input properties:

AllowDuplicateNames bool
Catalog string

The name of catalog in Unity Catalog. Change of this parameter forces recreation of the pipeline. (Conflicts with storage).

Channel string

Optional name of the release channel for the Spark version used by the DLT pipeline. Supported values are: CURRENT (default) and PREVIEW.

Clusters List<PipelineClusterArgs>

Blocks of clusters to run the pipeline. If none are specified, the pipeline automatically selects a default cluster configuration. Please note that DLT pipeline clusters support only a subset of cluster attributes, as described in the documentation. Also note that the autoscale block is extended with a mode parameter that controls the autoscaling algorithm (possible values are ENHANCED for the new, enhanced autoscaling algorithm, or LEGACY for the old algorithm).

Configuration Dictionary<string, object>

An optional list of values to apply to the entire pipeline. Elements must be formatted as key:value pairs.

Continuous bool

A flag indicating whether to run the pipeline continuously. The default value is false.

Development bool

A flag indicating whether to run the pipeline in development mode. The default value is true.

Edition string

Optional name of the product edition. Supported values are: CORE, PRO, and ADVANCED (default).

Filters PipelineFiltersArgs
Libraries List<PipelineLibraryArgs>

Blocks specifying the pipeline code and required artifacts. The syntax resembles the library configuration block, with the addition of special notebook and file library types that must set the path attribute. Currently only the notebook and file types are supported.

Name string

A user-friendly name for this pipeline. The name can be used to identify pipeline jobs in the UI.

Notifications List<PipelineNotificationArgs>
Photon bool

A flag indicating whether to use Photon engine. The default value is false.

Serverless bool
Storage string

A location on DBFS or cloud storage where output data and metadata required for pipeline execution are stored. By default, tables are stored in a subdirectory of this location. Change of this parameter forces recreation of the pipeline. (Conflicts with catalog).

Target string

The name of a database (in either the Hive metastore or in a UC catalog) for persisting pipeline output data. Configuring the target setting allows you to view and query the pipeline output data from the Databricks UI.

AllowDuplicateNames bool
Catalog string

The name of catalog in Unity Catalog. Change of this parameter forces recreation of the pipeline. (Conflicts with storage).

Channel string

Optional name of the release channel for the Spark version used by the DLT pipeline. Supported values are: CURRENT (default) and PREVIEW.

Clusters []PipelineClusterArgs

Blocks of clusters to run the pipeline. If none are specified, the pipeline automatically selects a default cluster configuration. Please note that DLT pipeline clusters support only a subset of cluster attributes, as described in the documentation. Also note that the autoscale block is extended with a mode parameter that controls the autoscaling algorithm (possible values are ENHANCED for the new, enhanced autoscaling algorithm, or LEGACY for the old algorithm).

Configuration map[string]interface{}

An optional list of values to apply to the entire pipeline. Elements must be formatted as key:value pairs.

Continuous bool

A flag indicating whether to run the pipeline continuously. The default value is false.

Development bool

A flag indicating whether to run the pipeline in development mode. The default value is true.

Edition string

Optional name of the product edition. Supported values are: CORE, PRO, and ADVANCED (default).

Filters PipelineFiltersArgs
Libraries []PipelineLibraryArgs

Blocks specifying the pipeline code and required artifacts. The syntax resembles the library configuration block, with the addition of special notebook and file library types that must set the path attribute. Currently only the notebook and file types are supported.

Name string

A user-friendly name for this pipeline. The name can be used to identify pipeline jobs in the UI.

Notifications []PipelineNotificationArgs
Photon bool

A flag indicating whether to use Photon engine. The default value is false.

Serverless bool
Storage string

A location on DBFS or cloud storage where output data and metadata required for pipeline execution are stored. By default, tables are stored in a subdirectory of this location. Change of this parameter forces recreation of the pipeline. (Conflicts with catalog).

Target string

The name of a database (in either the Hive metastore or in a UC catalog) for persisting pipeline output data. Configuring the target setting allows you to view and query the pipeline output data from the Databricks UI.

allowDuplicateNames Boolean
catalog String

The name of catalog in Unity Catalog. Change of this parameter forces recreation of the pipeline. (Conflicts with storage).

channel String

Optional name of the release channel for the Spark version used by the DLT pipeline. Supported values are: CURRENT (default) and PREVIEW.

clusters List<PipelineClusterArgs>

Blocks of clusters to run the pipeline. If none are specified, the pipeline automatically selects a default cluster configuration. Please note that DLT pipeline clusters support only a subset of cluster attributes, as described in the documentation. Also note that the autoscale block is extended with a mode parameter that controls the autoscaling algorithm (possible values are ENHANCED for the new, enhanced autoscaling algorithm, or LEGACY for the old algorithm).

configuration Map<String,Object>

An optional list of values to apply to the entire pipeline. Elements must be formatted as key:value pairs.

continuous Boolean

A flag indicating whether to run the pipeline continuously. The default value is false.

development Boolean

A flag indicating whether to run the pipeline in development mode. The default value is true.

edition String

Optional name of the product edition. Supported values are: CORE, PRO, and ADVANCED (default).

filters PipelineFiltersArgs
libraries List<PipelineLibraryArgs>

Blocks specifying the pipeline code and required artifacts. The syntax resembles the library configuration block, with the addition of special notebook and file library types that must set the path attribute. Currently only the notebook and file types are supported.

name String

A user-friendly name for this pipeline. The name can be used to identify pipeline jobs in the UI.

notifications List<PipelineNotificationArgs>
photon Boolean

A flag indicating whether to use Photon engine. The default value is false.

serverless Boolean
storage String

A location on DBFS or cloud storage where output data and metadata required for pipeline execution are stored. By default, tables are stored in a subdirectory of this location. Change of this parameter forces recreation of the pipeline. (Conflicts with catalog).

target String

The name of a database (in either the Hive metastore or in a UC catalog) for persisting pipeline output data. Configuring the target setting allows you to view and query the pipeline output data from the Databricks UI.

allowDuplicateNames boolean
catalog string

The name of catalog in Unity Catalog. Change of this parameter forces recreation of the pipeline. (Conflicts with storage).

channel string

Optional name of the release channel for the Spark version used by the DLT pipeline. Supported values are: CURRENT (default) and PREVIEW.

clusters PipelineClusterArgs[]

Blocks of clusters to run the pipeline. If none are specified, the pipeline automatically selects a default cluster configuration. Please note that DLT pipeline clusters support only a subset of cluster attributes, as described in the documentation. Also note that the autoscale block is extended with a mode parameter that controls the autoscaling algorithm (possible values are ENHANCED for the new, enhanced autoscaling algorithm, or LEGACY for the old algorithm).

configuration {[key: string]: any}

An optional list of values to apply to the entire pipeline. Elements must be formatted as key:value pairs.

continuous boolean

A flag indicating whether to run the pipeline continuously. The default value is false.

development boolean

A flag indicating whether to run the pipeline in development mode. The default value is true.

edition string

Optional name of the product edition. Supported values are: CORE, PRO, and ADVANCED (default).

filters PipelineFiltersArgs
libraries PipelineLibraryArgs[]

Blocks specifying the pipeline code and required artifacts. The syntax resembles the library configuration block, with the addition of special notebook and file library types that must set the path attribute. Currently only the notebook and file types are supported.

name string

A user-friendly name for this pipeline. The name can be used to identify pipeline jobs in the UI.

notifications PipelineNotificationArgs[]
photon boolean

A flag indicating whether to use Photon engine. The default value is false.

serverless boolean
storage string

A location on DBFS or cloud storage where output data and metadata required for pipeline execution are stored. By default, tables are stored in a subdirectory of this location. Change of this parameter forces recreation of the pipeline. (Conflicts with catalog).

target string

The name of a database (in either the Hive metastore or in a UC catalog) for persisting pipeline output data. Configuring the target setting allows you to view and query the pipeline output data from the Databricks UI.

allow_duplicate_names bool
catalog str

The name of catalog in Unity Catalog. Change of this parameter forces recreation of the pipeline. (Conflicts with storage).

channel str

Optional name of the release channel for the Spark version used by the DLT pipeline. Supported values are: CURRENT (default) and PREVIEW.

clusters Sequence[PipelineClusterArgs]

Blocks of clusters to run the pipeline. If none are specified, the pipeline automatically selects a default cluster configuration. Please note that DLT pipeline clusters support only a subset of cluster attributes, as described in the documentation. Also note that the autoscale block is extended with a mode parameter that controls the autoscaling algorithm (possible values are ENHANCED for the new, enhanced autoscaling algorithm, or LEGACY for the old algorithm).

configuration Mapping[str, Any]

An optional list of values to apply to the entire pipeline. Elements must be formatted as key:value pairs.

continuous bool

A flag indicating whether to run the pipeline continuously. The default value is false.

development bool

A flag indicating whether to run the pipeline in development mode. The default value is true.

edition str

Optional name of the product edition. Supported values are: CORE, PRO, and ADVANCED (default).

filters PipelineFiltersArgs
libraries Sequence[PipelineLibraryArgs]

Blocks specifying the pipeline code and required artifacts. The syntax resembles the library configuration block, with the addition of special notebook and file library types that must set the path attribute. Currently only the notebook and file types are supported.

name str

A user-friendly name for this pipeline. The name can be used to identify pipeline jobs in the UI.

notifications Sequence[PipelineNotificationArgs]
photon bool

A flag indicating whether to use Photon engine. The default value is false.

serverless bool
storage str

A location on DBFS or cloud storage where output data and metadata required for pipeline execution are stored. By default, tables are stored in a subdirectory of this location. Change of this parameter forces recreation of the pipeline. (Conflicts with catalog).

target str

The name of a database (in either the Hive metastore or in a UC catalog) for persisting pipeline output data. Configuring the target setting allows you to view and query the pipeline output data from the Databricks UI.

allowDuplicateNames Boolean
catalog String

The name of catalog in Unity Catalog. Change of this parameter forces recreation of the pipeline. (Conflicts with storage).

channel String

Optional name of the release channel for the Spark version used by the DLT pipeline. Supported values are: CURRENT (default) and PREVIEW.

clusters List<Property Map>

Blocks of clusters to run the pipeline. If none are specified, the pipeline automatically selects a default cluster configuration. Please note that DLT pipeline clusters support only a subset of cluster attributes, as described in the documentation. Also note that the autoscale block is extended with a mode parameter that controls the autoscaling algorithm (possible values are ENHANCED for the new, enhanced autoscaling algorithm, or LEGACY for the old algorithm).

configuration Map<Any>

An optional list of values to apply to the entire pipeline. Elements must be formatted as key:value pairs.

continuous Boolean

A flag indicating whether to run the pipeline continuously. The default value is false.

development Boolean

A flag indicating whether to run the pipeline in development mode. The default value is true.

edition String

Optional name of the product edition. Supported values are: CORE, PRO, and ADVANCED (default).

filters Property Map
libraries List<Property Map>

Blocks specifying the pipeline code and required artifacts. The syntax resembles the library configuration block, with the addition of special notebook and file library types that must set the path attribute. Currently only the notebook and file types are supported.

name String

A user-friendly name for this pipeline. The name can be used to identify pipeline jobs in the UI.

notifications List<Property Map>
photon Boolean

A flag indicating whether to use Photon engine. The default value is false.

serverless Boolean
storage String

A location on DBFS or cloud storage where output data and metadata required for pipeline execution are stored. By default, tables are stored in a subdirectory of this location. Change of this parameter forces recreation of the pipeline. (Conflicts with catalog).

target String

The name of a database (in either the Hive metastore or in a UC catalog) for persisting pipeline output data. Configuring the target setting allows you to view and query the pipeline output data from the Databricks UI.

Outputs

All input properties are implicitly available as output properties. Additionally, the Pipeline resource produces the following output properties:

Id string

The provider-assigned unique ID for this managed resource.

Url string
Id string

The provider-assigned unique ID for this managed resource.

Url string
id String

The provider-assigned unique ID for this managed resource.

url String
id string

The provider-assigned unique ID for this managed resource.

url string
id str

The provider-assigned unique ID for this managed resource.

url str
id String

The provider-assigned unique ID for this managed resource.

url String
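
For example, the url output can be surfaced as a stack output so it is easy to open the pipeline in the workspace UI. A minimal sketch in Python, assuming the this pipeline from the Example Usage above:

import pulumi

# Export the workspace URL of the DLT pipeline created above so it appears in `pulumi stack output`.
pulumi.export("pipeline_url", this.url)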

Look up Existing Pipeline Resource

Get an existing Pipeline resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

public static get(name: string, id: Input<ID>, state?: PipelineState, opts?: CustomResourceOptions): Pipeline
@staticmethod
def get(resource_name: str,
        id: str,
        opts: Optional[ResourceOptions] = None,
        allow_duplicate_names: Optional[bool] = None,
        catalog: Optional[str] = None,
        channel: Optional[str] = None,
        clusters: Optional[Sequence[PipelineClusterArgs]] = None,
        configuration: Optional[Mapping[str, Any]] = None,
        continuous: Optional[bool] = None,
        development: Optional[bool] = None,
        edition: Optional[str] = None,
        filters: Optional[PipelineFiltersArgs] = None,
        libraries: Optional[Sequence[PipelineLibraryArgs]] = None,
        name: Optional[str] = None,
        notifications: Optional[Sequence[PipelineNotificationArgs]] = None,
        photon: Optional[bool] = None,
        serverless: Optional[bool] = None,
        storage: Optional[str] = None,
        target: Optional[str] = None,
        url: Optional[str] = None) -> Pipeline
func GetPipeline(ctx *Context, name string, id IDInput, state *PipelineState, opts ...ResourceOption) (*Pipeline, error)
public static Pipeline Get(string name, Input<string> id, PipelineState? state, CustomResourceOptions? opts = null)
public static Pipeline get(String name, Output<String> id, PipelineState state, CustomResourceOptions options)
Resource lookup is not supported in YAML
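
For example, a minimal sketch in Python of adopting an existing pipeline into a program by its ID; the ID value below is a placeholder:

import pulumi
import pulumi_databricks as databricks

# Look up the state of an already-created DLT pipeline by its pipeline ID.
existing = databricks.Pipeline.get("existing", "<pipeline-id>")

# The looked-up resource exposes the same outputs as a newly created one.
pulumi.export("existing_pipeline_name", existing.name)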
name
The unique name of the resulting resource.
id
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
resource_name
The unique name of the resulting resource.
id
The unique provider ID of the resource to lookup.
name
The unique name of the resulting resource.
id
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
name
The unique name of the resulting resource.
id
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
name
The unique name of the resulting resource.
id
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
The following state arguments are supported:
AllowDuplicateNames bool
Catalog string

The name of catalog in Unity Catalog. Change of this parameter forces recreation of the pipeline. (Conflicts with storage).

Channel string

Optional name of the release channel for the Spark version used by the DLT pipeline. Supported values are: CURRENT (default) and PREVIEW.

Clusters List<PipelineClusterArgs>

Blocks of clusters to run the pipeline. If none are specified, the pipeline automatically selects a default cluster configuration. Please note that DLT pipeline clusters support only a subset of cluster attributes, as described in the documentation. Also note that the autoscale block is extended with a mode parameter that controls the autoscaling algorithm (possible values are ENHANCED for the new, enhanced autoscaling algorithm, or LEGACY for the old algorithm).

Configuration Dictionary<string, object>

An optional list of values to apply to the entire pipeline. Elements must be formatted as key:value pairs.

Continuous bool

A flag indicating whether to run the pipeline continuously. The default value is false.

Development bool

A flag indicating whether to run the pipeline in development mode. The default value is true.

Edition string

Optional name of the product edition. Supported values are: CORE, PRO, and ADVANCED (default).

Filters PipelineFiltersArgs
Libraries List<PipelineLibraryArgs>

Blocks specifying the pipeline code and required artifacts. The syntax resembles the library configuration block, with the addition of special notebook and file library types that must set the path attribute. Currently only the notebook and file types are supported.

Name string

A user-friendly name for this pipeline. The name can be used to identify pipeline jobs in the UI.

Notifications List<PipelineNotificationArgs>
Photon bool

A flag indicating whether to use Photon engine. The default value is false.

Serverless bool
Storage string

A location on DBFS or cloud storage where output data and metadata required for pipeline execution are stored. By default, tables are stored in a subdirectory of this location. Change of this parameter forces recreation of the pipeline. (Conflicts with catalog).

Target string

The name of a database (in either the Hive metastore or in a UC catalog) for persisting pipeline output data. Configuring the target setting allows you to view and query the pipeline output data from the Databricks UI.

Url string
AllowDuplicateNames bool
Catalog string

The name of catalog in Unity Catalog. Change of this parameter forces recreation of the pipeline. (Conflicts with storage).

Channel string

Optional name of the release channel for the Spark version used by the DLT pipeline. Supported values are: CURRENT (default) and PREVIEW.

Clusters []PipelineClusterArgs

Blocks of clusters to run the pipeline. If none are specified, the pipeline automatically selects a default cluster configuration. Please note that DLT pipeline clusters support only a subset of cluster attributes, as described in the documentation. Also note that the autoscale block is extended with a mode parameter that controls the autoscaling algorithm (possible values are ENHANCED for the new, enhanced autoscaling algorithm, or LEGACY for the old algorithm).

Configuration map[string]interface{}

An optional list of values to apply to the entire pipeline. Elements must be formatted as key:value pairs.

Continuous bool

A flag indicating whether to run the pipeline continuously. The default value is false.

Development bool

A flag indicating whether to run the pipeline in development mode. The default value is true.

Edition string

Optional name of the product edition. Supported values are: CORE, PRO, and ADVANCED (default).

Filters PipelineFiltersArgs
Libraries []PipelineLibraryArgs

Blocks specifying the pipeline code and required artifacts. The syntax resembles the library configuration block, with the addition of special notebook and file library types that must set the path attribute. Currently only the notebook and file types are supported.

Name string

A user-friendly name for this pipeline. The name can be used to identify pipeline jobs in the UI.

Notifications []PipelineNotificationArgs
Photon bool

A flag indicating whether to use Photon engine. The default value is false.

Serverless bool
Storage string

A location on DBFS or cloud storage where output data and metadata required for pipeline execution are stored. By default, tables are stored in a subdirectory of this location. Change of this parameter forces recreation of the pipeline. (Conflicts with catalog).

Target string

The name of a database (in either the Hive metastore or in a UC catalog) for persisting pipeline output data. Configuring the target setting allows you to view and query the pipeline output data from the Databricks UI.

Url string
allowDuplicateNames Boolean
catalog String

The name of catalog in Unity Catalog. Change of this parameter forces recreation of the pipeline. (Conflicts with storage).

channel String

Optional name of the release channel for the Spark version used by the DLT pipeline. Supported values are: CURRENT (default) and PREVIEW.

clusters List<PipelineClusterArgs>

Blocks of clusters to run the pipeline. If none are specified, the pipeline automatically selects a default cluster configuration. Please note that DLT pipeline clusters support only a subset of cluster attributes, as described in the documentation. Also note that the autoscale block is extended with a mode parameter that controls the autoscaling algorithm (possible values are ENHANCED for the new, enhanced autoscaling algorithm, or LEGACY for the old algorithm).

configuration Map<String,Object>

An optional list of values to apply to the entire pipeline. Elements must be formatted as key:value pairs.

continuous Boolean

A flag indicating whether to run the pipeline continuously. The default value is false.

development Boolean

A flag indicating whether to run the pipeline in development mode. The default value is true.

edition String

Optional name of the product edition. Supported values are: CORE, PRO, and ADVANCED (default).

filters PipelineFiltersArgs
libraries List<PipelineLibraryArgs>

Blocks specifying the pipeline code and required artifacts. The syntax resembles the library configuration block, with the addition of special notebook and file library types that must set the path attribute. Currently only the notebook and file types are supported.

name String

A user-friendly name for this pipeline. The name can be used to identify pipeline jobs in the UI.

notifications List<PipelineNotificationArgs>
photon Boolean

A flag indicating whether to use Photon engine. The default value is false.

serverless Boolean
storage String

A location on DBFS or cloud storage where output data and metadata required for pipeline execution are stored. By default, tables are stored in a subdirectory of this location. Change of this parameter forces recreation of the pipeline. (Conflicts with catalog).

target String

The name of a database (in either the Hive metastore or in a UC catalog) for persisting pipeline output data. Configuring the target setting allows you to view and query the pipeline output data from the Databricks UI.

url String
allowDuplicateNames boolean
catalog string

The name of catalog in Unity Catalog. Change of this parameter forces recreation of the pipeline. (Conflicts with storage).

channel string

Optional name of the release channel for the Spark version used by the DLT pipeline. Supported values are: CURRENT (default) and PREVIEW.

clusters PipelineClusterArgs[]

Blocks of clusters to run the pipeline. If none are specified, the pipeline automatically selects a default cluster configuration. Please note that DLT pipeline clusters support only a subset of cluster attributes, as described in the documentation. Also note that the autoscale block is extended with a mode parameter that controls the autoscaling algorithm (possible values are ENHANCED for the new, enhanced autoscaling algorithm, or LEGACY for the old algorithm).

configuration {[key: string]: any}

An optional list of values to apply to the entire pipeline. Elements must be formatted as key:value pairs.

continuous boolean

A flag indicating whether to run the pipeline continuously. The default value is false.

development boolean

A flag indicating whether to run the pipeline in development mode. The default value is true.

edition string

Optional name of the product edition. Supported values are: CORE, PRO, and ADVANCED (default).

filters PipelineFiltersArgs
libraries PipelineLibraryArgs[]

Blocks specifying the pipeline code and required artifacts. The syntax resembles the library configuration block, with the addition of special notebook and file library types that must set the path attribute. Currently only the notebook and file types are supported.

name string

A user-friendly name for this pipeline. The name can be used to identify pipeline jobs in the UI.

notifications PipelineNotificationArgs[]
photon boolean

A flag indicating whether to use Photon engine. The default value is false.

serverless boolean
storage string

A location on DBFS or cloud storage where output data and metadata required for pipeline execution are stored. By default, tables are stored in a subdirectory of this location. Change of this parameter forces recreation of the pipeline. (Conflicts with catalog).

target string

The name of a database (in either the Hive metastore or in a UC catalog) for persisting pipeline output data. Configuring the target setting allows you to view and query the pipeline output data from the Databricks UI.

url string
allow_duplicate_names bool
catalog str

The name of catalog in Unity Catalog. Change of this parameter forces recreation of the pipeline. (Conflicts with storage).

channel str

Optional name of the release channel for the Spark version used by the DLT pipeline. Supported values are: CURRENT (default) and PREVIEW.

clusters Sequence[PipelineClusterArgs]

Blocks of clusters to run the pipeline. If none are specified, the pipeline automatically selects a default cluster configuration. Please note that DLT pipeline clusters support only a subset of cluster attributes, as described in the documentation. Also note that the autoscale block is extended with a mode parameter that controls the autoscaling algorithm (possible values are ENHANCED for the new, enhanced autoscaling algorithm, or LEGACY for the old algorithm).

configuration Mapping[str, Any]

An optional list of values to apply to the entire pipeline. Elements must be formatted as key:value pairs.

continuous bool

A flag indicating whether to run the pipeline continuously. The default value is false.

development bool

A flag indicating whether to run the pipeline in development mode. The default value is true.

edition str

Optional name of the product edition. Supported values are: CORE, PRO, and ADVANCED (default).

filters PipelineFiltersArgs
libraries Sequence[PipelineLibraryArgs]

Blocks specifying the pipeline code and required artifacts. The syntax resembles the library configuration block, with the addition of special notebook and file library types that must set the path attribute. Currently only the notebook and file types are supported.

name str

A user-friendly name for this pipeline. The name can be used to identify pipeline jobs in the UI.

notifications Sequence[PipelineNotificationArgs]
photon bool

A flag indicating whether to use Photon engine. The default value is false.

serverless bool
storage str

A location on DBFS or cloud storage where output data and metadata required for pipeline execution are stored. By default, tables are stored in a subdirectory of this location. Change of this parameter forces recreation of the pipeline. (Conflicts with catalog).

target str

The name of a database (in either the Hive metastore or in a UC catalog) for persisting pipeline output data. Configuring the target setting allows you to view and query the pipeline output data from the Databricks UI.

url str
allowDuplicateNames Boolean
catalog String

The name of catalog in Unity Catalog. Change of this parameter forces recreation of the pipeline. (Conflicts with storage).

channel String

Optional name of the release channel for the Spark version used by the DLT pipeline. Supported values are: CURRENT (default) and PREVIEW.

clusters List<Property Map>

Blocks of clusters to run the pipeline. If none are specified, the pipeline automatically selects a default cluster configuration. Please note that DLT pipeline clusters support only a subset of cluster attributes, as described in the documentation. Also note that the autoscale block is extended with a mode parameter that controls the autoscaling algorithm (possible values are ENHANCED for the new, enhanced autoscaling algorithm, or LEGACY for the old algorithm).

configuration Map<Any>

An optional list of values to apply to the entire pipeline. Elements must be formatted as key:value pairs.

continuous Boolean

A flag indicating whether to run the pipeline continuously. The default value is false.

development Boolean

A flag indicating whether to run the pipeline in development mode. The default value is true.

edition String

Optional name of the product edition. Supported values are: CORE, PRO, and ADVANCED (default).

filters Property Map
libraries List<Property Map>

Blocks specifying the pipeline code and required artifacts. The syntax resembles the library configuration block, with the addition of special notebook and file library types that must set the path attribute. Currently only the notebook and file types are supported.

name String

A user-friendly name for this pipeline. The name can be used to identify pipeline jobs in the UI.

notifications List<Property Map>
photon Boolean

A flag indicating whether to use Photon engine. The default value is false.

serverless Boolean
storage String

A location on DBFS or cloud storage where output data and metadata required for pipeline execution are stored. By default, tables are stored in a subdirectory of this location. Change of this parameter forces recreation of the pipeline. (Conflicts with catalog).

target String

The name of a database (in either the Hive metastore or in a UC catalog) for persisting pipeline output data. Configuring the target setting allows you to view and query the pipeline output data from the Databricks UI.

url String

Supporting Types

PipelineCluster

PipelineClusterAutoscale

maxWorkers Integer
minWorkers Integer
mode String
maxWorkers number
minWorkers number
mode string
maxWorkers Number
minWorkers Number
mode String
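
As a sketch of how these fields fit together, here is a Python cluster block that uses enhanced autoscaling instead of a fixed num_workers; the label and worker bounds are illustrative values, and the Args class name follows the provider's generated naming:

import pulumi_databricks as databricks

# A pipeline cluster that scales between 1 and 4 workers using the enhanced autoscaling algorithm.
autoscaled_cluster = databricks.PipelineClusterArgs(
    label="default",
    autoscale=databricks.PipelineClusterAutoscaleArgs(
        min_workers=1,
        max_workers=4,
        mode="ENHANCED",
    ),
)

This value would then be passed in the clusters list of a databricks.Pipeline, in place of the fixed-size clusters shown in the Example Usage.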

PipelineClusterAwsAttributes

PipelineClusterAzureAttributes

PipelineClusterClusterLogConf

PipelineClusterClusterLogConfDbfs

PipelineClusterClusterLogConfS3

Destination string
CannedAcl string
EnableEncryption bool
EncryptionType string
Endpoint string
KmsKey string
Region string
Destination string
CannedAcl string
EnableEncryption bool
EncryptionType string
Endpoint string
KmsKey string
Region string
destination String
cannedAcl String
enableEncryption Boolean
encryptionType String
endpoint String
kmsKey String
region String
destination string
cannedAcl string
enableEncryption boolean
encryptionType string
endpoint string
kmsKey string
region string
destination String
cannedAcl String
enableEncryption Boolean
encryptionType String
endpoint String
kmsKey String
region String
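
A sketch of wiring these fields up in Python to deliver pipeline cluster logs to S3; the bucket path and region are placeholders, and the Args class names and the cluster_log_conf field follow the provider's generated naming, so treat them as assumptions:

import pulumi_databricks as databricks

# A pipeline cluster whose driver and worker logs are shipped to an S3 prefix.
logged_cluster = databricks.PipelineClusterArgs(
    label="default",
    num_workers=1,
    cluster_log_conf=databricks.PipelineClusterClusterLogConfArgs(
        s3=databricks.PipelineClusterClusterLogConfS3Args(
            destination="s3://my-bucket/dlt-logs",
            region="us-east-1",
        ),
    ),
)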

PipelineClusterGcpAttributes

PipelineClusterInitScript

PipelineClusterInitScriptAbfss

PipelineClusterInitScriptDbfs

PipelineClusterInitScriptFile

PipelineClusterInitScriptGcs

PipelineClusterInitScriptS3

Destination string
CannedAcl string
EnableEncryption bool
EncryptionType string
Endpoint string
KmsKey string
Region string
Destination string
CannedAcl string
EnableEncryption bool
EncryptionType string
Endpoint string
KmsKey string
Region string
destination String
cannedAcl String
enableEncryption Boolean
encryptionType String
endpoint String
kmsKey String
region String
destination string
cannedAcl string
enableEncryption boolean
encryptionType string
endpoint string
kmsKey string
region string
destination String
cannedAcl String
enableEncryption Boolean
encryptionType String
endpoint String
kmsKey String
region String

PipelineClusterInitScriptWorkspace

PipelineFilters

Excludes List<string>
Includes List<string>
Excludes []string
Includes []string
excludes List<String>
includes List<String>
excludes string[]
includes string[]
excludes Sequence[str]
includes Sequence[str]
excludes List<String>
includes List<String>

PipelineLibrary

PipelineLibraryFile

Path string
Path string
path String
path string
path str
path String

PipelineLibraryMaven

Coordinates string
Exclusions List<string>
Repo string
Coordinates string
Exclusions []string
Repo string
coordinates String
exclusions List<String>
repo String
coordinates string
exclusions string[]
repo string
coordinates str
exclusions Sequence[str]
repo str
coordinates String
exclusions List<String>
repo String

PipelineLibraryNotebook

Path string
Path string
path String
path string
path str
path String

PipelineNotification

Alerts List<string>

Non-empty list of alert types. The following alert types are currently supported (consult the documentation for the up-to-date list):

  • on-update-success - a pipeline update completes successfully.
  • on-update-failure - a pipeline update fails with a retryable error.
  • on-update-fatal-failure - a pipeline update fails with a non-retryable (fatal) error.
  • on-flow-failure - a single data flow fails.
EmailRecipients List<string>

Non-empty list of email addresses to notify.

Alerts []string

Non-empty list of alert types. The following alert types are currently supported (consult the documentation for the up-to-date list):

  • on-update-success - a pipeline update completes successfully.
  • on-update-failure - a pipeline update fails with a retryable error.
  • on-update-fatal-failure - a pipeline update fails with a non-retryable (fatal) error.
  • on-flow-failure - a single data flow fails.
EmailRecipients []string

Non-empty list of email addresses to notify.

alerts List<String>

Non-empty list of alert types. The following alert types are currently supported (consult the documentation for the up-to-date list):

  • on-update-success - a pipeline update completes successfully.
  • on-update-failure - a pipeline update fails with a retryable error.
  • on-update-fatal-failure - a pipeline update fails with a non-retryable (fatal) error.
  • on-flow-failure - a single data flow fails.
emailRecipients List<String>

Non-empty list of email addresses to notify.

alerts string[]

Non-empty list of alert types. The following alert types are currently supported (consult the documentation for the up-to-date list):

  • on-update-success - a pipeline update completes successfully.
  • on-update-failure - a pipeline update fails with a retryable error.
  • on-update-fatal-failure - a pipeline update fails with a non-retryable (fatal) error.
  • on-flow-failure - a single data flow fails.
emailRecipients string[]

Non-empty list of email addresses to notify.

alerts Sequence[str]

Non-empty list of alert types. The following alert types are currently supported (consult the documentation for the up-to-date list):

  • on-update-success - a pipeline update completes successfully.
  • on-update-failure - a pipeline update fails with a retryable error.
  • on-update-fatal-failure - a pipeline update fails with a non-retryable (fatal) error.
  • on-flow-failure - a single data flow fails.
email_recipients Sequence[str]

Non-empty list of email addresses to notify.

alerts List<String>

Non-empty list of alert types. The following alert types are currently supported (consult the documentation for the up-to-date list):

  • on-update-success - a pipeline update completes successfully.
  • on-update-failure - a pipeline update fails with a retryable error.
  • on-update-fatal-failure - a pipeline update fails with a non-retryable (fatal) error.
  • on-flow-failure - a single data flow fails.
emailRecipients List<String>

Non-empty list of email addresses to notify.

Import

The pipeline resource can be imported using the ID of the pipeline:

 $ pulumi import databricks:index/pipeline:Pipeline this <pipeline-id>
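
For the import to attach to something, the program must already declare a resource with the matching logical name; a minimal sketch in Python using the logical name this from the command above:

import pulumi_databricks as databricks

# Declare the resource that the imported pipeline state will be bound to. After importing,
# copy the pipeline's actual settings into this declaration so `pulumi up` shows no diff.
this = databricks.Pipeline("this")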

Package Details

Repository
databricks pulumi/pulumi-databricks
License
Apache-2.0
Notes

This Pulumi package is based on the databricks Terraform Provider.