databricks.Pipeline
Use databricks.Pipeline to deploy Delta Live Tables.
Related Resources
The following resources are often used in the same context:
- End to end workspace management guide.
- databricks.getPipelines to retrieve Delta Live Tables pipeline data.
- databricks.Cluster to create Databricks Clusters.
- databricks.Job to manage Databricks Jobs to run non-interactive code in a databricks_cluster.
- databricks.Notebook to manage Databricks Notebooks.
Example Usage
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Databricks = Pulumi.Databricks;
return await Deployment.RunAsync(() =>
{
var dltDemoNotebook = new Databricks.Notebook("dltDemoNotebook");
//...
var dltDemoRepo = new Databricks.Repo("dltDemoRepo");
//...
var @this = new Databricks.Pipeline("this", new()
{
Storage = "/test/first-pipeline",
Configuration =
{
{ "key1", "value1" },
{ "key2", "value2" },
},
Clusters = new[]
{
new Databricks.Inputs.PipelineClusterArgs
{
Label = "default",
NumWorkers = 2,
CustomTags =
{
{ "cluster_type", "default" },
},
},
new Databricks.Inputs.PipelineClusterArgs
{
Label = "maintenance",
NumWorkers = 1,
CustomTags =
{
{ "cluster_type", "maintenance" },
},
},
},
Libraries = new[]
{
new Databricks.Inputs.PipelineLibraryArgs
{
Notebook = new Databricks.Inputs.PipelineLibraryNotebookArgs
{
Path = dltDemoNotebook.Id,
},
},
new Databricks.Inputs.PipelineLibraryArgs
{
File = new Databricks.Inputs.PipelineLibraryFileArgs
{
Path = dltDemoRepo.Path.Apply(path => $"{path}/pipeline.sql"),
},
},
},
Continuous = false,
Notifications = new[]
{
new Databricks.Inputs.PipelineNotificationArgs
{
EmailRecipients = new[]
{
"user@domain.com",
"user1@domain.com",
},
Alerts = new[]
{
"on-update-failure",
"on-update-fatal-failure",
"on-update-success",
"on-flow-failure",
},
},
},
});
});
package main
import (
"fmt"
"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
dltDemoNotebook, err := databricks.NewNotebook(ctx, "dltDemoNotebook", nil)
if err != nil {
return err
}
dltDemoRepo, err := databricks.NewRepo(ctx, "dltDemoRepo", nil)
if err != nil {
return err
}
_, err = databricks.NewPipeline(ctx, "this", &databricks.PipelineArgs{
Storage: pulumi.String("/test/first-pipeline"),
Configuration: pulumi.AnyMap{
"key1": pulumi.Any("value1"),
"key2": pulumi.Any("value2"),
},
Clusters: databricks.PipelineClusterArray{
&databricks.PipelineClusterArgs{
Label: pulumi.String("default"),
NumWorkers: pulumi.Int(2),
CustomTags: pulumi.AnyMap{
"cluster_type": pulumi.Any("default"),
},
},
&databricks.PipelineClusterArgs{
Label: pulumi.String("maintenance"),
NumWorkers: pulumi.Int(1),
CustomTags: pulumi.AnyMap{
"cluster_type": pulumi.Any("maintenance"),
},
},
},
Libraries: databricks.PipelineLibraryArray{
&databricks.PipelineLibraryArgs{
Notebook: &databricks.PipelineLibraryNotebookArgs{
Path: dltDemoNotebook.ID(),
},
},
&databricks.PipelineLibraryArgs{
File: &databricks.PipelineLibraryFileArgs{
Path: dltDemoRepo.Path.ApplyT(func(path string) (string, error) {
return fmt.Sprintf("%v/pipeline.sql", path), nil
}).(pulumi.StringOutput),
},
},
},
Continuous: pulumi.Bool(false),
Notifications: databricks.PipelineNotificationArray{
&databricks.PipelineNotificationArgs{
EmailRecipients: pulumi.StringArray{
pulumi.String("user@domain.com"),
pulumi.String("user1@domain.com"),
},
Alerts: pulumi.StringArray{
pulumi.String("on-update-failure"),
pulumi.String("on-update-fatal-failure"),
pulumi.String("on-update-success"),
pulumi.String("on-flow-failure"),
},
},
},
})
if err != nil {
return err
}
return nil
})
}
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.databricks.Notebook;
import com.pulumi.databricks.Repo;
import com.pulumi.databricks.Pipeline;
import com.pulumi.databricks.PipelineArgs;
import com.pulumi.databricks.inputs.PipelineClusterArgs;
import com.pulumi.databricks.inputs.PipelineLibraryArgs;
import com.pulumi.databricks.inputs.PipelineLibraryNotebookArgs;
import com.pulumi.databricks.inputs.PipelineLibraryFileArgs;
import com.pulumi.databricks.inputs.PipelineNotificationArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var dltDemoNotebook = new Notebook("dltDemoNotebook");
var dltDemoRepo = new Repo("dltDemoRepo");
var this_ = new Pipeline("this", PipelineArgs.builder()
.storage("/test/first-pipeline")
.configuration(Map.ofEntries(
Map.entry("key1", "value1"),
Map.entry("key2", "value2")
))
.clusters(
PipelineClusterArgs.builder()
.label("default")
.numWorkers(2)
.customTags(Map.of("cluster_type", "default"))
.build(),
PipelineClusterArgs.builder()
.label("maintenance")
.numWorkers(1)
.customTags(Map.of("cluster_type", "maintenance"))
.build())
.libraries(
PipelineLibraryArgs.builder()
.notebook(PipelineLibraryNotebookArgs.builder()
.path(dltDemoNotebook.id())
.build())
.build(),
PipelineLibraryArgs.builder()
.file(PipelineLibraryFileArgs.builder()
.path(dltDemoRepo.path().applyValue(path -> String.format("%s/pipeline.sql", path)))
.build())
.build())
.continuous(false)
.notifications(PipelineNotificationArgs.builder()
.emailRecipients(
"user@domain.com",
"user1@domain.com")
.alerts(
"on-update-failure",
"on-update-fatal-failure",
"on-update-success",
"on-flow-failure")
.build())
.build());
}
}
import pulumi
import pulumi_databricks as databricks
dlt_demo_notebook = databricks.Notebook("dltDemoNotebook")
#...
dlt_demo_repo = databricks.Repo("dltDemoRepo")
#...
this = databricks.Pipeline("this",
storage="/test/first-pipeline",
configuration={
"key1": "value1",
"key2": "value2",
},
clusters=[
databricks.PipelineClusterArgs(
label="default",
num_workers=2,
custom_tags={
"cluster_type": "default",
},
),
databricks.PipelineClusterArgs(
label="maintenance",
num_workers=1,
custom_tags={
"cluster_type": "maintenance",
},
),
],
libraries=[
databricks.PipelineLibraryArgs(
notebook=databricks.PipelineLibraryNotebookArgs(
path=dlt_demo_notebook.id,
),
),
databricks.PipelineLibraryArgs(
file=databricks.PipelineLibraryFileArgs(
path=dlt_demo_repo.path.apply(lambda path: f"{path}/pipeline.sql"),
),
),
],
continuous=False,
notifications=[databricks.PipelineNotificationArgs(
email_recipients=[
"user@domain.com",
"user1@domain.com",
],
alerts=[
"on-update-failure",
"on-update-fatal-failure",
"on-update-success",
"on-flow-failure",
],
)])
import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";
const dltDemoNotebook = new databricks.Notebook("dltDemoNotebook", {});
//...
const dltDemoRepo = new databricks.Repo("dltDemoRepo", {});
//...
const _this = new databricks.Pipeline("this", {
storage: "/test/first-pipeline",
configuration: {
key1: "value1",
key2: "value2",
},
clusters: [
{
label: "default",
numWorkers: 2,
customTags: {
cluster_type: "default",
},
},
{
label: "maintenance",
numWorkers: 1,
customTags: {
cluster_type: "maintenance",
},
},
],
libraries: [
{
notebook: {
path: dltDemoNotebook.id,
},
},
{
file: {
path: pulumi.interpolate`${dltDemoRepo.path}/pipeline.sql`,
},
},
],
continuous: false,
notifications: [{
emailRecipients: [
"user@domain.com",
"user1@domain.com",
],
alerts: [
"on-update-failure",
"on-update-fatal-failure",
"on-update-success",
"on-flow-failure",
],
}],
});
resources:
dltDemoNotebook:
type: databricks:Notebook
dltDemoRepo:
type: databricks:Repo
this:
type: databricks:Pipeline
properties:
storage: /test/first-pipeline
configuration:
key1: value1
key2: value2
clusters:
- label: default
numWorkers: 2
customTags:
cluster_type: default
- label: maintenance
numWorkers: 1
customTags:
cluster_type: maintenance
libraries:
- notebook:
path: ${dltDemoNotebook.id}
- file:
path: ${dltDemoRepo.path}/pipeline.sql
continuous: false
notifications:
- emailRecipients:
- user@domain.com
- user1@domain.com
alerts:
- on-update-failure
- on-update-fatal-failure
- on-update-success
- on-flow-failure
Create Pipeline Resource
new Pipeline(name: string, args?: PipelineArgs, opts?: CustomResourceOptions);
@overload
def Pipeline(resource_name: str,
opts: Optional[ResourceOptions] = None,
allow_duplicate_names: Optional[bool] = None,
catalog: Optional[str] = None,
channel: Optional[str] = None,
clusters: Optional[Sequence[PipelineClusterArgs]] = None,
configuration: Optional[Mapping[str, Any]] = None,
continuous: Optional[bool] = None,
development: Optional[bool] = None,
edition: Optional[str] = None,
filters: Optional[PipelineFiltersArgs] = None,
libraries: Optional[Sequence[PipelineLibraryArgs]] = None,
name: Optional[str] = None,
notifications: Optional[Sequence[PipelineNotificationArgs]] = None,
photon: Optional[bool] = None,
serverless: Optional[bool] = None,
storage: Optional[str] = None,
target: Optional[str] = None)
@overload
def Pipeline(resource_name: str,
args: Optional[PipelineArgs] = None,
opts: Optional[ResourceOptions] = None)
func NewPipeline(ctx *Context, name string, args *PipelineArgs, opts ...ResourceOption) (*Pipeline, error)
public Pipeline(string name, PipelineArgs? args = null, CustomResourceOptions? opts = null)
public Pipeline(String name, PipelineArgs args)
public Pipeline(String name, PipelineArgs args, CustomResourceOptions options)
type: databricks:Pipeline
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
- name string - The unique name of the resource.
- args PipelineArgs - The arguments to resource properties.
- opts CustomResourceOptions - Bag of options to control resource's behavior.

- resource_name str - The unique name of the resource.
- args PipelineArgs - The arguments to resource properties.
- opts ResourceOptions - Bag of options to control resource's behavior.

- ctx Context - Context object for the current deployment.
- name string - The unique name of the resource.
- args PipelineArgs - The arguments to resource properties.
- opts ResourceOption - Bag of options to control resource's behavior.

- name string - The unique name of the resource.
- args PipelineArgs - The arguments to resource properties.
- opts CustomResourceOptions - Bag of options to control resource's behavior.

- name String - The unique name of the resource.
- args PipelineArgs - The arguments to resource properties.
- options CustomResourceOptions - Bag of options to control resource's behavior.
Pipeline Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
The Pipeline resource accepts the following input properties:
- AllowDuplicateNames bool
- Catalog string - The name of the catalog in Unity Catalog. Changing this parameter forces recreation of the pipeline. (Conflicts with storage.)
- Channel string - Optional name of the release channel for the Spark version used by the DLT pipeline. Supported values are CURRENT (default) and PREVIEW.
- Clusters List<PipelineClusterArgs> - Blocks describing the clusters to run the pipeline. If none is specified, a default cluster configuration is selected automatically. Note that DLT pipeline clusters support only a subset of cluster attributes, as described in the documentation. Also note that the autoscale block is extended with a mode parameter that controls the autoscaling algorithm (possible values are ENHANCED for the new, enhanced autoscaling algorithm, or LEGACY for the old algorithm).
- Configuration Dictionary<string, object> - An optional map of values to apply to the entire pipeline. Elements must be formatted as key:value pairs.
- Continuous bool - A flag indicating whether to run the pipeline continuously. The default value is false.
- Development bool - A flag indicating whether to run the pipeline in development mode. The default value is true.
- Edition string - Optional name of the product edition. Supported values are CORE, PRO, and ADVANCED (default).
- Filters PipelineFiltersArgs
- Libraries List<PipelineLibraryArgs> - Blocks specifying pipeline code and required artifacts. The syntax resembles the library configuration block, with the addition of special notebook and file library types that must have a path attribute. Currently only the notebook and file types are supported.
- Name string - A user-friendly name for this pipeline. The name can be used to identify pipeline jobs in the UI.
- Notifications List<PipelineNotificationArgs>
- Photon bool - A flag indicating whether to use the Photon engine. The default value is false.
- Serverless bool
- Storage string - A location on DBFS or cloud storage where output data and metadata required for pipeline execution are stored. By default, tables are stored in a subdirectory of this location. Changing this parameter forces recreation of the pipeline. (Conflicts with catalog.)
- Target string - The name of a database (in either the Hive metastore or in a UC catalog) for persisting pipeline output data. Configuring the target setting allows you to view and query the pipeline output data from the Databricks UI.

- AllowDuplicateNames bool
- Catalog string - The name of the catalog in Unity Catalog. Changing this parameter forces recreation of the pipeline. (Conflicts with storage.)
- Channel string - Optional name of the release channel for the Spark version used by the DLT pipeline. Supported values are CURRENT (default) and PREVIEW.
- Clusters []PipelineClusterArgs - Blocks describing the clusters to run the pipeline. If none is specified, a default cluster configuration is selected automatically. Note that DLT pipeline clusters support only a subset of cluster attributes, as described in the documentation. Also note that the autoscale block is extended with a mode parameter that controls the autoscaling algorithm (possible values are ENHANCED for the new, enhanced autoscaling algorithm, or LEGACY for the old algorithm).
- Configuration map[string]interface{} - An optional map of values to apply to the entire pipeline. Elements must be formatted as key:value pairs.
- Continuous bool - A flag indicating whether to run the pipeline continuously. The default value is false.
- Development bool - A flag indicating whether to run the pipeline in development mode. The default value is true.
- Edition string - Optional name of the product edition. Supported values are CORE, PRO, and ADVANCED (default).
- Filters PipelineFiltersArgs
- Libraries []PipelineLibraryArgs - Blocks specifying pipeline code and required artifacts. The syntax resembles the library configuration block, with the addition of special notebook and file library types that must have a path attribute. Currently only the notebook and file types are supported.
- Name string - A user-friendly name for this pipeline. The name can be used to identify pipeline jobs in the UI.
- Notifications []PipelineNotificationArgs
- Photon bool - A flag indicating whether to use the Photon engine. The default value is false.
- Serverless bool
- Storage string - A location on DBFS or cloud storage where output data and metadata required for pipeline execution are stored. By default, tables are stored in a subdirectory of this location. Changing this parameter forces recreation of the pipeline. (Conflicts with catalog.)
- Target string - The name of a database (in either the Hive metastore or in a UC catalog) for persisting pipeline output data. Configuring the target setting allows you to view and query the pipeline output data from the Databricks UI.

- allowDuplicateNames Boolean
- catalog String - The name of the catalog in Unity Catalog. Changing this parameter forces recreation of the pipeline. (Conflicts with storage.)
- channel String - Optional name of the release channel for the Spark version used by the DLT pipeline. Supported values are CURRENT (default) and PREVIEW.
- clusters List<PipelineClusterArgs> - Blocks describing the clusters to run the pipeline. If none is specified, a default cluster configuration is selected automatically. Note that DLT pipeline clusters support only a subset of cluster attributes, as described in the documentation. Also note that the autoscale block is extended with a mode parameter that controls the autoscaling algorithm (possible values are ENHANCED for the new, enhanced autoscaling algorithm, or LEGACY for the old algorithm).
- configuration Map<String,Object> - An optional map of values to apply to the entire pipeline. Elements must be formatted as key:value pairs.
- continuous Boolean - A flag indicating whether to run the pipeline continuously. The default value is false.
- development Boolean - A flag indicating whether to run the pipeline in development mode. The default value is true.
- edition String - Optional name of the product edition. Supported values are CORE, PRO, and ADVANCED (default).
- filters PipelineFiltersArgs
- libraries List<PipelineLibraryArgs> - Blocks specifying pipeline code and required artifacts. The syntax resembles the library configuration block, with the addition of special notebook and file library types that must have a path attribute. Currently only the notebook and file types are supported.
- name String - A user-friendly name for this pipeline. The name can be used to identify pipeline jobs in the UI.
- notifications List<PipelineNotificationArgs>
- photon Boolean - A flag indicating whether to use the Photon engine. The default value is false.
- serverless Boolean
- storage String - A location on DBFS or cloud storage where output data and metadata required for pipeline execution are stored. By default, tables are stored in a subdirectory of this location. Changing this parameter forces recreation of the pipeline. (Conflicts with catalog.)
- target String - The name of a database (in either the Hive metastore or in a UC catalog) for persisting pipeline output data. Configuring the target setting allows you to view and query the pipeline output data from the Databricks UI.

- allowDuplicateNames boolean
- catalog string - The name of the catalog in Unity Catalog. Changing this parameter forces recreation of the pipeline. (Conflicts with storage.)
- channel string - Optional name of the release channel for the Spark version used by the DLT pipeline. Supported values are CURRENT (default) and PREVIEW.
- clusters PipelineClusterArgs[] - Blocks describing the clusters to run the pipeline. If none is specified, a default cluster configuration is selected automatically. Note that DLT pipeline clusters support only a subset of cluster attributes, as described in the documentation. Also note that the autoscale block is extended with a mode parameter that controls the autoscaling algorithm (possible values are ENHANCED for the new, enhanced autoscaling algorithm, or LEGACY for the old algorithm).
- configuration {[key: string]: any} - An optional map of values to apply to the entire pipeline. Elements must be formatted as key:value pairs.
- continuous boolean - A flag indicating whether to run the pipeline continuously. The default value is false.
- development boolean - A flag indicating whether to run the pipeline in development mode. The default value is true.
- edition string - Optional name of the product edition. Supported values are CORE, PRO, and ADVANCED (default).
- filters PipelineFiltersArgs
- libraries PipelineLibraryArgs[] - Blocks specifying pipeline code and required artifacts. The syntax resembles the library configuration block, with the addition of special notebook and file library types that must have a path attribute. Currently only the notebook and file types are supported.
- name string - A user-friendly name for this pipeline. The name can be used to identify pipeline jobs in the UI.
- notifications PipelineNotificationArgs[]
- photon boolean - A flag indicating whether to use the Photon engine. The default value is false.
- serverless boolean
- storage string - A location on DBFS or cloud storage where output data and metadata required for pipeline execution are stored. By default, tables are stored in a subdirectory of this location. Changing this parameter forces recreation of the pipeline. (Conflicts with catalog.)
- target string - The name of a database (in either the Hive metastore or in a UC catalog) for persisting pipeline output data. Configuring the target setting allows you to view and query the pipeline output data from the Databricks UI.

- allow_duplicate_names bool
- catalog str - The name of the catalog in Unity Catalog. Changing this parameter forces recreation of the pipeline. (Conflicts with storage.)
- channel str - Optional name of the release channel for the Spark version used by the DLT pipeline. Supported values are CURRENT (default) and PREVIEW.
- clusters Sequence[PipelineClusterArgs] - Blocks describing the clusters to run the pipeline. If none is specified, a default cluster configuration is selected automatically. Note that DLT pipeline clusters support only a subset of cluster attributes, as described in the documentation. Also note that the autoscale block is extended with a mode parameter that controls the autoscaling algorithm (possible values are ENHANCED for the new, enhanced autoscaling algorithm, or LEGACY for the old algorithm).
- configuration Mapping[str, Any] - An optional map of values to apply to the entire pipeline. Elements must be formatted as key:value pairs.
- continuous bool - A flag indicating whether to run the pipeline continuously. The default value is false.
- development bool - A flag indicating whether to run the pipeline in development mode. The default value is true.
- edition str - Optional name of the product edition. Supported values are CORE, PRO, and ADVANCED (default).
- filters PipelineFiltersArgs
- libraries Sequence[PipelineLibraryArgs] - Blocks specifying pipeline code and required artifacts. The syntax resembles the library configuration block, with the addition of special notebook and file library types that must have a path attribute. Currently only the notebook and file types are supported.
- name str - A user-friendly name for this pipeline. The name can be used to identify pipeline jobs in the UI.
- notifications Sequence[PipelineNotificationArgs]
- photon bool - A flag indicating whether to use the Photon engine. The default value is false.
- serverless bool
- storage str - A location on DBFS or cloud storage where output data and metadata required for pipeline execution are stored. By default, tables are stored in a subdirectory of this location. Changing this parameter forces recreation of the pipeline. (Conflicts with catalog.)
- target str - The name of a database (in either the Hive metastore or in a UC catalog) for persisting pipeline output data. Configuring the target setting allows you to view and query the pipeline output data from the Databricks UI.

- allowDuplicateNames Boolean
- catalog String - The name of the catalog in Unity Catalog. Changing this parameter forces recreation of the pipeline. (Conflicts with storage.)
- channel String - Optional name of the release channel for the Spark version used by the DLT pipeline. Supported values are CURRENT (default) and PREVIEW.
- clusters List<Property Map> - Blocks describing the clusters to run the pipeline. If none is specified, a default cluster configuration is selected automatically. Note that DLT pipeline clusters support only a subset of cluster attributes, as described in the documentation. Also note that the autoscale block is extended with a mode parameter that controls the autoscaling algorithm (possible values are ENHANCED for the new, enhanced autoscaling algorithm, or LEGACY for the old algorithm).
- configuration Map<Any> - An optional map of values to apply to the entire pipeline. Elements must be formatted as key:value pairs.
- continuous Boolean - A flag indicating whether to run the pipeline continuously. The default value is false.
- development Boolean - A flag indicating whether to run the pipeline in development mode. The default value is true.
- edition String - Optional name of the product edition. Supported values are CORE, PRO, and ADVANCED (default).
- filters Property Map
- libraries List<Property Map> - Blocks specifying pipeline code and required artifacts. The syntax resembles the library configuration block, with the addition of special notebook and file library types that must have a path attribute. Currently only the notebook and file types are supported.
- name String - A user-friendly name for this pipeline. The name can be used to identify pipeline jobs in the UI.
- notifications List<Property Map>
- photon Boolean - A flag indicating whether to use the Photon engine. The default value is false.
- serverless Boolean
- storage String - A location on DBFS or cloud storage where output data and metadata required for pipeline execution are stored. By default, tables are stored in a subdirectory of this location. Changing this parameter forces recreation of the pipeline. (Conflicts with catalog.)
- target String - The name of a database (in either the Hive metastore or in a UC catalog) for persisting pipeline output data. Configuring the target setting allows you to view and query the pipeline output data from the Databricks UI.
Outputs
All input properties are implicitly available as output properties. Additionally, the Pipeline resource produces the following output properties:
- Id string - The provider-assigned unique ID for this managed resource.
- Url string
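For example, those outputs can be exported from a program. This is a minimal TypeScript sketch, assuming the pipeline from the usage example above and an SDK version that exposes the url output.
import * as databricks from "@pulumi/databricks";

const pipeline = new databricks.Pipeline("this", {
    storage: "/test/first-pipeline",
});

// The provider-assigned ID and the pipeline URL are available once the resource is created.
export const pipelineId = pipeline.id;
export const pipelineUrl = pipeline.url;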
Look up Existing Pipeline Resource
Get an existing Pipeline resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: PipelineState, opts?: CustomResourceOptions): Pipeline
@staticmethod
def get(resource_name: str,
id: str,
opts: Optional[ResourceOptions] = None,
allow_duplicate_names: Optional[bool] = None,
catalog: Optional[str] = None,
channel: Optional[str] = None,
clusters: Optional[Sequence[PipelineClusterArgs]] = None,
configuration: Optional[Mapping[str, Any]] = None,
continuous: Optional[bool] = None,
development: Optional[bool] = None,
edition: Optional[str] = None,
filters: Optional[PipelineFiltersArgs] = None,
libraries: Optional[Sequence[PipelineLibraryArgs]] = None,
name: Optional[str] = None,
notifications: Optional[Sequence[PipelineNotificationArgs]] = None,
photon: Optional[bool] = None,
serverless: Optional[bool] = None,
storage: Optional[str] = None,
target: Optional[str] = None,
url: Optional[str] = None) -> Pipeline
func GetPipeline(ctx *Context, name string, id IDInput, state *PipelineState, opts ...ResourceOption) (*Pipeline, error)
public static Pipeline Get(string name, Input<string> id, PipelineState? state, CustomResourceOptions? opts = null)
public static Pipeline get(String name, Output<String> id, PipelineState state, CustomResourceOptions options)
Resource lookup is not supported in YAML
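For example, an existing pipeline can be looked up by its ID without creating a new resource. This is a minimal TypeScript sketch; the pipeline ID is a placeholder.
import * as databricks from "@pulumi/databricks";

// Look up an existing pipeline by its ID; nothing new is created.
const existing = databricks.Pipeline.get("existing", "<pipeline-id>");

export const existingPipelineName = existing.name;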
- name - The unique name of the resulting resource.
- id - The unique provider ID of the resource to lookup.
- state - Any extra arguments used during the lookup.
- opts - A bag of options that control this resource's behavior.

- resource_name - The unique name of the resulting resource.
- id - The unique provider ID of the resource to lookup.

- name - The unique name of the resulting resource.
- id - The unique provider ID of the resource to lookup.
- state - Any extra arguments used during the lookup.
- opts - A bag of options that control this resource's behavior.

- name - The unique name of the resulting resource.
- id - The unique provider ID of the resource to lookup.
- state - Any extra arguments used during the lookup.
- opts - A bag of options that control this resource's behavior.

- name - The unique name of the resulting resource.
- id - The unique provider ID of the resource to lookup.
- state - Any extra arguments used during the lookup.
- opts - A bag of options that control this resource's behavior.
- AllowDuplicateNames bool
- Catalog string - The name of the catalog in Unity Catalog. Changing this parameter forces recreation of the pipeline. (Conflicts with storage.)
- Channel string - Optional name of the release channel for the Spark version used by the DLT pipeline. Supported values are CURRENT (default) and PREVIEW.
- Clusters List<PipelineClusterArgs> - Blocks describing the clusters to run the pipeline. If none is specified, a default cluster configuration is selected automatically. Note that DLT pipeline clusters support only a subset of cluster attributes, as described in the documentation. Also note that the autoscale block is extended with a mode parameter that controls the autoscaling algorithm (possible values are ENHANCED for the new, enhanced autoscaling algorithm, or LEGACY for the old algorithm).
- Configuration Dictionary<string, object> - An optional map of values to apply to the entire pipeline. Elements must be formatted as key:value pairs.
- Continuous bool - A flag indicating whether to run the pipeline continuously. The default value is false.
- Development bool - A flag indicating whether to run the pipeline in development mode. The default value is true.
- Edition string - Optional name of the product edition. Supported values are CORE, PRO, and ADVANCED (default).
- Filters PipelineFiltersArgs
- Libraries List<PipelineLibraryArgs> - Blocks specifying pipeline code and required artifacts. The syntax resembles the library configuration block, with the addition of special notebook and file library types that must have a path attribute. Currently only the notebook and file types are supported.
- Name string - A user-friendly name for this pipeline. The name can be used to identify pipeline jobs in the UI.
- Notifications List<PipelineNotificationArgs>
- Photon bool - A flag indicating whether to use the Photon engine. The default value is false.
- Serverless bool
- Storage string - A location on DBFS or cloud storage where output data and metadata required for pipeline execution are stored. By default, tables are stored in a subdirectory of this location. Changing this parameter forces recreation of the pipeline. (Conflicts with catalog.)
- Target string - The name of a database (in either the Hive metastore or in a UC catalog) for persisting pipeline output data. Configuring the target setting allows you to view and query the pipeline output data from the Databricks UI.
- Url string

- AllowDuplicateNames bool
- Catalog string - The name of the catalog in Unity Catalog. Changing this parameter forces recreation of the pipeline. (Conflicts with storage.)
- Channel string - Optional name of the release channel for the Spark version used by the DLT pipeline. Supported values are CURRENT (default) and PREVIEW.
- Clusters []PipelineClusterArgs - Blocks describing the clusters to run the pipeline. If none is specified, a default cluster configuration is selected automatically. Note that DLT pipeline clusters support only a subset of cluster attributes, as described in the documentation. Also note that the autoscale block is extended with a mode parameter that controls the autoscaling algorithm (possible values are ENHANCED for the new, enhanced autoscaling algorithm, or LEGACY for the old algorithm).
- Configuration map[string]interface{} - An optional map of values to apply to the entire pipeline. Elements must be formatted as key:value pairs.
- Continuous bool - A flag indicating whether to run the pipeline continuously. The default value is false.
- Development bool - A flag indicating whether to run the pipeline in development mode. The default value is true.
- Edition string - Optional name of the product edition. Supported values are CORE, PRO, and ADVANCED (default).
- Filters PipelineFiltersArgs
- Libraries []PipelineLibraryArgs - Blocks specifying pipeline code and required artifacts. The syntax resembles the library configuration block, with the addition of special notebook and file library types that must have a path attribute. Currently only the notebook and file types are supported.
- Name string - A user-friendly name for this pipeline. The name can be used to identify pipeline jobs in the UI.
- Notifications []PipelineNotificationArgs
- Photon bool - A flag indicating whether to use the Photon engine. The default value is false.
- Serverless bool
- Storage string - A location on DBFS or cloud storage where output data and metadata required for pipeline execution are stored. By default, tables are stored in a subdirectory of this location. Changing this parameter forces recreation of the pipeline. (Conflicts with catalog.)
- Target string - The name of a database (in either the Hive metastore or in a UC catalog) for persisting pipeline output data. Configuring the target setting allows you to view and query the pipeline output data from the Databricks UI.
- Url string

- allowDuplicateNames Boolean
- catalog String - The name of the catalog in Unity Catalog. Changing this parameter forces recreation of the pipeline. (Conflicts with storage.)
- channel String - Optional name of the release channel for the Spark version used by the DLT pipeline. Supported values are CURRENT (default) and PREVIEW.
- clusters List<PipelineClusterArgs> - Blocks describing the clusters to run the pipeline. If none is specified, a default cluster configuration is selected automatically. Note that DLT pipeline clusters support only a subset of cluster attributes, as described in the documentation. Also note that the autoscale block is extended with a mode parameter that controls the autoscaling algorithm (possible values are ENHANCED for the new, enhanced autoscaling algorithm, or LEGACY for the old algorithm).
- configuration Map<String,Object> - An optional map of values to apply to the entire pipeline. Elements must be formatted as key:value pairs.
- continuous Boolean - A flag indicating whether to run the pipeline continuously. The default value is false.
- development Boolean - A flag indicating whether to run the pipeline in development mode. The default value is true.
- edition String - Optional name of the product edition. Supported values are CORE, PRO, and ADVANCED (default).
- filters PipelineFiltersArgs
- libraries List<PipelineLibraryArgs> - Blocks specifying pipeline code and required artifacts. The syntax resembles the library configuration block, with the addition of special notebook and file library types that must have a path attribute. Currently only the notebook and file types are supported.
- name String - A user-friendly name for this pipeline. The name can be used to identify pipeline jobs in the UI.
- notifications List<PipelineNotificationArgs>
- photon Boolean - A flag indicating whether to use the Photon engine. The default value is false.
- serverless Boolean
- storage String - A location on DBFS or cloud storage where output data and metadata required for pipeline execution are stored. By default, tables are stored in a subdirectory of this location. Changing this parameter forces recreation of the pipeline. (Conflicts with catalog.)
- target String - The name of a database (in either the Hive metastore or in a UC catalog) for persisting pipeline output data. Configuring the target setting allows you to view and query the pipeline output data from the Databricks UI.
- url String

- allowDuplicateNames boolean
- catalog string - The name of the catalog in Unity Catalog. Changing this parameter forces recreation of the pipeline. (Conflicts with storage.)
- channel string - Optional name of the release channel for the Spark version used by the DLT pipeline. Supported values are CURRENT (default) and PREVIEW.
- clusters PipelineClusterArgs[] - Blocks describing the clusters to run the pipeline. If none is specified, a default cluster configuration is selected automatically. Note that DLT pipeline clusters support only a subset of cluster attributes, as described in the documentation. Also note that the autoscale block is extended with a mode parameter that controls the autoscaling algorithm (possible values are ENHANCED for the new, enhanced autoscaling algorithm, or LEGACY for the old algorithm).
- configuration {[key: string]: any} - An optional map of values to apply to the entire pipeline. Elements must be formatted as key:value pairs.
- continuous boolean - A flag indicating whether to run the pipeline continuously. The default value is false.
- development boolean - A flag indicating whether to run the pipeline in development mode. The default value is true.
- edition string - Optional name of the product edition. Supported values are CORE, PRO, and ADVANCED (default).
- filters PipelineFiltersArgs
- libraries PipelineLibraryArgs[] - Blocks specifying pipeline code and required artifacts. The syntax resembles the library configuration block, with the addition of special notebook and file library types that must have a path attribute. Currently only the notebook and file types are supported.
- name string - A user-friendly name for this pipeline. The name can be used to identify pipeline jobs in the UI.
- notifications PipelineNotificationArgs[]
- photon boolean - A flag indicating whether to use the Photon engine. The default value is false.
- serverless boolean
- storage string - A location on DBFS or cloud storage where output data and metadata required for pipeline execution are stored. By default, tables are stored in a subdirectory of this location. Changing this parameter forces recreation of the pipeline. (Conflicts with catalog.)
- target string - The name of a database (in either the Hive metastore or in a UC catalog) for persisting pipeline output data. Configuring the target setting allows you to view and query the pipeline output data from the Databricks UI.
- url string

- allow_duplicate_names bool
- catalog str - The name of the catalog in Unity Catalog. Changing this parameter forces recreation of the pipeline. (Conflicts with storage.)
- channel str - Optional name of the release channel for the Spark version used by the DLT pipeline. Supported values are CURRENT (default) and PREVIEW.
- clusters Sequence[PipelineClusterArgs] - Blocks describing the clusters to run the pipeline. If none is specified, a default cluster configuration is selected automatically. Note that DLT pipeline clusters support only a subset of cluster attributes, as described in the documentation. Also note that the autoscale block is extended with a mode parameter that controls the autoscaling algorithm (possible values are ENHANCED for the new, enhanced autoscaling algorithm, or LEGACY for the old algorithm).
- configuration Mapping[str, Any] - An optional map of values to apply to the entire pipeline. Elements must be formatted as key:value pairs.
- continuous bool - A flag indicating whether to run the pipeline continuously. The default value is false.
- development bool - A flag indicating whether to run the pipeline in development mode. The default value is true.
- edition str - Optional name of the product edition. Supported values are CORE, PRO, and ADVANCED (default).
- filters PipelineFiltersArgs
- libraries Sequence[PipelineLibraryArgs] - Blocks specifying pipeline code and required artifacts. The syntax resembles the library configuration block, with the addition of special notebook and file library types that must have a path attribute. Currently only the notebook and file types are supported.
- name str - A user-friendly name for this pipeline. The name can be used to identify pipeline jobs in the UI.
- notifications Sequence[PipelineNotificationArgs]
- photon bool - A flag indicating whether to use the Photon engine. The default value is false.
- serverless bool
- storage str - A location on DBFS or cloud storage where output data and metadata required for pipeline execution are stored. By default, tables are stored in a subdirectory of this location. Changing this parameter forces recreation of the pipeline. (Conflicts with catalog.)
- target str - The name of a database (in either the Hive metastore or in a UC catalog) for persisting pipeline output data. Configuring the target setting allows you to view and query the pipeline output data from the Databricks UI.
- url str

- allowDuplicateNames Boolean
- catalog String - The name of the catalog in Unity Catalog. Changing this parameter forces recreation of the pipeline. (Conflicts with storage.)
- channel String - Optional name of the release channel for the Spark version used by the DLT pipeline. Supported values are CURRENT (default) and PREVIEW.
- clusters List<Property Map> - Blocks describing the clusters to run the pipeline. If none is specified, a default cluster configuration is selected automatically. Note that DLT pipeline clusters support only a subset of cluster attributes, as described in the documentation. Also note that the autoscale block is extended with a mode parameter that controls the autoscaling algorithm (possible values are ENHANCED for the new, enhanced autoscaling algorithm, or LEGACY for the old algorithm).
- configuration Map<Any> - An optional map of values to apply to the entire pipeline. Elements must be formatted as key:value pairs.
- continuous Boolean - A flag indicating whether to run the pipeline continuously. The default value is false.
- development Boolean - A flag indicating whether to run the pipeline in development mode. The default value is true.
- edition String - Optional name of the product edition. Supported values are CORE, PRO, and ADVANCED (default).
- filters Property Map
- libraries List<Property Map> - Blocks specifying pipeline code and required artifacts. The syntax resembles the library configuration block, with the addition of special notebook and file library types that must have a path attribute. Currently only the notebook and file types are supported.
- name String - A user-friendly name for this pipeline. The name can be used to identify pipeline jobs in the UI.
- notifications List<Property Map>
- photon Boolean - A flag indicating whether to use the Photon engine. The default value is false.
- serverless Boolean
- storage String - A location on DBFS or cloud storage where output data and metadata required for pipeline execution are stored. By default, tables are stored in a subdirectory of this location. Changing this parameter forces recreation of the pipeline. (Conflicts with catalog.)
- target String - The name of a database (in either the Hive metastore or in a UC catalog) for persisting pipeline output data. Configuring the target setting allows you to view and query the pipeline output data from the Databricks UI.
- url String
Supporting Types
PipelineCluster
- ApplyPolicyDefaultValues bool
- Autoscale PipelineClusterAutoscale
- AwsAttributes PipelineClusterAwsAttributes
- AzureAttributes PipelineClusterAzureAttributes
- ClusterLogConf PipelineClusterClusterLogConf
- CustomTags Dictionary<string, object>
- DriverInstancePoolId string
- DriverNodeTypeId string
- EnableLocalDiskEncryption bool
- GcpAttributes PipelineClusterGcpAttributes
- InitScripts List<PipelineClusterInitScript>
- InstancePoolId string
- Label string
- NodeTypeId string
- NumWorkers int
- PolicyId string
- SparkConf Dictionary<string, object>
- SparkEnvVars Dictionary<string, object>
- SshPublicKeys List<string>

- ApplyPolicyDefaultValues bool
- Autoscale PipelineClusterAutoscale
- AwsAttributes PipelineClusterAwsAttributes
- AzureAttributes PipelineClusterAzureAttributes
- ClusterLogConf PipelineClusterClusterLogConf
- CustomTags map[string]interface{}
- DriverInstancePoolId string
- DriverNodeTypeId string
- EnableLocalDiskEncryption bool
- GcpAttributes PipelineClusterGcpAttributes
- InitScripts []PipelineClusterInitScript
- InstancePoolId string
- Label string
- NodeTypeId string
- NumWorkers int
- PolicyId string
- SparkConf map[string]interface{}
- SparkEnvVars map[string]interface{}
- SshPublicKeys []string

- applyPolicyDefaultValues Boolean
- autoscale PipelineClusterAutoscale
- awsAttributes PipelineClusterAwsAttributes
- azureAttributes PipelineClusterAzureAttributes
- clusterLogConf PipelineClusterClusterLogConf
- customTags Map<String,Object>
- driverInstancePoolId String
- driverNodeTypeId String
- enableLocalDiskEncryption Boolean
- gcpAttributes PipelineClusterGcpAttributes
- initScripts List<PipelineClusterInitScript>
- instancePoolId String
- label String
- nodeTypeId String
- numWorkers Integer
- policyId String
- sparkConf Map<String,Object>
- sparkEnvVars Map<String,Object>
- sshPublicKeys List<String>

- applyPolicyDefaultValues boolean
- autoscale PipelineClusterAutoscale
- awsAttributes PipelineClusterAwsAttributes
- azureAttributes PipelineClusterAzureAttributes
- clusterLogConf PipelineClusterClusterLogConf
- customTags {[key: string]: any}
- driverInstancePoolId string
- driverNodeTypeId string
- enableLocalDiskEncryption boolean
- gcpAttributes PipelineClusterGcpAttributes
- initScripts PipelineClusterInitScript[]
- instancePoolId string
- label string
- nodeTypeId string
- numWorkers number
- policyId string
- sparkConf {[key: string]: any}
- sparkEnvVars {[key: string]: any}
- sshPublicKeys string[]

- apply_policy_default_values bool
- autoscale PipelineClusterAutoscale
- aws_attributes PipelineClusterAwsAttributes
- azure_attributes PipelineClusterAzureAttributes
- cluster_log_conf PipelineClusterClusterLogConf
- custom_tags Mapping[str, Any]
- driver_instance_pool_id str
- driver_node_type_id str
- enable_local_disk_encryption bool
- gcp_attributes PipelineClusterGcpAttributes
- init_scripts Sequence[PipelineClusterInitScript]
- instance_pool_id str
- label str
- node_type_id str
- num_workers int
- policy_id str
- spark_conf Mapping[str, Any]
- spark_env_vars Mapping[str, Any]
- ssh_public_keys Sequence[str]

- applyPolicyDefaultValues Boolean
- autoscale Property Map
- awsAttributes Property Map
- azureAttributes Property Map
- clusterLogConf Property Map
- customTags Map<Any>
- driverInstancePoolId String
- driverNodeTypeId String
- enableLocalDiskEncryption Boolean
- gcpAttributes Property Map
- initScripts List<Property Map>
- instancePoolId String
- label String
- nodeTypeId String
- numWorkers Number
- policyId String
- sparkConf Map<Any>
- sparkEnvVars Map<Any>
- sshPublicKeys List<String>
PipelineClusterAutoscale
- MaxWorkers int
- MinWorkers int
- Mode string

- MaxWorkers int
- MinWorkers int
- Mode string

- maxWorkers Integer
- minWorkers Integer
- mode String

- maxWorkers number
- minWorkers number
- mode string

- max_workers int
- min_workers int
- mode str

- maxWorkers Number
- minWorkers Number
- mode String
PipelineClusterAwsAttributes
- Availability string
- EbsVolumeCount int
- EbsVolumeSize int
- EbsVolumeType string
- FirstOnDemand int
- InstanceProfileArn string
- SpotBidPricePercent int
- ZoneId string

- Availability string
- EbsVolumeCount int
- EbsVolumeSize int
- EbsVolumeType string
- FirstOnDemand int
- InstanceProfileArn string
- SpotBidPricePercent int
- ZoneId string

- availability String
- ebsVolumeCount Integer
- ebsVolumeSize Integer
- ebsVolumeType String
- firstOnDemand Integer
- instanceProfileArn String
- spotBidPricePercent Integer
- zoneId String

- availability string
- ebsVolumeCount number
- ebsVolumeSize number
- ebsVolumeType string
- firstOnDemand number
- instanceProfileArn string
- spotBidPricePercent number
- zoneId string

- availability str
- ebs_volume_count int
- ebs_volume_size int
- ebs_volume_type str
- first_on_demand int
- instance_profile_arn str
- spot_bid_price_percent int
- zone_id str

- availability String
- ebsVolumeCount Number
- ebsVolumeSize Number
- ebsVolumeType String
- firstOnDemand Number
- instanceProfileArn String
- spotBidPricePercent Number
- zoneId String
PipelineClusterAzureAttributes
- Availability string
- FirstOnDemand int
- SpotBidMaxPrice double

- Availability string
- FirstOnDemand int
- SpotBidMaxPrice float64

- availability String
- firstOnDemand Integer
- spotBidMaxPrice Double

- availability string
- firstOnDemand number
- spotBidMaxPrice number

- availability str
- first_on_demand int
- spot_bid_max_price float

- availability String
- firstOnDemand Number
- spotBidMaxPrice Number
PipelineClusterClusterLogConf
PipelineClusterClusterLogConfDbfs
- Destination string
- Destination string
- destination String
- destination string
- destination str
- destination String
PipelineClusterClusterLogConfS3
- Destination string
- CannedAcl string
- EnableEncryption bool
- EncryptionType string
- Endpoint string
- KmsKey string
- Region string

- Destination string
- CannedAcl string
- EnableEncryption bool
- EncryptionType string
- Endpoint string
- KmsKey string
- Region string

- destination String
- cannedAcl String
- enableEncryption Boolean
- encryptionType String
- endpoint String
- kmsKey String
- region String

- destination string
- cannedAcl string
- enableEncryption boolean
- encryptionType string
- endpoint string
- kmsKey string
- region string

- destination str
- canned_acl str
- enable_encryption bool
- encryption_type str
- endpoint str
- kms_key str
- region str

- destination String
- cannedAcl String
- enableEncryption Boolean
- encryptionType String
- endpoint String
- kmsKey String
- region String
PipelineClusterGcpAttributes
- Availability string
- GoogleServiceAccount string
- ZoneId string

- Availability string
- GoogleServiceAccount string
- ZoneId string

- availability String
- googleServiceAccount String
- zoneId String

- availability string
- googleServiceAccount string
- zoneId string

- availability str
- google_service_account str
- zone_id str

- availability String
- googleServiceAccount String
- zoneId String
PipelineClusterInitScript
PipelineClusterInitScriptAbfss
- Destination string
- Destination string
- destination String
- destination string
- destination str
- destination String
PipelineClusterInitScriptDbfs
- Destination string
- Destination string
- destination String
- destination string
- destination str
- destination String
PipelineClusterInitScriptFile
- Destination string
- Destination string
- destination String
- destination string
- destination str
- destination String
PipelineClusterInitScriptGcs
- Destination string
- Destination string
- destination String
- destination string
- destination str
- destination String
PipelineClusterInitScriptS3
- Destination string
- CannedAcl string
- EnableEncryption bool
- EncryptionType string
- Endpoint string
- KmsKey string
- Region string

- Destination string
- CannedAcl string
- EnableEncryption bool
- EncryptionType string
- Endpoint string
- KmsKey string
- Region string

- destination String
- cannedAcl String
- enableEncryption Boolean
- encryptionType String
- endpoint String
- kmsKey String
- region String

- destination string
- cannedAcl string
- enableEncryption boolean
- encryptionType string
- endpoint string
- kmsKey string
- region string

- destination str
- canned_acl str
- enable_encryption bool
- encryption_type str
- endpoint str
- kms_key str
- region str

- destination String
- cannedAcl String
- enableEncryption Boolean
- encryptionType String
- endpoint String
- kmsKey String
- region String
PipelineClusterInitScriptWorkspace
- Destination string
- Destination string
- destination String
- destination string
- destination str
- destination String
PipelineFilters
PipelineLibrary
- file Property Map
- jar String
- maven Property Map
- notebook Property Map
- whl String
PipelineLibraryFile
- Path string
- Path string
- path String
- path string
- path str
- path String
PipelineLibraryMaven
- Coordinates string
- Exclusions List<string>
- Repo string
- Coordinates string
- Exclusions []string
- Repo string
- coordinates String
- exclusions List<String>
- repo String
- coordinates string
- exclusions string[]
- repo string
- coordinates str
- exclusions Sequence[str]
- repo str
- coordinates String
- exclusions List<String>
- repo String
PipelineLibraryNotebook
- Path string
- Path string
- path String
- path string
- path str
- path String
PipelineNotification
- Alerts List<string> - Non-empty list of alert types. Currently the following alert types are supported (consult the documentation for the up-to-date list):
  - on-update-success - a pipeline update completes successfully.
  - on-update-failure - a pipeline update fails with a retryable error.
  - on-update-fatal-failure - a pipeline update fails with a non-retryable (fatal) error.
  - on-flow-failure - a single data flow fails.
- EmailRecipients List<string> - Non-empty list of emails to notify.

- Alerts []string - Non-empty list of alert types. Currently the following alert types are supported (consult the documentation for the up-to-date list):
  - on-update-success - a pipeline update completes successfully.
  - on-update-failure - a pipeline update fails with a retryable error.
  - on-update-fatal-failure - a pipeline update fails with a non-retryable (fatal) error.
  - on-flow-failure - a single data flow fails.
- EmailRecipients []string - Non-empty list of emails to notify.

- alerts List<String> - Non-empty list of alert types. Currently the following alert types are supported (consult the documentation for the up-to-date list):
  - on-update-success - a pipeline update completes successfully.
  - on-update-failure - a pipeline update fails with a retryable error.
  - on-update-fatal-failure - a pipeline update fails with a non-retryable (fatal) error.
  - on-flow-failure - a single data flow fails.
- emailRecipients List<String> - Non-empty list of emails to notify.

- alerts string[] - Non-empty list of alert types. Currently the following alert types are supported (consult the documentation for the up-to-date list):
  - on-update-success - a pipeline update completes successfully.
  - on-update-failure - a pipeline update fails with a retryable error.
  - on-update-fatal-failure - a pipeline update fails with a non-retryable (fatal) error.
  - on-flow-failure - a single data flow fails.
- emailRecipients string[] - Non-empty list of emails to notify.

- alerts Sequence[str] - Non-empty list of alert types. Currently the following alert types are supported (consult the documentation for the up-to-date list):
  - on-update-success - a pipeline update completes successfully.
  - on-update-failure - a pipeline update fails with a retryable error.
  - on-update-fatal-failure - a pipeline update fails with a non-retryable (fatal) error.
  - on-flow-failure - a single data flow fails.
- email_recipients Sequence[str] - Non-empty list of emails to notify.

- alerts List<String> - Non-empty list of alert types. Currently the following alert types are supported (consult the documentation for the up-to-date list):
  - on-update-success - a pipeline update completes successfully.
  - on-update-failure - a pipeline update fails with a retryable error.
  - on-update-fatal-failure - a pipeline update fails with a non-retryable (fatal) error.
  - on-flow-failure - a single data flow fails.
- emailRecipients List<String> - Non-empty list of emails to notify.
Import
The resource pipeline can be imported using the ID of the pipeline:
$ pulumi import databricks:index/pipeline:Pipeline this <pipeline-id>
Package Details
- Repository - databricks pulumi/pulumi-databricks
- License - Apache-2.0
- Notes - This Pulumi package is based on the databricks Terraform Provider.