databricks.Pipeline
Use databricks.Pipeline to deploy Lakeflow Declarative Pipelines.
This resource can only be used with a workspace-level provider!
Example Usage
TypeScript
import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";
const ldpDemo = new databricks.Notebook("ldp_demo", {});
const ldpDemoRepo = new databricks.Repo("ldp_demo", {});
const _this = new databricks.Pipeline("this", {
    name: "Pipeline Name",
    catalog: "main",
    schema: "ldp_demo",
    configuration: {
        key1: "value1",
        key2: "value2",
    },
    clusters: [
        {
            label: "default",
            numWorkers: 2,
            customTags: {
                cluster_type: "default",
            },
        },
        {
            label: "maintenance",
            numWorkers: 1,
            customTags: {
                cluster_type: "maintenance",
            },
        },
    ],
    libraries: [
        {
            notebook: {
                path: ldpDemo.id,
            },
        },
        {
            file: {
                path: pulumi.interpolate`${ldpDemoRepo.path}/pipeline.sql`,
            },
        },
        {
            glob: {
                include: pulumi.interpolate`${ldpDemoRepo.path}/subfolder/**`,
            },
        },
    ],
    continuous: false,
    notifications: [{
        emailRecipients: [
            "user@domain.com",
            "user1@domain.com",
        ],
        alerts: [
            "on-update-failure",
            "on-update-fatal-failure",
            "on-update-success",
            "on-flow-failure",
        ],
    }],
});
Python
import pulumi
import pulumi_databricks as databricks
ldp_demo = databricks.Notebook("ldp_demo")
ldp_demo_repo = databricks.Repo("ldp_demo")
this = databricks.Pipeline("this",
    name="Pipeline Name",
    catalog="main",
    schema="ldp_demo",
    configuration={
        "key1": "value1",
        "key2": "value2",
    },
    clusters=[
        {
            "label": "default",
            "num_workers": 2,
            "custom_tags": {
                "cluster_type": "default",
            },
        },
        {
            "label": "maintenance",
            "num_workers": 1,
            "custom_tags": {
                "cluster_type": "maintenance",
            },
        },
    ],
    libraries=[
        {
            "notebook": {
                "path": ldp_demo.id,
            },
        },
        {
            "file": {
                "path": ldp_demo_repo.path.apply(lambda path: f"{path}/pipeline.sql"),
            },
        },
        {
            "glob": {
                "include": ldp_demo_repo.path.apply(lambda path: f"{path}/subfolder/**"),
            },
        },
    ],
    continuous=False,
    notifications=[{
        "email_recipients": [
            "user@domain.com",
            "user1@domain.com",
        ],
        "alerts": [
            "on-update-failure",
            "on-update-fatal-failure",
            "on-update-success",
            "on-flow-failure",
        ],
    }])
Go
package main
import (
	"fmt"
	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		ldpDemo, err := databricks.NewNotebook(ctx, "ldp_demo", nil)
		if err != nil {
			return err
		}
		ldpDemoRepo, err := databricks.NewRepo(ctx, "ldp_demo", nil)
		if err != nil {
			return err
		}
		_, err = databricks.NewPipeline(ctx, "this", &databricks.PipelineArgs{
			Name:    pulumi.String("Pipeline Name"),
			Catalog: pulumi.String("main"),
			Schema:  pulumi.String("ldp_demo"),
			Configuration: pulumi.StringMap{
				"key1": pulumi.String("value1"),
				"key2": pulumi.String("value2"),
			},
			Clusters: databricks.PipelineClusterArray{
				&databricks.PipelineClusterArgs{
					Label:      pulumi.String("default"),
					NumWorkers: pulumi.Int(2),
					CustomTags: pulumi.StringMap{
						"cluster_type": pulumi.String("default"),
					},
				},
				&databricks.PipelineClusterArgs{
					Label:      pulumi.String("maintenance"),
					NumWorkers: pulumi.Int(1),
					CustomTags: pulumi.StringMap{
						"cluster_type": pulumi.String("maintenance"),
					},
				},
			},
			Libraries: databricks.PipelineLibraryArray{
				&databricks.PipelineLibraryArgs{
					Notebook: &databricks.PipelineLibraryNotebookArgs{
						Path: ldpDemo.ID(),
					},
				},
				&databricks.PipelineLibraryArgs{
					File: &databricks.PipelineLibraryFileArgs{
						Path: ldpDemoRepo.Path.ApplyT(func(path string) (string, error) {
							return fmt.Sprintf("%v/pipeline.sql", path), nil
						}).(pulumi.StringOutput),
					},
				},
				&databricks.PipelineLibraryArgs{
					Glob: &databricks.PipelineLibraryGlobArgs{
						Include: ldpDemoRepo.Path.ApplyT(func(path string) (string, error) {
							return fmt.Sprintf("%v/subfolder/**", path), nil
						}).(pulumi.StringOutput),
					},
				},
			},
			Continuous: pulumi.Bool(false),
			Notifications: databricks.PipelineNotificationArray{
				&databricks.PipelineNotificationArgs{
					EmailRecipients: pulumi.StringArray{
						pulumi.String("user@domain.com"),
						pulumi.String("user1@domain.com"),
					},
					Alerts: pulumi.StringArray{
						pulumi.String("on-update-failure"),
						pulumi.String("on-update-fatal-failure"),
						pulumi.String("on-update-success"),
						pulumi.String("on-flow-failure"),
					},
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
C#
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Databricks = Pulumi.Databricks;
return await Deployment.RunAsync(() => 
{
    var ldpDemo = new Databricks.Notebook("ldp_demo");
    var ldpDemoRepo = new Databricks.Repo("ldp_demo");
    var @this = new Databricks.Pipeline("this", new()
    {
        Name = "Pipeline Name",
        Catalog = "main",
        Schema = "ldp_demo",
        Configuration = 
        {
            { "key1", "value1" },
            { "key2", "value2" },
        },
        Clusters = new[]
        {
            new Databricks.Inputs.PipelineClusterArgs
            {
                Label = "default",
                NumWorkers = 2,
                CustomTags = 
                {
                    { "cluster_type", "default" },
                },
            },
            new Databricks.Inputs.PipelineClusterArgs
            {
                Label = "maintenance",
                NumWorkers = 1,
                CustomTags = 
                {
                    { "cluster_type", "maintenance" },
                },
            },
        },
        Libraries = new[]
        {
            new Databricks.Inputs.PipelineLibraryArgs
            {
                Notebook = new Databricks.Inputs.PipelineLibraryNotebookArgs
                {
                    Path = ldpDemo.Id,
                },
            },
            new Databricks.Inputs.PipelineLibraryArgs
            {
                File = new Databricks.Inputs.PipelineLibraryFileArgs
                {
                    Path = ldpDemoRepo.Path.Apply(path => $"{path}/pipeline.sql"),
                },
            },
            new Databricks.Inputs.PipelineLibraryArgs
            {
                Glob = new Databricks.Inputs.PipelineLibraryGlobArgs
                {
                    Include = ldpDemoRepo.Path.Apply(path => $"{path}/subfolder/**"),
                },
            },
        },
        Continuous = false,
        Notifications = new[]
        {
            new Databricks.Inputs.PipelineNotificationArgs
            {
                EmailRecipients = new[]
                {
                    "user@domain.com",
                    "user1@domain.com",
                },
                Alerts = new[]
                {
                    "on-update-failure",
                    "on-update-fatal-failure",
                    "on-update-success",
                    "on-flow-failure",
                },
            },
        },
    });
});
Java
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.databricks.Notebook;
import com.pulumi.databricks.Repo;
import com.pulumi.databricks.Pipeline;
import com.pulumi.databricks.PipelineArgs;
import com.pulumi.databricks.inputs.PipelineClusterArgs;
import com.pulumi.databricks.inputs.PipelineLibraryArgs;
import com.pulumi.databricks.inputs.PipelineLibraryNotebookArgs;
import com.pulumi.databricks.inputs.PipelineLibraryFileArgs;
import com.pulumi.databricks.inputs.PipelineLibraryGlobArgs;
import com.pulumi.databricks.inputs.PipelineNotificationArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        var ldpDemo = new Notebook("ldpDemo");
        var ldpDemoRepo = new Repo("ldpDemoRepo");
        var this_ = new Pipeline("this", PipelineArgs.builder()
            .name("Pipeline Name")
            .catalog("main")
            .schema("ldp_demo")
            .configuration(Map.ofEntries(
                Map.entry("key1", "value1"),
                Map.entry("key2", "value2")
            ))
            .clusters(            
                PipelineClusterArgs.builder()
                    .label("default")
                    .numWorkers(2)
                    .customTags(Map.of("cluster_type", "default"))
                    .build(),
                PipelineClusterArgs.builder()
                    .label("maintenance")
                    .numWorkers(1)
                    .customTags(Map.of("cluster_type", "maintenance"))
                    .build())
            .libraries(            
                PipelineLibraryArgs.builder()
                    .notebook(PipelineLibraryNotebookArgs.builder()
                        .path(ldpDemo.id())
                        .build())
                    .build(),
                PipelineLibraryArgs.builder()
                    .file(PipelineLibraryFileArgs.builder()
                        .path(ldpDemoRepo.path().applyValue(_path -> String.format("%s/pipeline.sql", _path)))
                        .build())
                    .build(),
                PipelineLibraryArgs.builder()
                    .glob(PipelineLibraryGlobArgs.builder()
                        .include(ldpDemoRepo.path().applyValue(_path -> String.format("%s/subfolder/**", _path)))
                        .build())
                    .build())
            .continuous(false)
            .notifications(PipelineNotificationArgs.builder()
                .emailRecipients(                
                    "user@domain.com",
                    "user1@domain.com")
                .alerts(                
                    "on-update-failure",
                    "on-update-fatal-failure",
                    "on-update-success",
                    "on-flow-failure")
                .build())
            .build());
    }
}
YAML
resources:
  ldpDemo:
    type: databricks:Notebook
    name: ldp_demo
  ldpDemoRepo:
    type: databricks:Repo
    name: ldp_demo
  this:
    type: databricks:Pipeline
    properties:
      name: Pipeline Name
      catalog: main
      schema: ldp_demo
      configuration:
        key1: value1
        key2: value2
      clusters:
        - label: default
          numWorkers: 2
          customTags:
            cluster_type: default
        - label: maintenance
          numWorkers: 1
          customTags:
            cluster_type: maintenance
      libraries:
        - notebook:
            path: ${ldpDemo.id}
        - file:
            path: ${ldpDemoRepo.path}/pipeline.sql
        - glob:
            include: ${ldpDemoRepo.path}/subfolder/**
      continuous: false
      notifications:
        - emailRecipients:
            - user@domain.com
            - user1@domain.com
          alerts:
            - on-update-failure
            - on-update-fatal-failure
            - on-update-success
            - on-flow-failure
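Once the pipeline exists, its id output can be wired into other resources. The following is a minimal, hypothetical sketch (TypeScript) of triggering the pipeline from a databricks.Job pipeline task; the job name and task key are placeholders, and _this refers to the Pipeline resource from the TypeScript example above.
import * as databricks from "@pulumi/databricks";

// Hypothetical sketch: run the pipeline declared above from a Databricks Job.
const pipelineJob = new databricks.Job("pipeline_job", {
    name: "Run LDP demo pipeline",
    tasks: [{
        taskKey: "run_pipeline",
        pipelineTask: {
            pipelineId: _this.id, // id of the databricks.Pipeline created above
        },
    }],
});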
Related Resources
The following resources are often used in the same context:
- End-to-end workspace management guide.
- databricks.getPipelines to retrieve Lakeflow Declarative Pipelines data (see the sketch after this list).
- databricks.Cluster to create Databricks Clusters.
- databricks.Job to manage Databricks Jobs that run non-interactive code in a databricks_cluster.
- databricks.Notebook to manage Databricks Notebooks.
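A minimal sketch of the databricks.getPipelines data source mentioned above (TypeScript; it assumes the pipelineName filter and the ids result field, and the wildcard pattern is only an illustration):
import * as databricks from "@pulumi/databricks";

// Minimal sketch: look up existing pipelines whose name matches a pattern
// and export their IDs.
const demoPipelines = databricks.getPipelines({
    pipelineName: "Pipeline Name%",
});
export const demoPipelineIds = demoPipelines.then(result => result.ids);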
Create Pipeline Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new Pipeline(name: string, args?: PipelineArgs, opts?: CustomResourceOptions);
@overload
def Pipeline(resource_name: str,
             args: Optional[PipelineArgs] = None,
             opts: Optional[ResourceOptions] = None)
@overload
def Pipeline(resource_name: str,
             opts: Optional[ResourceOptions] = None,
             allow_duplicate_names: Optional[bool] = None,
             budget_policy_id: Optional[str] = None,
             catalog: Optional[str] = None,
             cause: Optional[str] = None,
             channel: Optional[str] = None,
             cluster_id: Optional[str] = None,
             clusters: Optional[Sequence[PipelineClusterArgs]] = None,
             configuration: Optional[Mapping[str, str]] = None,
             continuous: Optional[bool] = None,
             creator_user_name: Optional[str] = None,
             deployment: Optional[PipelineDeploymentArgs] = None,
             development: Optional[bool] = None,
             edition: Optional[str] = None,
             environment: Optional[PipelineEnvironmentArgs] = None,
             event_log: Optional[PipelineEventLogArgs] = None,
             expected_last_modified: Optional[int] = None,
             filters: Optional[PipelineFiltersArgs] = None,
             gateway_definition: Optional[PipelineGatewayDefinitionArgs] = None,
             health: Optional[str] = None,
             ingestion_definition: Optional[PipelineIngestionDefinitionArgs] = None,
             last_modified: Optional[int] = None,
             latest_updates: Optional[Sequence[PipelineLatestUpdateArgs]] = None,
             libraries: Optional[Sequence[PipelineLibraryArgs]] = None,
             name: Optional[str] = None,
             notifications: Optional[Sequence[PipelineNotificationArgs]] = None,
             photon: Optional[bool] = None,
             restart_window: Optional[PipelineRestartWindowArgs] = None,
             root_path: Optional[str] = None,
             run_as: Optional[PipelineRunAsArgs] = None,
             run_as_user_name: Optional[str] = None,
             schema: Optional[str] = None,
             serverless: Optional[bool] = None,
             state: Optional[str] = None,
             storage: Optional[str] = None,
             tags: Optional[Mapping[str, str]] = None,
             target: Optional[str] = None,
             trigger: Optional[PipelineTriggerArgs] = None,
             url: Optional[str] = None)
func NewPipeline(ctx *Context, name string, args *PipelineArgs, opts ...ResourceOption) (*Pipeline, error)
public Pipeline(string name, PipelineArgs? args = null, CustomResourceOptions? opts = null)
public Pipeline(String name, PipelineArgs args)
public Pipeline(String name, PipelineArgs args, CustomResourceOptions options)
type: databricks:Pipeline
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args PipelineArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args PipelineArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args PipelineArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args PipelineArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args PipelineArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
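To illustrate how the name, args, and opts parameters fit together, here is a minimal TypeScript sketch; the dependsOn and protect options are standard Pulumi CustomResourceOptions, and the notebook and property values are placeholders.
import * as databricks from "@pulumi/databricks";

// Minimal sketch: the third constructor argument carries resource options.
const demoNotebook = new databricks.Notebook("demo_notebook", {});
const demoPipeline = new databricks.Pipeline("demo_pipeline",
    {
        name: "Demo Pipeline",
        serverless: true,
    },
    {
        dependsOn: [demoNotebook], // create the notebook before the pipeline
        protect: false,            // set true to guard against accidental deletion
    });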
Constructor example
The following reference example uses placeholder values for all input properties.
C#
var pipelineResource = new Databricks.Pipeline("pipelineResource", new()
{
    AllowDuplicateNames = false,
    BudgetPolicyId = "string",
    Catalog = "string",
    Cause = "string",
    Channel = "string",
    ClusterId = "string",
    Clusters = new[]
    {
        new Databricks.Inputs.PipelineClusterArgs
        {
            ApplyPolicyDefaultValues = false,
            Autoscale = new Databricks.Inputs.PipelineClusterAutoscaleArgs
            {
                MaxWorkers = 0,
                MinWorkers = 0,
                Mode = "string",
            },
            AwsAttributes = new Databricks.Inputs.PipelineClusterAwsAttributesArgs
            {
                Availability = "string",
                EbsVolumeCount = 0,
                EbsVolumeIops = 0,
                EbsVolumeSize = 0,
                EbsVolumeThroughput = 0,
                EbsVolumeType = "string",
                FirstOnDemand = 0,
                InstanceProfileArn = "string",
                SpotBidPricePercent = 0,
                ZoneId = "string",
            },
            AzureAttributes = new Databricks.Inputs.PipelineClusterAzureAttributesArgs
            {
                Availability = "string",
                FirstOnDemand = 0,
                LogAnalyticsInfo = new Databricks.Inputs.PipelineClusterAzureAttributesLogAnalyticsInfoArgs
                {
                    LogAnalyticsPrimaryKey = "string",
                    LogAnalyticsWorkspaceId = "string",
                },
                SpotBidMaxPrice = 0,
            },
            ClusterLogConf = new Databricks.Inputs.PipelineClusterClusterLogConfArgs
            {
                Dbfs = new Databricks.Inputs.PipelineClusterClusterLogConfDbfsArgs
                {
                    Destination = "string",
                },
                S3 = new Databricks.Inputs.PipelineClusterClusterLogConfS3Args
                {
                    Destination = "string",
                    CannedAcl = "string",
                    EnableEncryption = false,
                    EncryptionType = "string",
                    Endpoint = "string",
                    KmsKey = "string",
                    Region = "string",
                },
                Volumes = new Databricks.Inputs.PipelineClusterClusterLogConfVolumesArgs
                {
                    Destination = "string",
                },
            },
            CustomTags = 
            {
                { "string", "string" },
            },
            DriverInstancePoolId = "string",
            DriverNodeTypeId = "string",
            EnableLocalDiskEncryption = false,
            GcpAttributes = new Databricks.Inputs.PipelineClusterGcpAttributesArgs
            {
                Availability = "string",
                FirstOnDemand = 0,
                GoogleServiceAccount = "string",
                LocalSsdCount = 0,
                ZoneId = "string",
            },
            InitScripts = new[]
            {
                new Databricks.Inputs.PipelineClusterInitScriptArgs
                {
                    Abfss = new Databricks.Inputs.PipelineClusterInitScriptAbfssArgs
                    {
                        Destination = "string",
                    },
                    File = new Databricks.Inputs.PipelineClusterInitScriptFileArgs
                    {
                        Destination = "string",
                    },
                    Gcs = new Databricks.Inputs.PipelineClusterInitScriptGcsArgs
                    {
                        Destination = "string",
                    },
                    S3 = new Databricks.Inputs.PipelineClusterInitScriptS3Args
                    {
                        Destination = "string",
                        CannedAcl = "string",
                        EnableEncryption = false,
                        EncryptionType = "string",
                        Endpoint = "string",
                        KmsKey = "string",
                        Region = "string",
                    },
                    Volumes = new Databricks.Inputs.PipelineClusterInitScriptVolumesArgs
                    {
                        Destination = "string",
                    },
                    Workspace = new Databricks.Inputs.PipelineClusterInitScriptWorkspaceArgs
                    {
                        Destination = "string",
                    },
                },
            },
            InstancePoolId = "string",
            Label = "string",
            NodeTypeId = "string",
            NumWorkers = 0,
            PolicyId = "string",
            SparkConf = 
            {
                { "string", "string" },
            },
            SparkEnvVars = 
            {
                { "string", "string" },
            },
            SshPublicKeys = new[]
            {
                "string",
            },
        },
    },
    Configuration = 
    {
        { "string", "string" },
    },
    Continuous = false,
    CreatorUserName = "string",
    Deployment = new Databricks.Inputs.PipelineDeploymentArgs
    {
        Kind = "string",
        MetadataFilePath = "string",
    },
    Development = false,
    Edition = "string",
    Environment = new Databricks.Inputs.PipelineEnvironmentArgs
    {
        Dependencies = new[]
        {
            "string",
        },
    },
    EventLog = new Databricks.Inputs.PipelineEventLogArgs
    {
        Name = "string",
        Catalog = "string",
        Schema = "string",
    },
    ExpectedLastModified = 0,
    Filters = new Databricks.Inputs.PipelineFiltersArgs
    {
        Excludes = new[]
        {
            "string",
        },
        Includes = new[]
        {
            "string",
        },
    },
    GatewayDefinition = new Databricks.Inputs.PipelineGatewayDefinitionArgs
    {
        ConnectionName = "string",
        GatewayStorageCatalog = "string",
        GatewayStorageSchema = "string",
        ConnectionId = "string",
        GatewayStorageName = "string",
    },
    Health = "string",
    IngestionDefinition = new Databricks.Inputs.PipelineIngestionDefinitionArgs
    {
        ConnectionName = "string",
        IngestionGatewayId = "string",
        Objects = new[]
        {
            new Databricks.Inputs.PipelineIngestionDefinitionObjectArgs
            {
                Report = new Databricks.Inputs.PipelineIngestionDefinitionObjectReportArgs
                {
                    DestinationCatalog = "string",
                    DestinationSchema = "string",
                    SourceUrl = "string",
                    DestinationTable = "string",
                    TableConfiguration = new Databricks.Inputs.PipelineIngestionDefinitionObjectReportTableConfigurationArgs
                    {
                        ExcludeColumns = new[]
                        {
                            "string",
                        },
                        IncludeColumns = new[]
                        {
                            "string",
                        },
                        PrimaryKeys = new[]
                        {
                            "string",
                        },
                        QueryBasedConnectorConfig = new Databricks.Inputs.PipelineIngestionDefinitionObjectReportTableConfigurationQueryBasedConnectorConfigArgs
                        {
                            CursorColumns = new[]
                            {
                                "string",
                            },
                            DeletionCondition = "string",
                            HardDeletionSyncMinIntervalInSeconds = 0,
                        },
                        SalesforceIncludeFormulaFields = false,
                        ScdType = "string",
                        SequenceBies = new[]
                        {
                            "string",
                        },
                    },
                },
                Schema = new Databricks.Inputs.PipelineIngestionDefinitionObjectSchemaArgs
                {
                    DestinationCatalog = "string",
                    DestinationSchema = "string",
                    SourceSchema = "string",
                    SourceCatalog = "string",
                    TableConfiguration = new Databricks.Inputs.PipelineIngestionDefinitionObjectSchemaTableConfigurationArgs
                    {
                        ExcludeColumns = new[]
                        {
                            "string",
                        },
                        IncludeColumns = new[]
                        {
                            "string",
                        },
                        PrimaryKeys = new[]
                        {
                            "string",
                        },
                        QueryBasedConnectorConfig = new Databricks.Inputs.PipelineIngestionDefinitionObjectSchemaTableConfigurationQueryBasedConnectorConfigArgs
                        {
                            CursorColumns = new[]
                            {
                                "string",
                            },
                            DeletionCondition = "string",
                            HardDeletionSyncMinIntervalInSeconds = 0,
                        },
                        SalesforceIncludeFormulaFields = false,
                        ScdType = "string",
                        SequenceBies = new[]
                        {
                            "string",
                        },
                    },
                },
                Table = new Databricks.Inputs.PipelineIngestionDefinitionObjectTableArgs
                {
                    DestinationCatalog = "string",
                    DestinationSchema = "string",
                    SourceTable = "string",
                    DestinationTable = "string",
                    SourceCatalog = "string",
                    SourceSchema = "string",
                    TableConfiguration = new Databricks.Inputs.PipelineIngestionDefinitionObjectTableTableConfigurationArgs
                    {
                        ExcludeColumns = new[]
                        {
                            "string",
                        },
                        IncludeColumns = new[]
                        {
                            "string",
                        },
                        PrimaryKeys = new[]
                        {
                            "string",
                        },
                        QueryBasedConnectorConfig = new Databricks.Inputs.PipelineIngestionDefinitionObjectTableTableConfigurationQueryBasedConnectorConfigArgs
                        {
                            CursorColumns = new[]
                            {
                                "string",
                            },
                            DeletionCondition = "string",
                            HardDeletionSyncMinIntervalInSeconds = 0,
                        },
                        SalesforceIncludeFormulaFields = false,
                        ScdType = "string",
                        SequenceBies = new[]
                        {
                            "string",
                        },
                    },
                },
            },
        },
        SourceConfigurations = new[]
        {
            new Databricks.Inputs.PipelineIngestionDefinitionSourceConfigurationArgs
            {
                Catalog = new Databricks.Inputs.PipelineIngestionDefinitionSourceConfigurationCatalogArgs
                {
                    Postgres = new Databricks.Inputs.PipelineIngestionDefinitionSourceConfigurationCatalogPostgresArgs
                    {
                        SlotConfig = new Databricks.Inputs.PipelineIngestionDefinitionSourceConfigurationCatalogPostgresSlotConfigArgs
                        {
                            PublicationName = "string",
                            SlotName = "string",
                        },
                    },
                    SourceCatalog = "string",
                },
            },
        },
        SourceType = "string",
        TableConfiguration = new Databricks.Inputs.PipelineIngestionDefinitionTableConfigurationArgs
        {
            ExcludeColumns = new[]
            {
                "string",
            },
            IncludeColumns = new[]
            {
                "string",
            },
            PrimaryKeys = new[]
            {
                "string",
            },
            QueryBasedConnectorConfig = new Databricks.Inputs.PipelineIngestionDefinitionTableConfigurationQueryBasedConnectorConfigArgs
            {
                CursorColumns = new[]
                {
                    "string",
                },
                DeletionCondition = "string",
                HardDeletionSyncMinIntervalInSeconds = 0,
            },
            SalesforceIncludeFormulaFields = false,
            ScdType = "string",
            SequenceBies = new[]
            {
                "string",
            },
        },
    },
    LastModified = 0,
    LatestUpdates = new[]
    {
        new Databricks.Inputs.PipelineLatestUpdateArgs
        {
            CreationTime = "string",
            State = "string",
            UpdateId = "string",
        },
    },
    Libraries = new[]
    {
        new Databricks.Inputs.PipelineLibraryArgs
        {
            File = new Databricks.Inputs.PipelineLibraryFileArgs
            {
                Path = "string",
            },
            Glob = new Databricks.Inputs.PipelineLibraryGlobArgs
            {
                Include = "string",
            },
            Jar = "string",
            Maven = new Databricks.Inputs.PipelineLibraryMavenArgs
            {
                Coordinates = "string",
                Exclusions = new[]
                {
                    "string",
                },
                Repo = "string",
            },
            Notebook = new Databricks.Inputs.PipelineLibraryNotebookArgs
            {
                Path = "string",
            },
        },
    },
    Name = "string",
    Notifications = new[]
    {
        new Databricks.Inputs.PipelineNotificationArgs
        {
            Alerts = new[]
            {
                "string",
            },
            EmailRecipients = new[]
            {
                "string",
            },
        },
    },
    Photon = false,
    RestartWindow = new Databricks.Inputs.PipelineRestartWindowArgs
    {
        StartHour = 0,
        DaysOfWeeks = new[]
        {
            "string",
        },
        TimeZoneId = "string",
    },
    RootPath = "string",
    RunAs = new Databricks.Inputs.PipelineRunAsArgs
    {
        ServicePrincipalName = "string",
        UserName = "string",
    },
    RunAsUserName = "string",
    Schema = "string",
    Serverless = false,
    State = "string",
    Storage = "string",
    Tags = 
    {
        { "string", "string" },
    },
    Target = "string",
    Trigger = new Databricks.Inputs.PipelineTriggerArgs
    {
        Cron = new Databricks.Inputs.PipelineTriggerCronArgs
        {
            QuartzCronSchedule = "string",
            TimezoneId = "string",
        },
        Manual = null,
    },
    Url = "string",
});
Go
example, err := databricks.NewPipeline(ctx, "pipelineResource", &databricks.PipelineArgs{
	AllowDuplicateNames: pulumi.Bool(false),
	BudgetPolicyId:      pulumi.String("string"),
	Catalog:             pulumi.String("string"),
	Cause:               pulumi.String("string"),
	Channel:             pulumi.String("string"),
	ClusterId:           pulumi.String("string"),
	Clusters: databricks.PipelineClusterArray{
		&databricks.PipelineClusterArgs{
			ApplyPolicyDefaultValues: pulumi.Bool(false),
			Autoscale: &databricks.PipelineClusterAutoscaleArgs{
				MaxWorkers: pulumi.Int(0),
				MinWorkers: pulumi.Int(0),
				Mode:       pulumi.String("string"),
			},
			AwsAttributes: &databricks.PipelineClusterAwsAttributesArgs{
				Availability:        pulumi.String("string"),
				EbsVolumeCount:      pulumi.Int(0),
				EbsVolumeIops:       pulumi.Int(0),
				EbsVolumeSize:       pulumi.Int(0),
				EbsVolumeThroughput: pulumi.Int(0),
				EbsVolumeType:       pulumi.String("string"),
				FirstOnDemand:       pulumi.Int(0),
				InstanceProfileArn:  pulumi.String("string"),
				SpotBidPricePercent: pulumi.Int(0),
				ZoneId:              pulumi.String("string"),
			},
			AzureAttributes: &databricks.PipelineClusterAzureAttributesArgs{
				Availability:  pulumi.String("string"),
				FirstOnDemand: pulumi.Int(0),
				LogAnalyticsInfo: &databricks.PipelineClusterAzureAttributesLogAnalyticsInfoArgs{
					LogAnalyticsPrimaryKey:  pulumi.String("string"),
					LogAnalyticsWorkspaceId: pulumi.String("string"),
				},
				SpotBidMaxPrice: pulumi.Float64(0),
			},
			ClusterLogConf: &databricks.PipelineClusterClusterLogConfArgs{
				Dbfs: &databricks.PipelineClusterClusterLogConfDbfsArgs{
					Destination: pulumi.String("string"),
				},
				S3: &databricks.PipelineClusterClusterLogConfS3Args{
					Destination:      pulumi.String("string"),
					CannedAcl:        pulumi.String("string"),
					EnableEncryption: pulumi.Bool(false),
					EncryptionType:   pulumi.String("string"),
					Endpoint:         pulumi.String("string"),
					KmsKey:           pulumi.String("string"),
					Region:           pulumi.String("string"),
				},
				Volumes: &databricks.PipelineClusterClusterLogConfVolumesArgs{
					Destination: pulumi.String("string"),
				},
			},
			CustomTags: pulumi.StringMap{
				"string": pulumi.String("string"),
			},
			DriverInstancePoolId:      pulumi.String("string"),
			DriverNodeTypeId:          pulumi.String("string"),
			EnableLocalDiskEncryption: pulumi.Bool(false),
			GcpAttributes: &databricks.PipelineClusterGcpAttributesArgs{
				Availability:         pulumi.String("string"),
				FirstOnDemand:        pulumi.Int(0),
				GoogleServiceAccount: pulumi.String("string"),
				LocalSsdCount:        pulumi.Int(0),
				ZoneId:               pulumi.String("string"),
			},
			InitScripts: databricks.PipelineClusterInitScriptArray{
				&databricks.PipelineClusterInitScriptArgs{
					Abfss: &databricks.PipelineClusterInitScriptAbfssArgs{
						Destination: pulumi.String("string"),
					},
					File: &databricks.PipelineClusterInitScriptFileArgs{
						Destination: pulumi.String("string"),
					},
					Gcs: &databricks.PipelineClusterInitScriptGcsArgs{
						Destination: pulumi.String("string"),
					},
					S3: &databricks.PipelineClusterInitScriptS3Args{
						Destination:      pulumi.String("string"),
						CannedAcl:        pulumi.String("string"),
						EnableEncryption: pulumi.Bool(false),
						EncryptionType:   pulumi.String("string"),
						Endpoint:         pulumi.String("string"),
						KmsKey:           pulumi.String("string"),
						Region:           pulumi.String("string"),
					},
					Volumes: &databricks.PipelineClusterInitScriptVolumesArgs{
						Destination: pulumi.String("string"),
					},
					Workspace: &databricks.PipelineClusterInitScriptWorkspaceArgs{
						Destination: pulumi.String("string"),
					},
				},
			},
			InstancePoolId: pulumi.String("string"),
			Label:          pulumi.String("string"),
			NodeTypeId:     pulumi.String("string"),
			NumWorkers:     pulumi.Int(0),
			PolicyId:       pulumi.String("string"),
			SparkConf: pulumi.StringMap{
				"string": pulumi.String("string"),
			},
			SparkEnvVars: pulumi.StringMap{
				"string": pulumi.String("string"),
			},
			SshPublicKeys: pulumi.StringArray{
				pulumi.String("string"),
			},
		},
	},
	Configuration: pulumi.StringMap{
		"string": pulumi.String("string"),
	},
	Continuous:      pulumi.Bool(false),
	CreatorUserName: pulumi.String("string"),
	Deployment: &databricks.PipelineDeploymentArgs{
		Kind:             pulumi.String("string"),
		MetadataFilePath: pulumi.String("string"),
	},
	Development: pulumi.Bool(false),
	Edition:     pulumi.String("string"),
	Environment: &databricks.PipelineEnvironmentArgs{
		Dependencies: pulumi.StringArray{
			pulumi.String("string"),
		},
	},
	EventLog: &databricks.PipelineEventLogArgs{
		Name:    pulumi.String("string"),
		Catalog: pulumi.String("string"),
		Schema:  pulumi.String("string"),
	},
	ExpectedLastModified: pulumi.Int(0),
	Filters: &databricks.PipelineFiltersArgs{
		Excludes: pulumi.StringArray{
			pulumi.String("string"),
		},
		Includes: pulumi.StringArray{
			pulumi.String("string"),
		},
	},
	GatewayDefinition: &databricks.PipelineGatewayDefinitionArgs{
		ConnectionName:        pulumi.String("string"),
		GatewayStorageCatalog: pulumi.String("string"),
		GatewayStorageSchema:  pulumi.String("string"),
		ConnectionId:          pulumi.String("string"),
		GatewayStorageName:    pulumi.String("string"),
	},
	Health: pulumi.String("string"),
	IngestionDefinition: &databricks.PipelineIngestionDefinitionArgs{
		ConnectionName:     pulumi.String("string"),
		IngestionGatewayId: pulumi.String("string"),
		Objects: databricks.PipelineIngestionDefinitionObjectArray{
			&databricks.PipelineIngestionDefinitionObjectArgs{
				Report: &databricks.PipelineIngestionDefinitionObjectReportArgs{
					DestinationCatalog: pulumi.String("string"),
					DestinationSchema:  pulumi.String("string"),
					SourceUrl:          pulumi.String("string"),
					DestinationTable:   pulumi.String("string"),
					TableConfiguration: &databricks.PipelineIngestionDefinitionObjectReportTableConfigurationArgs{
						ExcludeColumns: pulumi.StringArray{
							pulumi.String("string"),
						},
						IncludeColumns: pulumi.StringArray{
							pulumi.String("string"),
						},
						PrimaryKeys: pulumi.StringArray{
							pulumi.String("string"),
						},
						QueryBasedConnectorConfig: &databricks.PipelineIngestionDefinitionObjectReportTableConfigurationQueryBasedConnectorConfigArgs{
							CursorColumns: pulumi.StringArray{
								pulumi.String("string"),
							},
							DeletionCondition:                    pulumi.String("string"),
							HardDeletionSyncMinIntervalInSeconds: pulumi.Int(0),
						},
						SalesforceIncludeFormulaFields: pulumi.Bool(false),
						ScdType:                        pulumi.String("string"),
						SequenceBies: pulumi.StringArray{
							pulumi.String("string"),
						},
					},
				},
				Schema: &databricks.PipelineIngestionDefinitionObjectSchemaArgs{
					DestinationCatalog: pulumi.String("string"),
					DestinationSchema:  pulumi.String("string"),
					SourceSchema:       pulumi.String("string"),
					SourceCatalog:      pulumi.String("string"),
					TableConfiguration: &databricks.PipelineIngestionDefinitionObjectSchemaTableConfigurationArgs{
						ExcludeColumns: pulumi.StringArray{
							pulumi.String("string"),
						},
						IncludeColumns: pulumi.StringArray{
							pulumi.String("string"),
						},
						PrimaryKeys: pulumi.StringArray{
							pulumi.String("string"),
						},
						QueryBasedConnectorConfig: &databricks.PipelineIngestionDefinitionObjectSchemaTableConfigurationQueryBasedConnectorConfigArgs{
							CursorColumns: pulumi.StringArray{
								pulumi.String("string"),
							},
							DeletionCondition:                    pulumi.String("string"),
							HardDeletionSyncMinIntervalInSeconds: pulumi.Int(0),
						},
						SalesforceIncludeFormulaFields: pulumi.Bool(false),
						ScdType:                        pulumi.String("string"),
						SequenceBies: pulumi.StringArray{
							pulumi.String("string"),
						},
					},
				},
				Table: &databricks.PipelineIngestionDefinitionObjectTableArgs{
					DestinationCatalog: pulumi.String("string"),
					DestinationSchema:  pulumi.String("string"),
					SourceTable:        pulumi.String("string"),
					DestinationTable:   pulumi.String("string"),
					SourceCatalog:      pulumi.String("string"),
					SourceSchema:       pulumi.String("string"),
					TableConfiguration: &databricks.PipelineIngestionDefinitionObjectTableTableConfigurationArgs{
						ExcludeColumns: pulumi.StringArray{
							pulumi.String("string"),
						},
						IncludeColumns: pulumi.StringArray{
							pulumi.String("string"),
						},
						PrimaryKeys: pulumi.StringArray{
							pulumi.String("string"),
						},
						QueryBasedConnectorConfig: &databricks.PipelineIngestionDefinitionObjectTableTableConfigurationQueryBasedConnectorConfigArgs{
							CursorColumns: pulumi.StringArray{
								pulumi.String("string"),
							},
							DeletionCondition:                    pulumi.String("string"),
							HardDeletionSyncMinIntervalInSeconds: pulumi.Int(0),
						},
						SalesforceIncludeFormulaFields: pulumi.Bool(false),
						ScdType:                        pulumi.String("string"),
						SequenceBies: pulumi.StringArray{
							pulumi.String("string"),
						},
					},
				},
			},
		},
		SourceConfigurations: databricks.PipelineIngestionDefinitionSourceConfigurationArray{
			&databricks.PipelineIngestionDefinitionSourceConfigurationArgs{
				Catalog: &databricks.PipelineIngestionDefinitionSourceConfigurationCatalogArgs{
					Postgres: &databricks.PipelineIngestionDefinitionSourceConfigurationCatalogPostgresArgs{
						SlotConfig: &databricks.PipelineIngestionDefinitionSourceConfigurationCatalogPostgresSlotConfigArgs{
							PublicationName: pulumi.String("string"),
							SlotName:        pulumi.String("string"),
						},
					},
					SourceCatalog: pulumi.String("string"),
				},
			},
		},
		SourceType: pulumi.String("string"),
		TableConfiguration: &databricks.PipelineIngestionDefinitionTableConfigurationArgs{
			ExcludeColumns: pulumi.StringArray{
				pulumi.String("string"),
			},
			IncludeColumns: pulumi.StringArray{
				pulumi.String("string"),
			},
			PrimaryKeys: pulumi.StringArray{
				pulumi.String("string"),
			},
			QueryBasedConnectorConfig: &databricks.PipelineIngestionDefinitionTableConfigurationQueryBasedConnectorConfigArgs{
				CursorColumns: pulumi.StringArray{
					pulumi.String("string"),
				},
				DeletionCondition:                    pulumi.String("string"),
				HardDeletionSyncMinIntervalInSeconds: pulumi.Int(0),
			},
			SalesforceIncludeFormulaFields: pulumi.Bool(false),
			ScdType:                        pulumi.String("string"),
			SequenceBies: pulumi.StringArray{
				pulumi.String("string"),
			},
		},
	},
	LastModified: pulumi.Int(0),
	LatestUpdates: databricks.PipelineLatestUpdateArray{
		&databricks.PipelineLatestUpdateArgs{
			CreationTime: pulumi.String("string"),
			State:        pulumi.String("string"),
			UpdateId:     pulumi.String("string"),
		},
	},
	Libraries: databricks.PipelineLibraryArray{
		&databricks.PipelineLibraryArgs{
			File: &databricks.PipelineLibraryFileArgs{
				Path: pulumi.String("string"),
			},
			Glob: &databricks.PipelineLibraryGlobArgs{
				Include: pulumi.String("string"),
			},
			Jar: pulumi.String("string"),
			Maven: &databricks.PipelineLibraryMavenArgs{
				Coordinates: pulumi.String("string"),
				Exclusions: pulumi.StringArray{
					pulumi.String("string"),
				},
				Repo: pulumi.String("string"),
			},
			Notebook: &databricks.PipelineLibraryNotebookArgs{
				Path: pulumi.String("string"),
			},
		},
	},
	Name: pulumi.String("string"),
	Notifications: databricks.PipelineNotificationArray{
		&databricks.PipelineNotificationArgs{
			Alerts: pulumi.StringArray{
				pulumi.String("string"),
			},
			EmailRecipients: pulumi.StringArray{
				pulumi.String("string"),
			},
		},
	},
	Photon: pulumi.Bool(false),
	RestartWindow: &databricks.PipelineRestartWindowArgs{
		StartHour: pulumi.Int(0),
		DaysOfWeeks: pulumi.StringArray{
			pulumi.String("string"),
		},
		TimeZoneId: pulumi.String("string"),
	},
	RootPath: pulumi.String("string"),
	RunAs: &databricks.PipelineRunAsArgs{
		ServicePrincipalName: pulumi.String("string"),
		UserName:             pulumi.String("string"),
	},
	RunAsUserName: pulumi.String("string"),
	Schema:        pulumi.String("string"),
	Serverless:    pulumi.Bool(false),
	State:         pulumi.String("string"),
	Storage:       pulumi.String("string"),
	Tags: pulumi.StringMap{
		"string": pulumi.String("string"),
	},
	Target: pulumi.String("string"),
	Trigger: &databricks.PipelineTriggerArgs{
		Cron: &databricks.PipelineTriggerCronArgs{
			QuartzCronSchedule: pulumi.String("string"),
			TimezoneId:         pulumi.String("string"),
		},
		Manual: &databricks.PipelineTriggerManualArgs{},
	},
	Url: pulumi.String("string"),
})
Java
var pipelineResource = new Pipeline("pipelineResource", PipelineArgs.builder()
    .allowDuplicateNames(false)
    .budgetPolicyId("string")
    .catalog("string")
    .cause("string")
    .channel("string")
    .clusterId("string")
    .clusters(PipelineClusterArgs.builder()
        .applyPolicyDefaultValues(false)
        .autoscale(PipelineClusterAutoscaleArgs.builder()
            .maxWorkers(0)
            .minWorkers(0)
            .mode("string")
            .build())
        .awsAttributes(PipelineClusterAwsAttributesArgs.builder()
            .availability("string")
            .ebsVolumeCount(0)
            .ebsVolumeIops(0)
            .ebsVolumeSize(0)
            .ebsVolumeThroughput(0)
            .ebsVolumeType("string")
            .firstOnDemand(0)
            .instanceProfileArn("string")
            .spotBidPricePercent(0)
            .zoneId("string")
            .build())
        .azureAttributes(PipelineClusterAzureAttributesArgs.builder()
            .availability("string")
            .firstOnDemand(0)
            .logAnalyticsInfo(PipelineClusterAzureAttributesLogAnalyticsInfoArgs.builder()
                .logAnalyticsPrimaryKey("string")
                .logAnalyticsWorkspaceId("string")
                .build())
            .spotBidMaxPrice(0.0)
            .build())
        .clusterLogConf(PipelineClusterClusterLogConfArgs.builder()
            .dbfs(PipelineClusterClusterLogConfDbfsArgs.builder()
                .destination("string")
                .build())
            .s3(PipelineClusterClusterLogConfS3Args.builder()
                .destination("string")
                .cannedAcl("string")
                .enableEncryption(false)
                .encryptionType("string")
                .endpoint("string")
                .kmsKey("string")
                .region("string")
                .build())
            .volumes(PipelineClusterClusterLogConfVolumesArgs.builder()
                .destination("string")
                .build())
            .build())
        .customTags(Map.of("string", "string"))
        .driverInstancePoolId("string")
        .driverNodeTypeId("string")
        .enableLocalDiskEncryption(false)
        .gcpAttributes(PipelineClusterGcpAttributesArgs.builder()
            .availability("string")
            .firstOnDemand(0)
            .googleServiceAccount("string")
            .localSsdCount(0)
            .zoneId("string")
            .build())
        .initScripts(PipelineClusterInitScriptArgs.builder()
            .abfss(PipelineClusterInitScriptAbfssArgs.builder()
                .destination("string")
                .build())
            .file(PipelineClusterInitScriptFileArgs.builder()
                .destination("string")
                .build())
            .gcs(PipelineClusterInitScriptGcsArgs.builder()
                .destination("string")
                .build())
            .s3(PipelineClusterInitScriptS3Args.builder()
                .destination("string")
                .cannedAcl("string")
                .enableEncryption(false)
                .encryptionType("string")
                .endpoint("string")
                .kmsKey("string")
                .region("string")
                .build())
            .volumes(PipelineClusterInitScriptVolumesArgs.builder()
                .destination("string")
                .build())
            .workspace(PipelineClusterInitScriptWorkspaceArgs.builder()
                .destination("string")
                .build())
            .build())
        .instancePoolId("string")
        .label("string")
        .nodeTypeId("string")
        .numWorkers(0)
        .policyId("string")
        .sparkConf(Map.of("string", "string"))
        .sparkEnvVars(Map.of("string", "string"))
        .sshPublicKeys("string")
        .build())
    .configuration(Map.of("string", "string"))
    .continuous(false)
    .creatorUserName("string")
    .deployment(PipelineDeploymentArgs.builder()
        .kind("string")
        .metadataFilePath("string")
        .build())
    .development(false)
    .edition("string")
    .environment(PipelineEnvironmentArgs.builder()
        .dependencies("string")
        .build())
    .eventLog(PipelineEventLogArgs.builder()
        .name("string")
        .catalog("string")
        .schema("string")
        .build())
    .expectedLastModified(0)
    .filters(PipelineFiltersArgs.builder()
        .excludes("string")
        .includes("string")
        .build())
    .gatewayDefinition(PipelineGatewayDefinitionArgs.builder()
        .connectionName("string")
        .gatewayStorageCatalog("string")
        .gatewayStorageSchema("string")
        .connectionId("string")
        .gatewayStorageName("string")
        .build())
    .health("string")
    .ingestionDefinition(PipelineIngestionDefinitionArgs.builder()
        .connectionName("string")
        .ingestionGatewayId("string")
        .objects(PipelineIngestionDefinitionObjectArgs.builder()
            .report(PipelineIngestionDefinitionObjectReportArgs.builder()
                .destinationCatalog("string")
                .destinationSchema("string")
                .sourceUrl("string")
                .destinationTable("string")
                .tableConfiguration(PipelineIngestionDefinitionObjectReportTableConfigurationArgs.builder()
                    .excludeColumns("string")
                    .includeColumns("string")
                    .primaryKeys("string")
                    .queryBasedConnectorConfig(PipelineIngestionDefinitionObjectReportTableConfigurationQueryBasedConnectorConfigArgs.builder()
                        .cursorColumns("string")
                        .deletionCondition("string")
                        .hardDeletionSyncMinIntervalInSeconds(0)
                        .build())
                    .salesforceIncludeFormulaFields(false)
                    .scdType("string")
                    .sequenceBies("string")
                    .build())
                .build())
            .schema(PipelineIngestionDefinitionObjectSchemaArgs.builder()
                .destinationCatalog("string")
                .destinationSchema("string")
                .sourceSchema("string")
                .sourceCatalog("string")
                .tableConfiguration(PipelineIngestionDefinitionObjectSchemaTableConfigurationArgs.builder()
                    .excludeColumns("string")
                    .includeColumns("string")
                    .primaryKeys("string")
                    .queryBasedConnectorConfig(PipelineIngestionDefinitionObjectSchemaTableConfigurationQueryBasedConnectorConfigArgs.builder()
                        .cursorColumns("string")
                        .deletionCondition("string")
                        .hardDeletionSyncMinIntervalInSeconds(0)
                        .build())
                    .salesforceIncludeFormulaFields(false)
                    .scdType("string")
                    .sequenceBies("string")
                    .build())
                .build())
            .table(PipelineIngestionDefinitionObjectTableArgs.builder()
                .destinationCatalog("string")
                .destinationSchema("string")
                .sourceTable("string")
                .destinationTable("string")
                .sourceCatalog("string")
                .sourceSchema("string")
                .tableConfiguration(PipelineIngestionDefinitionObjectTableTableConfigurationArgs.builder()
                    .excludeColumns("string")
                    .includeColumns("string")
                    .primaryKeys("string")
                    .queryBasedConnectorConfig(PipelineIngestionDefinitionObjectTableTableConfigurationQueryBasedConnectorConfigArgs.builder()
                        .cursorColumns("string")
                        .deletionCondition("string")
                        .hardDeletionSyncMinIntervalInSeconds(0)
                        .build())
                    .salesforceIncludeFormulaFields(false)
                    .scdType("string")
                    .sequenceBies("string")
                    .build())
                .build())
            .build())
        .sourceConfigurations(PipelineIngestionDefinitionSourceConfigurationArgs.builder()
            .catalog(PipelineIngestionDefinitionSourceConfigurationCatalogArgs.builder()
                .postgres(PipelineIngestionDefinitionSourceConfigurationCatalogPostgresArgs.builder()
                    .slotConfig(PipelineIngestionDefinitionSourceConfigurationCatalogPostgresSlotConfigArgs.builder()
                        .publicationName("string")
                        .slotName("string")
                        .build())
                    .build())
                .sourceCatalog("string")
                .build())
            .build())
        .sourceType("string")
        .tableConfiguration(PipelineIngestionDefinitionTableConfigurationArgs.builder()
            .excludeColumns("string")
            .includeColumns("string")
            .primaryKeys("string")
            .queryBasedConnectorConfig(PipelineIngestionDefinitionTableConfigurationQueryBasedConnectorConfigArgs.builder()
                .cursorColumns("string")
                .deletionCondition("string")
                .hardDeletionSyncMinIntervalInSeconds(0)
                .build())
            .salesforceIncludeFormulaFields(false)
            .scdType("string")
            .sequenceBies("string")
            .build())
        .build())
    .lastModified(0)
    .latestUpdates(PipelineLatestUpdateArgs.builder()
        .creationTime("string")
        .state("string")
        .updateId("string")
        .build())
    .libraries(PipelineLibraryArgs.builder()
        .file(PipelineLibraryFileArgs.builder()
            .path("string")
            .build())
        .glob(PipelineLibraryGlobArgs.builder()
            .include("string")
            .build())
        .jar("string")
        .maven(PipelineLibraryMavenArgs.builder()
            .coordinates("string")
            .exclusions("string")
            .repo("string")
            .build())
        .notebook(PipelineLibraryNotebookArgs.builder()
            .path("string")
            .build())
        .build())
    .name("string")
    .notifications(PipelineNotificationArgs.builder()
        .alerts("string")
        .emailRecipients("string")
        .build())
    .photon(false)
    .restartWindow(PipelineRestartWindowArgs.builder()
        .startHour(0)
        .daysOfWeeks("string")
        .timeZoneId("string")
        .build())
    .rootPath("string")
    .runAs(PipelineRunAsArgs.builder()
        .servicePrincipalName("string")
        .userName("string")
        .build())
    .runAsUserName("string")
    .schema("string")
    .serverless(false)
    .state("string")
    .storage("string")
    .tags(Map.of("string", "string"))
    .target("string")
    .trigger(PipelineTriggerArgs.builder()
        .cron(PipelineTriggerCronArgs.builder()
            .quartzCronSchedule("string")
            .timezoneId("string")
            .build())
        .manual(PipelineTriggerManualArgs.builder()
            .build())
        .build())
    .url("string")
    .build());
Python
pipeline_resource = databricks.Pipeline("pipelineResource",
    allow_duplicate_names=False,
    budget_policy_id="string",
    catalog="string",
    cause="string",
    channel="string",
    cluster_id="string",
    clusters=[{
        "apply_policy_default_values": False,
        "autoscale": {
            "max_workers": 0,
            "min_workers": 0,
            "mode": "string",
        },
        "aws_attributes": {
            "availability": "string",
            "ebs_volume_count": 0,
            "ebs_volume_iops": 0,
            "ebs_volume_size": 0,
            "ebs_volume_throughput": 0,
            "ebs_volume_type": "string",
            "first_on_demand": 0,
            "instance_profile_arn": "string",
            "spot_bid_price_percent": 0,
            "zone_id": "string",
        },
        "azure_attributes": {
            "availability": "string",
            "first_on_demand": 0,
            "log_analytics_info": {
                "log_analytics_primary_key": "string",
                "log_analytics_workspace_id": "string",
            },
            "spot_bid_max_price": 0,
        },
        "cluster_log_conf": {
            "dbfs": {
                "destination": "string",
            },
            "s3": {
                "destination": "string",
                "canned_acl": "string",
                "enable_encryption": False,
                "encryption_type": "string",
                "endpoint": "string",
                "kms_key": "string",
                "region": "string",
            },
            "volumes": {
                "destination": "string",
            },
        },
        "custom_tags": {
            "string": "string",
        },
        "driver_instance_pool_id": "string",
        "driver_node_type_id": "string",
        "enable_local_disk_encryption": False,
        "gcp_attributes": {
            "availability": "string",
            "first_on_demand": 0,
            "google_service_account": "string",
            "local_ssd_count": 0,
            "zone_id": "string",
        },
        "init_scripts": [{
            "abfss": {
                "destination": "string",
            },
            "file": {
                "destination": "string",
            },
            "gcs": {
                "destination": "string",
            },
            "s3": {
                "destination": "string",
                "canned_acl": "string",
                "enable_encryption": False,
                "encryption_type": "string",
                "endpoint": "string",
                "kms_key": "string",
                "region": "string",
            },
            "volumes": {
                "destination": "string",
            },
            "workspace": {
                "destination": "string",
            },
        }],
        "instance_pool_id": "string",
        "label": "string",
        "node_type_id": "string",
        "num_workers": 0,
        "policy_id": "string",
        "spark_conf": {
            "string": "string",
        },
        "spark_env_vars": {
            "string": "string",
        },
        "ssh_public_keys": ["string"],
    }],
    configuration={
        "string": "string",
    },
    continuous=False,
    creator_user_name="string",
    deployment={
        "kind": "string",
        "metadata_file_path": "string",
    },
    development=False,
    edition="string",
    environment={
        "dependencies": ["string"],
    },
    event_log={
        "name": "string",
        "catalog": "string",
        "schema": "string",
    },
    expected_last_modified=0,
    filters={
        "excludes": ["string"],
        "includes": ["string"],
    },
    gateway_definition={
        "connection_name": "string",
        "gateway_storage_catalog": "string",
        "gateway_storage_schema": "string",
        "connection_id": "string",
        "gateway_storage_name": "string",
    },
    health="string",
    ingestion_definition={
        "connection_name": "string",
        "ingestion_gateway_id": "string",
        "objects": [{
            "report": {
                "destination_catalog": "string",
                "destination_schema": "string",
                "source_url": "string",
                "destination_table": "string",
                "table_configuration": {
                    "exclude_columns": ["string"],
                    "include_columns": ["string"],
                    "primary_keys": ["string"],
                    "query_based_connector_config": {
                        "cursor_columns": ["string"],
                        "deletion_condition": "string",
                        "hard_deletion_sync_min_interval_in_seconds": 0,
                    },
                    "salesforce_include_formula_fields": False,
                    "scd_type": "string",
                    "sequence_bies": ["string"],
                },
            },
            "schema": {
                "destination_catalog": "string",
                "destination_schema": "string",
                "source_schema": "string",
                "source_catalog": "string",
                "table_configuration": {
                    "exclude_columns": ["string"],
                    "include_columns": ["string"],
                    "primary_keys": ["string"],
                    "query_based_connector_config": {
                        "cursor_columns": ["string"],
                        "deletion_condition": "string",
                        "hard_deletion_sync_min_interval_in_seconds": 0,
                    },
                    "salesforce_include_formula_fields": False,
                    "scd_type": "string",
                    "sequence_bies": ["string"],
                },
            },
            "table": {
                "destination_catalog": "string",
                "destination_schema": "string",
                "source_table": "string",
                "destination_table": "string",
                "source_catalog": "string",
                "source_schema": "string",
                "table_configuration": {
                    "exclude_columns": ["string"],
                    "include_columns": ["string"],
                    "primary_keys": ["string"],
                    "query_based_connector_config": {
                        "cursor_columns": ["string"],
                        "deletion_condition": "string",
                        "hard_deletion_sync_min_interval_in_seconds": 0,
                    },
                    "salesforce_include_formula_fields": False,
                    "scd_type": "string",
                    "sequence_bies": ["string"],
                },
            },
        }],
        "source_configurations": [{
            "catalog": {
                "postgres": {
                    "slot_config": {
                        "publication_name": "string",
                        "slot_name": "string",
                    },
                },
                "source_catalog": "string",
            },
        }],
        "source_type": "string",
        "table_configuration": {
            "exclude_columns": ["string"],
            "include_columns": ["string"],
            "primary_keys": ["string"],
            "query_based_connector_config": {
                "cursor_columns": ["string"],
                "deletion_condition": "string",
                "hard_deletion_sync_min_interval_in_seconds": 0,
            },
            "salesforce_include_formula_fields": False,
            "scd_type": "string",
            "sequence_bies": ["string"],
        },
    },
    last_modified=0,
    latest_updates=[{
        "creation_time": "string",
        "state": "string",
        "update_id": "string",
    }],
    libraries=[{
        "file": {
            "path": "string",
        },
        "glob": {
            "include": "string",
        },
        "jar": "string",
        "maven": {
            "coordinates": "string",
            "exclusions": ["string"],
            "repo": "string",
        },
        "notebook": {
            "path": "string",
        },
    }],
    name="string",
    notifications=[{
        "alerts": ["string"],
        "email_recipients": ["string"],
    }],
    photon=False,
    restart_window={
        "start_hour": 0,
        "days_of_weeks": ["string"],
        "time_zone_id": "string",
    },
    root_path="string",
    run_as={
        "service_principal_name": "string",
        "user_name": "string",
    },
    run_as_user_name="string",
    schema="string",
    serverless=False,
    state="string",
    storage="string",
    tags={
        "string": "string",
    },
    target="string",
    trigger={
        "cron": {
            "quartz_cron_schedule": "string",
            "timezone_id": "string",
        },
        "manual": {},
    },
    url="string")
const pipelineResource = new databricks.Pipeline("pipelineResource", {
    allowDuplicateNames: false,
    budgetPolicyId: "string",
    catalog: "string",
    cause: "string",
    channel: "string",
    clusterId: "string",
    clusters: [{
        applyPolicyDefaultValues: false,
        autoscale: {
            maxWorkers: 0,
            minWorkers: 0,
            mode: "string",
        },
        awsAttributes: {
            availability: "string",
            ebsVolumeCount: 0,
            ebsVolumeIops: 0,
            ebsVolumeSize: 0,
            ebsVolumeThroughput: 0,
            ebsVolumeType: "string",
            firstOnDemand: 0,
            instanceProfileArn: "string",
            spotBidPricePercent: 0,
            zoneId: "string",
        },
        azureAttributes: {
            availability: "string",
            firstOnDemand: 0,
            logAnalyticsInfo: {
                logAnalyticsPrimaryKey: "string",
                logAnalyticsWorkspaceId: "string",
            },
            spotBidMaxPrice: 0,
        },
        clusterLogConf: {
            dbfs: {
                destination: "string",
            },
            s3: {
                destination: "string",
                cannedAcl: "string",
                enableEncryption: false,
                encryptionType: "string",
                endpoint: "string",
                kmsKey: "string",
                region: "string",
            },
            volumes: {
                destination: "string",
            },
        },
        customTags: {
            string: "string",
        },
        driverInstancePoolId: "string",
        driverNodeTypeId: "string",
        enableLocalDiskEncryption: false,
        gcpAttributes: {
            availability: "string",
            firstOnDemand: 0,
            googleServiceAccount: "string",
            localSsdCount: 0,
            zoneId: "string",
        },
        initScripts: [{
            abfss: {
                destination: "string",
            },
            file: {
                destination: "string",
            },
            gcs: {
                destination: "string",
            },
            s3: {
                destination: "string",
                cannedAcl: "string",
                enableEncryption: false,
                encryptionType: "string",
                endpoint: "string",
                kmsKey: "string",
                region: "string",
            },
            volumes: {
                destination: "string",
            },
            workspace: {
                destination: "string",
            },
        }],
        instancePoolId: "string",
        label: "string",
        nodeTypeId: "string",
        numWorkers: 0,
        policyId: "string",
        sparkConf: {
            string: "string",
        },
        sparkEnvVars: {
            string: "string",
        },
        sshPublicKeys: ["string"],
    }],
    configuration: {
        string: "string",
    },
    continuous: false,
    creatorUserName: "string",
    deployment: {
        kind: "string",
        metadataFilePath: "string",
    },
    development: false,
    edition: "string",
    environment: {
        dependencies: ["string"],
    },
    eventLog: {
        name: "string",
        catalog: "string",
        schema: "string",
    },
    expectedLastModified: 0,
    filters: {
        excludes: ["string"],
        includes: ["string"],
    },
    gatewayDefinition: {
        connectionName: "string",
        gatewayStorageCatalog: "string",
        gatewayStorageSchema: "string",
        connectionId: "string",
        gatewayStorageName: "string",
    },
    health: "string",
    ingestionDefinition: {
        connectionName: "string",
        ingestionGatewayId: "string",
        objects: [{
            report: {
                destinationCatalog: "string",
                destinationSchema: "string",
                sourceUrl: "string",
                destinationTable: "string",
                tableConfiguration: {
                    excludeColumns: ["string"],
                    includeColumns: ["string"],
                    primaryKeys: ["string"],
                    queryBasedConnectorConfig: {
                        cursorColumns: ["string"],
                        deletionCondition: "string",
                        hardDeletionSyncMinIntervalInSeconds: 0,
                    },
                    salesforceIncludeFormulaFields: false,
                    scdType: "string",
                    sequenceBies: ["string"],
                },
            },
            schema: {
                destinationCatalog: "string",
                destinationSchema: "string",
                sourceSchema: "string",
                sourceCatalog: "string",
                tableConfiguration: {
                    excludeColumns: ["string"],
                    includeColumns: ["string"],
                    primaryKeys: ["string"],
                    queryBasedConnectorConfig: {
                        cursorColumns: ["string"],
                        deletionCondition: "string",
                        hardDeletionSyncMinIntervalInSeconds: 0,
                    },
                    salesforceIncludeFormulaFields: false,
                    scdType: "string",
                    sequenceBies: ["string"],
                },
            },
            table: {
                destinationCatalog: "string",
                destinationSchema: "string",
                sourceTable: "string",
                destinationTable: "string",
                sourceCatalog: "string",
                sourceSchema: "string",
                tableConfiguration: {
                    excludeColumns: ["string"],
                    includeColumns: ["string"],
                    primaryKeys: ["string"],
                    queryBasedConnectorConfig: {
                        cursorColumns: ["string"],
                        deletionCondition: "string",
                        hardDeletionSyncMinIntervalInSeconds: 0,
                    },
                    salesforceIncludeFormulaFields: false,
                    scdType: "string",
                    sequenceBies: ["string"],
                },
            },
        }],
        sourceConfigurations: [{
            catalog: {
                postgres: {
                    slotConfig: {
                        publicationName: "string",
                        slotName: "string",
                    },
                },
                sourceCatalog: "string",
            },
        }],
        sourceType: "string",
        tableConfiguration: {
            excludeColumns: ["string"],
            includeColumns: ["string"],
            primaryKeys: ["string"],
            queryBasedConnectorConfig: {
                cursorColumns: ["string"],
                deletionCondition: "string",
                hardDeletionSyncMinIntervalInSeconds: 0,
            },
            salesforceIncludeFormulaFields: false,
            scdType: "string",
            sequenceBies: ["string"],
        },
    },
    lastModified: 0,
    latestUpdates: [{
        creationTime: "string",
        state: "string",
        updateId: "string",
    }],
    libraries: [{
        file: {
            path: "string",
        },
        glob: {
            include: "string",
        },
        jar: "string",
        maven: {
            coordinates: "string",
            exclusions: ["string"],
            repo: "string",
        },
        notebook: {
            path: "string",
        },
    }],
    name: "string",
    notifications: [{
        alerts: ["string"],
        emailRecipients: ["string"],
    }],
    photon: false,
    restartWindow: {
        startHour: 0,
        daysOfWeeks: ["string"],
        timeZoneId: "string",
    },
    rootPath: "string",
    runAs: {
        servicePrincipalName: "string",
        userName: "string",
    },
    runAsUserName: "string",
    schema: "string",
    serverless: false,
    state: "string",
    storage: "string",
    tags: {
        string: "string",
    },
    target: "string",
    trigger: {
        cron: {
            quartzCronSchedule: "string",
            timezoneId: "string",
        },
        manual: {},
    },
    url: "string",
});
type: databricks:Pipeline
properties:
    allowDuplicateNames: false
    budgetPolicyId: string
    catalog: string
    cause: string
    channel: string
    clusterId: string
    clusters:
        - applyPolicyDefaultValues: false
          autoscale:
            maxWorkers: 0
            minWorkers: 0
            mode: string
          awsAttributes:
            availability: string
            ebsVolumeCount: 0
            ebsVolumeIops: 0
            ebsVolumeSize: 0
            ebsVolumeThroughput: 0
            ebsVolumeType: string
            firstOnDemand: 0
            instanceProfileArn: string
            spotBidPricePercent: 0
            zoneId: string
          azureAttributes:
            availability: string
            firstOnDemand: 0
            logAnalyticsInfo:
                logAnalyticsPrimaryKey: string
                logAnalyticsWorkspaceId: string
            spotBidMaxPrice: 0
          clusterLogConf:
            dbfs:
                destination: string
            s3:
                cannedAcl: string
                destination: string
                enableEncryption: false
                encryptionType: string
                endpoint: string
                kmsKey: string
                region: string
            volumes:
                destination: string
          customTags:
            string: string
          driverInstancePoolId: string
          driverNodeTypeId: string
          enableLocalDiskEncryption: false
          gcpAttributes:
            availability: string
            firstOnDemand: 0
            googleServiceAccount: string
            localSsdCount: 0
            zoneId: string
          initScripts:
            - abfss:
                destination: string
              file:
                destination: string
              gcs:
                destination: string
              s3:
                cannedAcl: string
                destination: string
                enableEncryption: false
                encryptionType: string
                endpoint: string
                kmsKey: string
                region: string
              volumes:
                destination: string
              workspace:
                destination: string
          instancePoolId: string
          label: string
          nodeTypeId: string
          numWorkers: 0
          policyId: string
          sparkConf:
            string: string
          sparkEnvVars:
            string: string
          sshPublicKeys:
            - string
    configuration:
        string: string
    continuous: false
    creatorUserName: string
    deployment:
        kind: string
        metadataFilePath: string
    development: false
    edition: string
    environment:
        dependencies:
            - string
    eventLog:
        catalog: string
        name: string
        schema: string
    expectedLastModified: 0
    filters:
        excludes:
            - string
        includes:
            - string
    gatewayDefinition:
        connectionId: string
        connectionName: string
        gatewayStorageCatalog: string
        gatewayStorageName: string
        gatewayStorageSchema: string
    health: string
    ingestionDefinition:
        connectionName: string
        ingestionGatewayId: string
        objects:
            - report:
                destinationCatalog: string
                destinationSchema: string
                destinationTable: string
                sourceUrl: string
                tableConfiguration:
                    excludeColumns:
                        - string
                    includeColumns:
                        - string
                    primaryKeys:
                        - string
                    queryBasedConnectorConfig:
                        cursorColumns:
                            - string
                        deletionCondition: string
                        hardDeletionSyncMinIntervalInSeconds: 0
                    salesforceIncludeFormulaFields: false
                    scdType: string
                    sequenceBies:
                        - string
              schema:
                destinationCatalog: string
                destinationSchema: string
                sourceCatalog: string
                sourceSchema: string
                tableConfiguration:
                    excludeColumns:
                        - string
                    includeColumns:
                        - string
                    primaryKeys:
                        - string
                    queryBasedConnectorConfig:
                        cursorColumns:
                            - string
                        deletionCondition: string
                        hardDeletionSyncMinIntervalInSeconds: 0
                    salesforceIncludeFormulaFields: false
                    scdType: string
                    sequenceBies:
                        - string
              table:
                destinationCatalog: string
                destinationSchema: string
                destinationTable: string
                sourceCatalog: string
                sourceSchema: string
                sourceTable: string
                tableConfiguration:
                    excludeColumns:
                        - string
                    includeColumns:
                        - string
                    primaryKeys:
                        - string
                    queryBasedConnectorConfig:
                        cursorColumns:
                            - string
                        deletionCondition: string
                        hardDeletionSyncMinIntervalInSeconds: 0
                    salesforceIncludeFormulaFields: false
                    scdType: string
                    sequenceBies:
                        - string
        sourceConfigurations:
            - catalog:
                postgres:
                    slotConfig:
                        publicationName: string
                        slotName: string
                sourceCatalog: string
        sourceType: string
        tableConfiguration:
            excludeColumns:
                - string
            includeColumns:
                - string
            primaryKeys:
                - string
            queryBasedConnectorConfig:
                cursorColumns:
                    - string
                deletionCondition: string
                hardDeletionSyncMinIntervalInSeconds: 0
            salesforceIncludeFormulaFields: false
            scdType: string
            sequenceBies:
                - string
    lastModified: 0
    latestUpdates:
        - creationTime: string
          state: string
          updateId: string
    libraries:
        - file:
            path: string
          glob:
            include: string
          jar: string
          maven:
            coordinates: string
            exclusions:
                - string
            repo: string
          notebook:
            path: string
    name: string
    notifications:
        - alerts:
            - string
          emailRecipients:
            - string
    photon: false
    restartWindow:
        daysOfWeeks:
            - string
        startHour: 0
        timeZoneId: string
    rootPath: string
    runAs:
        servicePrincipalName: string
        userName: string
    runAsUserName: string
    schema: string
    serverless: false
    state: string
    storage: string
    tags:
        string: string
    target: string
    trigger:
        cron:
            quartzCronSchedule: string
            timezoneId: string
        manual: {}
    url: string
Pipeline Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
The Pipeline resource accepts the following input properties:
- AllowDuplicateNames bool
- Optional boolean flag. If false, deployment will fail if the name conflicts with that of another pipeline. Default is false.
- BudgetPolicyId string
- Optional string specifying the ID of the budget policy for this Lakeflow Declarative Pipeline.
- Catalog string
- The name of the catalog in Unity Catalog. Change of this parameter forces recreation of the pipeline. (Conflicts with storage).
- Cause string
- Channel string
- Optional name of the release channel for the Spark version used by the Lakeflow Declarative Pipeline. Supported values are: CURRENT (default) and PREVIEW.
- ClusterId string
- Clusters
List<PipelineCluster>
- Blocks of clusters to run the pipeline (a short sketch follows this list). If none is specified, the pipeline automatically selects a default cluster configuration. Note that Lakeflow Declarative Pipeline clusters support only a subset of cluster attributes, as described in the documentation. Also note that the autoscale block is extended with the mode parameter, which controls the autoscaling algorithm (possible values are ENHANCED for the new, enhanced autoscaling algorithm, or LEGACY for the old algorithm).
- Configuration Dictionary<string, string>
- An optional map of values to apply to the entire pipeline. Elements must be formatted as key:value pairs.
- Continuous bool
- A flag indicating whether to run the pipeline continuously. The default value is false.
- CreatorUserName string
- Deployment
PipelineDeployment
- Deployment type of this pipeline. Supports the following attributes:
- Development bool
- A flag indicating whether to run the pipeline in development mode. The default value is false.
- Edition string
- Optional name of the product edition. Supported values are: CORE, PRO, ADVANCED (default). Not required when serverless is set to true.
- Environment
PipelineEnvironment
- EventLog PipelineEventLog
- An optional block specifying a table where the LDP event log will be stored. Consists of the following fields:
- ExpectedLastModified int
- Filters
PipelineFilters
- Filters on which Pipeline packages to include in the deployed graph. This block consists of the following attributes:
- GatewayDefinition PipelineGatewayDefinition
- The definition of a gateway pipeline to support change data capture (CDC). Consists of the following attributes:
- Health string
- IngestionDefinition PipelineIngestionDefinition
- LastModified int
- LatestUpdates List<PipelineLatestUpdate>
- Libraries
List<PipelineLibrary>
- Blocks specifying the pipeline code.
- Name string
- A user-friendly name for this pipeline. The name can be used to identify pipeline jobs in the UI.
- Notifications
List<PipelineNotification>
- Photon bool
- A flag indicating whether to use the Photon engine. The default value is false.
- RestartWindow PipelineRestartWindow
- RootPath string
- An optional string specifying the root path for this pipeline. This is used as the root directory when editing the pipeline in the Databricks user interface, and it is added to sys.path when executing Python sources during pipeline execution.
- RunAs PipelineRunAs
- RunAsUserName string
- Schema string
- The default schema (database) where tables are read from or published to. The presence of this attribute implies that the pipeline is in direct publishing mode.
- Serverless bool
- An optional flag indicating whether serverless compute should be used for this Lakeflow Declarative Pipeline. Requires catalog to be set, as serverless can only be used with Unity Catalog.
- State string
- Storage string
- A location on cloud storage where output data and metadata required for pipeline execution are stored. By default, tables are stored in a subdirectory of this location. Change of this parameter forces recreation of the pipeline. (Conflicts with catalog).
- Tags Dictionary<string, string>
- A map of tags associated with the pipeline. These are forwarded to the cluster as cluster tags, and are therefore subject to the same limitations. A maximum of 25 tags can be added to the pipeline.
- Target string
- The name of a database (in either the Hive metastore or in a UC catalog) for persisting pipeline output data. Configuring the target setting allows you to view and query the pipeline output data from the Databricks UI.
- Trigger
PipelineTrigger
- Url string
- URL of the Lakeflow Declarative Pipeline on the given workspace.
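For illustration, here is a minimal TypeScript sketch (resource name, configuration keys, and notebook path are hypothetical) that combines the clusters block with the ENHANCED autoscaling mode and a pipeline-wide configuration map described above:
import * as databricks from "@pulumi/databricks";

// Hypothetical pipeline: one autoscaling cluster plus pipeline-wide configuration.
const autoscaled = new databricks.Pipeline("autoscaled", {
    name: "Autoscaled Pipeline",
    clusters: [{
        label: "default",
        autoscale: {
            minWorkers: 1,
            maxWorkers: 5,
            mode: "ENHANCED", // enhanced autoscaling algorithm; use "LEGACY" for the old one
        },
    }],
    configuration: {
        "pipeline.env": "dev", // arbitrary key:value pairs applied to the whole pipeline
    },
    libraries: [{
        notebook: {
            path: "/Workspace/Users/someone@example.com/ldp_notebook", // hypothetical notebook path
        },
    }],
});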
- AllowDuplicateNames bool
- Optional boolean flag. If false, deployment will fail if the name conflicts with that of another pipeline. Default is false.
- BudgetPolicyId string
- Optional string specifying the ID of the budget policy for this Lakeflow Declarative Pipeline.
- Catalog string
- The name of the catalog in Unity Catalog. Change of this parameter forces recreation of the pipeline. (Conflicts with storage).
- Cause string
- Channel string
- Optional name of the release channel for the Spark version used by the Lakeflow Declarative Pipeline. Supported values are: CURRENT (default) and PREVIEW.
- ClusterId string
- Clusters
[]PipelineClusterArgs
- Blocks of clusters to run the pipeline. If none is specified, the pipeline automatically selects a default cluster configuration. Note that Lakeflow Declarative Pipeline clusters support only a subset of cluster attributes, as described in the documentation. Also note that the autoscale block is extended with the mode parameter, which controls the autoscaling algorithm (possible values are ENHANCED for the new, enhanced autoscaling algorithm, or LEGACY for the old algorithm).
- Configuration map[string]string
- An optional map of values to apply to the entire pipeline. Elements must be formatted as key:value pairs.
- Continuous bool
- A flag indicating whether to run the pipeline continuously. The default value is false.
- CreatorUserName string
- Deployment
PipelineDeploymentArgs
- Deployment type of this pipeline. Supports the following attributes:
- Development bool
- A flag indicating whether to run the pipeline in development mode. The default value is false.
- Edition string
- Optional name of the product edition. Supported values are: CORE, PRO, ADVANCED (default). Not required when serverless is set to true.
- Environment
PipelineEnvironmentArgs
- EventLog PipelineEventLogArgs
- An optional block specifying a table where the LDP event log will be stored. Consists of the following fields:
- ExpectedLastModified int
- Filters
PipelineFiltersArgs
- Filters on which Pipeline packages to include in the deployed graph. This block consists of the following attributes:
- GatewayDefinition PipelineGatewayDefinitionArgs
- The definition of a gateway pipeline to support change data capture (CDC). Consists of the following attributes:
- Health string
- IngestionDefinition PipelineIngestionDefinitionArgs
- LastModified int
- LatestUpdates []PipelineLatestUpdateArgs
- Libraries
[]PipelineLibraryArgs
- Blocks specifying the pipeline code.
- Name string
- A user-friendly name for this pipeline. The name can be used to identify pipeline jobs in the UI.
- Notifications
[]PipelineNotificationArgs
- Photon bool
- A flag indicating whether to use the Photon engine. The default value is false.
- RestartWindow PipelineRestartWindowArgs
- RootPath string
- An optional string specifying the root path for this pipeline. This is used as the root directory when editing the pipeline in the Databricks user interface, and it is added to sys.path when executing Python sources during pipeline execution.
- RunAs PipelineRunAsArgs
- RunAsUserName string
- Schema string
- The default schema (database) where tables are read from or published to. The presence of this attribute implies that the pipeline is in direct publishing mode.
- Serverless bool
- An optional flag indicating whether serverless compute should be used for this Lakeflow Declarative Pipeline. Requires catalog to be set, as serverless can only be used with Unity Catalog.
- State string
- Storage string
- A location on cloud storage where output data and metadata required for pipeline execution are stored. By default, tables are stored in a subdirectory of this location. Change of this parameter forces recreation of the pipeline. (Conflicts with catalog).
- Tags map[string]string
- A map of tags associated with the pipeline. These are forwarded to the cluster as cluster tags, and are therefore subject to the same limitations. A maximum of 25 tags can be added to the pipeline.
- Target string
- The name of a database (in either the Hive metastore or in a UC catalog) for persisting pipeline output data. Configuring the target setting allows you to view and query the pipeline output data from the Databricks UI.
- Trigger
PipelineTriggerArgs
- Url string
- URL of the Lakeflow Declarative Pipeline on the given workspace.
- allowDuplicateNames Boolean
- Optional boolean flag. If false, deployment will fail if the name conflicts with that of another pipeline. Default is false.
- budgetPolicyId String
- Optional string specifying the ID of the budget policy for this Lakeflow Declarative Pipeline.
- catalog String
- The name of the catalog in Unity Catalog. Change of this parameter forces recreation of the pipeline. (Conflicts with storage).
- cause String
- channel String
- Optional name of the release channel for the Spark version used by the Lakeflow Declarative Pipeline. Supported values are: CURRENT (default) and PREVIEW.
- clusterId String
- clusters
List<PipelineCluster>
- Blocks of clusters to run the pipeline. If none is specified, the pipeline automatically selects a default cluster configuration. Note that Lakeflow Declarative Pipeline clusters support only a subset of cluster attributes, as described in the documentation. Also note that the autoscale block is extended with the mode parameter, which controls the autoscaling algorithm (possible values are ENHANCED for the new, enhanced autoscaling algorithm, or LEGACY for the old algorithm).
- configuration Map<String,String>
- An optional map of values to apply to the entire pipeline. Elements must be formatted as key:value pairs.
- continuous Boolean
- A flag indicating whether to run the pipeline continuously. The default value is false.
- creatorUserName String
- deployment
PipelineDeployment
- Deployment type of this pipeline. Supports the following attributes:
- development Boolean
- A flag indicating whether to run the pipeline in development mode. The default value is false.
- edition String
- Optional name of the product edition. Supported values are: CORE, PRO, ADVANCED (default). Not required when serverless is set to true.
- environment
PipelineEnvironment
- eventLog PipelineEventLog
- An optional block specifying a table where the LDP event log will be stored. Consists of the following fields:
- expectedLastModified Integer
- filters
PipelineFilters
- Filters on which Pipeline packages to include in the deployed graph. This block consists of the following attributes:
- gatewayDefinition PipelineGatewayDefinition
- The definition of a gateway pipeline to support change data capture (CDC). Consists of the following attributes:
- health String
- ingestionDefinition PipelineIngestionDefinition
- lastModified Integer
- latestUpdates List<PipelineLatestUpdate>
- libraries
List<PipelineLibrary>
- Blocks specifying the pipeline code.
- name String
- A user-friendly name for this pipeline. The name can be used to identify pipeline jobs in the UI.
- notifications
List<PipelineNotification>
- photon Boolean
- A flag indicating whether to use the Photon engine. The default value is false.
- restartWindow PipelineRestartWindow
- rootPath String
- An optional string specifying the root path for this pipeline. This is used as the root directory when editing the pipeline in the Databricks user interface, and it is added to sys.path when executing Python sources during pipeline execution.
- runAs PipelineRunAs
- runAsUserName String
- schema String
- The default schema (database) where tables are read from or published to. The presence of this attribute implies that the pipeline is in direct publishing mode.
- serverless Boolean
- An optional flag indicating whether serverless compute should be used for this Lakeflow Declarative Pipeline. Requires catalog to be set, as serverless can only be used with Unity Catalog.
- state String
- storage String
- A location on cloud storage where output data and metadata required for pipeline execution are stored. By default, tables are stored in a subdirectory of this location. Change of this parameter forces recreation of the pipeline. (Conflicts with catalog).
- tags Map<String,String>
- A map of tags associated with the pipeline. These are forwarded to the cluster as cluster tags, and are therefore subject to the same limitations. A maximum of 25 tags can be added to the pipeline.
- target String
- The name of a database (in either the Hive metastore or in a UC catalog) for persisting pipeline output data. Configuring the target setting allows you to view and query the pipeline output data from the Databricks UI.
- trigger
PipelineTrigger
- url String
- URL of the Lakeflow Declarative Pipeline on the given workspace.
- allowDuplicateNames boolean
- Optional boolean flag. If false, deployment will fail if the name conflicts with that of another pipeline. Default is false.
- budgetPolicyId string
- Optional string specifying the ID of the budget policy for this Lakeflow Declarative Pipeline.
- catalog string
- The name of the catalog in Unity Catalog. Change of this parameter forces recreation of the pipeline. (Conflicts with storage).
- cause string
- channel string
- Optional name of the release channel for the Spark version used by the Lakeflow Declarative Pipeline. Supported values are: CURRENT (default) and PREVIEW.
- clusterId string
- clusters
PipelineCluster[]
- Blocks of clusters to run the pipeline. If none is specified, the pipeline automatically selects a default cluster configuration. Note that Lakeflow Declarative Pipeline clusters support only a subset of cluster attributes, as described in the documentation. Also note that the autoscale block is extended with the mode parameter, which controls the autoscaling algorithm (possible values are ENHANCED for the new, enhanced autoscaling algorithm, or LEGACY for the old algorithm).
- configuration {[key: string]: string}
- An optional map of values to apply to the entire pipeline. Elements must be formatted as key:value pairs.
- continuous boolean
- A flag indicating whether to run the pipeline continuously. The default value is false.
- creatorUserName string
- deployment
PipelineDeployment
- Deployment type of this pipeline. Supports the following attributes:
- development boolean
- A flag indicating whether to run the pipeline in development mode. The default value is false.
- edition string
- Optional name of the product edition. Supported values are: CORE, PRO, ADVANCED (default). Not required when serverless is set to true.
- environment
PipelineEnvironment
- eventLog PipelineEventLog
- An optional block specifying a table where the LDP event log will be stored. Consists of the following fields:
- expectedLastModified number
- filters
PipelineFilters
- Filters on which Pipeline packages to include in the deployed graph. This block consists of the following attributes:
- gatewayDefinition PipelineGatewayDefinition
- The definition of a gateway pipeline to support change data capture (CDC). Consists of the following attributes:
- health string
- ingestionDefinition PipelineIngestionDefinition
- lastModified number
- latestUpdates PipelineLatestUpdate[]
- libraries
PipelineLibrary[]
- Blocks specifying the pipeline code.
- name string
- A user-friendly name for this pipeline. The name can be used to identify pipeline jobs in the UI.
- notifications
PipelineNotification[]
- photon boolean
- A flag indicating whether to use the Photon engine. The default value is false.
- restartWindow PipelineRestartWindow
- rootPath string
- An optional string specifying the root path for this pipeline. This is used as the root directory when editing the pipeline in the Databricks user interface, and it is added to sys.path when executing Python sources during pipeline execution.
- runAs PipelineRunAs
- runAsUserName string
- schema string
- The default schema (database) where tables are read from or published to. The presence of this attribute implies that the pipeline is in direct publishing mode.
- serverless boolean
- An optional flag indicating whether serverless compute should be used for this Lakeflow Declarative Pipeline. Requires catalog to be set, as serverless can only be used with Unity Catalog.
- state string
- storage string
- A location on cloud storage where output data and metadata required for pipeline execution are stored. By default, tables are stored in a subdirectory of this location. Change of this parameter forces recreation of the pipeline. (Conflicts with catalog).
- tags {[key: string]: string}
- A map of tags associated with the pipeline. These are forwarded to the cluster as cluster tags, and are therefore subject to the same limitations. A maximum of 25 tags can be added to the pipeline.
- target string
- The name of a database (in either the Hive metastore or in a UC catalog) for persisting pipeline output data. Configuring the target setting allows you to view and query the pipeline output data from the Databricks UI.
- trigger
PipelineTrigger
- url string
- URL of the Lakeflow Declarative Pipeline on the given workspace.
- allow_duplicate_names bool
- Optional boolean flag. If false, deployment will fail if the name conflicts with that of another pipeline. Default is false.
- budget_policy_id str
- Optional string specifying the ID of the budget policy for this Lakeflow Declarative Pipeline.
- catalog str
- The name of the catalog in Unity Catalog. Change of this parameter forces recreation of the pipeline. (Conflicts with storage).
- cause str
- channel str
- Optional name of the release channel for the Spark version used by the Lakeflow Declarative Pipeline. Supported values are: CURRENT (default) and PREVIEW.
- cluster_id str
- clusters
Sequence[PipelineClusterArgs]
- Blocks of clusters to run the pipeline. If none is specified, the pipeline automatically selects a default cluster configuration. Note that Lakeflow Declarative Pipeline clusters support only a subset of cluster attributes, as described in the documentation. Also note that the autoscale block is extended with the mode parameter, which controls the autoscaling algorithm (possible values are ENHANCED for the new, enhanced autoscaling algorithm, or LEGACY for the old algorithm).
- configuration Mapping[str, str]
- An optional map of values to apply to the entire pipeline. Elements must be formatted as key:value pairs.
- continuous bool
- A flag indicating whether to run the pipeline continuously. The default value is false.
- creator_user_name str
- deployment
PipelineDeploymentArgs
- Deployment type of this pipeline. Supports the following attributes:
- development bool
- A flag indicating whether to run the pipeline in development mode. The default value is false.
- edition str
- Optional name of the product edition. Supported values are: CORE, PRO, ADVANCED (default). Not required when serverless is set to true.
- environment
PipelineEnvironmentArgs
- event_log PipelineEventLogArgs
- An optional block specifying a table where the LDP event log will be stored. Consists of the following fields:
- expected_last_modified int
- filters
PipelineFiltersArgs
- Filters on which Pipeline packages to include in the deployed graph. This block consists of the following attributes:
- gateway_definition PipelineGatewayDefinitionArgs
- The definition of a gateway pipeline to support change data capture (CDC); a short sketch follows this list. Consists of the following attributes:
- health str
- ingestion_definition PipelineIngestionDefinitionArgs
- last_modified int
- latest_updates Sequence[PipelineLatestUpdateArgs]
- libraries
Sequence[PipelineLibraryArgs]
- Blocks specifying the pipeline code.
- name str
- A user-friendly name for this pipeline. The name can be used to identify pipeline jobs in the UI.
- notifications
Sequence[PipelineNotificationArgs]
- photon bool
- A flag indicating whether to use the Photon engine. The default value is false.
- restart_window PipelineRestartWindowArgs
- root_path str
- An optional string specifying the root path for this pipeline. This is used as the root directory when editing the pipeline in the Databricks user interface, and it is added to sys.path when executing Python sources during pipeline execution.
- run_as PipelineRunAsArgs
- run_as_user_name str
- schema str
- The default schema (database) where tables are read from or published to. The presence of this attribute implies that the pipeline is in direct publishing mode.
- serverless bool
- An optional flag indicating whether serverless compute should be used for this Lakeflow Declarative Pipeline. Requires catalog to be set, as serverless can only be used with Unity Catalog.
- state str
- storage str
- A location on cloud storage where output data and metadata required for pipeline execution are stored. By default, tables are stored in a subdirectory of this location. Change of this parameter forces recreation of the pipeline. (Conflicts with catalog).
- tags Mapping[str, str]
- A map of tags associated with the pipeline. These are forwarded to the cluster as cluster tags, and are therefore subject to the same limitations. A maximum of 25 tags can be added to the pipeline.
- target str
- The name of a database (in either the Hive metastore or in a UC catalog) for persisting pipeline output data. Configuring the target setting allows you to view and query the pipeline output data from the Databricks UI.
- trigger
PipelineTriggerArgs
- url str
- URL of the Lakeflow Declarative Pipeline on the given workspace.
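As a rough TypeScript sketch of the CDC gateway definition mentioned above (connection, catalog, schema, and table names are hypothetical, and the exact required fields may vary by connector), a gateway pipeline and an ingestion pipeline could be wired together like this:
import * as databricks from "@pulumi/databricks";

// Hypothetical gateway pipeline that captures changes from an existing connection.
const gateway = new databricks.Pipeline("cdc-gateway", {
    name: "cdc-gateway",
    gatewayDefinition: {
        connectionName: "source_connection",      // hypothetical Unity Catalog connection
        gatewayStorageCatalog: "main",
        gatewayStorageSchema: "cdc_staging",
        gatewayStorageName: "cdc-gateway-storage",
    },
});

// Hypothetical ingestion pipeline that lands one source table into Unity Catalog,
// assuming the gateway pipeline's ID can be referenced as ingestionGatewayId.
const ingestion = new databricks.Pipeline("cdc-ingestion", {
    name: "cdc-ingestion",
    ingestionDefinition: {
        ingestionGatewayId: gateway.id,
        objects: [{
            table: {
                sourceCatalog: "source_db",
                sourceSchema: "dbo",
                sourceTable: "orders",
                destinationCatalog: "main",
                destinationSchema: "bronze",
            },
        }],
    },
});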
- allowDuplicateNames Boolean
- Optional boolean flag. If false, deployment will fail if the name conflicts with that of another pipeline. Default is false.
- budgetPolicyId String
- Optional string specifying the ID of the budget policy for this Lakeflow Declarative Pipeline.
- catalog String
- The name of the catalog in Unity Catalog. Change of this parameter forces recreation of the pipeline. (Conflicts with storage).
- cause String
- channel String
- Optional name of the release channel for the Spark version used by the Lakeflow Declarative Pipeline. Supported values are: CURRENT (default) and PREVIEW.
- clusterId String
- clusters List<Property Map>
- Blocks of clusters to run the pipeline. If none is specified, the pipeline automatically selects a default cluster configuration. Note that Lakeflow Declarative Pipeline clusters support only a subset of cluster attributes, as described in the documentation. Also note that the autoscale block is extended with the mode parameter, which controls the autoscaling algorithm (possible values are ENHANCED for the new, enhanced autoscaling algorithm, or LEGACY for the old algorithm).
- configuration Map<String>
- An optional map of values to apply to the entire pipeline. Elements must be formatted as key:value pairs.
- continuous Boolean
- A flag indicating whether to run the pipeline continuously. The default value is false.
- creatorUserName String
- deployment Property Map
- Deployment type of this pipeline. Supports the following attributes:
- development Boolean
- A flag indicating whether to run the pipeline in development mode. The default value is false.
- edition String
- Optional name of the product edition. Supported values are: CORE, PRO, ADVANCED (default). Not required when serverless is set to true.
- environment Property Map
- eventLog Property Map
- An optional block specifying a table where the LDP event log will be stored. Consists of the following fields:
- expectedLastModified Number
- filters Property Map
- Filters on which Pipeline packages to include in the deployed graph. This block consists of the following attributes:
- gatewayDefinition Property Map
- The definition of a gateway pipeline to support change data capture (CDC). Consists of the following attributes:
- health String
- ingestionDefinition Property Map
- lastModified Number
- latestUpdates List<Property Map>
- libraries List<Property Map>
- Blocks specifying the pipeline code.
- name String
- A user-friendly name for this pipeline. The name can be used to identify pipeline jobs in the UI.
- notifications List<Property Map>
- photon Boolean
- A flag indicating whether to use the Photon engine. The default value is false.
- restartWindow Property Map
- rootPath String
- An optional string specifying the root path for this pipeline. This is used as the root directory when editing the pipeline in the Databricks user interface, and it is added to sys.path when executing Python sources during pipeline execution.
- runAs Property Map
- runAsUserName String
- schema String
- The default schema (database) where tables are read from or published to. The presence of this attribute implies that the pipeline is in direct publishing mode.
- serverless Boolean
- An optional flag indicating whether serverless compute should be used for this Lakeflow Declarative Pipeline. Requires catalog to be set, as serverless can only be used with Unity Catalog. A short sketch follows this list.
- state String
- storage String
- A location on cloud storage where output data and metadata required for pipeline execution are stored. By default, tables are stored in a subdirectory of this location. Change of this parameter forces recreation of the pipeline. (Conflicts with catalog).
- tags Map<String>
- A map of tags associated with the pipeline. These are forwarded to the cluster as cluster tags, and are therefore subject to the same limitations. A maximum of 25 tags can be added to the pipeline.
- target String
- The name of a database (in either the Hive metastore or in a UC catalog) for persisting pipeline output data. Configuring the target setting allows you to view and query the pipeline output data from the Databricks UI.
- trigger Property Map
- url String
- URL of the Lakeflow Declarative Pipeline on the given workspace.
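A minimal TypeScript sketch of a serverless, Unity Catalog-backed pipeline (catalog, schema, and notebook path are hypothetical), also showing the eventLog, restartWindow, and cron trigger blocks described above:
import * as databricks from "@pulumi/databricks";

// Hypothetical serverless pipeline; serverless requires `catalog`, and `schema`
// puts the pipeline into direct publishing mode.
const serverlessPipeline = new databricks.Pipeline("serverless", {
    name: "Serverless Pipeline",
    serverless: true,
    catalog: "main",
    schema: "ldp_demo",
    eventLog: {
        name: "pipeline_event_log",  // table that stores the LDP event log
        catalog: "main",
        schema: "ldp_demo",
    },
    restartWindow: {
        startHour: 2,
        daysOfWeeks: ["SUNDAY"],     // assumed day-name format
        timeZoneId: "UTC",
    },
    trigger: {
        cron: {
            quartzCronSchedule: "0 0 1 * * ?",  // Quartz expression: daily at 01:00
            timezoneId: "UTC",
        },
    },
    libraries: [{
        notebook: {
            path: "/Workspace/Users/someone@example.com/ldp_notebook",  // hypothetical
        },
    }],
});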
Outputs
All input properties are implicitly available as output properties. Additionally, the Pipeline resource produces the following output properties:
- Id string
- The provider-assigned unique ID for this managed resource.
- Id string
- The provider-assigned unique ID for this managed resource.
- id String
- The provider-assigned unique ID for this managed resource.
- id string
- The provider-assigned unique ID for this managed resource.
- id str
- The provider-assigned unique ID for this managed resource.
- id String
- The provider-assigned unique ID for this managed resource.
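For example, in TypeScript the provider-assigned ID and other attributes such as url can be exported as stack outputs (assuming a pipeline resource named `pipeline` defined earlier in the program):
// `pipeline` is assumed to be a databricks.Pipeline created elsewhere in this program.
export const pipelineId = pipeline.id;
export const pipelineUrl = pipeline.url;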
Look up Existing Pipeline Resource
Get an existing Pipeline resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: PipelineState, opts?: CustomResourceOptions): Pipeline
@staticmethod
def get(resource_name: str,
        id: str,
        opts: Optional[ResourceOptions] = None,
        allow_duplicate_names: Optional[bool] = None,
        budget_policy_id: Optional[str] = None,
        catalog: Optional[str] = None,
        cause: Optional[str] = None,
        channel: Optional[str] = None,
        cluster_id: Optional[str] = None,
        clusters: Optional[Sequence[PipelineClusterArgs]] = None,
        configuration: Optional[Mapping[str, str]] = None,
        continuous: Optional[bool] = None,
        creator_user_name: Optional[str] = None,
        deployment: Optional[PipelineDeploymentArgs] = None,
        development: Optional[bool] = None,
        edition: Optional[str] = None,
        environment: Optional[PipelineEnvironmentArgs] = None,
        event_log: Optional[PipelineEventLogArgs] = None,
        expected_last_modified: Optional[int] = None,
        filters: Optional[PipelineFiltersArgs] = None,
        gateway_definition: Optional[PipelineGatewayDefinitionArgs] = None,
        health: Optional[str] = None,
        ingestion_definition: Optional[PipelineIngestionDefinitionArgs] = None,
        last_modified: Optional[int] = None,
        latest_updates: Optional[Sequence[PipelineLatestUpdateArgs]] = None,
        libraries: Optional[Sequence[PipelineLibraryArgs]] = None,
        name: Optional[str] = None,
        notifications: Optional[Sequence[PipelineNotificationArgs]] = None,
        photon: Optional[bool] = None,
        restart_window: Optional[PipelineRestartWindowArgs] = None,
        root_path: Optional[str] = None,
        run_as: Optional[PipelineRunAsArgs] = None,
        run_as_user_name: Optional[str] = None,
        schema: Optional[str] = None,
        serverless: Optional[bool] = None,
        state: Optional[str] = None,
        storage: Optional[str] = None,
        tags: Optional[Mapping[str, str]] = None,
        target: Optional[str] = None,
        trigger: Optional[PipelineTriggerArgs] = None,
        url: Optional[str] = None) -> Pipeline
func GetPipeline(ctx *Context, name string, id IDInput, state *PipelineState, opts ...ResourceOption) (*Pipeline, error)
public static Pipeline Get(string name, Input<string> id, PipelineState? state, CustomResourceOptions? opts = null)
public static Pipeline get(String name, Output<String> id, PipelineState state, CustomResourceOptions options)
resources:
  _:
    type: databricks:Pipeline
    get:
      id: ${id}
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
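For example, a TypeScript sketch of looking up an existing pipeline by its ID (the ID value shown here is hypothetical):
import * as databricks from "@pulumi/databricks";

// Reference a pipeline that already exists in the workspace without managing its lifecycle.
const existing = databricks.Pipeline.get("existing-pipeline", "12345678-1234-1234-1234-123456789012");
export const existingPipelineName = existing.name;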
- AllowDuplicateNames bool
- Optional boolean flag. If false, deployment will fail if the name conflicts with that of another pipeline. Default is false.
- BudgetPolicyId string
- Optional string specifying the ID of the budget policy for this Lakeflow Declarative Pipeline.
- Catalog string
- The name of the catalog in Unity Catalog. Change of this parameter forces recreation of the pipeline. (Conflicts with storage).
- Cause string
- Channel string
- Optional name of the release channel for the Spark version used by the Lakeflow Declarative Pipeline. Supported values are: CURRENT (default) and PREVIEW.
- ClusterId string
- Clusters
List<PipelineCluster>
- Blocks of clusters to run the pipeline. If none is specified, the pipeline automatically selects a default cluster configuration. Note that Lakeflow Declarative Pipeline clusters support only a subset of cluster attributes, as described in the documentation. Also note that the autoscale block is extended with the mode parameter, which controls the autoscaling algorithm (possible values are ENHANCED for the new, enhanced autoscaling algorithm, or LEGACY for the old algorithm).
- Configuration Dictionary<string, string>
- An optional map of values to apply to the entire pipeline. Elements must be formatted as key:value pairs.
- Continuous bool
- A flag indicating whether to run the pipeline continuously. The default value is false.
- CreatorUserName string
- Deployment
PipelineDeployment
- Deployment type of this pipeline. Supports the following attributes:
- Development bool
- A flag indicating whether to run the pipeline in development mode. The default value is false.
- Edition string
- Optional name of the product edition. Supported values are: CORE, PRO, ADVANCED (default). Not required when serverless is set to true.
- Environment
PipelineEnvironment
- EventLog PipelineEventLog
- An optional block specifying a table where the LDP event log will be stored. Consists of the following fields:
- ExpectedLastModified int
- Filters
PipelineFilters
- Filters on which Pipeline packages to include in the deployed graph. This block consists of the following attributes:
- GatewayDefinition PipelineGatewayDefinition
- The definition of a gateway pipeline to support change data capture (CDC). Consists of the following attributes:
- Health string
- IngestionDefinition PipelineIngestionDefinition
- LastModified int
- LatestUpdates List<PipelineLatestUpdate>
- Libraries
List<PipelineLibrary>
- Blocks specifying the pipeline code.
- Name string
- A user-friendly name for this pipeline. The name can be used to identify pipeline jobs in the UI.
- Notifications
List<PipelineNotification>
- Photon bool
- A flag indicating whether to use the Photon engine. The default value is false.
- RestartWindow PipelineRestartWindow
- RootPath string
- An optional string specifying the root path for this pipeline. This is used as the root directory when editing the pipeline in the Databricks user interface, and it is added to sys.path when executing Python sources during pipeline execution.
- RunAs PipelineRunAs
- RunAsUserName string
- Schema string
- The default schema (database) where tables are read from or published to. The presence of this attribute implies that the pipeline is in direct publishing mode.
- Serverless bool
- An optional flag indicating whether serverless compute should be used for this Lakeflow Declarative Pipeline. Requires catalog to be set, as serverless can only be used with Unity Catalog.
- State string
- Storage string
- A location on cloud storage where output data and metadata required for pipeline execution are stored. By default, tables are stored in a subdirectory of this location. Change of this parameter forces recreation of the pipeline. (Conflicts with catalog).
- Tags Dictionary<string, string>
- A map of tags associated with the pipeline. These are forwarded to the cluster as cluster tags, and are therefore subject to the same limitations. A maximum of 25 tags can be added to the pipeline.
- Target string
- The name of a database (in either the Hive metastore or in a UC catalog) for persisting pipeline output data. Configuring the target setting allows you to view and query the pipeline output data from the Databricks UI.
- Trigger
PipelineTrigger
- Url string
- URL of the Lakeflow Declarative Pipeline on the given workspace.
- AllowDuplicateNames bool
- Optional boolean flag. If false, deployment will fail if the name conflicts with that of another pipeline. The default is false.
- BudgetPolicyId string
- Optional string specifying the ID of the budget policy for this Lakeflow Declarative Pipeline.
- Catalog string
- The name of the catalog in Unity Catalog. Change of this parameter forces recreation of the pipeline. (Conflicts with storage).
- Cause string
- Channel string
- Optional name of the release channel for the Spark version used by the Lakeflow Declarative Pipeline. Supported values are: CURRENT (default) and PREVIEW.
- ClusterId string
- Clusters []PipelineClusterArgs
- blocks - Clusters to run the pipeline. If none is specified, pipelines will automatically select a default cluster configuration for the pipeline. Please note that Lakeflow Declarative Pipeline clusters support only a subset of attributes, as described in the documentation. Also, note that the autoscale block is extended with the mode parameter, which controls the autoscaling algorithm (possible values are ENHANCED for the new, enhanced autoscaling algorithm, or LEGACY for the old algorithm).
- Configuration map[string]string
- An optional list of values to apply to the entire pipeline. Elements must be formatted as key:value pairs.
- Continuous bool
- A flag indicating whether to run the pipeline continuously. The default value is false.
- CreatorUserName string
- Deployment PipelineDeploymentArgs
- Deployment type of this pipeline. Supports the following attributes:
- Development bool
- A flag indicating whether to run the pipeline in development mode. The default value is false.
- Edition string
- Optional name of the product edition. Supported values are: CORE, PRO, ADVANCED (default). Not required when serverless is set to true.
- Environment PipelineEnvironmentArgs
- EventLog PipelineEventLogArgs
- An optional block specifying a table where the LDP Event Log will be stored. Consists of the following fields:
- ExpectedLastModified int
- Filters PipelineFiltersArgs
- Filters on which Pipeline packages to include in the deployed graph. This block consists of the following attributes:
- GatewayDefinition PipelineGatewayDefinitionArgs
- The definition of a gateway pipeline to support CDC. Consists of the following attributes:
- Health string
- IngestionDefinition PipelineIngestionDefinitionArgs
- LastModified int
- LatestUpdates []PipelineLatestUpdateArgs
- Libraries []PipelineLibraryArgs
- blocks - Specifies pipeline code.
- Name string
- A user-friendly name for this pipeline. The name can be used to identify pipeline jobs in the UI.
- Notifications []PipelineNotificationArgs
- Photon bool
- A flag indicating whether to use the Photon engine. The default value is false.
- RestartWindow PipelineRestartWindowArgs
- RootPath string
- An optional string specifying the root path for this pipeline. It is used as the root directory when editing the pipeline in the Databricks user interface, and is added to sys.path when executing Python sources during pipeline execution.
- RunAs PipelineRunAsArgs
- RunAsUserName string
- Schema string
- The default schema (database) where tables are read from or published to. The presence of this attribute implies that the pipeline is in direct publishing mode.
- Serverless bool
- An optional flag indicating if serverless compute should be used for this Lakeflow Declarative Pipeline. Requires catalog to be set, as it can only be used with Unity Catalog.
- State string
- Storage string
- A location on cloud storage where output data and metadata required for pipeline execution are stored. By default, tables are stored in a subdirectory of this location. Change of this parameter forces recreation of the pipeline. (Conflicts with catalog).
- Tags map[string]string
- A map of tags associated with the pipeline. These are forwarded to the cluster as cluster tags, and are therefore subject to the same limitations. A maximum of 25 tags can be added to the pipeline.
- Target string
- The name of a database (in either the Hive metastore or in a UC catalog) for persisting pipeline output data. Configuring the target setting allows you to view and query the pipeline output data from the Databricks UI.
- Trigger PipelineTriggerArgs
- Url string
- URL of the Lakeflow Declarative Pipeline on the given workspace.
- allowDuplicateNames Boolean
- Optional boolean flag. If false, deployment will fail if the name conflicts with that of another pipeline. The default is false.
- budgetPolicyId String
- Optional string specifying the ID of the budget policy for this Lakeflow Declarative Pipeline.
- catalog String
- The name of the catalog in Unity Catalog. Change of this parameter forces recreation of the pipeline. (Conflicts with storage).
- cause String
- channel String
- Optional name of the release channel for the Spark version used by the Lakeflow Declarative Pipeline. Supported values are: CURRENT (default) and PREVIEW.
- clusterId String
- clusters List<PipelineCluster>
- blocks - Clusters to run the pipeline. If none is specified, pipelines will automatically select a default cluster configuration for the pipeline. Please note that Lakeflow Declarative Pipeline clusters support only a subset of attributes, as described in the documentation. Also, note that the autoscale block is extended with the mode parameter, which controls the autoscaling algorithm (possible values are ENHANCED for the new, enhanced autoscaling algorithm, or LEGACY for the old algorithm).
- configuration Map<String,String>
- An optional list of values to apply to the entire pipeline. Elements must be formatted as key:value pairs.
- continuous Boolean
- A flag indicating whether to run the pipeline continuously. The default value is false.
- creatorUserName String
- deployment PipelineDeployment
- Deployment type of this pipeline. Supports the following attributes:
- development Boolean
- A flag indicating whether to run the pipeline in development mode. The default value is false.
- edition String
- Optional name of the product edition. Supported values are: CORE, PRO, ADVANCED (default). Not required when serverless is set to true.
- environment PipelineEnvironment
- eventLog PipelineEventLog
- An optional block specifying a table where the LDP Event Log will be stored. Consists of the following fields:
- expectedLastModified Integer
- filters PipelineFilters
- Filters on which Pipeline packages to include in the deployed graph. This block consists of the following attributes:
- gatewayDefinition PipelineGatewayDefinition
- The definition of a gateway pipeline to support CDC. Consists of the following attributes:
- health String
- ingestionDefinition PipelineIngestionDefinition
- lastModified Integer
- latestUpdates List<PipelineLatestUpdate>
- libraries List<PipelineLibrary>
- blocks - Specifies pipeline code.
- name String
- A user-friendly name for this pipeline. The name can be used to identify pipeline jobs in the UI.
- notifications List<PipelineNotification>
- photon Boolean
- A flag indicating whether to use the Photon engine. The default value is false.
- restartWindow PipelineRestartWindow
- rootPath String
- An optional string specifying the root path for this pipeline. It is used as the root directory when editing the pipeline in the Databricks user interface, and is added to sys.path when executing Python sources during pipeline execution.
- runAs PipelineRunAs
- runAsUserName String
- schema String
- The default schema (database) where tables are read from or published to. The presence of this attribute implies that the pipeline is in direct publishing mode.
- serverless Boolean
- An optional flag indicating if serverless compute should be used for this Lakeflow Declarative Pipeline. Requires catalog to be set, as it can only be used with Unity Catalog.
- state String
- storage String
- A location on cloud storage where output data and metadata required for pipeline execution are stored. By default, tables are stored in a subdirectory of this location. Change of this parameter forces recreation of the pipeline. (Conflicts with catalog).
- tags Map<String,String>
- A map of tags associated with the pipeline. These are forwarded to the cluster as cluster tags, and are therefore subject to the same limitations. A maximum of 25 tags can be added to the pipeline.
- target String
- The name of a database (in either the Hive metastore or in a UC catalog) for persisting pipeline output data. Configuring the target setting allows you to view and query the pipeline output data from the Databricks UI.
- trigger PipelineTrigger
- url String
- URL of the Lakeflow Declarative Pipeline on the given workspace.
- allowDuplicateNames boolean
- Optional boolean flag. If false, deployment will fail if the name conflicts with that of another pipeline. The default is false.
- budgetPolicyId string
- Optional string specifying the ID of the budget policy for this Lakeflow Declarative Pipeline.
- catalog string
- The name of the catalog in Unity Catalog. Change of this parameter forces recreation of the pipeline. (Conflicts with storage).
- cause string
- channel string
- Optional name of the release channel for the Spark version used by the Lakeflow Declarative Pipeline. Supported values are: CURRENT (default) and PREVIEW.
- clusterId string
- clusters PipelineCluster[]
- blocks - Clusters to run the pipeline. If none is specified, pipelines will automatically select a default cluster configuration for the pipeline. Please note that Lakeflow Declarative Pipeline clusters support only a subset of attributes, as described in the documentation. Also, note that the autoscale block is extended with the mode parameter, which controls the autoscaling algorithm (possible values are ENHANCED for the new, enhanced autoscaling algorithm, or LEGACY for the old algorithm).
- configuration {[key: string]: string}
- An optional list of values to apply to the entire pipeline. Elements must be formatted as key:value pairs.
- continuous boolean
- A flag indicating whether to run the pipeline continuously. The default value is false.
- creatorUserName string
- deployment PipelineDeployment
- Deployment type of this pipeline. Supports the following attributes:
- development boolean
- A flag indicating whether to run the pipeline in development mode. The default value is false.
- edition string
- Optional name of the product edition. Supported values are: CORE, PRO, ADVANCED (default). Not required when serverless is set to true.
- environment PipelineEnvironment
- eventLog PipelineEventLog
- An optional block specifying a table where the LDP Event Log will be stored. Consists of the following fields:
- expectedLastModified number
- filters PipelineFilters
- Filters on which Pipeline packages to include in the deployed graph. This block consists of the following attributes:
- gatewayDefinition PipelineGatewayDefinition
- The definition of a gateway pipeline to support CDC. Consists of the following attributes:
- health string
- ingestionDefinition PipelineIngestionDefinition
- lastModified number
- latestUpdates PipelineLatestUpdate[]
- libraries PipelineLibrary[]
- blocks - Specifies pipeline code.
- name string
- A user-friendly name for this pipeline. The name can be used to identify pipeline jobs in the UI.
- notifications PipelineNotification[]
- photon boolean
- A flag indicating whether to use the Photon engine. The default value is false.
- restartWindow PipelineRestartWindow
- rootPath string
- An optional string specifying the root path for this pipeline. It is used as the root directory when editing the pipeline in the Databricks user interface, and is added to sys.path when executing Python sources during pipeline execution.
- runAs PipelineRunAs
- runAsUserName string
- schema string
- The default schema (database) where tables are read from or published to. The presence of this attribute implies that the pipeline is in direct publishing mode.
- serverless boolean
- An optional flag indicating if serverless compute should be used for this Lakeflow Declarative Pipeline. Requires catalog to be set, as it can only be used with Unity Catalog.
- state string
- storage string
- A location on cloud storage where output data and metadata required for pipeline execution are stored. By default, tables are stored in a subdirectory of this location. Change of this parameter forces recreation of the pipeline. (Conflicts with catalog).
- tags {[key: string]: string}
- A map of tags associated with the pipeline. These are forwarded to the cluster as cluster tags, and are therefore subject to the same limitations. A maximum of 25 tags can be added to the pipeline.
- target string
- The name of a database (in either the Hive metastore or in a UC catalog) for persisting pipeline output data. Configuring the target setting allows you to view and query the pipeline output data from the Databricks UI.
- trigger PipelineTrigger
- url string
- URL of the Lakeflow Declarative Pipeline on the given workspace.
- allow_duplicate_names bool
- Optional boolean flag. If false, deployment will fail if the name conflicts with that of another pipeline. The default is false.
- budget_policy_id str
- Optional string specifying the ID of the budget policy for this Lakeflow Declarative Pipeline.
- catalog str
- The name of the catalog in Unity Catalog. Change of this parameter forces recreation of the pipeline. (Conflicts with storage).
- cause str
- channel str
- Optional name of the release channel for the Spark version used by the Lakeflow Declarative Pipeline. Supported values are: CURRENT (default) and PREVIEW.
- cluster_id str
- clusters Sequence[PipelineClusterArgs]
- blocks - Clusters to run the pipeline. If none is specified, pipelines will automatically select a default cluster configuration for the pipeline. Please note that Lakeflow Declarative Pipeline clusters support only a subset of attributes, as described in the documentation. Also, note that the autoscale block is extended with the mode parameter, which controls the autoscaling algorithm (possible values are ENHANCED for the new, enhanced autoscaling algorithm, or LEGACY for the old algorithm).
- configuration Mapping[str, str]
- An optional list of values to apply to the entire pipeline. Elements must be formatted as key:value pairs.
- continuous bool
- A flag indicating whether to run the pipeline continuously. The default value is false.
- creator_user_name str
- deployment PipelineDeploymentArgs
- Deployment type of this pipeline. Supports the following attributes:
- development bool
- A flag indicating whether to run the pipeline in development mode. The default value is false.
- edition str
- Optional name of the product edition. Supported values are: CORE, PRO, ADVANCED (default). Not required when serverless is set to true.
- environment PipelineEnvironmentArgs
- event_log PipelineEventLogArgs
- An optional block specifying a table where the LDP Event Log will be stored. Consists of the following fields:
- expected_last_modified int
- filters PipelineFiltersArgs
- Filters on which Pipeline packages to include in the deployed graph. This block consists of the following attributes:
- gateway_definition PipelineGatewayDefinitionArgs
- The definition of a gateway pipeline to support CDC. Consists of the following attributes:
- health str
- ingestion_definition PipelineIngestionDefinitionArgs
- last_modified int
- latest_updates Sequence[PipelineLatestUpdateArgs]
- libraries Sequence[PipelineLibraryArgs]
- blocks - Specifies pipeline code.
- name str
- A user-friendly name for this pipeline. The name can be used to identify pipeline jobs in the UI.
- notifications Sequence[PipelineNotificationArgs]
- photon bool
- A flag indicating whether to use the Photon engine. The default value is false.
- restart_window PipelineRestartWindowArgs
- root_path str
- An optional string specifying the root path for this pipeline. It is used as the root directory when editing the pipeline in the Databricks user interface, and is added to sys.path when executing Python sources during pipeline execution.
- run_as PipelineRunAsArgs
- run_as_user_name str
- schema str
- The default schema (database) where tables are read from or published to. The presence of this attribute implies that the pipeline is in direct publishing mode.
- serverless bool
- An optional flag indicating if serverless compute should be used for this Lakeflow Declarative Pipeline. Requires catalog to be set, as it can only be used with Unity Catalog.
- state str
- storage str
- A location on cloud storage where output data and metadata required for pipeline execution are stored. By default, tables are stored in a subdirectory of this location. Change of this parameter forces recreation of the pipeline. (Conflicts with catalog).
- tags Mapping[str, str]
- A map of tags associated with the pipeline. These are forwarded to the cluster as cluster tags, and are therefore subject to the same limitations. A maximum of 25 tags can be added to the pipeline.
- target str
- The name of a database (in either the Hive metastore or in a UC catalog) for persisting pipeline output data. Configuring the target setting allows you to view and query the pipeline output data from the Databricks UI.
- trigger PipelineTriggerArgs
- url str
- URL of the Lakeflow Declarative Pipeline on the given workspace.
- allowDuplicateNames Boolean
- Optional boolean flag. If false, deployment will fail if the name conflicts with that of another pipeline. The default is false.
- budgetPolicyId String
- Optional string specifying the ID of the budget policy for this Lakeflow Declarative Pipeline.
- catalog String
- The name of the catalog in Unity Catalog. Change of this parameter forces recreation of the pipeline. (Conflicts with storage).
- cause String
- channel String
- Optional name of the release channel for the Spark version used by the Lakeflow Declarative Pipeline. Supported values are: CURRENT (default) and PREVIEW.
- clusterId String
- clusters List<Property Map>
- blocks - Clusters to run the pipeline. If none is specified, pipelines will automatically select a default cluster configuration for the pipeline. Please note that Lakeflow Declarative Pipeline clusters support only a subset of attributes, as described in the documentation. Also, note that the autoscale block is extended with the mode parameter, which controls the autoscaling algorithm (possible values are ENHANCED for the new, enhanced autoscaling algorithm, or LEGACY for the old algorithm).
- configuration Map<String>
- An optional list of values to apply to the entire pipeline. Elements must be formatted as key:value pairs.
- continuous Boolean
- A flag indicating whether to run the pipeline continuously. The default value is false.
- creatorUserName String
- deployment Property Map
- Deployment type of this pipeline. Supports the following attributes:
- development Boolean
- A flag indicating whether to run the pipeline in development mode. The default value is false.
- edition String
- Optional name of the product edition. Supported values are: CORE, PRO, ADVANCED (default). Not required when serverless is set to true.
- environment Property Map
- eventLog Property Map
- An optional block specifying a table where the LDP Event Log will be stored. Consists of the following fields:
- expectedLastModified Number
- filters Property Map
- Filters on which Pipeline packages to include in the deployed graph. This block consists of the following attributes:
- gatewayDefinition Property Map
- The definition of a gateway pipeline to support CDC. Consists of the following attributes:
- health String
- ingestionDefinition Property Map
- lastModified Number
- latestUpdates List<Property Map>
- libraries List<Property Map>
- blocks - Specifies pipeline code.
- name String
- A user-friendly name for this pipeline. The name can be used to identify pipeline jobs in the UI.
- notifications List<Property Map>
- photon Boolean
- A flag indicating whether to use the Photon engine. The default value is false.
- restartWindow Property Map
- rootPath String
- An optional string specifying the root path for this pipeline. It is used as the root directory when editing the pipeline in the Databricks user interface, and is added to sys.path when executing Python sources during pipeline execution.
- runAs Property Map
- runAsUserName String
- schema String
- The default schema (database) where tables are read from or published to. The presence of this attribute implies that the pipeline is in direct publishing mode.
- serverless Boolean
- An optional flag indicating if serverless compute should be used for this Lakeflow Declarative Pipeline. Requires catalog to be set, as it can only be used with Unity Catalog.
- state String
- storage String
- A location on cloud storage where output data and metadata required for pipeline execution are stored. By default, tables are stored in a subdirectory of this location. Change of this parameter forces recreation of the pipeline. (Conflicts with catalog).
- tags Map<String>
- A map of tags associated with the pipeline. These are forwarded to the cluster as cluster tags, and are therefore subject to the same limitations. A maximum of 25 tags can be added to the pipeline.
- target String
- The name of a database (in either the Hive metastore or in a UC catalog) for persisting pipeline output data. Configuring the target setting allows you to view and query the pipeline output data from the Databricks UI.
- trigger Property Map
- url String
- URL of the Lakeflow Declarative Pipeline on the given workspace.
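To illustrate how a few of the optional arguments listed above fit together, here is a minimal TypeScript sketch (the catalog, schema, and name values are placeholders, not prescribed values):

import * as databricks from "@pulumi/databricks";

// Placeholder catalog/schema names; adjust to objects that exist in your workspace.
const tuned = new databricks.Pipeline("tuned", {
    name: "Tuned Pipeline",
    catalog: "main",
    schema: "ldp_demo",
    edition: "ADVANCED",        // CORE, PRO, or ADVANCED (the default)
    channel: "CURRENT",         // CURRENT (the default) or PREVIEW
    photon: true,               // enable the Photon engine
    development: true,          // run updates in development mode
    allowDuplicateNames: false, // fail deployment on a name conflict (the default)
});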
Supporting Types
PipelineCluster, PipelineClusterArgs    
- ApplyPolicyDefaultValues bool
- Autoscale PipelineClusterAutoscale
- AwsAttributes PipelineClusterAwsAttributes
- AzureAttributes PipelineClusterAzureAttributes
- ClusterLogConf PipelineClusterClusterLogConf
- CustomTags Dictionary<string, string>
- DriverInstancePoolId string
- DriverNodeTypeId string
- EnableLocalDiskEncryption bool
- GcpAttributes PipelineClusterGcpAttributes
- InitScripts List<PipelineClusterInitScript>
- InstancePoolId string
- Label string
- NodeTypeId string
- NumWorkers int
- PolicyId string
- SparkConf Dictionary<string, string>
- SparkEnvVars Dictionary<string, string>
- SshPublicKeys List<string>
- ApplyPolicyDefaultValues bool
- Autoscale PipelineClusterAutoscale
- AwsAttributes PipelineClusterAwsAttributes
- AzureAttributes PipelineClusterAzureAttributes
- ClusterLogConf PipelineClusterClusterLogConf
- CustomTags map[string]string
- DriverInstancePoolId string
- DriverNodeTypeId string
- EnableLocalDiskEncryption bool
- GcpAttributes PipelineClusterGcpAttributes
- InitScripts []PipelineClusterInitScript
- InstancePoolId string
- Label string
- NodeTypeId string
- NumWorkers int
- PolicyId string
- SparkConf map[string]string
- SparkEnvVars map[string]string
- SshPublicKeys []string
- applyPolicyDefaultValues Boolean
- autoscale PipelineClusterAutoscale
- awsAttributes PipelineClusterAwsAttributes
- azureAttributes PipelineClusterAzureAttributes
- clusterLogConf PipelineClusterClusterLogConf
- customTags Map<String,String>
- driverInstancePoolId String
- driverNodeTypeId String
- enableLocalDiskEncryption Boolean
- gcpAttributes PipelineClusterGcpAttributes
- initScripts List<PipelineClusterInitScript>
- instancePoolId String
- label String
- nodeTypeId String
- numWorkers Integer
- policyId String
- sparkConf Map<String,String>
- sparkEnvVars Map<String,String>
- sshPublicKeys List<String>
- applyPolicyDefaultValues boolean
- autoscale PipelineClusterAutoscale
- awsAttributes PipelineClusterAwsAttributes
- azureAttributes PipelineClusterAzureAttributes
- clusterLogConf PipelineClusterClusterLogConf
- customTags {[key: string]: string}
- driverInstancePoolId string
- driverNodeTypeId string
- enableLocalDiskEncryption boolean
- gcpAttributes PipelineClusterGcpAttributes
- initScripts PipelineClusterInitScript[]
- instancePoolId string
- label string
- nodeTypeId string
- numWorkers number
- policyId string
- sparkConf {[key: string]: string}
- sparkEnvVars {[key: string]: string}
- sshPublicKeys string[]
- apply_policy_default_values bool
- autoscale PipelineClusterAutoscale
- aws_attributes PipelineClusterAwsAttributes
- azure_attributes PipelineClusterAzureAttributes
- cluster_log_conf PipelineClusterClusterLogConf
- custom_tags Mapping[str, str]
- driver_instance_pool_id str
- driver_node_type_id str
- enable_local_disk_encryption bool
- gcp_attributes PipelineClusterGcpAttributes
- init_scripts Sequence[PipelineClusterInitScript]
- instance_pool_id str
- label str
- node_type_id str
- num_workers int
- policy_id str
- spark_conf Mapping[str, str]
- spark_env_vars Mapping[str, str]
- ssh_public_keys Sequence[str]
- applyPolicyDefaultValues Boolean
- autoscale Property Map
- awsAttributes Property Map
- azureAttributes Property Map
- clusterLogConf Property Map
- customTags Map<String>
- driverInstancePoolId String
- driverNodeTypeId String
- enableLocalDiskEncryption Boolean
- gcpAttributes Property Map
- initScripts List<Property Map>
- instancePoolId String
- label String
- nodeTypeId String
- numWorkers Number
- policyId String
- sparkConf Map<String>
- sparkEnvVars Map<String>
- sshPublicKeys List<String>
PipelineClusterAutoscale, PipelineClusterAutoscaleArgs      
- MaxWorkers int
- MinWorkers int
- Mode string
- MaxWorkers int
- MinWorkers int
- Mode string
- maxWorkers Integer
- minWorkers Integer
- mode String
- maxWorkers number
- minWorkers number
- mode string
- max_workers int
- min_workers int
- mode str
- maxWorkers Number
- minWorkers Number
- mode String
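The autoscale block above is the one referenced in the clusters description earlier. As a minimal sketch (node type and worker counts are illustrative assumptions), enhanced autoscaling can be enabled like this:

import * as databricks from "@pulumi/databricks";

// Illustrative cluster block using enhanced autoscaling.
const autoscaled = new databricks.Pipeline("autoscaled", {
    name: "Autoscaled Pipeline",
    catalog: "main",
    schema: "ldp_demo",
    clusters: [{
        label: "default",
        nodeTypeId: "i3.xlarge",   // assumed AWS node type
        autoscale: {
            minWorkers: 1,
            maxWorkers: 5,
            mode: "ENHANCED",      // or "LEGACY"
        },
    }],
});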
PipelineClusterAwsAttributes, PipelineClusterAwsAttributesArgs        
- Availability string
- EbsVolumeCount int
- EbsVolumeIops int
- EbsVolumeSize int
- EbsVolumeThroughput int
- EbsVolumeType string
- FirstOnDemand int
- InstanceProfileArn string
- SpotBidPricePercent int
- ZoneId string
- Availability string
- EbsVolumeCount int
- EbsVolumeIops int
- EbsVolumeSize int
- EbsVolumeThroughput int
- EbsVolumeType string
- FirstOnDemand int
- InstanceProfileArn string
- SpotBidPricePercent int
- ZoneId string
- availability String
- ebsVolumeCount Integer
- ebsVolumeIops Integer
- ebsVolumeSize Integer
- ebsVolumeThroughput Integer
- ebsVolumeType String
- firstOnDemand Integer
- instanceProfileArn String
- spotBidPricePercent Integer
- zoneId String
- availability string
- ebsVolumeCount number
- ebsVolumeIops number
- ebsVolumeSize number
- ebsVolumeThroughput number
- ebsVolumeType string
- firstOnDemand number
- instanceProfileArn string
- spotBidPricePercent number
- zoneId string
- availability str
- ebs_volume_count int
- ebs_volume_iops int
- ebs_volume_size int
- ebs_volume_throughput int
- ebs_volume_type str
- first_on_demand int
- instance_profile_arn str
- spot_bid_price_percent int
- zone_id str
- availability String
- ebsVolumeCount Number
- ebsVolumeIops Number
- ebsVolumeSize Number
- ebsVolumeThroughput Number
- ebsVolumeType String
- firstOnDemand Number
- instanceProfileArn String
- spotBidPricePercent Number
- zoneId String
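As a rough sketch of how these fields are typically combined (the availability mode, zone selection, and instance profile ARN below are assumptions for illustration, not values documented on this page):

import * as databricks from "@pulumi/databricks";

// Rough sketch: spot-backed pipeline cluster with an assumed instance profile.
const awsTuned = new databricks.Pipeline("aws_tuned", {
    name: "AWS Tuned Pipeline",
    catalog: "main",
    schema: "ldp_demo",
    clusters: [{
        label: "default",
        numWorkers: 2,
        awsAttributes: {
            availability: "SPOT_WITH_FALLBACK", // assumed availability mode
            firstOnDemand: 1,                   // keep one node (the driver) on-demand
            zoneId: "auto",                     // assumed zone selection
            instanceProfileArn: "arn:aws:iam::123456789012:instance-profile/pipelines", // hypothetical
        },
    }],
});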
PipelineClusterAzureAttributes, PipelineClusterAzureAttributesArgs        
- availability String
- firstOnDemand Number
- logAnalyticsInfo Property Map
- spotBidMaxPrice Number
PipelineClusterAzureAttributesLogAnalyticsInfo, PipelineClusterAzureAttributesLogAnalyticsInfoArgs              
- LogAnalyticsPrimaryKey string
- LogAnalyticsWorkspaceId string
- LogAnalyticsPrimaryKey string
- LogAnalyticsWorkspaceId string
- logAnalyticsPrimaryKey String
- logAnalyticsWorkspaceId String
- logAnalyticsPrimaryKey string
- logAnalyticsWorkspaceId string
- logAnalyticsPrimaryKey String
- logAnalyticsWorkspaceId String
PipelineClusterClusterLogConf, PipelineClusterClusterLogConfArgs          
PipelineClusterClusterLogConfDbfs, PipelineClusterClusterLogConfDbfsArgs            
- Destination string
- Destination string
- destination String
- destination string
- destination str
- destination String
PipelineClusterClusterLogConfS3, PipelineClusterClusterLogConfS3Args            
- Destination string
- CannedAcl string
- EnableEncryption bool
- EncryptionType string
- Endpoint string
- KmsKey string
- Region string
- Destination string
- CannedAcl string
- EnableEncryption bool
- EncryptionType string
- Endpoint string
- KmsKey string
- Region string
- destination String
- cannedAcl String
- enableEncryption Boolean
- encryptionType String
- endpoint String
- kmsKey String
- region String
- destination string
- cannedAcl string
- enableEncryption boolean
- encryptionType string
- endpoint string
- kmsKey string
- region string
- destination str
- canned_acl str
- enable_encryption bool
- encryption_type str
- endpoint str
- kms_key str
- region str
- destination String
- cannedAcl String
- enableEncryption Boolean
- encryptionType String
- endpoint String
- kmsKey String
- region String
PipelineClusterClusterLogConfVolumes, PipelineClusterClusterLogConfVolumesArgs            
- Destination string
- Destination string
- destination String
- destination string
- destination str
- destination String
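The cluster log configuration accepts one destination block (dbfs, s3, or volumes, per the types above). A minimal sketch, assuming a Unity Catalog volume path that you would replace with your own:

import * as databricks from "@pulumi/databricks";

// Sketch: deliver pipeline cluster logs to a (hypothetical) Unity Catalog volume.
const logged = new databricks.Pipeline("logged", {
    name: "Logged Pipeline",
    catalog: "main",
    schema: "ldp_demo",
    clusters: [{
        label: "default",
        numWorkers: 2,
        clusterLogConf: {
            volumes: {
                destination: "/Volumes/main/default/cluster_logs", // hypothetical volume path
            },
        },
    }],
});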
PipelineClusterGcpAttributes, PipelineClusterGcpAttributesArgs        
- Availability string
- FirstOnDemand int
- GoogleServiceAccount string
- LocalSsdCount int
- ZoneId string
- Availability string
- FirstOnDemand int
- GoogleServiceAccount string
- LocalSsdCount int
- ZoneId string
- availability String
- firstOnDemand Integer
- googleServiceAccount String
- localSsdCount Integer
- zoneId String
- availability string
- firstOnDemand number
- googleServiceAccount string
- localSsdCount number
- zoneId string
- availability str
- first_on_demand int
- google_service_account str
- local_ssd_count int
- zone_id str
- availability String
- firstOnDemand Number
- googleServiceAccount String
- localSsdCount Number
- zoneId String
PipelineClusterInitScript, PipelineClusterInitScriptArgs        
- Abfss PipelineClusterInitScriptAbfss
- Dbfs PipelineClusterInitScriptDbfs
- File PipelineClusterInitScriptFile
- Specifies the path to a file in the Databricks Workspace to include as a source. The actual path is specified in the path attribute inside the block.
- Gcs PipelineClusterInitScriptGcs
- S3 PipelineClusterInitScriptS3
- Volumes PipelineClusterInitScriptVolumes
- Workspace PipelineClusterInitScriptWorkspace
- Abfss PipelineClusterInitScriptAbfss
- Dbfs PipelineClusterInitScriptDbfs
- File PipelineClusterInitScriptFile
- Specifies the path to a file in the Databricks Workspace to include as a source. The actual path is specified in the path attribute inside the block.
- Gcs PipelineClusterInitScriptGcs
- S3 PipelineClusterInitScriptS3
- Volumes PipelineClusterInitScriptVolumes
- Workspace PipelineClusterInitScriptWorkspace
- abfss PipelineClusterInitScriptAbfss
- dbfs PipelineClusterInitScriptDbfs
- file PipelineClusterInitScriptFile
- Specifies the path to a file in the Databricks Workspace to include as a source. The actual path is specified in the path attribute inside the block.
- gcs PipelineClusterInitScriptGcs
- s3 PipelineClusterInitScriptS3
- volumes PipelineClusterInitScriptVolumes
- workspace PipelineClusterInitScriptWorkspace
- abfss PipelineClusterInitScriptAbfss
- dbfs PipelineClusterInitScriptDbfs
- file PipelineClusterInitScriptFile
- Specifies the path to a file in the Databricks Workspace to include as a source. The actual path is specified in the path attribute inside the block.
- gcs PipelineClusterInitScriptGcs
- s3 PipelineClusterInitScriptS3
- volumes PipelineClusterInitScriptVolumes
- workspace PipelineClusterInitScriptWorkspace
- abfss PipelineClusterInitScriptAbfss
- dbfs PipelineClusterInitScriptDbfs
- file PipelineClusterInitScriptFile
- Specifies the path to a file in the Databricks Workspace to include as a source. The actual path is specified in the path attribute inside the block.
- gcs PipelineClusterInitScriptGcs
- s3 PipelineClusterInitScriptS3
- volumes PipelineClusterInitScriptVolumes
- workspace PipelineClusterInitScriptWorkspace
- abfss Property Map
- dbfs Property Map
- file Property Map
- Specifies the path to a file in the Databricks Workspace to include as a source. The actual path is specified in the path attribute inside the block.
- gcs Property Map
- s3 Property Map
- volumes Property Map
- workspace Property Map
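A minimal sketch of attaching an init script to a pipeline cluster, assuming the script lives in a Unity Catalog volume (the path is hypothetical):

import * as databricks from "@pulumi/databricks";

// Sketch: run a (hypothetical) init script from a Unity Catalog volume on the pipeline cluster.
const withInit = new databricks.Pipeline("with_init", {
    name: "Pipeline With Init Script",
    catalog: "main",
    schema: "ldp_demo",
    clusters: [{
        label: "default",
        numWorkers: 2,
        initScripts: [{
            volumes: {
                destination: "/Volumes/main/default/init/install_deps.sh", // hypothetical script path
            },
        }],
    }],
});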
PipelineClusterInitScriptAbfss, PipelineClusterInitScriptAbfssArgs          
- Destination string
- Destination string
- destination String
- destination string
- destination str
- destination String
PipelineClusterInitScriptDbfs, PipelineClusterInitScriptDbfsArgs          
- Destination string
- Destination string
- destination String
- destination string
- destination str
- destination String
PipelineClusterInitScriptFile, PipelineClusterInitScriptFileArgs          
- Destination string
- Destination string
- destination String
- destination string
- destination str
- destination String
PipelineClusterInitScriptGcs, PipelineClusterInitScriptGcsArgs          
- Destination string
- Destination string
- destination String
- destination string
- destination str
- destination String
PipelineClusterInitScriptS3, PipelineClusterInitScriptS3Args          
- Destination string
- CannedAcl string
- EnableEncryption bool
- EncryptionType string
- Endpoint string
- KmsKey string
- Region string
- Destination string
- CannedAcl string
- EnableEncryption bool
- EncryptionType string
- Endpoint string
- KmsKey string
- Region string
- destination String
- cannedAcl String
- enableEncryption Boolean
- encryptionType String
- endpoint String
- kmsKey String
- region String
- destination string
- cannedAcl string
- enableEncryption boolean
- encryptionType string
- endpoint string
- kmsKey string
- region string
- destination str
- canned_acl str
- enable_encryption bool
- encryption_type str
- endpoint str
- kms_key str
- region str
- destination String
- cannedAcl String
- enableEncryption Boolean
- encryptionType String
- endpoint String
- kmsKey String
- region String
PipelineClusterInitScriptVolumes, PipelineClusterInitScriptVolumesArgs          
- Destination string
- Destination string
- destination String
- destination string
- destination str
- destination String
PipelineClusterInitScriptWorkspace, PipelineClusterInitScriptWorkspaceArgs          
- Destination string
- Destination string
- destination String
- destination string
- destination str
- destination String
PipelineDeployment, PipelineDeploymentArgs    
- Kind string
- The deployment method that manages the pipeline.
- MetadataFilePath string
- The path to the file containing metadata about the deployment.
- Kind string
- The deployment method that manages the pipeline.
- MetadataFilePath string
- The path to the file containing metadata about the deployment.
- kind String
- The deployment method that manages the pipeline.
- metadataFilePath String
- The path to the file containing metadata about the deployment.
- kind string
- The deployment method that manages the pipeline.
- metadataFilePath string
- The path to the file containing metadata about the deployment.
- kind str
- The deployment method that manages the pipeline.
- metadata_file_path str
- The path to the file containing metadata about the deployment.
- kind String
- The deployment method that manages the pipeline.
- metadataFilePath String
- The path to the file containing metadata about the deployment.
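Deployment metadata is normally written by the tool that deploys the pipeline rather than by hand. Purely as an illustration (the BUNDLE kind and the metadata path are assumptions for this sketch, not values documented on this page):

import * as databricks from "@pulumi/databricks";

// Illustration only: mark a pipeline as managed by a bundle-style deployment.
// "BUNDLE" and the metadata file path are assumptions for this sketch.
const bundled = new databricks.Pipeline("bundled", {
    name: "Bundle Managed Pipeline",
    catalog: "main",
    schema: "ldp_demo",
    deployment: {
        kind: "BUNDLE",
        metadataFilePath: "/Workspace/Users/user.name/.bundle/my_bundle/metadata.json", // hypothetical
    },
});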
PipelineEnvironment, PipelineEnvironmentArgs    
- Dependencies List<string>
- A list of pip dependencies, as supported by the version of pip in this environment. Each dependency is a pip requirement file line. See API docs for more information. An example is shown once after this list.
- Dependencies []string
- A list of pip dependencies, as supported by the version of pip in this environment. Each dependency is a pip requirement file line. See API docs for more information. An example is shown once after this list.
- dependencies List<String>
- A list of pip dependencies, as supported by the version of pip in this environment. Each dependency is a pip requirement file line. See API docs for more information. An example is shown once after this list.
- dependencies string[]
- A list of pip dependencies, as supported by the version of pip in this environment. Each dependency is a pip requirement file line. See API docs for more information. An example is shown once after this list.
- dependencies Sequence[str]
- A list of pip dependencies, as supported by the version of pip in this environment. Each dependency is a pip requirement file line. See API docs for more information. An example is shown once after this list.
- dependencies List<String>
- A list of pip dependencies, as supported by the version of pip in this environment. Each dependency is a pip requirement file line. See API docs for more information. An example is shown once after this list.
Example:
import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";
const _this = new databricks.Pipeline("this", {
    name: "Serverless demo",
    serverless: true,
    catalog: "main",
    schema: "ldp_demo",
    environment: {
        dependencies: [
            "foo==0.0.1",
            "-r /Workspace/Users/user.name/my-pipeline/requirements.txt",
            "/Volumes/main/default/libs/my_lib.whl",
        ],
    },
});
import pulumi
import pulumi_databricks as databricks
this = databricks.Pipeline("this",
    name="Serverless demo",
    serverless=True,
    catalog="main",
    schema="ldp_demo",
    environment={
        "dependencies": [
            "foo==0.0.1",
            "-r /Workspace/Users/user.name/my-pipeline/requirements.txt",
            "/Volumes/main/default/libs/my_lib.whl",
        ],
    })
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Databricks = Pulumi.Databricks;
return await Deployment.RunAsync(() =>
{
    var @this = new Databricks.Pipeline("this", new()
    {
        Name = "Serverless demo",
        Serverless = true,
        Catalog = "main",
        Schema = "ldp_demo",
        Environment = new Databricks.Inputs.PipelineEnvironmentArgs
        {
            Dependencies = new[]
            {
                "foo==0.0.1",
                "-r /Workspace/Users/user.name/my-pipeline/requirements.txt",
                "/Volumes/main/default/libs/my_lib.whl",
            },
        },
    });
});
package main
import (
	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := databricks.NewPipeline(ctx, "this", &databricks.PipelineArgs{
			Name:       pulumi.String("Serverless demo"),
			Serverless: pulumi.Bool(true),
			Catalog:    pulumi.String("main"),
			Schema:     pulumi.String("ldp_demo"),
			Environment: &databricks.PipelineEnvironmentArgs{
				Dependencies: pulumi.StringArray{
					pulumi.String("foo==0.0.1"),
					pulumi.String("-r /Workspace/Users/user.name/my-pipeline/requirements.txt"),
					pulumi.String("/Volumes/main/default/libs/my_lib.whl"),
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.databricks.Pipeline;
import com.pulumi.databricks.PipelineArgs;
import com.pulumi.databricks.inputs.PipelineEnvironmentArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        var this_ = new Pipeline("this", PipelineArgs.builder()
            .name("Serverless demo")
            .serverless(true)
            .catalog("main")
            .schema("ldp_demo")
            .environment(PipelineEnvironmentArgs.builder()
                .dependencies(
                    "foo==0.0.1",
                    "-r /Workspace/Users/user.name/my-pipeline/requirements.txt",
                    "/Volumes/main/default/libs/my_lib.whl")
                .build())
            .build());
    }
}
resources:
  this:
    type: databricks:Pipeline
    properties:
      name: Serverless demo
      serverless: true
      catalog: main
      schema: ldp_demo
      environment:
        dependencies:
          - foo==0.0.1
          - -r /Workspace/Users/user.name/my-pipeline/requirements.txt
          - /Volumes/main/default/libs/my_lib.whl
PipelineEventLog, PipelineEventLogArgs      
PipelineFilters, PipelineFiltersArgs    
PipelineGatewayDefinition, PipelineGatewayDefinitionArgs      
- ConnectionName string
- GatewayStorageCatalog string
- Required, Immutable. The name of the catalog for the gateway pipeline's storage location.
- GatewayStorageSchema string
- Required, Immutable. The name of the schema for the gateway pipeline's storage location.
- ConnectionId string
- Immutable. The Unity Catalog connection this gateway pipeline uses to communicate with the source.
- GatewayStorageName string
- Required. The Unity Catalog-compatible naming for the gateway storage location. This is the destination to use for the data that is extracted by the gateway. The Lakeflow Declarative Pipelines system will automatically create the storage location under the catalog and schema.
- ConnectionName string
- GatewayStorageCatalog string
- Required, Immutable. The name of the catalog for the gateway pipeline's storage location.
- GatewayStorageSchema string
- Required, Immutable. The name of the schema for the gateway pipeline's storage location.
- ConnectionId string
- Immutable. The Unity Catalog connection this gateway pipeline uses to communicate with the source.
- GatewayStorageName string
- Required. The Unity Catalog-compatible naming for the gateway storage location. This is the destination to use for the data that is extracted by the gateway. The Lakeflow Declarative Pipelines system will automatically create the storage location under the catalog and schema.
- connectionName String
- gatewayStorageCatalog String
- Required, Immutable. The name of the catalog for the gateway pipeline's storage location.
- gatewayStorageSchema String
- Required, Immutable. The name of the schema for the gateway pipeline's storage location.
- connectionId String
- Immutable. The Unity Catalog connection this gateway pipeline uses to communicate with the source.
- gatewayStorageName String
- Required. The Unity Catalog-compatible naming for the gateway storage location. This is the destination to use for the data that is extracted by the gateway. The Lakeflow Declarative Pipelines system will automatically create the storage location under the catalog and schema.
- connectionName string
- gatewayStorageCatalog string
- Required, Immutable. The name of the catalog for the gateway pipeline's storage location.
- gatewayStorageSchema string
- Required, Immutable. The name of the schema for the gateway pipeline's storage location.
- connectionId string
- Immutable. The Unity Catalog connection this gateway pipeline uses to communicate with the source.
- gatewayStorageName string
- Required. The Unity Catalog-compatible naming for the gateway storage location. This is the destination to use for the data that is extracted by the gateway. The Lakeflow Declarative Pipelines system will automatically create the storage location under the catalog and schema.
- connection_name str
- gateway_storage_catalog str
- Required, Immutable. The name of the catalog for the gateway pipeline's storage location.
- gateway_storage_schema str
- Required, Immutable. The name of the schema for the gateway pipeline's storage location.
- connection_id str
- Immutable. The Unity Catalog connection this gateway pipeline uses to communicate with the source.
- gateway_storage_name str
- Required. The Unity Catalog-compatible naming for the gateway storage location. This is the destination to use for the data that is extracted by the gateway. The Lakeflow Declarative Pipelines system will automatically create the storage location under the catalog and schema.
- connectionName String
- gatewayStorageCatalog String
- Required, Immutable. The name of the catalog for the gateway pipeline's storage location.
- gatewayStorageSchema String
- Required, Immutable. The name of the schema for the gateway pipeline's storage location.
- connectionId String
- Immutable. The Unity Catalog connection this gateway pipeline uses to communicate with the source.
- gatewayStorageName String
- Required. The Unity Catalog-compatible naming for the gateway storage location. This is the destination to use for the data that is extracted by the gateway. The Lakeflow Declarative Pipelines system will automatically create the storage location under the catalog and schema. A configuration sketch follows this list.
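For orientation, here is a minimal TypeScript sketch of a gateway pipeline. It assumes the gatewayDefinition input on databricks.Pipeline accepts the fields listed above; the connection name and storage catalog/schema/location names are placeholders, not values from this reference.
import * as databricks from "@pulumi/databricks";
// Hypothetical gateway pipeline; field names come from the listing above,
// while every concrete value is an illustrative placeholder.
const gateway = new databricks.Pipeline("ingestion_gateway", {
    name: "CDC gateway",
    gatewayDefinition: {
        connectionName: "my_source_connection",    // assumed Unity Catalog connection name
        gatewayStorageCatalog: "main",             // Required, Immutable
        gatewayStorageSchema: "cdc_staging",       // Required, Immutable
        gatewayStorageName: "cdc_gateway_storage", // Required storage location name
    },
});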
PipelineIngestionDefinition, PipelineIngestionDefinitionArgs      
PipelineIngestionDefinitionObject, PipelineIngestionDefinitionObjectArgs        
- Report
PipelineIngestionDefinitionObjectReport
- Schema
PipelineIngestionDefinitionObjectSchema
- The default schema (database) where tables are read from or published to. The presence of this attribute implies that the pipeline is in direct publishing mode.
- Table
PipelineIngestionDefinitionObjectTable
- Report
PipelineIngestionDefinitionObjectReport
- Schema
PipelineIngestionDefinitionObjectSchema
- The default schema (database) where tables are read from or published to. The presence of this attribute implies that the pipeline is in direct publishing mode.
- Table
PipelineIngestionDefinitionObjectTable
- report
PipelineIngestionDefinitionObjectReport
- schema
PipelineIngestionDefinitionObjectSchema
- The default schema (database) where tables are read from or published to. The presence of this attribute implies that the pipeline is in direct publishing mode.
- table
PipelineIngestionDefinitionObjectTable
- report
PipelineIngestionDefinitionObjectReport
- schema
PipelineIngestionDefinitionObjectSchema
- The default schema (database) where tables are read from or published to. The presence of this attribute implies that the pipeline is in direct publishing mode.
- table
PipelineIngestionDefinitionObjectTable
- report
PipelineIngestionDefinitionObjectReport
- schema
PipelineIngestionDefinitionObjectSchema
- The default schema (database) where tables are read from or published to. The presence of this attribute implies that the pipeline is in direct publishing mode.
- table
PipelineIngestionDefinitionObjectTable
- report Property Map
- schema Property Map
- The default schema (database) where tables are read from or published to. The presence of this attribute implies that the pipeline is in direct publishing mode. A sketch of the objects list follows.
- table Property Map
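As a rough illustration, the TypeScript sketch below shows one way the ingestion objects described above could be populated with a schema entry and a table entry. The ingestionDefinition property name, the objects and connectionName field names on it, and all catalog/schema/table values are assumptions for illustration, not taken from this reference.
import * as databricks from "@pulumi/databricks";
// Hypothetical managed ingestion pipeline; only the per-object field names
// (schema/table, source*/destination*) are confirmed by the listings above.
const ingestion = new databricks.Pipeline("managed_ingestion", {
    name: "Ingestion demo",
    ingestionDefinition: {
        connectionName: "my_source_connection", // assumed field and value
        objects: [
            {
                // Replicate every table in one source schema.
                schema: {
                    sourceSchema: "sales",
                    destinationCatalog: "main",
                    destinationSchema: "sales_raw",
                },
            },
            {
                // Replicate a single table.
                table: {
                    sourceCatalog: "src",
                    sourceSchema: "sales",
                    sourceTable: "orders",
                    destinationCatalog: "main",
                    destinationSchema: "sales_raw",
                    destinationTable: "orders",
                },
            },
        ],
    },
});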
PipelineIngestionDefinitionObjectReport, PipelineIngestionDefinitionObjectReportArgs          
- destinationCatalog String
- destinationSchema String
- sourceUrl String
- destinationTable String
- tableConfiguration Property Map
PipelineIngestionDefinitionObjectReportTableConfiguration, PipelineIngestionDefinitionObjectReportTableConfigurationArgs              
- ExcludeColumns List<string>
- IncludeColumns List<string>
- PrimaryKeys List<string>
- QueryBasedConnectorConfig PipelineIngestionDefinitionObjectReportTableConfigurationQueryBasedConnectorConfig
- SalesforceIncludeFormulaFields bool
- ScdType string
- SequenceBies List<string>
- ExcludeColumns []string
- IncludeColumns []string
- PrimaryKeys []string
- QueryBasedConnectorConfig PipelineIngestionDefinitionObjectReportTableConfigurationQueryBasedConnectorConfig
- SalesforceIncludeFormulaFields bool
- ScdType string
- SequenceBies []string
- excludeColumns List<String>
- includeColumns List<String>
- primaryKeys List<String>
- queryBasedConnectorConfig PipelineIngestionDefinitionObjectReportTableConfigurationQueryBasedConnectorConfig
- salesforceIncludeFormulaFields Boolean
- scdType String
- sequenceBies List<String>
- excludeColumns string[]
- includeColumns string[]
- primaryKeys string[]
- queryBasedConnectorConfig PipelineIngestionDefinitionObjectReportTableConfigurationQueryBasedConnectorConfig
- salesforceIncludeFormulaFields boolean
- scdType string
- sequenceBies string[]
- exclude_columns Sequence[str]
- include_columns Sequence[str]
- primary_keys Sequence[str]
- query_based_connector_config PipelineIngestionDefinitionObjectReportTableConfigurationQueryBasedConnectorConfig
- salesforce_include_formula_fields bool
- scd_type str
- sequence_bies Sequence[str]
- excludeColumns List<String>
- includeColumns List<String>
- primaryKeys List<String>
- queryBasedConnectorConfig Property Map
- salesforceIncludeFormulaFields Boolean
- scdType String
- sequenceBies List<String>
PipelineIngestionDefinitionObjectReportTableConfigurationQueryBasedConnectorConfig, PipelineIngestionDefinitionObjectReportTableConfigurationQueryBasedConnectorConfigArgs                      
- CursorColumns List<string>
- DeletionCondition string
- HardDeletionSyncMinIntervalInSeconds int
- CursorColumns []string
- DeletionCondition string
- HardDeletionSyncMinIntervalInSeconds int
- cursorColumns List<String>
- deletionCondition String
- hardDeletionSyncMinIntervalInSeconds Integer
- cursorColumns string[]
- deletionCondition string
- hardDeletionSyncMinIntervalInSeconds number
- cursor_columns Sequence[str]
- deletion_condition str
- hard_deletion_sync_min_interval_in_seconds int
- cursorColumns List<String>
- deletionCondition String
- hardDeletionSyncMinIntervalInSeconds Number
PipelineIngestionDefinitionObjectSchema, PipelineIngestionDefinitionObjectSchemaArgs          
- destinationCatalog String
- destinationSchema String
- sourceSchema String
- sourceCatalog String
- tableConfiguration Property Map
PipelineIngestionDefinitionObjectSchemaTableConfiguration, PipelineIngestionDefinitionObjectSchemaTableConfigurationArgs              
- ExcludeColumns List<string>
- IncludeColumns List<string>
- PrimaryKeys List<string>
- QueryBasedConnectorConfig PipelineIngestionDefinitionObjectSchemaTableConfigurationQueryBasedConnectorConfig
- SalesforceIncludeFormulaFields bool
- ScdType string
- SequenceBies List<string>
- ExcludeColumns []string
- IncludeColumns []string
- PrimaryKeys []string
- QueryBasedConnectorConfig PipelineIngestionDefinitionObjectSchemaTableConfigurationQueryBasedConnectorConfig
- SalesforceIncludeFormulaFields bool
- ScdType string
- SequenceBies []string
- excludeColumns List<String>
- includeColumns List<String>
- primaryKeys List<String>
- queryBasedConnectorConfig PipelineIngestionDefinitionObjectSchemaTableConfigurationQueryBasedConnectorConfig
- salesforceIncludeFormulaFields Boolean
- scdType String
- sequenceBies List<String>
- excludeColumns string[]
- includeColumns string[]
- primaryKeys string[]
- queryBasedConnectorConfig PipelineIngestionDefinitionObjectSchemaTableConfigurationQueryBasedConnectorConfig
- salesforceIncludeFormulaFields boolean
- scdType string
- sequenceBies string[]
- exclude_columns Sequence[str]
- include_columns Sequence[str]
- primary_keys Sequence[str]
- query_based_connector_config PipelineIngestionDefinitionObjectSchemaTableConfigurationQueryBasedConnectorConfig
- salesforce_include_formula_fields bool
- scd_type str
- sequence_bies Sequence[str]
- excludeColumns List<String>
- includeColumns List<String>
- primaryKeys List<String>
- queryBasedConnectorConfig Property Map
- salesforceIncludeFormulaFields Boolean
- scdType String
- sequenceBies List<String>
PipelineIngestionDefinitionObjectSchemaTableConfigurationQueryBasedConnectorConfig, PipelineIngestionDefinitionObjectSchemaTableConfigurationQueryBasedConnectorConfigArgs                      
- CursorColumns List<string>
- DeletionCondition string
- HardDeletionSyncMinIntervalInSeconds int
- CursorColumns []string
- DeletionCondition string
- HardDeletionSyncMinIntervalInSeconds int
- cursorColumns List<String>
- deletionCondition String
- hardDeletionSyncMinIntervalInSeconds Integer
- cursorColumns string[]
- deletionCondition string
- hardDeletionSyncMinIntervalInSeconds number
- cursor_columns Sequence[str]
- deletion_condition str
- hard_deletion_sync_min_interval_in_seconds int
- cursorColumns List<String>
- deletionCondition String
- hardDeletionSyncMinIntervalInSeconds Number
PipelineIngestionDefinitionObjectTable, PipelineIngestionDefinitionObjectTableArgs          
- DestinationCatalog string
- DestinationSchema string
- SourceTable string
- DestinationTable string
- SourceCatalog string
- SourceSchema string
- TableConfiguration PipelineIngestionDefinitionObjectTableTableConfiguration
- DestinationCatalog string
- DestinationSchema string
- SourceTable string
- DestinationTable string
- SourceCatalog string
- SourceSchema string
- TableConfiguration PipelineIngestionDefinitionObjectTableTableConfiguration
- destinationCatalog String
- destinationSchema String
- sourceTable String
- destinationTable String
- sourceCatalog String
- sourceSchema String
- tableConfiguration PipelineIngestionDefinitionObjectTableTableConfiguration
- destinationCatalog string
- destinationSchema string
- sourceTable string
- destinationTable string
- sourceCatalog string
- sourceSchema string
- tableConfiguration PipelineIngestionDefinitionObjectTableTableConfiguration
- destinationCatalog String
- destinationSchema String
- sourceTable String
- destinationTable String
- sourceCatalog String
- sourceSchema String
- tableConfiguration Property Map
PipelineIngestionDefinitionObjectTableTableConfiguration, PipelineIngestionDefinitionObjectTableTableConfigurationArgs              
- ExcludeColumns List<string>
- IncludeColumns List<string>
- PrimaryKeys List<string>
- QueryBasedConnectorConfig PipelineIngestionDefinitionObjectTableTableConfigurationQueryBasedConnectorConfig
- SalesforceIncludeFormulaFields bool
- ScdType string
- SequenceBies List<string>
- ExcludeColumns []string
- IncludeColumns []string
- PrimaryKeys []string
- QueryBasedConnectorConfig PipelineIngestionDefinitionObjectTableTableConfigurationQueryBasedConnectorConfig
- SalesforceIncludeFormulaFields bool
- ScdType string
- SequenceBies []string
- excludeColumns List<String>
- includeColumns List<String>
- primaryKeys List<String>
- queryBasedConnectorConfig PipelineIngestionDefinitionObjectTableTableConfigurationQueryBasedConnectorConfig
- salesforceIncludeFormulaFields Boolean
- scdType String
- sequenceBies List<String>
- excludeColumns string[]
- includeColumns string[]
- primaryKeys string[]
- queryBasedConnectorConfig PipelineIngestionDefinitionObjectTableTableConfigurationQueryBasedConnectorConfig
- salesforceIncludeFormulaFields boolean
- scdType string
- sequenceBies string[]
- exclude_columns Sequence[str]
- include_columns Sequence[str]
- primary_keys Sequence[str]
- query_based_connector_config PipelineIngestionDefinitionObjectTableTableConfigurationQueryBasedConnectorConfig
- salesforce_include_formula_fields bool
- scd_type str
- sequence_bies Sequence[str]
- excludeColumns List<String>
- includeColumns List<String>
- primaryKeys List<String>
- queryBasedConnectorConfig Property Map
- salesforceIncludeFormulaFields Boolean
- scdType String
- sequenceBies List<String>
PipelineIngestionDefinitionObjectTableTableConfigurationQueryBasedConnectorConfig, PipelineIngestionDefinitionObjectTableTableConfigurationQueryBasedConnectorConfigArgs                      
- CursorColumns List<string>
- DeletionCondition string
- HardDeletionSyncMinIntervalInSeconds int
- CursorColumns []string
- DeletionCondition string
- HardDeletionSyncMinIntervalInSeconds int
- cursorColumns List<String>
- deletionCondition String
- hardDeletionSyncMinIntervalInSeconds Integer
- cursorColumns string[]
- deletionCondition string
- hardDeletionSyncMinIntervalInSeconds number
- cursor_columns Sequence[str]
- deletion_condition str
- hard_deletion_sync_min_interval_in_seconds int
- cursorColumns List<String>
- deletionCondition String
- hardDeletionSyncMinIntervalInSeconds Number
PipelineIngestionDefinitionSourceConfiguration, PipelineIngestionDefinitionSourceConfigurationArgs          
- Catalog
PipelineIngestionDefinitionSourceConfigurationCatalog
- The name of catalog in Unity Catalog. Change of this parameter forces recreation of the pipeline. (Conflicts with storage).
- Catalog
PipelineIngestionDefinitionSourceConfigurationCatalog
- The name of catalog in Unity Catalog. Change of this parameter forces recreation of the pipeline. (Conflicts with storage).
- catalog
PipelineIngestionDefinitionSourceConfigurationCatalog
- The name of catalog in Unity Catalog. Change of this parameter forces recreation of the pipeline. (Conflicts with storage).
- catalog
PipelineIngestionDefinitionSourceConfigurationCatalog
- The name of catalog in Unity Catalog. Change of this parameter forces recreation of the pipeline. (Conflicts with storage).
- catalog
PipelineIngestionDefinitionSourceConfigurationCatalog
- The name of catalog in Unity Catalog. Change of this parameter forces recreation of the pipeline. (Conflicts with storage).
- catalog Property Map
- The name of catalog in Unity Catalog. Change of this parameter forces recreation of the pipeline. (Conflicts with storage).
PipelineIngestionDefinitionSourceConfigurationCatalog, PipelineIngestionDefinitionSourceConfigurationCatalogArgs            
PipelineIngestionDefinitionSourceConfigurationCatalogPostgres, PipelineIngestionDefinitionSourceConfigurationCatalogPostgresArgs              
PipelineIngestionDefinitionSourceConfigurationCatalogPostgresSlotConfig, PipelineIngestionDefinitionSourceConfigurationCatalogPostgresSlotConfigArgs                  
- PublicationName string
- SlotName string
- PublicationName string
- SlotName string
- publicationName String
- slotName String
- publicationName string
- slotName string
- publication_name str
- slot_name str
- publicationName String
- slotName String
PipelineIngestionDefinitionTableConfiguration, PipelineIngestionDefinitionTableConfigurationArgs          
- ExcludeColumns List<string>
- IncludeColumns List<string>
- PrimaryKeys List<string>
- QueryBasedConnectorConfig PipelineIngestionDefinitionTableConfigurationQueryBasedConnectorConfig
- SalesforceIncludeFormulaFields bool
- ScdType string
- SequenceBies List<string>
- ExcludeColumns []string
- IncludeColumns []string
- PrimaryKeys []string
- QueryBasedConnectorConfig PipelineIngestionDefinitionTableConfigurationQueryBasedConnectorConfig
- SalesforceIncludeFormulaFields bool
- ScdType string
- SequenceBies []string
- excludeColumns List<String>
- includeColumns List<String>
- primaryKeys List<String>
- queryBasedConnectorConfig PipelineIngestionDefinitionTableConfigurationQueryBasedConnectorConfig
- salesforceIncludeFormulaFields Boolean
- scdType String
- sequenceBies List<String>
- excludeColumns string[]
- includeColumns string[]
- primaryKeys string[]
- queryBasedConnectorConfig PipelineIngestionDefinitionTableConfigurationQueryBasedConnectorConfig
- salesforceIncludeFormulaFields boolean
- scdType string
- sequenceBies string[]
- exclude_columns Sequence[str]
- include_columns Sequence[str]
- primary_keys Sequence[str]
- query_based_connector_config PipelineIngestionDefinitionTableConfigurationQueryBasedConnectorConfig
- salesforce_include_formula_fields bool
- scd_type str
- sequence_bies Sequence[str]
- excludeColumns List<String>
- includeColumns List<String>
- primaryKeys List<String>
- queryBasedConnectorConfig Property Map
- salesforceIncludeFormulaFields Boolean
- scdType String
- sequenceBies List<String>
PipelineIngestionDefinitionTableConfigurationQueryBasedConnectorConfig, PipelineIngestionDefinitionTableConfigurationQueryBasedConnectorConfigArgs                  
- CursorColumns List<string>
- DeletionCondition string
- HardDeletionSyncMinIntervalInSeconds int
- CursorColumns []string
- DeletionCondition string
- HardDeletionSyncMinIntervalInSeconds int
- cursorColumns List<String>
- deletionCondition String
- hardDeletionSyncMinIntervalInSeconds Integer
- cursorColumns string[]
- deletionCondition string
- hardDeletionSyncMinIntervalInSeconds number
- cursor_columns Sequence[str]
- deletion_condition str
- hard_deletion_sync_min_interval_in_seconds int
- cursorColumns List<String>
- deletionCondition String
- hardDeletionSyncMinIntervalInSeconds Number
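To make the table-configuration fields above concrete, here is a hedged TypeScript sketch that applies a tableConfiguration at the ingestion-definition level. Only the field names come from the listings above; the ingestionDefinition and connectionName names, the column names, and the "SCD_TYPE_2" value are illustrative assumptions.
import * as databricks from "@pulumi/databricks";
// Illustrative only: applies history tracking (SCD) settings and column
// filtering to every replicated table in this ingestion definition.
const scdIngestion = new databricks.Pipeline("scd_ingestion", {
    name: "SCD ingestion demo",
    ingestionDefinition: {
        connectionName: "my_source_connection", // assumed field and value
        objects: [{
            table: {
                sourceSchema: "sales",
                sourceTable: "customers",
                destinationCatalog: "main",
                destinationSchema: "sales_raw",
            },
        }],
        tableConfiguration: {
            primaryKeys: ["customer_id"],
            excludeColumns: ["ssn"],
            sequenceBies: ["updated_at"],
            scdType: "SCD_TYPE_2", // assumed enum value
        },
    },
});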
PipelineLatestUpdate, PipelineLatestUpdateArgs      
- CreationTime string
- State string
- UpdateId string
- CreationTime string
- State string
- UpdateId string
- creationTime String
- state String
- updateId String
- creationTime string
- state string
- updateId string
- creation_time str
- state str
- update_id str
- creationTime String
- state String
- updateId String
PipelineLibrary, PipelineLibraryArgs    
- File
PipelineLibraryFile
- specifies the path to a file in the Databricks Workspace to include as source. The actual path is specified as the path attribute inside the block.
- Glob
PipelineLibraryGlob
- The unified field to include source code. Each entry should have the include attribute that can specify a notebook path, a file path, or a folder path that ends with /** (to include everything from that folder). This field cannot be used together with notebook or file.
- Jar string
- Maven
PipelineLibraryMaven
- Notebook
PipelineLibraryNotebook
- specifies the path to a Databricks Notebook to include as source. The actual path is specified as the path attribute inside the block.
- Whl string
- File
PipelineLibraryFile
- specifies the path to a file in the Databricks Workspace to include as source. The actual path is specified as the path attribute inside the block.
- Glob
PipelineLibraryGlob
- The unified field to include source code. Each entry should have the include attribute that can specify a notebook path, a file path, or a folder path that ends with /** (to include everything from that folder). This field cannot be used together with notebook or file.
- Jar string
- Maven
PipelineLibraryMaven
- Notebook
PipelineLibraryNotebook
- specifies the path to a Databricks Notebook to include as source. The actual path is specified as the path attribute inside the block.
- Whl string
- file
PipelineLibraryFile
- specifies the path to a file in the Databricks Workspace to include as source. The actual path is specified as the path attribute inside the block.
- glob
PipelineLibraryGlob
- The unified field to include source code. Each entry should have the include attribute that can specify a notebook path, a file path, or a folder path that ends with /** (to include everything from that folder). This field cannot be used together with notebook or file.
- jar String
- maven
PipelineLibraryMaven
- notebook
PipelineLibraryNotebook
- specifies the path to a Databricks Notebook to include as source. The actual path is specified as the path attribute inside the block.
- whl String
- file
PipelineLibraryFile
- specifies the path to a file in the Databricks Workspace to include as source. The actual path is specified as the path attribute inside the block.
- glob
PipelineLibraryGlob
- The unified field to include source code. Each entry should have the include attribute that can specify a notebook path, a file path, or a folder path that ends with /** (to include everything from that folder). This field cannot be used together with notebook or file.
- jar string
- maven
PipelineLibraryMaven
- notebook
PipelineLibraryNotebook
- specifies the path to a Databricks Notebook to include as source. The actual path is specified as the path attribute inside the block.
- whl string
- file
PipelineLibraryFile
- specifies the path to a file in the Databricks Workspace to include as source. The actual path is specified as the path attribute inside the block.
- glob
PipelineLibraryGlob
- The unified field to include source code. Each entry should have the include attribute that can specify a notebook path, a file path, or a folder path that ends with /** (to include everything from that folder). This field cannot be used together with notebook or file.
- jar str
- maven
PipelineLibraryMaven
- notebook
PipelineLibraryNotebook
- specifies the path to a Databricks Notebook to include as source. The actual path is specified as the path attribute inside the block.
- whl str
- file Property Map
- specifies the path to a file in the Databricks Workspace to include as source. The actual path is specified as the path attribute inside the block.
- glob Property Map
- The unified field to include source code. Each entry should have the include attribute that can specify a notebook path, a file path, or a folder path that ends with /** (to include everything from that folder). This field cannot be used together with notebook or file.
- jar String
- maven Property Map
- notebook Property Map
- specifies the path to a Databricks Notebook to include as source. The actual path is specified as the path attribute inside the block.
- whl String
PipelineLibraryFile, PipelineLibraryFileArgs      
- Path string
- Path string
- path String
- path string
- path str
- path String
PipelineLibraryGlob, PipelineLibraryGlobArgs      
- Include string
- Paths to include.
- Include string
- Paths to include.
- include String
- Paths to include.
- include string
- Paths to include.
- include str
- Paths to include.
- include String
- Paths to include.
PipelineLibraryMaven, PipelineLibraryMavenArgs      
- Coordinates string
- Exclusions List<string>
- Repo string
- Coordinates string
- Exclusions []string
- Repo string
- coordinates String
- exclusions List<String>
- repo String
- coordinates string
- exclusions string[]
- repo string
- coordinates str
- exclusions Sequence[str]
- repo str
- coordinates String
- exclusions List<String>
- repo String
PipelineLibraryNotebook, PipelineLibraryNotebookArgs      
- Path string
- Path string
- path String
- path string
- path str
- path String
PipelineNotification, PipelineNotificationArgs    
- Alerts List<string>
- non-empty list of alert types. Right now the following alert types are supported; consult the documentation for the actual list:
- on-update-success - a pipeline update completes successfully.
- on-update-failure - a pipeline update fails with a retryable error.
- on-update-fatal-failure - a pipeline update fails with a non-retryable (fatal) error.
- on-flow-failure - a single data flow fails.
- EmailRecipients List<string>
- non-empty list of emails to notify.
- Alerts []string
- non-empty list of alert types. Right now the following alert types are supported; consult the documentation for the actual list:
- on-update-success - a pipeline update completes successfully.
- on-update-failure - a pipeline update fails with a retryable error.
- on-update-fatal-failure - a pipeline update fails with a non-retryable (fatal) error.
- on-flow-failure - a single data flow fails.
- EmailRecipients []string
- non-empty list of emails to notify.
- alerts List<String>
- non-empty list of alert types. Right now the following alert types are supported; consult the documentation for the actual list:
- on-update-success - a pipeline update completes successfully.
- on-update-failure - a pipeline update fails with a retryable error.
- on-update-fatal-failure - a pipeline update fails with a non-retryable (fatal) error.
- on-flow-failure - a single data flow fails.
- emailRecipients List<String>
- non-empty list of emails to notify.
- alerts string[]
- non-empty list of alert types. Right now the following alert types are supported; consult the documentation for the actual list:
- on-update-success - a pipeline update completes successfully.
- on-update-failure - a pipeline update fails with a retryable error.
- on-update-fatal-failure - a pipeline update fails with a non-retryable (fatal) error.
- on-flow-failure - a single data flow fails.
- emailRecipients string[]
- non-empty list of emails to notify.
- alerts Sequence[str]
- non-empty list of alert types. Right now the following alert types are supported; consult the documentation for the actual list:
- on-update-success - a pipeline update completes successfully.
- on-update-failure - a pipeline update fails with a retryable error.
- on-update-fatal-failure - a pipeline update fails with a non-retryable (fatal) error.
- on-flow-failure - a single data flow fails.
- email_recipients Sequence[str]
- non-empty list of emails to notify.
- alerts List<String>
- non-empty list of alert types. Right now the following alert types are supported; consult the documentation for the actual list:
- on-update-success - a pipeline update completes successfully.
- on-update-failure - a pipeline update fails with a retryable error.
- on-update-fatal-failure - a pipeline update fails with a non-retryable (fatal) error.
- on-flow-failure - a single data flow fails.
- emailRecipients List<String>
- non-empty list of emails to notify.
PipelineRestartWindow, PipelineRestartWindowArgs      
- StartHour int
- DaysOfWeeks List<string>
- TimeZoneId string
- StartHour int
- DaysOfWeeks []string
- TimeZoneId string
- startHour Integer
- daysOfWeeks List<String>
- timeZoneId String
- startHour number
- daysOfWeeks string[]
- timeZoneId string
- start_hour int
- days_of_weeks Sequence[str]
- time_zone_id str
- startHour Number
- daysOfWeeks List<String>
- timeZoneId String
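A minimal TypeScript sketch of the restart window, assuming a restartWindow input on databricks.Pipeline with the fields listed above; the hour, day names, and time zone are placeholder values.
import * as databricks from "@pulumi/databricks";
// Restrict automatic restarts of a continuous pipeline to a weekend window.
// Field names follow PipelineRestartWindowArgs above; values are illustrative
// and the day-name format is an assumption.
const maintained = new databricks.Pipeline("maintained", {
    name: "Pipeline with restart window",
    continuous: true,
    restartWindow: {
        startHour: 2,                        // start of the window (0-23)
        daysOfWeeks: ["SATURDAY", "SUNDAY"], // assumed day-name format
        timeZoneId: "UTC",
    },
});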
PipelineRunAs, PipelineRunAsArgs      
- ServicePrincipalName string
- UserName string
- ServicePrincipalName string
- UserName string
- servicePrincipalName String
- userName String
- servicePrincipalName string
- userName string
- service_principal_name str
- user_name str
- servicePrincipalName String
- userName String
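A short TypeScript sketch of the run-as setting, assuming a runAs input on databricks.Pipeline where exactly one of servicePrincipalName or userName is set; the application ID shown is a placeholder.
import * as databricks from "@pulumi/databricks";
// Run pipeline updates as a service principal instead of the deploying user.
// Set only one of servicePrincipalName / userName.
const runAsSp = new databricks.Pipeline("run_as_sp", {
    name: "Pipeline run as service principal",
    runAs: {
        servicePrincipalName: "00000000-0000-0000-0000-000000000000", // placeholder application ID
    },
});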
PipelineTrigger, PipelineTriggerArgs    
PipelineTriggerCron, PipelineTriggerCronArgs      
- QuartzCronSchedule string
- TimezoneId string
- QuartzCronSchedule string
- TimezoneId string
- quartzCronSchedule String
- timezoneId String
- quartzCronSchedule string
- timezoneId string
- quartz_cron_schedule str
- timezone_id str
- quartzCronSchedule String
- timezoneId String
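For the trigger fields above, here is a hedged TypeScript sketch that assumes the trigger input exposes a cron block with quartzCronSchedule and timezoneId; the schedule expression and time zone are placeholders.
import * as databricks from "@pulumi/databricks";
// Trigger a pipeline update every day at 06:00 in the given time zone.
// The nesting under trigger.cron is inferred from the type names above.
const scheduled = new databricks.Pipeline("scheduled", {
    name: "Scheduled pipeline",
    continuous: false,
    trigger: {
        cron: {
            quartzCronSchedule: "0 0 6 * * ?", // placeholder Quartz expression
            timezoneId: "Europe/Amsterdam",
        },
    },
});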
Import
The pipeline resource can be imported using the ID of the pipeline. When managing it with Terraform, use an import block:
hcl
import {
  to = databricks_pipeline.this
  id = "<pipeline-id>"
}
Alternatively, import using the pulumi import command:
bash
$ pulumi import databricks:index/pipeline:Pipeline this <pipeline-id>
To learn more about importing existing cloud resources, see Importing resources.
Package Details
- Repository
- databricks pulumi/pulumi-databricks
- License
- Apache-2.0
- Notes
- This Pulumi package is based on the databricks Terraform Provider.
