Pipeline

Viewing docs for Databricks v0.4.0 (older version), published on Mar 9, 2026 by Pulumi.

    Use the databricks.Pipeline resource to deploy and manage Delta Live Tables pipelines.


    Example Usage

    using Pulumi;
    using Databricks = Pulumi.Databricks;
    
    class MyStack : Stack
    {
        public MyStack()
        {
            var dltDemo = new Databricks.Notebook("dltDemo", new Databricks.NotebookArgs
            {
            });
            //...
            var @this = new Databricks.Pipeline("this", new Databricks.PipelineArgs
            {
                Storage = "/test/first-pipeline",
                Configuration = 
                {
                    { "key1", "value1" },
                    { "key2", "value2" },
                },
                Clusters = 
                {
                    new Databricks.Inputs.PipelineClusterArgs
                    {
                        Label = "default",
                        NumWorkers = 2,
                        CustomTags = 
                        {
                            { "cluster_type", "default" },
                        },
                    },
                    new Databricks.Inputs.PipelineClusterArgs
                    {
                        Label = "maintenance",
                        NumWorkers = 1,
                        CustomTags = 
                        {
                            { "cluster_type", "maintenance" },
                        },
                    },
                },
                Libraries = 
                {
                    new Databricks.Inputs.PipelineLibraryArgs
                    {
                        Notebook = new Databricks.Inputs.PipelineLibraryNotebookArgs
                        {
                            Path = dltDemo.Id,
                        },
                    },
                },
                Filters = new Databricks.Inputs.PipelineFiltersArgs
                {
                    Includes = 
                    {
                        "com.databricks.include",
                    },
                    Excludes = 
                    {
                        "com.databricks.exclude",
                    },
                },
                Continuous = false,
            });
        }
    
    }
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		dltDemo, err := databricks.NewNotebook(ctx, "dltDemo", nil)
    		if err != nil {
    			return err
    		}
    		_, err = databricks.NewPipeline(ctx, "this", &databricks.PipelineArgs{
    			Storage: pulumi.String("/test/first-pipeline"),
    		Configuration: pulumi.Map{
    			"key1": pulumi.Any("value1"),
    			"key2": pulumi.Any("value2"),
    		},
    		Clusters: databricks.PipelineClusterArray{
    			&databricks.PipelineClusterArgs{
    				Label:      pulumi.String("default"),
    				NumWorkers: pulumi.Int(2),
    				CustomTags: pulumi.Map{
    					"cluster_type": pulumi.Any("default"),
    				},
    			},
    			&databricks.PipelineClusterArgs{
    				Label:      pulumi.String("maintenance"),
    				NumWorkers: pulumi.Int(1),
    				CustomTags: pulumi.Map{
    					"cluster_type": pulumi.Any("maintenance"),
    				},
    			},
    		},
    		Libraries: databricks.PipelineLibraryArray{
    			&databricks.PipelineLibraryArgs{
    				Notebook: &databricks.PipelineLibraryNotebookArgs{
    					Path: dltDemo.ID(),
    				},
    			},
    		},
    		Filters: &databricks.PipelineFiltersArgs{
    				Includes: pulumi.StringArray{
    					pulumi.String("com.databricks.include"),
    				},
    				Excludes: pulumi.StringArray{
    					pulumi.String("com.databricks.exclude"),
    				},
    			},
    			Continuous: pulumi.Bool(false),
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    

    Example coming soon!

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";
    
    const dltDemo = new databricks.Notebook("dltDemo", {});
    //...
    const _this = new databricks.Pipeline("this", {
        storage: "/test/first-pipeline",
        configuration: {
            key1: "value1",
            key2: "value2",
        },
        clusters: [
            {
                label: "default",
                numWorkers: 2,
                customTags: {
                    cluster_type: "default",
                },
            },
            {
                label: "maintenance",
                numWorkers: 1,
                customTags: {
                    cluster_type: "maintenance",
                },
            },
        ],
        libraries: [{
            notebook: {
                path: dltDemo.id,
            },
        }],
        filters: {
            includes: ["com.databricks.include"],
            excludes: ["com.databricks.exclude"],
        },
        continuous: false,
    });
    
    import pulumi
    import pulumi_databricks as databricks
    
    dlt_demo = databricks.Notebook("dltDemo")
    #...
    this = databricks.Pipeline("this",
        storage="/test/first-pipeline",
        configuration={
            "key1": "value1",
            "key2": "value2",
        },
        clusters=[
            databricks.PipelineClusterArgs(
                label="default",
                num_workers=2,
                custom_tags={
                    "cluster_type": "default",
                },
            ),
            databricks.PipelineClusterArgs(
                label="maintenance",
                num_workers=1,
                custom_tags={
                    "cluster_type": "maintenance",
                },
            ),
        ],
        libraries=[databricks.PipelineLibraryArgs(
            notebook=databricks.PipelineLibraryNotebookArgs(
                path=dlt_demo.id,
            ),
        )],
        filters=databricks.PipelineFiltersArgs(
            includes=["com.databricks.include"],
            excludes=["com.databricks.exclude"],
        ),
        continuous=False)
    

    Example coming soon!

    Create Pipeline Resource

    Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.

    Constructor syntax

    new Pipeline(name: string, args: PipelineArgs, opts?: CustomResourceOptions);
    @overload
    def Pipeline(resource_name: str,
                 args: PipelineArgs,
                 opts: Optional[ResourceOptions] = None)
    
    @overload
    def Pipeline(resource_name: str,
                 opts: Optional[ResourceOptions] = None,
                 filters: Optional[PipelineFiltersArgs] = None,
                 allow_duplicate_names: Optional[bool] = None,
                 clusters: Optional[Sequence[PipelineClusterArgs]] = None,
                 configuration: Optional[Mapping[str, Any]] = None,
                 continuous: Optional[bool] = None,
                 libraries: Optional[Sequence[PipelineLibraryArgs]] = None,
                 name: Optional[str] = None,
                 storage: Optional[str] = None,
                 target: Optional[str] = None)
    func NewPipeline(ctx *Context, name string, args PipelineArgs, opts ...ResourceOption) (*Pipeline, error)
    public Pipeline(string name, PipelineArgs args, CustomResourceOptions? opts = null)
    public Pipeline(String name, PipelineArgs args)
    public Pipeline(String name, PipelineArgs args, CustomResourceOptions options)
    
    type: databricks:Pipeline
    properties: # The arguments to resource properties.
    options: # Bag of options to control resource's behavior.
    
    

    Parameters

    All SDKs accept the same parameters; only the spelling and option types vary by language.

    name string
    The unique name of the resource (resource_name in Python).
    args PipelineArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior (ResourceOptions in Python, ResourceOption in Go).
    ctx Context (Go only)
    Context object for the current deployment.

    Constructor example

    The following reference example uses placeholder values for all input properties.

    var pipelineResource = new Databricks.Pipeline("pipelineResource", new()
    {
        Filters = new Databricks.Inputs.PipelineFiltersArgs
        {
            Excludes = new[]
            {
                "string",
            },
            Includes = new[]
            {
                "string",
            },
        },
        AllowDuplicateNames = false,
        Clusters = new[]
        {
            new Databricks.Inputs.PipelineClusterArgs
            {
                Autoscale = new Databricks.Inputs.PipelineClusterAutoscaleArgs
                {
                    MaxWorkers = 0,
                    MinWorkers = 0,
                },
                AwsAttributes = new Databricks.Inputs.PipelineClusterAwsAttributesArgs
                {
                    InstanceProfileArn = "string",
                    ZoneId = "string",
                },
                ClusterLogConf = new Databricks.Inputs.PipelineClusterClusterLogConfArgs
                {
                    Dbfs = new Databricks.Inputs.PipelineClusterClusterLogConfDbfsArgs
                    {
                        Destination = "string",
                    },
                    S3 = new Databricks.Inputs.PipelineClusterClusterLogConfS3Args
                    {
                        Destination = "string",
                        CannedAcl = "string",
                        EnableEncryption = false,
                        EncryptionType = "string",
                        Endpoint = "string",
                        KmsKey = "string",
                        Region = "string",
                    },
                },
                CustomTags = 
                {
                    { "string", "any" },
                },
                DriverNodeTypeId = "string",
                InitScripts = new[]
                {
                    new Databricks.Inputs.PipelineClusterInitScriptArgs
                    {
                        Dbfs = new Databricks.Inputs.PipelineClusterInitScriptDbfsArgs
                        {
                            Destination = "string",
                        },
                        File = new Databricks.Inputs.PipelineClusterInitScriptFileArgs
                        {
                            Destination = "string",
                        },
                        S3 = new Databricks.Inputs.PipelineClusterInitScriptS3Args
                        {
                            Destination = "string",
                            CannedAcl = "string",
                            EnableEncryption = false,
                            EncryptionType = "string",
                            Endpoint = "string",
                            KmsKey = "string",
                            Region = "string",
                        },
                    },
                },
                InstancePoolId = "string",
                Label = "string",
                NodeTypeId = "string",
                NumWorkers = 0,
                SparkConf = 
                {
                    { "string", "any" },
                },
                SparkEnvVars = 
                {
                    { "string", "any" },
                },
                SshPublicKeys = new[]
                {
                    "string",
                },
            },
        },
        Configuration = 
        {
            { "string", "any" },
        },
        Continuous = false,
        Libraries = new[]
        {
            new Databricks.Inputs.PipelineLibraryArgs
            {
                Jar = "string",
                Maven = new Databricks.Inputs.PipelineLibraryMavenArgs
                {
                    Coordinates = "string",
                    Exclusions = new[]
                    {
                        "string",
                    },
                    Repo = "string",
                },
                Notebook = new Databricks.Inputs.PipelineLibraryNotebookArgs
                {
                    Path = "string",
                },
                Whl = "string",
            },
        },
        Name = "string",
        Storage = "string",
        Target = "string",
    });
    
    example, err := databricks.NewPipeline(ctx, "pipelineResource", &databricks.PipelineArgs{
    	Filters: &databricks.PipelineFiltersArgs{
    		Excludes: pulumi.StringArray{
    			pulumi.String("string"),
    		},
    		Includes: pulumi.StringArray{
    			pulumi.String("string"),
    		},
    	},
    	AllowDuplicateNames: pulumi.Bool(false),
    	Clusters: databricks.PipelineClusterArray{
    		&databricks.PipelineClusterArgs{
    			Autoscale: &databricks.PipelineClusterAutoscaleArgs{
    				MaxWorkers: pulumi.Int(0),
    				MinWorkers: pulumi.Int(0),
    			},
    			AwsAttributes: &databricks.PipelineClusterAwsAttributesArgs{
    				InstanceProfileArn: pulumi.String("string"),
    				ZoneId:             pulumi.String("string"),
    			},
    			ClusterLogConf: &databricks.PipelineClusterClusterLogConfArgs{
    				Dbfs: &databricks.PipelineClusterClusterLogConfDbfsArgs{
    					Destination: pulumi.String("string"),
    				},
    				S3: &databricks.PipelineClusterClusterLogConfS3Args{
    					Destination:      pulumi.String("string"),
    					CannedAcl:        pulumi.String("string"),
    					EnableEncryption: pulumi.Bool(false),
    					EncryptionType:   pulumi.String("string"),
    					Endpoint:         pulumi.String("string"),
    					KmsKey:           pulumi.String("string"),
    					Region:           pulumi.String("string"),
    				},
    			},
    			CustomTags: pulumi.Map{
    				"string": pulumi.Any("any"),
    			},
    			DriverNodeTypeId: pulumi.String("string"),
    			InitScripts: databricks.PipelineClusterInitScriptArray{
    				&databricks.PipelineClusterInitScriptArgs{
    					Dbfs: &databricks.PipelineClusterInitScriptDbfsArgs{
    						Destination: pulumi.String("string"),
    					},
    					File: &databricks.PipelineClusterInitScriptFileArgs{
    						Destination: pulumi.String("string"),
    					},
    					S3: &databricks.PipelineClusterInitScriptS3Args{
    						Destination:      pulumi.String("string"),
    						CannedAcl:        pulumi.String("string"),
    						EnableEncryption: pulumi.Bool(false),
    						EncryptionType:   pulumi.String("string"),
    						Endpoint:         pulumi.String("string"),
    						KmsKey:           pulumi.String("string"),
    						Region:           pulumi.String("string"),
    					},
    				},
    			},
    			InstancePoolId: pulumi.String("string"),
    			Label:          pulumi.String("string"),
    			NodeTypeId:     pulumi.String("string"),
    			NumWorkers:     pulumi.Int(0),
    			SparkConf: pulumi.Map{
    				"string": pulumi.Any("any"),
    			},
    			SparkEnvVars: pulumi.Map{
    				"string": pulumi.Any("any"),
    			},
    			SshPublicKeys: pulumi.StringArray{
    				pulumi.String("string"),
    			},
    		},
    	},
    	Configuration: pulumi.Map{
    		"string": pulumi.Any("any"),
    	},
    	Continuous: pulumi.Bool(false),
    	Libraries: databricks.PipelineLibraryArray{
    		&databricks.PipelineLibraryArgs{
    			Jar: pulumi.String("string"),
    			Maven: &databricks.PipelineLibraryMavenArgs{
    				Coordinates: pulumi.String("string"),
    				Exclusions: pulumi.StringArray{
    					pulumi.String("string"),
    				},
    				Repo: pulumi.String("string"),
    			},
    			Notebook: &databricks.PipelineLibraryNotebookArgs{
    				Path: pulumi.String("string"),
    			},
    			Whl: pulumi.String("string"),
    		},
    	},
    	Name:    pulumi.String("string"),
    	Storage: pulumi.String("string"),
    	Target:  pulumi.String("string"),
    })
    
    var pipelineResource = new Pipeline("pipelineResource", PipelineArgs.builder()
        .filters(PipelineFiltersArgs.builder()
            .excludes("string")
            .includes("string")
            .build())
        .allowDuplicateNames(false)
        .clusters(PipelineClusterArgs.builder()
            .autoscale(PipelineClusterAutoscaleArgs.builder()
                .maxWorkers(0)
                .minWorkers(0)
                .build())
            .awsAttributes(PipelineClusterAwsAttributesArgs.builder()
                .instanceProfileArn("string")
                .zoneId("string")
                .build())
            .clusterLogConf(PipelineClusterClusterLogConfArgs.builder()
                .dbfs(PipelineClusterClusterLogConfDbfsArgs.builder()
                    .destination("string")
                    .build())
                .s3(PipelineClusterClusterLogConfS3Args.builder()
                    .destination("string")
                    .cannedAcl("string")
                    .enableEncryption(false)
                    .encryptionType("string")
                    .endpoint("string")
                    .kmsKey("string")
                    .region("string")
                    .build())
                .build())
            .customTags(Map.of("string", "any"))
            .driverNodeTypeId("string")
            .initScripts(PipelineClusterInitScriptArgs.builder()
                .dbfs(PipelineClusterInitScriptDbfsArgs.builder()
                    .destination("string")
                    .build())
                .file(PipelineClusterInitScriptFileArgs.builder()
                    .destination("string")
                    .build())
                .s3(PipelineClusterInitScriptS3Args.builder()
                    .destination("string")
                    .cannedAcl("string")
                    .enableEncryption(false)
                    .encryptionType("string")
                    .endpoint("string")
                    .kmsKey("string")
                    .region("string")
                    .build())
                .build())
            .instancePoolId("string")
            .label("string")
            .nodeTypeId("string")
            .numWorkers(0)
            .sparkConf(Map.of("string", "any"))
            .sparkEnvVars(Map.of("string", "any"))
            .sshPublicKeys("string")
            .build())
        .configuration(Map.of("string", "any"))
        .continuous(false)
        .libraries(PipelineLibraryArgs.builder()
            .jar("string")
            .maven(PipelineLibraryMavenArgs.builder()
                .coordinates("string")
                .exclusions("string")
                .repo("string")
                .build())
            .notebook(PipelineLibraryNotebookArgs.builder()
                .path("string")
                .build())
            .whl("string")
            .build())
        .name("string")
        .storage("string")
        .target("string")
        .build());
    
    pipeline_resource = databricks.Pipeline("pipelineResource",
        filters={
            "excludes": ["string"],
            "includes": ["string"],
        },
        allow_duplicate_names=False,
        clusters=[{
            "autoscale": {
                "max_workers": 0,
                "min_workers": 0,
            },
            "aws_attributes": {
                "instance_profile_arn": "string",
                "zone_id": "string",
            },
            "cluster_log_conf": {
                "dbfs": {
                    "destination": "string",
                },
                "s3": {
                    "destination": "string",
                    "canned_acl": "string",
                    "enable_encryption": False,
                    "encryption_type": "string",
                    "endpoint": "string",
                    "kms_key": "string",
                    "region": "string",
                },
            },
            "custom_tags": {
                "string": "any",
            },
            "driver_node_type_id": "string",
            "init_scripts": [{
                "dbfs": {
                    "destination": "string",
                },
                "file": {
                    "destination": "string",
                },
                "s3": {
                    "destination": "string",
                    "canned_acl": "string",
                    "enable_encryption": False,
                    "encryption_type": "string",
                    "endpoint": "string",
                    "kms_key": "string",
                    "region": "string",
                },
            }],
            "instance_pool_id": "string",
            "label": "string",
            "node_type_id": "string",
            "num_workers": 0,
            "spark_conf": {
                "string": "any",
            },
            "spark_env_vars": {
                "string": "any",
            },
            "ssh_public_keys": ["string"],
        }],
        configuration={
            "string": "any",
        },
        continuous=False,
        libraries=[{
            "jar": "string",
            "maven": {
                "coordinates": "string",
                "exclusions": ["string"],
                "repo": "string",
            },
            "notebook": {
                "path": "string",
            },
            "whl": "string",
        }],
        name="string",
        storage="string",
        target="string")
    
    const pipelineResource = new databricks.Pipeline("pipelineResource", {
        filters: {
            excludes: ["string"],
            includes: ["string"],
        },
        allowDuplicateNames: false,
        clusters: [{
            autoscale: {
                maxWorkers: 0,
                minWorkers: 0,
            },
            awsAttributes: {
                instanceProfileArn: "string",
                zoneId: "string",
            },
            clusterLogConf: {
                dbfs: {
                    destination: "string",
                },
                s3: {
                    destination: "string",
                    cannedAcl: "string",
                    enableEncryption: false,
                    encryptionType: "string",
                    endpoint: "string",
                    kmsKey: "string",
                    region: "string",
                },
            },
            customTags: {
                string: "any",
            },
            driverNodeTypeId: "string",
            initScripts: [{
                dbfs: {
                    destination: "string",
                },
                file: {
                    destination: "string",
                },
                s3: {
                    destination: "string",
                    cannedAcl: "string",
                    enableEncryption: false,
                    encryptionType: "string",
                    endpoint: "string",
                    kmsKey: "string",
                    region: "string",
                },
            }],
            instancePoolId: "string",
            label: "string",
            nodeTypeId: "string",
            numWorkers: 0,
            sparkConf: {
                string: "any",
            },
            sparkEnvVars: {
                string: "any",
            },
            sshPublicKeys: ["string"],
        }],
        configuration: {
            string: "any",
        },
        continuous: false,
        libraries: [{
            jar: "string",
            maven: {
                coordinates: "string",
                exclusions: ["string"],
                repo: "string",
            },
            notebook: {
                path: "string",
            },
            whl: "string",
        }],
        name: "string",
        storage: "string",
        target: "string",
    });
    
    type: databricks:Pipeline
    properties:
        allowDuplicateNames: false
        clusters:
            - autoscale:
                maxWorkers: 0
                minWorkers: 0
              awsAttributes:
                instanceProfileArn: string
                zoneId: string
              clusterLogConf:
                dbfs:
                    destination: string
                s3:
                    cannedAcl: string
                    destination: string
                    enableEncryption: false
                    encryptionType: string
                    endpoint: string
                    kmsKey: string
                    region: string
              customTags:
                string: any
              driverNodeTypeId: string
              initScripts:
                - dbfs:
                    destination: string
                  file:
                    destination: string
                  s3:
                    cannedAcl: string
                    destination: string
                    enableEncryption: false
                    encryptionType: string
                    endpoint: string
                    kmsKey: string
                    region: string
              instancePoolId: string
              label: string
              nodeTypeId: string
              numWorkers: 0
              sparkConf:
                string: any
              sparkEnvVars:
                string: any
              sshPublicKeys:
                - string
        configuration:
            string: any
        continuous: false
        filters:
            excludes:
                - string
            includes:
                - string
        libraries:
            - jar: string
              maven:
                coordinates: string
                exclusions:
                    - string
                repo: string
              notebook:
                path: string
              whl: string
        name: string
        storage: string
        target: string
    

    Pipeline Resource Properties

    To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

    Inputs

    In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
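
    For example, these two Python forms are equivalent; a minimal sketch (resource names are illustrative):

    import pulumi_databricks as databricks

    # Argument-class form
    p1 = databricks.Pipeline("p1",
        filters=databricks.PipelineFiltersArgs(
            includes=["com.databricks.include"],
        ))

    # Dictionary-literal form
    p2 = databricks.Pipeline("p2",
        filters={
            "includes": ["com.databricks.include"],
        })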

    The Pipeline resource accepts the following input properties:

    Property types are shown in their .NET form; each language SDK uses its idiomatic equivalents (for example, Sequence[PipelineClusterArgs] in Python and PipelineCluster[] in TypeScript).

    Filters PipelineFilters
    AllowDuplicateNames bool
    Clusters List<PipelineCluster>
    Clusters on which to run the pipeline. If none is specified, a default cluster configuration is selected automatically for the pipeline.
    Configuration Dictionary<string, object>
    An optional map of key-value pairs used to configure the entire pipeline.
    Continuous bool
    A flag indicating whether to run the pipeline continuously. The default value is false.
    Libraries List<PipelineLibrary>
    Specifies pipeline code and required artifacts. The syntax resembles the library configuration block, with the addition of a special notebook library type that must have a path attribute.
    Name string
    A user-friendly name for this pipeline. The name can be used to identify pipeline jobs in the UI.
    Storage string
    A location on DBFS or cloud storage where output data and metadata required for pipeline execution are stored. By default, tables are stored in a subdirectory of this location.
    Target string
    The name of a database for persisting pipeline output data. Configuring the target setting allows you to view and query the pipeline output data from the Databricks UI.

    Outputs

    All input properties are implicitly available as output properties. Additionally, the Pipeline resource produces the following output properties:

    Id string
    The provider-assigned unique ID for this managed resource.
    Url string
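
    For example, a minimal Python sketch that exports the url output so the workspace link appears in pulumi stack output (resource names and arguments are illustrative):

    import pulumi
    import pulumi_databricks as databricks

    this = databricks.Pipeline("this",
        storage="/test/first-pipeline",
        filters=databricks.PipelineFiltersArgs(
            includes=["com.databricks.include"],
        ))

    # Outputs resolve after deployment; exporting surfaces the value on the stack.
    pulumi.export("pipeline_url", this.url)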

    Look up Existing Pipeline Resource

    Get an existing Pipeline resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

    public static get(name: string, id: Input<ID>, state?: PipelineState, opts?: CustomResourceOptions): Pipeline
    @staticmethod
    def get(resource_name: str,
            id: str,
            opts: Optional[ResourceOptions] = None,
            allow_duplicate_names: Optional[bool] = None,
            clusters: Optional[Sequence[PipelineClusterArgs]] = None,
            configuration: Optional[Mapping[str, Any]] = None,
            continuous: Optional[bool] = None,
            filters: Optional[PipelineFiltersArgs] = None,
            libraries: Optional[Sequence[PipelineLibraryArgs]] = None,
            name: Optional[str] = None,
            storage: Optional[str] = None,
            target: Optional[str] = None,
            url: Optional[str] = None) -> Pipeline
    func GetPipeline(ctx *Context, name string, id IDInput, state *PipelineState, opts ...ResourceOption) (*Pipeline, error)
    public static Pipeline Get(string name, Input<string> id, PipelineState? state, CustomResourceOptions? opts = null)
    public static Pipeline get(String name, Output<String> id, PipelineState state, CustomResourceOptions options)
    resources:
      _:
        type: databricks:Pipeline
        get:
          id: ${id}
    name
    The unique name of the resulting resource (resource_name in Python).
    id
    The unique provider ID of the resource to look up.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    The following state arguments are supported:
    AllowDuplicateNames bool
    Clusters List<PipelineCluster>
    Clusters on which to run the pipeline. If none is specified, a default cluster configuration is selected automatically for the pipeline.
    Configuration Dictionary<string, object>
    An optional map of key-value pairs used to configure the entire pipeline.
    Continuous bool
    A flag indicating whether to run the pipeline continuously. The default value is false.
    Filters PipelineFilters
    Libraries List<PipelineLibrary>
    Specifies pipeline code and required artifacts. The syntax resembles the library configuration block, with the addition of a special notebook library type that must have a path attribute.
    Name string
    A user-friendly name for this pipeline. The name can be used to identify pipeline jobs in the UI.
    Storage string
    A location on DBFS or cloud storage where output data and metadata required for pipeline execution are stored. By default, tables are stored in a subdirectory of this location.
    Target string
    The name of a database for persisting pipeline output data. Configuring the target setting allows you to view and query the pipeline output data from the Databricks UI.
    Url string
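
    For example, a minimal Python sketch of such a lookup, assuming the pipeline ID is passed in via stack configuration (the config key pipelineId is illustrative):

    import pulumi
    import pulumi_databricks as databricks

    config = pulumi.Config()
    pipeline_id = config.require("pipelineId")  # ID of an existing pipeline

    # get() reads the existing resource's state; it does not create or modify it.
    existing = databricks.Pipeline.get("existing", pipeline_id)

    pulumi.export("existing_pipeline_url", existing.url)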

    Supporting Types

    PipelineCluster, PipelineClusterArgs

    Autoscale PipelineClusterAutoscale
    AwsAttributes PipelineClusterAwsAttributes
    ClusterLogConf PipelineClusterClusterLogConf
    CustomTags Dictionary<string, object>
    DriverNodeTypeId string
    InitScripts List<PipelineClusterInitScript>
    InstancePoolId string
    Label string
    NodeTypeId string
    NumWorkers int
    SparkConf Dictionary<string, object>
    SparkEnvVars Dictionary<string, object>
    SshPublicKeys List<string>

    PipelineClusterAutoscale, PipelineClusterAutoscaleArgs

    MaxWorkers int
    MinWorkers int
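
    A minimal Python sketch of a cluster block using autoscale in place of a fixed num_workers (the bounds are illustrative):

    import pulumi_databricks as databricks

    cluster = databricks.PipelineClusterArgs(
        label="default",
        # With autoscale set, the cluster sizes itself between the worker
        # bounds, so num_workers can be omitted.
        autoscale=databricks.PipelineClusterAutoscaleArgs(
            min_workers=1,
            max_workers=4,
        ),
    )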

    PipelineClusterAwsAttributes, PipelineClusterAwsAttributesArgs

    InstanceProfileArn string
    ZoneId string

    PipelineClusterClusterLogConf, PipelineClusterClusterLogConfArgs

    Dbfs PipelineClusterClusterLogConfDbfs
    S3 PipelineClusterClusterLogConfS3

    PipelineClusterClusterLogConfDbfs, PipelineClusterClusterLogConfDbfsArgs

    Destination string

    PipelineClusterClusterLogConfS3, PipelineClusterClusterLogConfS3Args

    Destination string
    CannedAcl string
    EnableEncryption bool
    EncryptionType string
    Endpoint string
    KmsKey string
    Region string

    PipelineClusterInitScript, PipelineClusterInitScriptArgs

    Dbfs PipelineClusterInitScriptDbfs
    File PipelineClusterInitScriptFile
    S3 PipelineClusterInitScriptS3

    PipelineClusterInitScriptDbfs, PipelineClusterInitScriptDbfsArgs

    Destination string

    PipelineClusterInitScriptFile, PipelineClusterInitScriptFileArgs

    Destination string

    PipelineClusterInitScriptS3, PipelineClusterInitScriptS3Args

    Destination string
    CannedAcl string
    EnableEncryption bool
    EncryptionType string
    Endpoint string
    KmsKey string
    Region string
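
    A minimal Python sketch attaching a DBFS-hosted init script to a pipeline cluster (the script path is illustrative):

    import pulumi_databricks as databricks

    cluster = databricks.PipelineClusterArgs(
        label="default",
        num_workers=1,
        # Each init script entry typically names one source: dbfs, file, or s3.
        init_scripts=[databricks.PipelineClusterInitScriptArgs(
            dbfs=databricks.PipelineClusterInitScriptDbfsArgs(
                destination="dbfs:/init-scripts/install-deps.sh",
            ),
        )],
    )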

    PipelineFilters, PipelineFiltersArgs

    Excludes List<string>
    Includes List<string>

    PipelineLibrary, PipelineLibraryArgs

    Jar string
    Maven PipelineLibraryMaven
    Notebook PipelineLibraryNotebook
    Whl string

    PipelineLibraryMaven, PipelineLibraryMavenArgs

    Coordinates string
    Exclusions List<string>
    Repo string
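
    A minimal Python sketch of a Maven library entry (the coordinates are an illustrative placeholder):

    import pulumi_databricks as databricks

    maven_lib = databricks.PipelineLibraryArgs(
        maven=databricks.PipelineLibraryMavenArgs(
            coordinates="com.example:dlt-helpers:1.0.0",  # group:artifact:version
            # exclusions and repo are optional overrides for transitive
            # dependencies and the resolving repository, respectively.
        ),
    )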

    PipelineLibraryNotebook, PipelineLibraryNotebookArgs

    Path string

    Import

    The resource pipeline can be imported using the ID of the pipeline:

     $ pulumi import databricks:index/pipeline:Pipeline this <pipeline-id>
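
    After importing, declare a matching Pipeline resource in your program so subsequent pulumi up runs manage it; a minimal Python sketch (the arguments shown are illustrative and must mirror the imported pipeline's actual settings):

    import pulumi_databricks as databricks

    # This declaration takes over management of the imported pipeline; if its
    # arguments do not match the imported state, the next `pulumi up` will
    # propose changes.
    this = databricks.Pipeline("this",
        storage="/test/first-pipeline",
        filters=databricks.PipelineFiltersArgs(
            includes=["com.databricks.include"],
        ))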
    

    To learn more about importing existing cloud resources, see Importing resources.

    Package Details

    Repository
    databricks pulumi/pulumi-databricks
    License
    Apache-2.0
    Notes
    This Pulumi package is based on the databricks Terraform Provider.