1. Packages
  2. Databricks Provider
  3. API Docs
  4. DataQualityMonitor
Databricks v1.78.0 published on Friday, Nov 7, 2025 by Pulumi

databricks.DataQualityMonitor

Get Started
databricks logo
Databricks v1.78.0 published on Friday, Nov 7, 2025 by Pulumi

    Public Beta

    This resource allows you to set up data quality monitoring checks for Unity Catalog objects, currently schema and table.

    For the table object_type, you must either:

    1. be an owner of the table’s parent catalog, have USE_SCHEMA on the table’s parent schema, and have SELECT access on the table
    2. have USE_CATALOG on the table’s parent catalog, be an owner of the table’s parent schema, and have SELECT access on the table.
    3. have the following permissions:
      • USE_CATALOG on the table’s parent catalog
      • USE_SCHEMA on the table’s parent schema
      • be an owner of the table.

    Note This resource can only be used with a workspace-level provider!

    Example Usage

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";
    
    const _this = new databricks.Schema("this", {
        catalogName: "my_catalog",
        name: "my_schema",
    });
    const thisDataQualityMonitor = new databricks.DataQualityMonitor("this", {
        objectType: "schema",
        objectId: _this.schemaId,
        anomalyDetectionConfig: {},
    });
    
    import pulumi
    import pulumi_databricks as databricks
    
    this = databricks.Schema("this",
        catalog_name="my_catalog",
        name="my_schema")
    this_data_quality_monitor = databricks.DataQualityMonitor("this",
        object_type="schema",
        object_id=this.schema_id,
        anomaly_detection_config={})
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		this, err := databricks.NewSchema(ctx, "this", &databricks.SchemaArgs{
    			CatalogName: pulumi.String("my_catalog"),
    			Name:        pulumi.String("my_schema"),
    		})
    		if err != nil {
    			return err
    		}
    		_, err = databricks.NewDataQualityMonitor(ctx, "this", &databricks.DataQualityMonitorArgs{
    			ObjectType:             pulumi.String("schema"),
    			ObjectId:               this.SchemaId,
    			AnomalyDetectionConfig: &databricks.DataQualityMonitorAnomalyDetectionConfigArgs{},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Databricks = Pulumi.Databricks;
    
    return await Deployment.RunAsync(() => 
    {
        var @this = new Databricks.Schema("this", new()
        {
            CatalogName = "my_catalog",
            Name = "my_schema",
        });
    
        var thisDataQualityMonitor = new Databricks.DataQualityMonitor("this", new()
        {
            ObjectType = "schema",
            ObjectId = @this.SchemaId,
            AnomalyDetectionConfig = null,
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.databricks.Schema;
    import com.pulumi.databricks.SchemaArgs;
    import com.pulumi.databricks.DataQualityMonitor;
    import com.pulumi.databricks.DataQualityMonitorArgs;
    import com.pulumi.databricks.inputs.DataQualityMonitorAnomalyDetectionConfigArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var this_ = new Schema("this", SchemaArgs.builder()
                .catalogName("my_catalog")
                .name("my_schema")
                .build());
    
            var thisDataQualityMonitor = new DataQualityMonitor("thisDataQualityMonitor", DataQualityMonitorArgs.builder()
                .objectType("schema")
                .objectId(this_.schemaId())
                .anomalyDetectionConfig(DataQualityMonitorAnomalyDetectionConfigArgs.builder()
                    .build())
                .build());
    
        }
    }
    
    resources:
      this:
        type: databricks:Schema
        properties:
          catalogName: my_catalog
          name: my_schema
      thisDataQualityMonitor:
        type: databricks:DataQualityMonitor
        name: this
        properties:
          objectType: schema
          objectId: ${this.schemaId}
          anomalyDetectionConfig: {}
    

    Create DataQualityMonitor Resource

    Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.

    Constructor syntax

    new DataQualityMonitor(name: string, args: DataQualityMonitorArgs, opts?: CustomResourceOptions);
    @overload
    def DataQualityMonitor(resource_name: str,
                           args: DataQualityMonitorArgs,
                           opts: Optional[ResourceOptions] = None)
    
    @overload
    def DataQualityMonitor(resource_name: str,
                           opts: Optional[ResourceOptions] = None,
                           object_id: Optional[str] = None,
                           object_type: Optional[str] = None,
                           anomaly_detection_config: Optional[DataQualityMonitorAnomalyDetectionConfigArgs] = None,
                           data_profiling_config: Optional[DataQualityMonitorDataProfilingConfigArgs] = None)
    func NewDataQualityMonitor(ctx *Context, name string, args DataQualityMonitorArgs, opts ...ResourceOption) (*DataQualityMonitor, error)
    public DataQualityMonitor(string name, DataQualityMonitorArgs args, CustomResourceOptions? opts = null)
    public DataQualityMonitor(String name, DataQualityMonitorArgs args)
    public DataQualityMonitor(String name, DataQualityMonitorArgs args, CustomResourceOptions options)
    
    type: databricks:DataQualityMonitor
    properties: # The arguments to resource properties.
    options: # Bag of options to control resource's behavior.
    
    

    Parameters

    name string
    The unique name of the resource.
    args DataQualityMonitorArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    resource_name str
    The unique name of the resource.
    args DataQualityMonitorArgs
    The arguments to resource properties.
    opts ResourceOptions
    Bag of options to control resource's behavior.
    ctx Context
    Context object for the current deployment.
    name string
    The unique name of the resource.
    args DataQualityMonitorArgs
    The arguments to resource properties.
    opts ResourceOption
    Bag of options to control resource's behavior.
    name string
    The unique name of the resource.
    args DataQualityMonitorArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    name String
    The unique name of the resource.
    args DataQualityMonitorArgs
    The arguments to resource properties.
    options CustomResourceOptions
    Bag of options to control resource's behavior.

    Constructor example

    The following reference example uses placeholder values for all input properties.

    var dataQualityMonitorResource = new Databricks.DataQualityMonitor("dataQualityMonitorResource", new()
    {
        ObjectId = "string",
        ObjectType = "string",
        AnomalyDetectionConfig = null,
        DataProfilingConfig = new Databricks.Inputs.DataQualityMonitorDataProfilingConfigArgs
        {
            OutputSchemaId = "string",
            NotificationSettings = new Databricks.Inputs.DataQualityMonitorDataProfilingConfigNotificationSettingsArgs
            {
                OnFailure = new Databricks.Inputs.DataQualityMonitorDataProfilingConfigNotificationSettingsOnFailureArgs
                {
                    EmailAddresses = new[]
                    {
                        "string",
                    },
                },
            },
            BaselineTableName = "string",
            DashboardId = "string",
            AssetsDir = "string",
            EffectiveWarehouseId = "string",
            InferenceLog = new Databricks.Inputs.DataQualityMonitorDataProfilingConfigInferenceLogArgs
            {
                Granularities = new[]
                {
                    "string",
                },
                ModelIdColumn = "string",
                PredictionColumn = "string",
                ProblemType = "string",
                TimestampColumn = "string",
                LabelColumn = "string",
            },
            LatestMonitorFailureMessage = "string",
            MonitorVersion = 0,
            CustomMetrics = new[]
            {
                new Databricks.Inputs.DataQualityMonitorDataProfilingConfigCustomMetricArgs
                {
                    Definition = "string",
                    InputColumns = new[]
                    {
                        "string",
                    },
                    Name = "string",
                    OutputDataType = "string",
                    Type = "string",
                },
            },
            MonitoredTableName = "string",
            DriftMetricsTableName = "string",
            ProfileMetricsTableName = "string",
            Schedule = new Databricks.Inputs.DataQualityMonitorDataProfilingConfigScheduleArgs
            {
                QuartzCronExpression = "string",
                TimezoneId = "string",
                PauseStatus = "string",
            },
            SkipBuiltinDashboard = false,
            SlicingExprs = new[]
            {
                "string",
            },
            Snapshot = null,
            Status = "string",
            TimeSeries = new Databricks.Inputs.DataQualityMonitorDataProfilingConfigTimeSeriesArgs
            {
                Granularities = new[]
                {
                    "string",
                },
                TimestampColumn = "string",
            },
            WarehouseId = "string",
        },
    });
    
    example, err := databricks.NewDataQualityMonitor(ctx, "dataQualityMonitorResource", &databricks.DataQualityMonitorArgs{
    	ObjectId:               pulumi.String("string"),
    	ObjectType:             pulumi.String("string"),
    	AnomalyDetectionConfig: &databricks.DataQualityMonitorAnomalyDetectionConfigArgs{},
    	DataProfilingConfig: &databricks.DataQualityMonitorDataProfilingConfigArgs{
    		OutputSchemaId: pulumi.String("string"),
    		NotificationSettings: &databricks.DataQualityMonitorDataProfilingConfigNotificationSettingsArgs{
    			OnFailure: &databricks.DataQualityMonitorDataProfilingConfigNotificationSettingsOnFailureArgs{
    				EmailAddresses: pulumi.StringArray{
    					pulumi.String("string"),
    				},
    			},
    		},
    		BaselineTableName:    pulumi.String("string"),
    		DashboardId:          pulumi.String("string"),
    		AssetsDir:            pulumi.String("string"),
    		EffectiveWarehouseId: pulumi.String("string"),
    		InferenceLog: &databricks.DataQualityMonitorDataProfilingConfigInferenceLogArgs{
    			Granularities: pulumi.StringArray{
    				pulumi.String("string"),
    			},
    			ModelIdColumn:    pulumi.String("string"),
    			PredictionColumn: pulumi.String("string"),
    			ProblemType:      pulumi.String("string"),
    			TimestampColumn:  pulumi.String("string"),
    			LabelColumn:      pulumi.String("string"),
    		},
    		LatestMonitorFailureMessage: pulumi.String("string"),
    		MonitorVersion:              pulumi.Int(0),
    		CustomMetrics: databricks.DataQualityMonitorDataProfilingConfigCustomMetricArray{
    			&databricks.DataQualityMonitorDataProfilingConfigCustomMetricArgs{
    				Definition: pulumi.String("string"),
    				InputColumns: pulumi.StringArray{
    					pulumi.String("string"),
    				},
    				Name:           pulumi.String("string"),
    				OutputDataType: pulumi.String("string"),
    				Type:           pulumi.String("string"),
    			},
    		},
    		MonitoredTableName:      pulumi.String("string"),
    		DriftMetricsTableName:   pulumi.String("string"),
    		ProfileMetricsTableName: pulumi.String("string"),
    		Schedule: &databricks.DataQualityMonitorDataProfilingConfigScheduleArgs{
    			QuartzCronExpression: pulumi.String("string"),
    			TimezoneId:           pulumi.String("string"),
    			PauseStatus:          pulumi.String("string"),
    		},
    		SkipBuiltinDashboard: pulumi.Bool(false),
    		SlicingExprs: pulumi.StringArray{
    			pulumi.String("string"),
    		},
    		Snapshot: &databricks.DataQualityMonitorDataProfilingConfigSnapshotArgs{},
    		Status:   pulumi.String("string"),
    		TimeSeries: &databricks.DataQualityMonitorDataProfilingConfigTimeSeriesArgs{
    			Granularities: pulumi.StringArray{
    				pulumi.String("string"),
    			},
    			TimestampColumn: pulumi.String("string"),
    		},
    		WarehouseId: pulumi.String("string"),
    	},
    })
    
    var dataQualityMonitorResource = new DataQualityMonitor("dataQualityMonitorResource", DataQualityMonitorArgs.builder()
        .objectId("string")
        .objectType("string")
        .anomalyDetectionConfig(DataQualityMonitorAnomalyDetectionConfigArgs.builder()
            .build())
        .dataProfilingConfig(DataQualityMonitorDataProfilingConfigArgs.builder()
            .outputSchemaId("string")
            .notificationSettings(DataQualityMonitorDataProfilingConfigNotificationSettingsArgs.builder()
                .onFailure(DataQualityMonitorDataProfilingConfigNotificationSettingsOnFailureArgs.builder()
                    .emailAddresses("string")
                    .build())
                .build())
            .baselineTableName("string")
            .dashboardId("string")
            .assetsDir("string")
            .effectiveWarehouseId("string")
            .inferenceLog(DataQualityMonitorDataProfilingConfigInferenceLogArgs.builder()
                .granularities("string")
                .modelIdColumn("string")
                .predictionColumn("string")
                .problemType("string")
                .timestampColumn("string")
                .labelColumn("string")
                .build())
            .latestMonitorFailureMessage("string")
            .monitorVersion(0)
            .customMetrics(DataQualityMonitorDataProfilingConfigCustomMetricArgs.builder()
                .definition("string")
                .inputColumns("string")
                .name("string")
                .outputDataType("string")
                .type("string")
                .build())
            .monitoredTableName("string")
            .driftMetricsTableName("string")
            .profileMetricsTableName("string")
            .schedule(DataQualityMonitorDataProfilingConfigScheduleArgs.builder()
                .quartzCronExpression("string")
                .timezoneId("string")
                .pauseStatus("string")
                .build())
            .skipBuiltinDashboard(false)
            .slicingExprs("string")
            .snapshot(DataQualityMonitorDataProfilingConfigSnapshotArgs.builder()
                .build())
            .status("string")
            .timeSeries(DataQualityMonitorDataProfilingConfigTimeSeriesArgs.builder()
                .granularities("string")
                .timestampColumn("string")
                .build())
            .warehouseId("string")
            .build())
        .build());
    
    data_quality_monitor_resource = databricks.DataQualityMonitor("dataQualityMonitorResource",
        object_id="string",
        object_type="string",
        anomaly_detection_config={},
        data_profiling_config={
            "output_schema_id": "string",
            "notification_settings": {
                "on_failure": {
                    "email_addresses": ["string"],
                },
            },
            "baseline_table_name": "string",
            "dashboard_id": "string",
            "assets_dir": "string",
            "effective_warehouse_id": "string",
            "inference_log": {
                "granularities": ["string"],
                "model_id_column": "string",
                "prediction_column": "string",
                "problem_type": "string",
                "timestamp_column": "string",
                "label_column": "string",
            },
            "latest_monitor_failure_message": "string",
            "monitor_version": 0,
            "custom_metrics": [{
                "definition": "string",
                "input_columns": ["string"],
                "name": "string",
                "output_data_type": "string",
                "type": "string",
            }],
            "monitored_table_name": "string",
            "drift_metrics_table_name": "string",
            "profile_metrics_table_name": "string",
            "schedule": {
                "quartz_cron_expression": "string",
                "timezone_id": "string",
                "pause_status": "string",
            },
            "skip_builtin_dashboard": False,
            "slicing_exprs": ["string"],
            "snapshot": {},
            "status": "string",
            "time_series": {
                "granularities": ["string"],
                "timestamp_column": "string",
            },
            "warehouse_id": "string",
        })
    
    const dataQualityMonitorResource = new databricks.DataQualityMonitor("dataQualityMonitorResource", {
        objectId: "string",
        objectType: "string",
        anomalyDetectionConfig: {},
        dataProfilingConfig: {
            outputSchemaId: "string",
            notificationSettings: {
                onFailure: {
                    emailAddresses: ["string"],
                },
            },
            baselineTableName: "string",
            dashboardId: "string",
            assetsDir: "string",
            effectiveWarehouseId: "string",
            inferenceLog: {
                granularities: ["string"],
                modelIdColumn: "string",
                predictionColumn: "string",
                problemType: "string",
                timestampColumn: "string",
                labelColumn: "string",
            },
            latestMonitorFailureMessage: "string",
            monitorVersion: 0,
            customMetrics: [{
                definition: "string",
                inputColumns: ["string"],
                name: "string",
                outputDataType: "string",
                type: "string",
            }],
            monitoredTableName: "string",
            driftMetricsTableName: "string",
            profileMetricsTableName: "string",
            schedule: {
                quartzCronExpression: "string",
                timezoneId: "string",
                pauseStatus: "string",
            },
            skipBuiltinDashboard: false,
            slicingExprs: ["string"],
            snapshot: {},
            status: "string",
            timeSeries: {
                granularities: ["string"],
                timestampColumn: "string",
            },
            warehouseId: "string",
        },
    });
    
    type: databricks:DataQualityMonitor
    properties:
        anomalyDetectionConfig: {}
        dataProfilingConfig:
            assetsDir: string
            baselineTableName: string
            customMetrics:
                - definition: string
                  inputColumns:
                    - string
                  name: string
                  outputDataType: string
                  type: string
            dashboardId: string
            driftMetricsTableName: string
            effectiveWarehouseId: string
            inferenceLog:
                granularities:
                    - string
                labelColumn: string
                modelIdColumn: string
                predictionColumn: string
                problemType: string
                timestampColumn: string
            latestMonitorFailureMessage: string
            monitorVersion: 0
            monitoredTableName: string
            notificationSettings:
                onFailure:
                    emailAddresses:
                        - string
            outputSchemaId: string
            profileMetricsTableName: string
            schedule:
                pauseStatus: string
                quartzCronExpression: string
                timezoneId: string
            skipBuiltinDashboard: false
            slicingExprs:
                - string
            snapshot: {}
            status: string
            timeSeries:
                granularities:
                    - string
                timestampColumn: string
            warehouseId: string
        objectId: string
        objectType: string
    

    DataQualityMonitor Resource Properties

    To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

    Inputs

    In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.

    The DataQualityMonitor resource accepts the following input properties:

    ObjectId string

    The UUID of the request object. It is schema_id for schema, and table_id for table.

    Find the schema_id from either:

    1. The [schema_id](https://docs.databricks.com/api/workspace/schemas/get#schema_id) of the Schemas resource.
    2. In Catalog Explorer > select the schema > go to the Details tab > the Schema ID field.

    Find the table_id from either:

    1. The [table_id](https://docs.databricks.com/api/workspace/tables/get#table_id) of the Tables resource.
    2. In Catalog Explorer > select the table > go to the Details tab > the Table ID field
    ObjectType string
    The type of the monitored object. Can be one of the following: schema or table
    AnomalyDetectionConfig DataQualityMonitorAnomalyDetectionConfig
    Anomaly Detection Configuration, applicable to schema object types
    DataProfilingConfig DataQualityMonitorDataProfilingConfig
    Data Profiling Configuration, applicable to table object types. Exactly one Analysis Configuration must be present
    ObjectId string

    The UUID of the request object. It is schema_id for schema, and table_id for table.

    Find the schema_id from either:

    1. The [schema_id](https://docs.databricks.com/api/workspace/schemas/get#schema_id) of the Schemas resource.
    2. In Catalog Explorer > select the schema > go to the Details tab > the Schema ID field.

    Find the table_id from either:

    1. The [table_id](https://docs.databricks.com/api/workspace/tables/get#table_id) of the Tables resource.
    2. In Catalog Explorer > select the table > go to the Details tab > the Table ID field
    ObjectType string
    The type of the monitored object. Can be one of the following: schema or table
    AnomalyDetectionConfig DataQualityMonitorAnomalyDetectionConfigArgs
    Anomaly Detection Configuration, applicable to schema object types
    DataProfilingConfig DataQualityMonitorDataProfilingConfigArgs
    Data Profiling Configuration, applicable to table object types. Exactly one Analysis Configuration must be present
    objectId String

    The UUID of the request object. It is schema_id for schema, and table_id for table.

    Find the schema_id from either:

    1. The [schema_id](https://docs.databricks.com/api/workspace/schemas/get#schema_id) of the Schemas resource.
    2. In Catalog Explorer > select the schema > go to the Details tab > the Schema ID field.

    Find the table_id from either:

    1. The [table_id](https://docs.databricks.com/api/workspace/tables/get#table_id) of the Tables resource.
    2. In Catalog Explorer > select the table > go to the Details tab > the Table ID field
    objectType String
    The type of the monitored object. Can be one of the following: schema or table
    anomalyDetectionConfig DataQualityMonitorAnomalyDetectionConfig
    Anomaly Detection Configuration, applicable to schema object types
    dataProfilingConfig DataQualityMonitorDataProfilingConfig
    Data Profiling Configuration, applicable to table object types. Exactly one Analysis Configuration must be present
    objectId string

    The UUID of the request object. It is schema_id for schema, and table_id for table.

    Find the schema_id from either:

    1. The [schema_id](https://docs.databricks.com/api/workspace/schemas/get#schema_id) of the Schemas resource.
    2. In Catalog Explorer > select the schema > go to the Details tab > the Schema ID field.

    Find the table_id from either:

    1. The [table_id](https://docs.databricks.com/api/workspace/tables/get#table_id) of the Tables resource.
    2. In Catalog Explorer > select the table > go to the Details tab > the Table ID field
    objectType string
    The type of the monitored object. Can be one of the following: schema or table
    anomalyDetectionConfig DataQualityMonitorAnomalyDetectionConfig
    Anomaly Detection Configuration, applicable to schema object types
    dataProfilingConfig DataQualityMonitorDataProfilingConfig
    Data Profiling Configuration, applicable to table object types. Exactly one Analysis Configuration must be present
    object_id str

    The UUID of the request object. It is schema_id for schema, and table_id for table.

    Find the schema_id from either:

    1. The [schema_id](https://docs.databricks.com/api/workspace/schemas/get#schema_id) of the Schemas resource.
    2. In Catalog Explorer > select the schema > go to the Details tab > the Schema ID field.

    Find the table_id from either:

    1. The [table_id](https://docs.databricks.com/api/workspace/tables/get#table_id) of the Tables resource.
    2. In Catalog Explorer > select the table > go to the Details tab > the Table ID field
    object_type str
    The type of the monitored object. Can be one of the following: schema or table
    anomaly_detection_config DataQualityMonitorAnomalyDetectionConfigArgs
    Anomaly Detection Configuration, applicable to schema object types
    data_profiling_config DataQualityMonitorDataProfilingConfigArgs
    Data Profiling Configuration, applicable to table object types. Exactly one Analysis Configuration must be present
    objectId String

    The UUID of the request object. It is schema_id for schema, and table_id for table.

    Find the schema_id from either:

    1. The [schema_id](https://docs.databricks.com/api/workspace/schemas/get#schema_id) of the Schemas resource.
    2. In Catalog Explorer > select the schema > go to the Details tab > the Schema ID field.

    Find the table_id from either:

    1. The [table_id](https://docs.databricks.com/api/workspace/tables/get#table_id) of the Tables resource.
    2. In Catalog Explorer > select the table > go to the Details tab > the Table ID field
    objectType String
    The type of the monitored object. Can be one of the following: schema or table
    anomalyDetectionConfig Property Map
    Anomaly Detection Configuration, applicable to schema object types
    dataProfilingConfig Property Map
    Data Profiling Configuration, applicable to table object types. Exactly one Analysis Configuration must be present

    Outputs

    All input properties are implicitly available as output properties. Additionally, the DataQualityMonitor resource produces the following output properties:

    Id string
    The provider-assigned unique ID for this managed resource.
    Id string
    The provider-assigned unique ID for this managed resource.
    id String
    The provider-assigned unique ID for this managed resource.
    id string
    The provider-assigned unique ID for this managed resource.
    id str
    The provider-assigned unique ID for this managed resource.
    id String
    The provider-assigned unique ID for this managed resource.

    Look up Existing DataQualityMonitor Resource

    Get an existing DataQualityMonitor resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

    public static get(name: string, id: Input<ID>, state?: DataQualityMonitorState, opts?: CustomResourceOptions): DataQualityMonitor
    @staticmethod
    def get(resource_name: str,
            id: str,
            opts: Optional[ResourceOptions] = None,
            anomaly_detection_config: Optional[DataQualityMonitorAnomalyDetectionConfigArgs] = None,
            data_profiling_config: Optional[DataQualityMonitorDataProfilingConfigArgs] = None,
            object_id: Optional[str] = None,
            object_type: Optional[str] = None) -> DataQualityMonitor
    func GetDataQualityMonitor(ctx *Context, name string, id IDInput, state *DataQualityMonitorState, opts ...ResourceOption) (*DataQualityMonitor, error)
    public static DataQualityMonitor Get(string name, Input<string> id, DataQualityMonitorState? state, CustomResourceOptions? opts = null)
    public static DataQualityMonitor get(String name, Output<String> id, DataQualityMonitorState state, CustomResourceOptions options)
    resources:
      _:
        type: databricks:DataQualityMonitor
        get:
          id: ${id}
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    resource_name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    The following state arguments are supported:
    AnomalyDetectionConfig DataQualityMonitorAnomalyDetectionConfig
    Anomaly Detection Configuration, applicable to schema object types
    DataProfilingConfig DataQualityMonitorDataProfilingConfig
    Data Profiling Configuration, applicable to table object types. Exactly one Analysis Configuration must be present
    ObjectId string

    The UUID of the request object. It is schema_id for schema, and table_id for table.

    Find the schema_id from either:

    1. The [schema_id](https://docs.databricks.com/api/workspace/schemas/get#schema_id) of the Schemas resource.
    2. In Catalog Explorer > select the schema > go to the Details tab > the Schema ID field.

    Find the table_id from either:

    1. The [table_id](https://docs.databricks.com/api/workspace/tables/get#table_id) of the Tables resource.
    2. In Catalog Explorer > select the table > go to the Details tab > the Table ID field
    ObjectType string
    The type of the monitored object. Can be one of the following: schema or table
    AnomalyDetectionConfig DataQualityMonitorAnomalyDetectionConfigArgs
    Anomaly Detection Configuration, applicable to schema object types
    DataProfilingConfig DataQualityMonitorDataProfilingConfigArgs
    Data Profiling Configuration, applicable to table object types. Exactly one Analysis Configuration must be present
    ObjectId string

    The UUID of the request object. It is schema_id for schema, and table_id for table.

    Find the schema_id from either:

    1. The [schema_id](https://docs.databricks.com/api/workspace/schemas/get#schema_id) of the Schemas resource.
    2. In Catalog Explorer > select the schema > go to the Details tab > the Schema ID field.

    Find the table_id from either:

    1. The [table_id](https://docs.databricks.com/api/workspace/tables/get#table_id) of the Tables resource.
    2. In Catalog Explorer > select the table > go to the Details tab > the Table ID field
    ObjectType string
    The type of the monitored object. Can be one of the following: schema or table
    anomalyDetectionConfig DataQualityMonitorAnomalyDetectionConfig
    Anomaly Detection Configuration, applicable to schema object types
    dataProfilingConfig DataQualityMonitorDataProfilingConfig
    Data Profiling Configuration, applicable to table object types. Exactly one Analysis Configuration must be present
    objectId String

    The UUID of the request object. It is schema_id for schema, and table_id for table.

    Find the schema_id from either:

    1. The [schema_id](https://docs.databricks.com/api/workspace/schemas/get#schema_id) of the Schemas resource.
    2. In Catalog Explorer > select the schema > go to the Details tab > the Schema ID field.

    Find the table_id from either:

    1. The [table_id](https://docs.databricks.com/api/workspace/tables/get#table_id) of the Tables resource.
    2. In Catalog Explorer > select the table > go to the Details tab > the Table ID field
    objectType String
    The type of the monitored object. Can be one of the following: schema or table
    anomalyDetectionConfig DataQualityMonitorAnomalyDetectionConfig
    Anomaly Detection Configuration, applicable to schema object types
    dataProfilingConfig DataQualityMonitorDataProfilingConfig
    Data Profiling Configuration, applicable to table object types. Exactly one Analysis Configuration must be present
    objectId string

    The UUID of the request object. It is schema_id for schema, and table_id for table.

    Find the schema_id from either:

    1. The [schema_id](https://docs.databricks.com/api/workspace/schemas/get#schema_id) of the Schemas resource.
    2. In Catalog Explorer > select the schema > go to the Details tab > the Schema ID field.

    Find the table_id from either:

    1. The [table_id](https://docs.databricks.com/api/workspace/tables/get#table_id) of the Tables resource.
    2. In Catalog Explorer > select the table > go to the Details tab > the Table ID field
    objectType string
    The type of the monitored object. Can be one of the following: schema or table
    anomaly_detection_config DataQualityMonitorAnomalyDetectionConfigArgs
    Anomaly Detection Configuration, applicable to schema object types
    data_profiling_config DataQualityMonitorDataProfilingConfigArgs
    Data Profiling Configuration, applicable to table object types. Exactly one Analysis Configuration must be present
    object_id str

    The UUID of the request object. It is schema_id for schema, and table_id for table.

    Find the schema_id from either:

    1. The [schema_id](https://docs.databricks.com/api/workspace/schemas/get#schema_id) of the Schemas resource.
    2. In Catalog Explorer > select the schema > go to the Details tab > the Schema ID field.

    Find the table_id from either:

    1. The [table_id](https://docs.databricks.com/api/workspace/tables/get#table_id) of the Tables resource.
    2. In Catalog Explorer > select the table > go to the Details tab > the Table ID field
    object_type str
    The type of the monitored object. Can be one of the following: schema or table
    anomalyDetectionConfig Property Map
    Anomaly Detection Configuration, applicable to schema object types
    dataProfilingConfig Property Map
    Data Profiling Configuration, applicable to table object types. Exactly one Analysis Configuration must be present
    objectId String

    The UUID of the request object. It is schema_id for schema, and table_id for table.

    Find the schema_id from either:

    1. The [schema_id](https://docs.databricks.com/api/workspace/schemas/get#schema_id) of the Schemas resource.
    2. In Catalog Explorer > select the schema > go to the Details tab > the Schema ID field.

    Find the table_id from either:

    1. The [table_id](https://docs.databricks.com/api/workspace/tables/get#table_id) of the Tables resource.
    2. In Catalog Explorer > select the table > go to the Details tab > the Table ID field
    objectType String
    The type of the monitored object. Can be one of the following: schema or table

    Supporting Types

    DataQualityMonitorDataProfilingConfig, DataQualityMonitorDataProfilingConfigArgs

    OutputSchemaId string
    ID of the schema where output tables are created
    AssetsDir string
    Field for specifying the absolute path to a custom directory to store data-monitoring assets. Normally prepopulated to a default user location via UI and Python APIs
    BaselineTableName string
    Baseline table name. Baseline data is used to compute drift from the data in the monitored table_name. The baseline table and the monitored table shall have the same schema
    CustomMetrics List<DataQualityMonitorDataProfilingConfigCustomMetric>
    Custom metrics
    DashboardId string
    DriftMetricsTableName string
    EffectiveWarehouseId string
    InferenceLog DataQualityMonitorDataProfilingConfigInferenceLog
    Analysis Configuration for monitoring inference log tables
    LatestMonitorFailureMessage string
    MonitorVersion int
    MonitoredTableName string
    NotificationSettings DataQualityMonitorDataProfilingConfigNotificationSettings
    Field for specifying notification settings
    ProfileMetricsTableName string
    Schedule DataQualityMonitorDataProfilingConfigSchedule
    The cron schedule
    SkipBuiltinDashboard bool
    Whether to skip creating a default dashboard summarizing data quality metrics
    SlicingExprs List<string>
    List of column expressions to slice data with for targeted analysis. The data is grouped by each expression independently, resulting in a separate slice for each predicate and its complements. For example slicing_exprs=["col_1", "col_2 > 10"] will generate the following slices: two slices for col_2 > 10 (True and False), and one slice per unique value in col_1. For high-cardinality columns, only the top 100 unique values by frequency will generate slices
    Snapshot DataQualityMonitorDataProfilingConfigSnapshot
    Analysis Configuration for monitoring snapshot tables
    Status string
    TimeSeries DataQualityMonitorDataProfilingConfigTimeSeries
    Analysis Configuration for monitoring time series tables
    WarehouseId string
    Optional argument to specify the warehouse for dashboard creation. If not specified, the first running warehouse will be used
    OutputSchemaId string
    ID of the schema where output tables are created
    AssetsDir string
    Field for specifying the absolute path to a custom directory to store data-monitoring assets. Normally prepopulated to a default user location via UI and Python APIs
    BaselineTableName string
    Baseline table name. Baseline data is used to compute drift from the data in the monitored table_name. The baseline table and the monitored table shall have the same schema
    CustomMetrics []DataQualityMonitorDataProfilingConfigCustomMetric
    Custom metrics
    DashboardId string
    DriftMetricsTableName string
    EffectiveWarehouseId string
    InferenceLog DataQualityMonitorDataProfilingConfigInferenceLog
    Analysis Configuration for monitoring inference log tables
    LatestMonitorFailureMessage string
    MonitorVersion int
    MonitoredTableName string
    NotificationSettings DataQualityMonitorDataProfilingConfigNotificationSettings
    Field for specifying notification settings
    ProfileMetricsTableName string
    Schedule DataQualityMonitorDataProfilingConfigSchedule
    The cron schedule
    SkipBuiltinDashboard bool
    Whether to skip creating a default dashboard summarizing data quality metrics
    SlicingExprs []string
    List of column expressions to slice data with for targeted analysis. The data is grouped by each expression independently, resulting in a separate slice for each predicate and its complements. For example slicing_exprs=["col_1", "col_2 > 10"] will generate the following slices: two slices for col_2 > 10 (True and False), and one slice per unique value in col_1. For high-cardinality columns, only the top 100 unique values by frequency will generate slices
    Snapshot DataQualityMonitorDataProfilingConfigSnapshot
    Analysis Configuration for monitoring snapshot tables
    Status string
    TimeSeries DataQualityMonitorDataProfilingConfigTimeSeries
    Analysis Configuration for monitoring time series tables
    WarehouseId string
    Optional argument to specify the warehouse for dashboard creation. If not specified, the first running warehouse will be used
    outputSchemaId String
    ID of the schema where output tables are created
    assetsDir String
    Field for specifying the absolute path to a custom directory to store data-monitoring assets. Normally prepopulated to a default user location via UI and Python APIs
    baselineTableName String
    Baseline table name. Baseline data is used to compute drift from the data in the monitored table_name. The baseline table and the monitored table shall have the same schema
    customMetrics List<DataQualityMonitorDataProfilingConfigCustomMetric>
    Custom metrics
    dashboardId String
    driftMetricsTableName String
    effectiveWarehouseId String
    inferenceLog DataQualityMonitorDataProfilingConfigInferenceLog
    Analysis Configuration for monitoring inference log tables
    latestMonitorFailureMessage String
    monitorVersion Integer
    monitoredTableName String
    notificationSettings DataQualityMonitorDataProfilingConfigNotificationSettings
    Field for specifying notification settings
    profileMetricsTableName String
    schedule DataQualityMonitorDataProfilingConfigSchedule
    The cron schedule
    skipBuiltinDashboard Boolean
    Whether to skip creating a default dashboard summarizing data quality metrics
    slicingExprs List<String>
    List of column expressions to slice data with for targeted analysis. The data is grouped by each expression independently, resulting in a separate slice for each predicate and its complements. For example slicing_exprs=["col_1", "col_2 > 10"] will generate the following slices: two slices for col_2 > 10 (True and False), and one slice per unique value in col_1. For high-cardinality columns, only the top 100 unique values by frequency will generate slices
    snapshot DataQualityMonitorDataProfilingConfigSnapshot
    Analysis Configuration for monitoring snapshot tables
    status String
    timeSeries DataQualityMonitorDataProfilingConfigTimeSeries
    Analysis Configuration for monitoring time series tables
    warehouseId String
    Optional argument to specify the warehouse for dashboard creation. If not specified, the first running warehouse will be used
    outputSchemaId string
    ID of the schema where output tables are created
    assetsDir string
    Field for specifying the absolute path to a custom directory to store data-monitoring assets. Normally prepopulated to a default user location via UI and Python APIs
    baselineTableName string
    Baseline table name. Baseline data is used to compute drift from the data in the monitored table_name. The baseline table and the monitored table shall have the same schema
    customMetrics DataQualityMonitorDataProfilingConfigCustomMetric[]
    Custom metrics
    dashboardId string
    driftMetricsTableName string
    effectiveWarehouseId string
    inferenceLog DataQualityMonitorDataProfilingConfigInferenceLog
    Analysis Configuration for monitoring inference log tables
    latestMonitorFailureMessage string
    monitorVersion number
    monitoredTableName string
    notificationSettings DataQualityMonitorDataProfilingConfigNotificationSettings
    Field for specifying notification settings
    profileMetricsTableName string
    schedule DataQualityMonitorDataProfilingConfigSchedule
    The cron schedule
    skipBuiltinDashboard boolean
    Whether to skip creating a default dashboard summarizing data quality metrics
    slicingExprs string[]
    List of column expressions to slice data with for targeted analysis. The data is grouped by each expression independently, resulting in a separate slice for each predicate and its complements. For example slicing_exprs=["col_1", "col_2 > 10"] will generate the following slices: two slices for col_2 > 10 (True and False), and one slice per unique value in col_1. For high-cardinality columns, only the top 100 unique values by frequency will generate slices
    snapshot DataQualityMonitorDataProfilingConfigSnapshot
    Analysis Configuration for monitoring snapshot tables
    status string
    timeSeries DataQualityMonitorDataProfilingConfigTimeSeries
    Analysis Configuration for monitoring time series tables
    warehouseId string
    Optional argument to specify the warehouse for dashboard creation. If not specified, the first running warehouse will be used
    output_schema_id str
    ID of the schema where output tables are created
    assets_dir str
    Field for specifying the absolute path to a custom directory to store data-monitoring assets. Normally prepopulated to a default user location via UI and Python APIs
    baseline_table_name str
    Baseline table name. Baseline data is used to compute drift from the data in the monitored table_name. The baseline table and the monitored table shall have the same schema
    custom_metrics Sequence[DataQualityMonitorDataProfilingConfigCustomMetric]
    Custom metrics
    dashboard_id str
    drift_metrics_table_name str
    effective_warehouse_id str
    inference_log DataQualityMonitorDataProfilingConfigInferenceLog
    Analysis Configuration for monitoring inference log tables
    latest_monitor_failure_message str
    monitor_version int
    monitored_table_name str
    notification_settings DataQualityMonitorDataProfilingConfigNotificationSettings
    Field for specifying notification settings
    profile_metrics_table_name str
    schedule DataQualityMonitorDataProfilingConfigSchedule
    The cron schedule
    skip_builtin_dashboard bool
    Whether to skip creating a default dashboard summarizing data quality metrics
    slicing_exprs Sequence[str]
    List of column expressions to slice data with for targeted analysis. The data is grouped by each expression independently, resulting in a separate slice for each predicate and its complements. For example slicing_exprs=["col_1", "col_2 > 10"] will generate the following slices: two slices for col_2 > 10 (True and False), and one slice per unique value in col_1. For high-cardinality columns, only the top 100 unique values by frequency will generate slices
    snapshot DataQualityMonitorDataProfilingConfigSnapshot
    Analysis Configuration for monitoring snapshot tables
    status str
    time_series DataQualityMonitorDataProfilingConfigTimeSeries
    Analysis Configuration for monitoring time series tables
    warehouse_id str
    Optional argument to specify the warehouse for dashboard creation. If not specified, the first running warehouse will be used
    outputSchemaId String
    ID of the schema where output tables are created
    assetsDir String
    Field for specifying the absolute path to a custom directory to store data-monitoring assets. Normally prepopulated to a default user location via UI and Python APIs
    baselineTableName String
    Baseline table name. Baseline data is used to compute drift from the data in the monitored table_name. The baseline table and the monitored table shall have the same schema
    customMetrics List<Property Map>
    Custom metrics
    dashboardId String
    driftMetricsTableName String
    effectiveWarehouseId String
    inferenceLog Property Map
    Analysis Configuration for monitoring inference log tables
    latestMonitorFailureMessage String
    monitorVersion Number
    monitoredTableName String
    notificationSettings Property Map
    Field for specifying notification settings
    profileMetricsTableName String
    schedule Property Map
    The cron schedule
    skipBuiltinDashboard Boolean
    Whether to skip creating a default dashboard summarizing data quality metrics
    slicingExprs List<String>
    List of column expressions to slice data with for targeted analysis. The data is grouped by each expression independently, resulting in a separate slice for each predicate and its complements. For example slicing_exprs=["col_1", "col_2 > 10"] will generate the following slices: two slices for col_2 > 10 (True and False), and one slice per unique value in col_1. For high-cardinality columns, only the top 100 unique values by frequency will generate slices
    snapshot Property Map
    Analysis Configuration for monitoring snapshot tables
    status String
    timeSeries Property Map
    Analysis Configuration for monitoring time series tables
    warehouseId String
    Optional argument to specify the warehouse for dashboard creation. If not specified, the first running warehouse will be used

    DataQualityMonitorDataProfilingConfigCustomMetric, DataQualityMonitorDataProfilingConfigCustomMetricArgs

    Definition string
    Jinja template for a SQL expression that specifies how to compute the metric. See create metric definition
    InputColumns List<string>
    A list of column names in the input table the metric should be computed for. Can use ":table" to indicate that the metric needs information from multiple columns
    Name string
    Name of the metric in the output tables
    OutputDataType string
    The output type of the custom metric
    Type string
    The type of the custom metric. Possible values are: DATA_PROFILING_CUSTOM_METRIC_TYPE_AGGREGATE, DATA_PROFILING_CUSTOM_METRIC_TYPE_DERIVED, DATA_PROFILING_CUSTOM_METRIC_TYPE_DRIFT
    Definition string
    Jinja template for a SQL expression that specifies how to compute the metric. See create metric definition
    InputColumns []string
    A list of column names in the input table the metric should be computed for. Can use ":table" to indicate that the metric needs information from multiple columns
    Name string
    Name of the metric in the output tables
    OutputDataType string
    The output type of the custom metric
    Type string
    The type of the custom metric. Possible values are: DATA_PROFILING_CUSTOM_METRIC_TYPE_AGGREGATE, DATA_PROFILING_CUSTOM_METRIC_TYPE_DERIVED, DATA_PROFILING_CUSTOM_METRIC_TYPE_DRIFT
    definition String
    Jinja template for a SQL expression that specifies how to compute the metric. See create metric definition
    inputColumns List<String>
    A list of column names in the input table the metric should be computed for. Can use ":table" to indicate that the metric needs information from multiple columns
    name String
    Name of the metric in the output tables
    outputDataType String
    The output type of the custom metric
    type String
    The type of the custom metric. Possible values are: DATA_PROFILING_CUSTOM_METRIC_TYPE_AGGREGATE, DATA_PROFILING_CUSTOM_METRIC_TYPE_DERIVED, DATA_PROFILING_CUSTOM_METRIC_TYPE_DRIFT
    definition string
    Jinja template for a SQL expression that specifies how to compute the metric. See create metric definition
    inputColumns string[]
    A list of column names in the input table the metric should be computed for. Can use ":table" to indicate that the metric needs information from multiple columns
    name string
    Name of the metric in the output tables
    outputDataType string
    The output type of the custom metric
    type string
    The type of the custom metric. Possible values are: DATA_PROFILING_CUSTOM_METRIC_TYPE_AGGREGATE, DATA_PROFILING_CUSTOM_METRIC_TYPE_DERIVED, DATA_PROFILING_CUSTOM_METRIC_TYPE_DRIFT
    definition str
    Jinja template for a SQL expression that specifies how to compute the metric. See create metric definition
    input_columns Sequence[str]
    A list of column names in the input table the metric should be computed for. Can use ":table" to indicate that the metric needs information from multiple columns
    name str
    Name of the metric in the output tables
    output_data_type str
    The output type of the custom metric
    type str
    The type of the custom metric. Possible values are: DATA_PROFILING_CUSTOM_METRIC_TYPE_AGGREGATE, DATA_PROFILING_CUSTOM_METRIC_TYPE_DERIVED, DATA_PROFILING_CUSTOM_METRIC_TYPE_DRIFT
    definition String
    Jinja template for a SQL expression that specifies how to compute the metric. See create metric definition
    inputColumns List<String>
    A list of column names in the input table the metric should be computed for. Can use ":table" to indicate that the metric needs information from multiple columns
    name String
    Name of the metric in the output tables
    outputDataType String
    The output type of the custom metric
    type String
    The type of the custom metric. Possible values are: DATA_PROFILING_CUSTOM_METRIC_TYPE_AGGREGATE, DATA_PROFILING_CUSTOM_METRIC_TYPE_DERIVED, DATA_PROFILING_CUSTOM_METRIC_TYPE_DRIFT

    DataQualityMonitorDataProfilingConfigInferenceLog, DataQualityMonitorDataProfilingConfigInferenceLogArgs

    Granularities List<string>
    ModelIdColumn string
    Column for the model identifier
    PredictionColumn string
    Column for the prediction
    ProblemType string
    Problem type the model aims to solve. Possible values are: INFERENCE_PROBLEM_TYPE_CLASSIFICATION, INFERENCE_PROBLEM_TYPE_REGRESSION
    TimestampColumn string
    LabelColumn string
    Column for the label
    Granularities []string
    ModelIdColumn string
    Column for the model identifier
    PredictionColumn string
    Column for the prediction
    ProblemType string
    Problem type the model aims to solve. Possible values are: INFERENCE_PROBLEM_TYPE_CLASSIFICATION, INFERENCE_PROBLEM_TYPE_REGRESSION
    TimestampColumn string
    LabelColumn string
    Column for the label
    granularities List<String>
    modelIdColumn String
    Column for the model identifier
    predictionColumn String
    Column for the prediction
    problemType String
    Problem type the model aims to solve. Possible values are: INFERENCE_PROBLEM_TYPE_CLASSIFICATION, INFERENCE_PROBLEM_TYPE_REGRESSION
    timestampColumn String
    labelColumn String
    Column for the label
    granularities string[]
    modelIdColumn string
    Column for the model identifier
    predictionColumn string
    Column for the prediction
    problemType string
    Problem type the model aims to solve. Possible values are: INFERENCE_PROBLEM_TYPE_CLASSIFICATION, INFERENCE_PROBLEM_TYPE_REGRESSION
    timestampColumn string
    labelColumn string
    Column for the label
    granularities Sequence[str]
    model_id_column str
    Column for the model identifier
    prediction_column str
    Column for the prediction
    problem_type str
    Problem type the model aims to solve. Possible values are: INFERENCE_PROBLEM_TYPE_CLASSIFICATION, INFERENCE_PROBLEM_TYPE_REGRESSION
    timestamp_column str
    label_column str
    Column for the label
    granularities List<String>
    modelIdColumn String
    Column for the model identifier
    predictionColumn String
    Column for the prediction
    problemType String
    Problem type the model aims to solve. Possible values are: INFERENCE_PROBLEM_TYPE_CLASSIFICATION, INFERENCE_PROBLEM_TYPE_REGRESSION
    timestampColumn String
    labelColumn String
    Column for the label

    DataQualityMonitorDataProfilingConfigNotificationSettings, DataQualityMonitorDataProfilingConfigNotificationSettingsArgs

    OnFailure DataQualityMonitorDataProfilingConfigNotificationSettingsOnFailure
    Destinations to send notifications on failure/timeout
    OnFailure DataQualityMonitorDataProfilingConfigNotificationSettingsOnFailure
    Destinations to send notifications on failure/timeout
    onFailure DataQualityMonitorDataProfilingConfigNotificationSettingsOnFailure
    Destinations to send notifications on failure/timeout
    onFailure DataQualityMonitorDataProfilingConfigNotificationSettingsOnFailure
    Destinations to send notifications on failure/timeout
    on_failure DataQualityMonitorDataProfilingConfigNotificationSettingsOnFailure
    Destinations to send notifications on failure/timeout
    onFailure Property Map
    Destinations to send notifications on failure/timeout

    DataQualityMonitorDataProfilingConfigNotificationSettingsOnFailure, DataQualityMonitorDataProfilingConfigNotificationSettingsOnFailureArgs

    EmailAddresses List<string>
    The list of email addresses to send the notification to. A maximum of 5 email addresses is supported
    EmailAddresses []string
    The list of email addresses to send the notification to. A maximum of 5 email addresses is supported
    emailAddresses List<String>
    The list of email addresses to send the notification to. A maximum of 5 email addresses is supported
    emailAddresses string[]
    The list of email addresses to send the notification to. A maximum of 5 email addresses is supported
    email_addresses Sequence[str]
    The list of email addresses to send the notification to. A maximum of 5 email addresses is supported
    emailAddresses List<String>
    The list of email addresses to send the notification to. A maximum of 5 email addresses is supported

    DataQualityMonitorDataProfilingConfigSchedule, DataQualityMonitorDataProfilingConfigScheduleArgs

    QuartzCronExpression string
    The expression that determines when to run the monitor. See examples
    TimezoneId string
    A Java timezone id. The schedule for a job will be resolved with respect to this timezone. See [Java TimeZone](https://docs.oracle.com/javase/7/docs/api/java/util/TimeZone.html) for details. The timezone id (e.g., America/Los_Angeles) in which to evaluate the quartz expression
    PauseStatus string
    QuartzCronExpression string
    The expression that determines when to run the monitor. See examples
    TimezoneId string
    A Java timezone id. The schedule for a job will be resolved with respect to this timezone. See [Java TimeZone](https://docs.oracle.com/javase/7/docs/api/java/util/TimeZone.html) for details. The timezone id (e.g., America/Los_Angeles) in which to evaluate the quartz expression
    PauseStatus string
    quartzCronExpression String
    The expression that determines when to run the monitor. See examples
    timezoneId String
    A Java timezone id. The schedule for a job will be resolved with respect to this timezone. See [Java TimeZone](https://docs.oracle.com/javase/7/docs/api/java/util/TimeZone.html) for details. The timezone id (e.g., America/Los_Angeles) in which to evaluate the quartz expression
    pauseStatus String
    quartzCronExpression string
    The expression that determines when to run the monitor. See examples
    timezoneId string
    A Java timezone id. The schedule for a job will be resolved with respect to this timezone. See [Java TimeZone](https://docs.oracle.com/javase/7/docs/api/java/util/TimeZone.html) for details. The timezone id (e.g., America/Los_Angeles) in which to evaluate the quartz expression
    pauseStatus string
    quartz_cron_expression str
    The expression that determines when to run the monitor. See examples
    timezone_id str
    A Java timezone id. The schedule for a job will be resolved with respect to this timezone. See [Java TimeZone](https://docs.oracle.com/javase/7/docs/api/java/util/TimeZone.html) for details. The timezone id (e.g., America/Los_Angeles) in which to evaluate the quartz expression
    pause_status str
    quartzCronExpression String
    The expression that determines when to run the monitor. See examples
    timezoneId String
    A Java timezone id. The schedule for a job will be resolved with respect to this timezone. See [Java TimeZone](https://docs.oracle.com/javase/7/docs/api/java/util/TimeZone.html) for details. The timezone id (e.g., America/Los_Angeles) in which to evaluate the quartz expression
    pauseStatus String

    DataQualityMonitorDataProfilingConfigTimeSeries, DataQualityMonitorDataProfilingConfigTimeSeriesArgs

    Granularities List<string>
    TimestampColumn string
    granularities List<String>
    timestampColumn String
    granularities List<String>
    timestampColumn String

    Import

    As of Pulumi v1.5, resources can be imported through configuration.

    hcl

    import {

    id = "object_type,object_id"

    to = databricks_data_quality_monitor.this

    }

    If you are using an older version of Pulumi, import the resource using the pulumi import command as follows:

    $ pulumi import databricks:index/dataQualityMonitor:DataQualityMonitor this "object_type,object_id"
    

    To learn more about importing existing cloud resources, see Importing resources.

    Package Details

    Repository
    databricks pulumi/pulumi-databricks
    License
    Apache-2.0
    Notes
    This Pulumi package is based on the databricks Terraform Provider.
    databricks logo
    Databricks v1.78.0 published on Friday, Nov 7, 2025 by Pulumi
      Meet Neo: Your AI Platform Teammate