1. Packages
  2. Google Cloud (GCP) Classic
  3. API Docs
  4. dataplex
  5. Datascan
Google Cloud Classic v6.67.0 published on Wednesday, Sep 27, 2023 by Pulumi

gcp.dataplex.Datascan

Explore with Pulumi AI

gcp logo
Google Cloud Classic v6.67.0 published on Wednesday, Sep 27, 2023 by Pulumi

    Represents a user-visible job which provides the insights for the related data source.

    To get more information about Datascan, see the Dataplex `dataScans` REST API reference and the Google Cloud Dataplex product documentation.

    Example Usage

    Dataplex Datascan Basic Profile

    // Minimal "basic profile" Datascan over a public BigQuery table.
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Gcp = Pulumi.Gcp;
    
    return await Deployment.RunAsync(() => 
    {
        var basicProfile = new Gcp.DataPlex.Datascan("basicProfile", new()
        {
            Data = new Gcp.DataPlex.Inputs.DatascanDataArgs
            {
                // Fully-qualified resource name of the BigQuery table to scan.
                Resource = "//bigquery.googleapis.com/projects/bigquery-public-data/datasets/samples/tables/shakespeare",
            },
            // Null nested specs request provider defaults — presumably default
            // profiling settings and an on-demand (manually triggered) run.
            // NOTE(review): confirm against the Dataplex DataScan API reference.
            DataProfileSpec = null,
            DataScanId = "dataprofile-basic",
            ExecutionSpec = new Gcp.DataPlex.Inputs.DatascanExecutionSpecArgs
            {
                Trigger = new Gcp.DataPlex.Inputs.DatascanExecutionSpecTriggerArgs
                {
                    OnDemand = null,
                },
            },
            Location = "us-central1",
            Project = "my-project-name",
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-gcp/sdk/v6/go/gcp/dataplex"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    // Minimal "basic profile" Datascan over a public BigQuery table.
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := dataplex.NewDatascan(ctx, "basicProfile", &dataplex.DatascanArgs{
    			Data: &dataplex.DatascanDataArgs{
    				// Fully-qualified resource name of the BigQuery table to scan.
    				Resource: pulumi.String("//bigquery.googleapis.com/projects/bigquery-public-data/datasets/samples/tables/shakespeare"),
    			},
    			// nil nested specs request provider defaults — presumably default
    			// profiling settings and an on-demand run; confirm in the API docs.
    			DataProfileSpec: nil,
    			DataScanId:      pulumi.String("dataprofile-basic"),
    			ExecutionSpec: &dataplex.DatascanExecutionSpecArgs{
    				Trigger: &dataplex.DatascanExecutionSpecTriggerArgs{
    					OnDemand: nil,
    				},
    			},
    			Location: pulumi.String("us-central1"),
    			Project:  pulumi.String("my-project-name"),
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.gcp.dataplex.Datascan;
    import com.pulumi.gcp.dataplex.DatascanArgs;
    import com.pulumi.gcp.dataplex.inputs.DatascanDataArgs;
    import com.pulumi.gcp.dataplex.inputs.DatascanDataProfileSpecArgs;
    import com.pulumi.gcp.dataplex.inputs.DatascanExecutionSpecArgs;
    import com.pulumi.gcp.dataplex.inputs.DatascanExecutionSpecTriggerArgs;
    import com.pulumi.gcp.dataplex.inputs.DatascanExecutionSpecTriggerOnDemandArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        // Minimal "basic profile" Datascan over a public BigQuery table.
        public static void stack(Context ctx) {
            // Fix: the builder setters have no zero-argument overloads, so the
            // previously generated `.dataProfileSpec()` / `.onDemand()` calls did
            // not compile; empty args objects are passed explicitly instead
            // (equivalent to the `{}` / null specs in the other languages).
            var basicProfile = new Datascan("basicProfile", DatascanArgs.builder()
                .data(DatascanDataArgs.builder()
                    .resource("//bigquery.googleapis.com/projects/bigquery-public-data/datasets/samples/tables/shakespeare")
                    .build())
                .dataProfileSpec(DatascanDataProfileSpecArgs.builder().build())
                .dataScanId("dataprofile-basic")
                .executionSpec(DatascanExecutionSpecArgs.builder()
                    .trigger(DatascanExecutionSpecTriggerArgs.builder()
                        .onDemand(DatascanExecutionSpecTriggerOnDemandArgs.builder().build())
                        .build())
                    .build())
                .location("us-central1")
                .project("my-project-name")
                .build());
    
        }
    }
    
    import pulumi
    import pulumi_gcp as gcp
    
    # Minimal "basic profile" Datascan over a public BigQuery table.
    # Empty args objects request provider defaults — presumably default profiling
    # settings and an on-demand run; confirm against the Dataplex API reference.
    basic_profile = gcp.dataplex.Datascan("basicProfile",
        data=gcp.dataplex.DatascanDataArgs(
            # Fully-qualified resource name of the BigQuery table to scan.
            resource="//bigquery.googleapis.com/projects/bigquery-public-data/datasets/samples/tables/shakespeare",
        ),
        data_profile_spec=gcp.dataplex.DatascanDataProfileSpecArgs(),
        data_scan_id="dataprofile-basic",
        execution_spec=gcp.dataplex.DatascanExecutionSpecArgs(
            trigger=gcp.dataplex.DatascanExecutionSpecTriggerArgs(
                on_demand=gcp.dataplex.DatascanExecutionSpecTriggerOnDemandArgs(),
            ),
        ),
        location="us-central1",
        project="my-project-name")
    
    import * as pulumi from "@pulumi/pulumi";
    import * as gcp from "@pulumi/gcp";
    
    // Minimal "basic profile" Datascan over a public BigQuery table.
    // Empty object literals request provider defaults — presumably default
    // profiling settings and an on-demand run; confirm in the API reference.
    const basicProfile = new gcp.dataplex.Datascan("basicProfile", {
        data: {
            // Fully-qualified resource name of the BigQuery table to scan.
            resource: "//bigquery.googleapis.com/projects/bigquery-public-data/datasets/samples/tables/shakespeare",
        },
        dataProfileSpec: {},
        dataScanId: "dataprofile-basic",
        executionSpec: {
            trigger: {
                onDemand: {},
            },
        },
        location: "us-central1",
        project: "my-project-name",
    });
    
    # Minimal "basic profile" Datascan over a public BigQuery table.
    # Empty mappings ({}) request provider defaults — presumably default
    # profiling settings and an on-demand run; confirm in the API reference.
    resources:
      basicProfile:
        type: gcp:dataplex:Datascan
        properties:
          data:
            # Fully-qualified resource name of the BigQuery table to scan.
            resource: //bigquery.googleapis.com/projects/bigquery-public-data/datasets/samples/tables/shakespeare
          dataProfileSpec: {}
          dataScanId: dataprofile-basic
          executionSpec:
            trigger:
              onDemand: {}
          location: us-central1
          project: my-project-name
    

    Dataplex Datascan Full Profile

    // Full-featured data-profile Datascan: scheduled trigger, field include/exclude
    // lists, sampling, row filter, and BigQuery export of results.
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Gcp = Pulumi.Gcp;
    
    return await Deployment.RunAsync(() => 
    {
        // Destination dataset for the profile-export table referenced below.
        var source = new Gcp.BigQuery.Dataset("source", new()
        {
            DatasetId = "dataplex_dataset",
            FriendlyName = "test",
            Description = "This is a test description",
            Location = "US",
            DeleteContentsOnDestroy = true,
        });
    
        var fullProfile = new Gcp.DataPlex.Datascan("fullProfile", new()
        {
            Location = "us-central1",
            DisplayName = "Full Datascan Profile",
            DataScanId = "dataprofile-full",
            Description = "Example resource - Full Datascan Profile",
            Labels = 
            {
                { "author", "billing" },
            },
            Data = new Gcp.DataPlex.Inputs.DatascanDataArgs
            {
                Resource = "//bigquery.googleapis.com/projects/bigquery-public-data/datasets/samples/tables/shakespeare",
            },
            ExecutionSpec = new Gcp.DataPlex.Inputs.DatascanExecutionSpecArgs
            {
                Trigger = new Gcp.DataPlex.Inputs.DatascanExecutionSpecTriggerArgs
                {
                    // Cron schedule with explicit time zone prefix.
                    Schedule = new Gcp.DataPlex.Inputs.DatascanExecutionSpecTriggerScheduleArgs
                    {
                        Cron = "TZ=America/New_York 1 1 * * *",
                    },
                },
            },
            DataProfileSpec = new Gcp.DataPlex.Inputs.DatascanDataProfileSpecArgs
            {
                SamplingPercent = 80,
                RowFilter = "word_count > 10",
                IncludeFields = new Gcp.DataPlex.Inputs.DatascanDataProfileSpecIncludeFieldsArgs
                {
                    FieldNames = new[]
                    {
                        "word_count",
                    },
                },
                ExcludeFields = new Gcp.DataPlex.Inputs.DatascanDataProfileSpecExcludeFieldsArgs
                {
                    FieldNames = new[]
                    {
                        "property_type",
                    },
                },
                PostScanActions = new Gcp.DataPlex.Inputs.DatascanDataProfileSpecPostScanActionsArgs
                {
                    // Exports results into a table in the dataset created above.
                    BigqueryExport = new Gcp.DataPlex.Inputs.DatascanDataProfileSpecPostScanActionsBigqueryExportArgs
                    {
                        ResultsTable = "//bigquery.googleapis.com/projects/my-project-name/datasets/dataplex_dataset/tables/profile_export",
                    },
                },
            },
            Project = "my-project-name",
        }, new CustomResourceOptions
        {
            // The export dataset must exist before the scan is created.
            DependsOn = new[]
            {
                source,
            },
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-gcp/sdk/v6/go/gcp/bigquery"
    	"github.com/pulumi/pulumi-gcp/sdk/v6/go/gcp/dataplex"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    // Full-featured data-profile Datascan: scheduled trigger, field include/exclude
    // lists, sampling, row filter, and BigQuery export of results.
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		// Destination dataset for the profile-export table referenced below.
    		source, err := bigquery.NewDataset(ctx, "source", &bigquery.DatasetArgs{
    			DatasetId:               pulumi.String("dataplex_dataset"),
    			FriendlyName:            pulumi.String("test"),
    			Description:             pulumi.String("This is a test description"),
    			Location:                pulumi.String("US"),
    			DeleteContentsOnDestroy: pulumi.Bool(true),
    		})
    		if err != nil {
    			return err
    		}
    		_, err = dataplex.NewDatascan(ctx, "fullProfile", &dataplex.DatascanArgs{
    			Location:    pulumi.String("us-central1"),
    			DisplayName: pulumi.String("Full Datascan Profile"),
    			DataScanId:  pulumi.String("dataprofile-full"),
    			Description: pulumi.String("Example resource - Full Datascan Profile"),
    			Labels: pulumi.StringMap{
    				"author": pulumi.String("billing"),
    			},
    			Data: &dataplex.DatascanDataArgs{
    				Resource: pulumi.String("//bigquery.googleapis.com/projects/bigquery-public-data/datasets/samples/tables/shakespeare"),
    			},
    			ExecutionSpec: &dataplex.DatascanExecutionSpecArgs{
    				Trigger: &dataplex.DatascanExecutionSpecTriggerArgs{
    					// Cron schedule with explicit time zone prefix.
    					Schedule: &dataplex.DatascanExecutionSpecTriggerScheduleArgs{
    						Cron: pulumi.String("TZ=America/New_York 1 1 * * *"),
    					},
    				},
    			},
    			DataProfileSpec: &dataplex.DatascanDataProfileSpecArgs{
    				SamplingPercent: pulumi.Float64(80),
    				RowFilter:       pulumi.String("word_count > 10"),
    				IncludeFields: &dataplex.DatascanDataProfileSpecIncludeFieldsArgs{
    					FieldNames: pulumi.StringArray{
    						pulumi.String("word_count"),
    					},
    				},
    				ExcludeFields: &dataplex.DatascanDataProfileSpecExcludeFieldsArgs{
    					FieldNames: pulumi.StringArray{
    						pulumi.String("property_type"),
    					},
    				},
    				PostScanActions: &dataplex.DatascanDataProfileSpecPostScanActionsArgs{
    					// Exports results into a table in the dataset created above.
    					BigqueryExport: &dataplex.DatascanDataProfileSpecPostScanActionsBigqueryExportArgs{
    						ResultsTable: pulumi.String("//bigquery.googleapis.com/projects/my-project-name/datasets/dataplex_dataset/tables/profile_export"),
    					},
    				},
    			},
    			Project: pulumi.String("my-project-name"),
    		}, pulumi.DependsOn([]pulumi.Resource{
    			// The export dataset must exist before the scan is created.
    			source,
    		}))
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.gcp.bigquery.Dataset;
    import com.pulumi.gcp.bigquery.DatasetArgs;
    import com.pulumi.gcp.dataplex.Datascan;
    import com.pulumi.gcp.dataplex.DatascanArgs;
    import com.pulumi.gcp.dataplex.inputs.DatascanDataArgs;
    import com.pulumi.gcp.dataplex.inputs.DatascanExecutionSpecArgs;
    import com.pulumi.gcp.dataplex.inputs.DatascanExecutionSpecTriggerArgs;
    import com.pulumi.gcp.dataplex.inputs.DatascanExecutionSpecTriggerScheduleArgs;
    import com.pulumi.gcp.dataplex.inputs.DatascanDataProfileSpecArgs;
    import com.pulumi.gcp.dataplex.inputs.DatascanDataProfileSpecIncludeFieldsArgs;
    import com.pulumi.gcp.dataplex.inputs.DatascanDataProfileSpecExcludeFieldsArgs;
    import com.pulumi.gcp.dataplex.inputs.DatascanDataProfileSpecPostScanActionsArgs;
    import com.pulumi.gcp.dataplex.inputs.DatascanDataProfileSpecPostScanActionsBigqueryExportArgs;
    import com.pulumi.resources.CustomResourceOptions;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        // Full-featured data-profile Datascan: scheduled trigger, field
        // include/exclude lists, sampling, row filter, and BigQuery export.
        public static void stack(Context ctx) {
            // Destination dataset for the profile-export table referenced below.
            var source = new Dataset("source", DatasetArgs.builder()        
                .datasetId("dataplex_dataset")
                .friendlyName("test")
                .description("This is a test description")
                .location("US")
                .deleteContentsOnDestroy(true)
                .build());
    
            var fullProfile = new Datascan("fullProfile", DatascanArgs.builder()        
                .location("us-central1")
                .displayName("Full Datascan Profile")
                .dataScanId("dataprofile-full")
                .description("Example resource - Full Datascan Profile")
                .labels(Map.of("author", "billing"))
                .data(DatascanDataArgs.builder()
                    .resource("//bigquery.googleapis.com/projects/bigquery-public-data/datasets/samples/tables/shakespeare")
                    .build())
                .executionSpec(DatascanExecutionSpecArgs.builder()
                    .trigger(DatascanExecutionSpecTriggerArgs.builder()
                        // Cron schedule with explicit time zone prefix.
                        .schedule(DatascanExecutionSpecTriggerScheduleArgs.builder()
                            .cron("TZ=America/New_York 1 1 * * *")
                            .build())
                        .build())
                    .build())
                .dataProfileSpec(DatascanDataProfileSpecArgs.builder()
                    .samplingPercent(80)
                    .rowFilter("word_count > 10")
                    .includeFields(DatascanDataProfileSpecIncludeFieldsArgs.builder()
                        .fieldNames("word_count")
                        .build())
                    .excludeFields(DatascanDataProfileSpecExcludeFieldsArgs.builder()
                        .fieldNames("property_type")
                        .build())
                    .postScanActions(DatascanDataProfileSpecPostScanActionsArgs.builder()
                        // Exports results into a table in the dataset created above.
                        .bigqueryExport(DatascanDataProfileSpecPostScanActionsBigqueryExportArgs.builder()
                            .resultsTable("//bigquery.googleapis.com/projects/my-project-name/datasets/dataplex_dataset/tables/profile_export")
                            .build())
                        .build())
                    .build())
                .project("my-project-name")
                .build(), CustomResourceOptions.builder()
                    // The export dataset must exist before the scan is created.
                    .dependsOn(source)
                    .build());
    
        }
    }
    
    import pulumi
    import pulumi_gcp as gcp
    
    # Full-featured data-profile Datascan: scheduled trigger, field
    # include/exclude lists, sampling, row filter, and BigQuery export.
    # Destination dataset for the profile-export table referenced below.
    source = gcp.bigquery.Dataset("source",
        dataset_id="dataplex_dataset",
        friendly_name="test",
        description="This is a test description",
        location="US",
        delete_contents_on_destroy=True)
    full_profile = gcp.dataplex.Datascan("fullProfile",
        location="us-central1",
        display_name="Full Datascan Profile",
        data_scan_id="dataprofile-full",
        description="Example resource - Full Datascan Profile",
        labels={
            "author": "billing",
        },
        data=gcp.dataplex.DatascanDataArgs(
            resource="//bigquery.googleapis.com/projects/bigquery-public-data/datasets/samples/tables/shakespeare",
        ),
        execution_spec=gcp.dataplex.DatascanExecutionSpecArgs(
            trigger=gcp.dataplex.DatascanExecutionSpecTriggerArgs(
                # Cron schedule with explicit time zone prefix.
                schedule=gcp.dataplex.DatascanExecutionSpecTriggerScheduleArgs(
                    cron="TZ=America/New_York 1 1 * * *",
                ),
            ),
        ),
        data_profile_spec=gcp.dataplex.DatascanDataProfileSpecArgs(
            sampling_percent=80,
            row_filter="word_count > 10",
            include_fields=gcp.dataplex.DatascanDataProfileSpecIncludeFieldsArgs(
                field_names=["word_count"],
            ),
            exclude_fields=gcp.dataplex.DatascanDataProfileSpecExcludeFieldsArgs(
                field_names=["property_type"],
            ),
            post_scan_actions=gcp.dataplex.DatascanDataProfileSpecPostScanActionsArgs(
                # Exports results into a table in the dataset created above.
                bigquery_export=gcp.dataplex.DatascanDataProfileSpecPostScanActionsBigqueryExportArgs(
                    results_table="//bigquery.googleapis.com/projects/my-project-name/datasets/dataplex_dataset/tables/profile_export",
                ),
            ),
        ),
        project="my-project-name",
        # The export dataset must exist before the scan is created.
        opts=pulumi.ResourceOptions(depends_on=[source]))
    
    import * as pulumi from "@pulumi/pulumi";
    import * as gcp from "@pulumi/gcp";
    
    // Full-featured data-profile Datascan: scheduled trigger, field
    // include/exclude lists, sampling, row filter, and BigQuery export.
    // Destination dataset for the profile-export table referenced below.
    const source = new gcp.bigquery.Dataset("source", {
        datasetId: "dataplex_dataset",
        friendlyName: "test",
        description: "This is a test description",
        location: "US",
        deleteContentsOnDestroy: true,
    });
    const fullProfile = new gcp.dataplex.Datascan("fullProfile", {
        location: "us-central1",
        displayName: "Full Datascan Profile",
        dataScanId: "dataprofile-full",
        description: "Example resource - Full Datascan Profile",
        labels: {
            author: "billing",
        },
        data: {
            resource: "//bigquery.googleapis.com/projects/bigquery-public-data/datasets/samples/tables/shakespeare",
        },
        executionSpec: {
            trigger: {
                // Cron schedule with explicit time zone prefix.
                schedule: {
                    cron: "TZ=America/New_York 1 1 * * *",
                },
            },
        },
        dataProfileSpec: {
            samplingPercent: 80,
            rowFilter: "word_count > 10",
            includeFields: {
                fieldNames: ["word_count"],
            },
            excludeFields: {
                fieldNames: ["property_type"],
            },
            postScanActions: {
                // Exports results into a table in the dataset created above.
                bigqueryExport: {
                    resultsTable: "//bigquery.googleapis.com/projects/my-project-name/datasets/dataplex_dataset/tables/profile_export",
                },
            },
        },
        project: "my-project-name",
    }, {
        // The export dataset must exist before the scan is created.
        dependsOn: [source],
    });
    
    # Full-featured data-profile Datascan: scheduled trigger, field
    # include/exclude lists, sampling, row filter, and BigQuery export.
    resources:
      fullProfile:
        type: gcp:dataplex:Datascan
        properties:
          location: us-central1
          displayName: Full Datascan Profile
          dataScanId: dataprofile-full
          description: Example resource - Full Datascan Profile
          labels:
            author: billing
          data:
            resource: //bigquery.googleapis.com/projects/bigquery-public-data/datasets/samples/tables/shakespeare
          executionSpec:
            trigger:
              schedule:
                cron: TZ=America/New_York 1 1 * * *
          dataProfileSpec:
            samplingPercent: 80
            rowFilter: word_count > 10
            includeFields:
              fieldNames:
                - word_count
            excludeFields:
              fieldNames:
                - property_type
            postScanActions:
              bigqueryExport:
                resultsTable: //bigquery.googleapis.com/projects/my-project-name/datasets/dataplex_dataset/tables/profile_export
          project: my-project-name
        options:
          # Fix: the Pulumi YAML resource-option key is camelCase `dependsOn`;
          # the previous `dependson` is not a recognized option key.
          dependsOn:
            - ${source}
      # Destination dataset for the profile-export table referenced above.
      source:
        type: gcp:bigquery:Dataset
        properties:
          datasetId: dataplex_dataset
          friendlyName: test
          description: This is a test description
          location: US
          deleteContentsOnDestroy: true
    

    Dataplex Datascan Basic Quality

    // Minimal data-quality Datascan with a single table-level rule.
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Gcp = Pulumi.Gcp;
    
    return await Deployment.RunAsync(() => 
    {
        var basicQuality = new Gcp.DataPlex.Datascan("basicQuality", new()
        {
            Data = new Gcp.DataPlex.Inputs.DatascanDataArgs
            {
                // Fully-qualified resource name of the BigQuery table to scan.
                Resource = "//bigquery.googleapis.com/projects/bigquery-public-data/datasets/samples/tables/shakespeare",
            },
            DataQualitySpec = new Gcp.DataPlex.Inputs.DatascanDataQualitySpecArgs
            {
                Rules = new[]
                {
                    // One rule: the table must be non-empty.
                    new Gcp.DataPlex.Inputs.DatascanDataQualitySpecRuleArgs
                    {
                        Description = "rule 1 for validity dimension",
                        Dimension = "VALIDITY",
                        Name = "rule1",
                        TableConditionExpectation = new Gcp.DataPlex.Inputs.DatascanDataQualitySpecRuleTableConditionExpectationArgs
                        {
                            SqlExpression = "COUNT(*) > 0",
                        },
                    },
                },
            },
            DataScanId = "dataquality-basic",
            ExecutionSpec = new Gcp.DataPlex.Inputs.DatascanExecutionSpecArgs
            {
                Trigger = new Gcp.DataPlex.Inputs.DatascanExecutionSpecTriggerArgs
                {
                    OnDemand = null,
                },
            },
            Location = "us-central1",
            Project = "my-project-name",
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-gcp/sdk/v6/go/gcp/dataplex"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    // Minimal data-quality Datascan with a single table-level rule.
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := dataplex.NewDatascan(ctx, "basicQuality", &dataplex.DatascanArgs{
    			Data: &dataplex.DatascanDataArgs{
    				// Fully-qualified resource name of the BigQuery table to scan.
    				Resource: pulumi.String("//bigquery.googleapis.com/projects/bigquery-public-data/datasets/samples/tables/shakespeare"),
    			},
    			DataQualitySpec: &dataplex.DatascanDataQualitySpecArgs{
    				Rules: dataplex.DatascanDataQualitySpecRuleArray{
    					// One rule: the table must be non-empty.
    					&dataplex.DatascanDataQualitySpecRuleArgs{
    						Description: pulumi.String("rule 1 for validity dimension"),
    						Dimension:   pulumi.String("VALIDITY"),
    						Name:        pulumi.String("rule1"),
    						TableConditionExpectation: &dataplex.DatascanDataQualitySpecRuleTableConditionExpectationArgs{
    							SqlExpression: pulumi.String("COUNT(*) > 0"),
    						},
    					},
    				},
    			},
    			DataScanId: pulumi.String("dataquality-basic"),
    			ExecutionSpec: &dataplex.DatascanExecutionSpecArgs{
    				Trigger: &dataplex.DatascanExecutionSpecTriggerArgs{
    					OnDemand: nil,
    				},
    			},
    			Location: pulumi.String("us-central1"),
    			Project:  pulumi.String("my-project-name"),
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.gcp.dataplex.Datascan;
    import com.pulumi.gcp.dataplex.DatascanArgs;
    import com.pulumi.gcp.dataplex.inputs.DatascanDataArgs;
    import com.pulumi.gcp.dataplex.inputs.DatascanDataQualitySpecArgs;
    // Fix: these two rule types were used below but never imported.
    import com.pulumi.gcp.dataplex.inputs.DatascanDataQualitySpecRuleArgs;
    import com.pulumi.gcp.dataplex.inputs.DatascanDataQualitySpecRuleTableConditionExpectationArgs;
    import com.pulumi.gcp.dataplex.inputs.DatascanExecutionSpecArgs;
    import com.pulumi.gcp.dataplex.inputs.DatascanExecutionSpecTriggerArgs;
    import com.pulumi.gcp.dataplex.inputs.DatascanExecutionSpecTriggerOnDemandArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        // Minimal data-quality Datascan with a single table-level rule.
        public static void stack(Context ctx) {
            var basicQuality = new Datascan("basicQuality", DatascanArgs.builder()
                .data(DatascanDataArgs.builder()
                    .resource("//bigquery.googleapis.com/projects/bigquery-public-data/datasets/samples/tables/shakespeare")
                    .build())
                .dataQualitySpec(DatascanDataQualitySpecArgs.builder()
                    // One rule: the table must be non-empty.
                    .rules(DatascanDataQualitySpecRuleArgs.builder()
                        .description("rule 1 for validity dimension")
                        .dimension("VALIDITY")
                        .name("rule1")
                        .tableConditionExpectation(DatascanDataQualitySpecRuleTableConditionExpectationArgs.builder()
                            .sqlExpression("COUNT(*) > 0")
                            .build())
                        .build())
                    .build())
                .dataScanId("dataquality-basic")
                .executionSpec(DatascanExecutionSpecArgs.builder()
                    .trigger(DatascanExecutionSpecTriggerArgs.builder()
                        // Fix: `.onDemand()` has no zero-argument overload and did
                        // not compile; pass an explicitly built empty args object.
                        .onDemand(DatascanExecutionSpecTriggerOnDemandArgs.builder().build())
                        .build())
                    .build())
                .location("us-central1")
                .project("my-project-name")
                .build());
    
        }
    }
    
    import pulumi
    import pulumi_gcp as gcp
    
    # Minimal data-quality Datascan with a single table-level rule.
    basic_quality = gcp.dataplex.Datascan("basicQuality",
        data=gcp.dataplex.DatascanDataArgs(
            # Fully-qualified resource name of the BigQuery table to scan.
            resource="//bigquery.googleapis.com/projects/bigquery-public-data/datasets/samples/tables/shakespeare",
        ),
        data_quality_spec=gcp.dataplex.DatascanDataQualitySpecArgs(
            # One rule: the table must be non-empty.
            rules=[gcp.dataplex.DatascanDataQualitySpecRuleArgs(
                description="rule 1 for validity dimension",
                dimension="VALIDITY",
                name="rule1",
                table_condition_expectation=gcp.dataplex.DatascanDataQualitySpecRuleTableConditionExpectationArgs(
                    sql_expression="COUNT(*) > 0",
                ),
            )],
        ),
        data_scan_id="dataquality-basic",
        execution_spec=gcp.dataplex.DatascanExecutionSpecArgs(
            trigger=gcp.dataplex.DatascanExecutionSpecTriggerArgs(
                on_demand=gcp.dataplex.DatascanExecutionSpecTriggerOnDemandArgs(),
            ),
        ),
        location="us-central1",
        project="my-project-name")
    
    import * as pulumi from "@pulumi/pulumi";
    import * as gcp from "@pulumi/gcp";
    
    // Minimal data-quality Datascan with a single table-level rule.
    const basicQuality = new gcp.dataplex.Datascan("basicQuality", {
        data: {
            // Fully-qualified resource name of the BigQuery table to scan.
            resource: "//bigquery.googleapis.com/projects/bigquery-public-data/datasets/samples/tables/shakespeare",
        },
        dataQualitySpec: {
            // One rule: the table must be non-empty.
            rules: [{
                description: "rule 1 for validity dimension",
                dimension: "VALIDITY",
                name: "rule1",
                tableConditionExpectation: {
                    sqlExpression: "COUNT(*) > 0",
                },
            }],
        },
        dataScanId: "dataquality-basic",
        executionSpec: {
            trigger: {
                onDemand: {},
            },
        },
        location: "us-central1",
        project: "my-project-name",
    });
    
    # Minimal data-quality Datascan with a single table-level rule.
    resources:
      basicQuality:
        type: gcp:dataplex:Datascan
        properties:
          data:
            # Fully-qualified resource name of the BigQuery table to scan.
            resource: //bigquery.googleapis.com/projects/bigquery-public-data/datasets/samples/tables/shakespeare
          dataQualitySpec:
            rules:
              # One rule: the table must be non-empty.
              - description: rule 1 for validity dimension
                dimension: VALIDITY
                name: rule1
                tableConditionExpectation:
                  sqlExpression: COUNT(*) > 0
          dataScanId: dataquality-basic
          executionSpec:
            trigger:
              onDemand: {}
          location: us-central1
          project: my-project-name
    

    Dataplex Datascan Full Quality

    // Full-featured data-quality Datascan demonstrating every rule expectation
    // type, per-rule thresholds, sampling, a row filter, and a scheduled trigger.
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Gcp = Pulumi.Gcp;
    
    return await Deployment.RunAsync(() => 
    {
        var fullQuality = new Gcp.DataPlex.Datascan("fullQuality", new()
        {
            Data = new Gcp.DataPlex.Inputs.DatascanDataArgs
            {
                // Fully-qualified resource name of the BigQuery table to scan.
                Resource = "//bigquery.googleapis.com/projects/bigquery-public-data/datasets/austin_bikeshare/tables/bikeshare_stations",
            },
            DataQualitySpec = new Gcp.DataPlex.Inputs.DatascanDataQualitySpecArgs
            {
                // Only rows matching this filter are evaluated.
                RowFilter = "station_id > 1000",
                Rules = new[]
                {
                    // Non-null check (expectation null = present with defaults).
                    new Gcp.DataPlex.Inputs.DatascanDataQualitySpecRuleArgs
                    {
                        Column = "address",
                        Dimension = "VALIDITY",
                        NonNullExpectation = null,
                        Threshold = 0.99,
                    },
                    // Numeric range check with strict/non-strict bounds.
                    new Gcp.DataPlex.Inputs.DatascanDataQualitySpecRuleArgs
                    {
                        Column = "council_district",
                        Dimension = "VALIDITY",
                        IgnoreNull = true,
                        RangeExpectation = new Gcp.DataPlex.Inputs.DatascanDataQualitySpecRuleRangeExpectationArgs
                        {
                            MaxValue = "10",
                            MinValue = "1",
                            StrictMaxEnabled = false,
                            StrictMinEnabled = true,
                        },
                        Threshold = 0.9,
                    },
                    // Regex match check.
                    new Gcp.DataPlex.Inputs.DatascanDataQualitySpecRuleArgs
                    {
                        Column = "power_type",
                        Dimension = "VALIDITY",
                        IgnoreNull = false,
                        RegexExpectation = new Gcp.DataPlex.Inputs.DatascanDataQualitySpecRuleRegexExpectationArgs
                        {
                            Regex = ".*solar.*",
                        },
                    },
                    // Membership-in-set check.
                    new Gcp.DataPlex.Inputs.DatascanDataQualitySpecRuleArgs
                    {
                        Column = "property_type",
                        Dimension = "VALIDITY",
                        IgnoreNull = false,
                        SetExpectation = new Gcp.DataPlex.Inputs.DatascanDataQualitySpecRuleSetExpectationArgs
                        {
                            Values = new[]
                            {
                                "sidewalk",
                                "parkland",
                            },
                        },
                    },
                    // Uniqueness check.
                    new Gcp.DataPlex.Inputs.DatascanDataQualitySpecRuleArgs
                    {
                        Column = "address",
                        Dimension = "UNIQUENESS",
                        UniquenessExpectation = null,
                    },
                    // Aggregate-statistic range check (mean between 5 and 15).
                    new Gcp.DataPlex.Inputs.DatascanDataQualitySpecRuleArgs
                    {
                        Column = "number_of_docks",
                        Dimension = "VALIDITY",
                        StatisticRangeExpectation = new Gcp.DataPlex.Inputs.DatascanDataQualitySpecRuleStatisticRangeExpectationArgs
                        {
                            MaxValue = "15",
                            MinValue = "5",
                            Statistic = "MEAN",
                            StrictMaxEnabled = true,
                            StrictMinEnabled = true,
                        },
                    },
                    // Per-row SQL condition check.
                    new Gcp.DataPlex.Inputs.DatascanDataQualitySpecRuleArgs
                    {
                        Column = "footprint_length",
                        Dimension = "VALIDITY",
                        RowConditionExpectation = new Gcp.DataPlex.Inputs.DatascanDataQualitySpecRuleRowConditionExpectationArgs
                        {
                            SqlExpression = "footprint_length > 0 AND footprint_length <= 10",
                        },
                    },
                    // Table-level SQL condition check (no column).
                    new Gcp.DataPlex.Inputs.DatascanDataQualitySpecRuleArgs
                    {
                        Dimension = "VALIDITY",
                        TableConditionExpectation = new Gcp.DataPlex.Inputs.DatascanDataQualitySpecRuleTableConditionExpectationArgs
                        {
                            SqlExpression = "COUNT(*) > 0",
                        },
                    },
                },
                SamplingPercent = 5,
            },
            DataScanId = "dataquality-full",
            Description = "Example resource - Full Datascan Quality",
            DisplayName = "Full Datascan Quality",
            ExecutionSpec = new Gcp.DataPlex.Inputs.DatascanExecutionSpecArgs
            {
                // Incremental field — NOTE(review): presumably the timestamp
                // column used for incremental scans; confirm in the API docs.
                Field = "modified_date",
                Trigger = new Gcp.DataPlex.Inputs.DatascanExecutionSpecTriggerArgs
                {
                    // Cron schedule with explicit time zone prefix.
                    Schedule = new Gcp.DataPlex.Inputs.DatascanExecutionSpecTriggerScheduleArgs
                    {
                        Cron = "TZ=America/New_York 1 1 * * *",
                    },
                },
            },
            Labels = 
            {
                { "author", "billing" },
            },
            Location = "us-central1",
            Project = "my-project-name",
        });
    
    });
    
    // Example: a full Dataplex data-quality scan ("fullQuality") over a public
    // BigQuery table, with eight quality rules and a nightly cron-triggered run.
    package main
    
    import (
    	"github.com/pulumi/pulumi-gcp/sdk/v6/go/gcp/dataplex"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := dataplex.NewDatascan(ctx, "fullQuality", &dataplex.DatascanArgs{
    			// Data source: the BigQuery table to scan, by full resource name.
    			Data: &dataplex.DatascanDataArgs{
    				Resource: pulumi.String("//bigquery.googleapis.com/projects/bigquery-public-data/datasets/austin_bikeshare/tables/bikeshare_stations"),
    			},
    			DataQualitySpec: &dataplex.DatascanDataQualitySpecArgs{
    				// Only rows matching this filter are scanned.
    				RowFilter: pulumi.String("station_id > 1000"),
    				Rules: dataplex.DatascanDataQualitySpecRuleArray{
    					// "address" must be non-null, with a 0.99 rule threshold.
    					&dataplex.DatascanDataQualitySpecRuleArgs{
    						Column:             pulumi.String("address"),
    						Dimension:          pulumi.String("VALIDITY"),
    						NonNullExpectation: nil,
    						Threshold:          pulumi.Float64(0.99),
    					},
    					// "council_district" must lie in the 1..10 range
    					// (strict lower bound), with a 0.9 rule threshold.
    					&dataplex.DatascanDataQualitySpecRuleArgs{
    						Column:     pulumi.String("council_district"),
    						Dimension:  pulumi.String("VALIDITY"),
    						IgnoreNull: pulumi.Bool(true),
    						RangeExpectation: &dataplex.DatascanDataQualitySpecRuleRangeExpectationArgs{
    							MaxValue:         pulumi.String("10"),
    							MinValue:         pulumi.String("1"),
    							StrictMaxEnabled: pulumi.Bool(false),
    							StrictMinEnabled: pulumi.Bool(true),
    						},
    						Threshold: pulumi.Float64(0.9),
    					},
    					// "power_type" must match the regular expression.
    					&dataplex.DatascanDataQualitySpecRuleArgs{
    						Column:     pulumi.String("power_type"),
    						Dimension:  pulumi.String("VALIDITY"),
    						IgnoreNull: pulumi.Bool(false),
    						RegexExpectation: &dataplex.DatascanDataQualitySpecRuleRegexExpectationArgs{
    							Regex: pulumi.String(".*solar.*"),
    						},
    					},
    					// "property_type" must be one of a fixed set of values.
    					&dataplex.DatascanDataQualitySpecRuleArgs{
    						Column:     pulumi.String("property_type"),
    						Dimension:  pulumi.String("VALIDITY"),
    						IgnoreNull: pulumi.Bool(false),
    						SetExpectation: &dataplex.DatascanDataQualitySpecRuleSetExpectationArgs{
    							Values: pulumi.StringArray{
    								pulumi.String("sidewalk"),
    								pulumi.String("parkland"),
    							},
    						},
    					},
    					// "address" values must be unique across rows.
    					&dataplex.DatascanDataQualitySpecRuleArgs{
    						Column:                pulumi.String("address"),
    						Dimension:             pulumi.String("UNIQUENESS"),
    						UniquenessExpectation: nil,
    					},
    					// The MEAN of "number_of_docks" must lie strictly between 5 and 15.
    					&dataplex.DatascanDataQualitySpecRuleArgs{
    						Column:    pulumi.String("number_of_docks"),
    						Dimension: pulumi.String("VALIDITY"),
    						StatisticRangeExpectation: &dataplex.DatascanDataQualitySpecRuleStatisticRangeExpectationArgs{
    							MaxValue:         pulumi.String("15"),
    							MinValue:         pulumi.String("5"),
    							Statistic:        pulumi.String("MEAN"),
    							StrictMaxEnabled: pulumi.Bool(true),
    							StrictMinEnabled: pulumi.Bool(true),
    						},
    					},
    					// Per-row SQL condition on "footprint_length".
    					&dataplex.DatascanDataQualitySpecRuleArgs{
    						Column:    pulumi.String("footprint_length"),
    						Dimension: pulumi.String("VALIDITY"),
    						RowConditionExpectation: &dataplex.DatascanDataQualitySpecRuleRowConditionExpectationArgs{
    							SqlExpression: pulumi.String("footprint_length > 0 AND footprint_length <= 10"),
    						},
    					},
    					// Table-level SQL condition: the table must not be empty.
    					&dataplex.DatascanDataQualitySpecRuleArgs{
    						Dimension: pulumi.String("VALIDITY"),
    						TableConditionExpectation: &dataplex.DatascanDataQualitySpecRuleTableConditionExpectationArgs{
    							SqlExpression: pulumi.String("COUNT(*) > 0"),
    						},
    					},
    				},
    				// Scan a 5% sample of the filtered rows.
    				SamplingPercent: pulumi.Float64(5),
    			},
    			DataScanId:  pulumi.String("dataquality-full"),
    			Description: pulumi.String("Example resource - Full Datascan Quality"),
    			DisplayName: pulumi.String("Full Datascan Quality"),
    			// Scheduled run: cron at 01:01 America/New_York, with "modified_date"
    			// as the execution field.
    			ExecutionSpec: &dataplex.DatascanExecutionSpecArgs{
    				Field: pulumi.String("modified_date"),
    				Trigger: &dataplex.DatascanExecutionSpecTriggerArgs{
    					Schedule: &dataplex.DatascanExecutionSpecTriggerScheduleArgs{
    						Cron: pulumi.String("TZ=America/New_York 1 1 * * *"),
    					},
    				},
    			},
    			Labels: pulumi.StringMap{
    				"author": pulumi.String("billing"),
    			},
    			Location: pulumi.String("us-central1"),
    			Project:  pulumi.String("my-project-name"),
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.gcp.dataplex.Datascan;
    import com.pulumi.gcp.dataplex.DatascanArgs;
    import com.pulumi.gcp.dataplex.inputs.DatascanDataArgs;
    import com.pulumi.gcp.dataplex.inputs.DatascanDataQualitySpecArgs;
    import com.pulumi.gcp.dataplex.inputs.DatascanDataQualitySpecRuleArgs;
    import com.pulumi.gcp.dataplex.inputs.DatascanDataQualitySpecRuleNonNullExpectationArgs;
    import com.pulumi.gcp.dataplex.inputs.DatascanDataQualitySpecRuleRangeExpectationArgs;
    import com.pulumi.gcp.dataplex.inputs.DatascanDataQualitySpecRuleRegexExpectationArgs;
    import com.pulumi.gcp.dataplex.inputs.DatascanDataQualitySpecRuleRowConditionExpectationArgs;
    import com.pulumi.gcp.dataplex.inputs.DatascanDataQualitySpecRuleSetExpectationArgs;
    import com.pulumi.gcp.dataplex.inputs.DatascanDataQualitySpecRuleStatisticRangeExpectationArgs;
    import com.pulumi.gcp.dataplex.inputs.DatascanDataQualitySpecRuleTableConditionExpectationArgs;
    import com.pulumi.gcp.dataplex.inputs.DatascanDataQualitySpecRuleUniquenessExpectationArgs;
    import com.pulumi.gcp.dataplex.inputs.DatascanExecutionSpecArgs;
    import com.pulumi.gcp.dataplex.inputs.DatascanExecutionSpecTriggerArgs;
    import com.pulumi.gcp.dataplex.inputs.DatascanExecutionSpecTriggerScheduleArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    /**
     * Example: a full Dataplex data-quality scan ("fullQuality") over a public
     * BigQuery table, with eight quality rules and a nightly cron-triggered run.
     */
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var fullQuality = new Datascan("fullQuality", DatascanArgs.builder()
                // Data source: the BigQuery table to scan, by full resource name.
                .data(DatascanDataArgs.builder()
                    .resource("//bigquery.googleapis.com/projects/bigquery-public-data/datasets/austin_bikeshare/tables/bikeshare_stations")
                    .build())
                .dataQualitySpec(DatascanDataQualitySpecArgs.builder()
                    // Only rows matching this filter are scanned.
                    .rowFilter("station_id > 1000")
                    .rules(
                        // "address" must be non-null, with a 0.99 rule threshold.
                        DatascanDataQualitySpecRuleArgs.builder()
                            .column("address")
                            .dimension("VALIDITY")
                            // The expectation setter requires an Args value; a bare
                            // .nonNullExpectation() call does not compile.
                            .nonNullExpectation(DatascanDataQualitySpecRuleNonNullExpectationArgs.builder().build())
                            .threshold(0.99)
                            .build(),
                        // "council_district" must lie in the 1..10 range (strict min).
                        DatascanDataQualitySpecRuleArgs.builder()
                            .column("council_district")
                            .dimension("VALIDITY")
                            .ignoreNull(true)
                            .rangeExpectation(DatascanDataQualitySpecRuleRangeExpectationArgs.builder()
                                // minValue/maxValue are string fields in the DataScan API.
                                .maxValue("10")
                                .minValue("1")
                                .strictMaxEnabled(false)
                                .strictMinEnabled(true)
                                .build())
                            .threshold(0.9)
                            .build(),
                        // "power_type" must match the regular expression.
                        DatascanDataQualitySpecRuleArgs.builder()
                            .column("power_type")
                            .dimension("VALIDITY")
                            .ignoreNull(false)
                            .regexExpectation(DatascanDataQualitySpecRuleRegexExpectationArgs.builder()
                                .regex(".*solar.*")
                                .build())
                            .build(),
                        // "property_type" must be one of a fixed set of values.
                        DatascanDataQualitySpecRuleArgs.builder()
                            .column("property_type")
                            .dimension("VALIDITY")
                            .ignoreNull(false)
                            .setExpectation(DatascanDataQualitySpecRuleSetExpectationArgs.builder()
                                .values(
                                    "sidewalk",
                                    "parkland")
                                .build())
                            .build(),
                        // "address" values must be unique across rows.
                        DatascanDataQualitySpecRuleArgs.builder()
                            .column("address")
                            .dimension("UNIQUENESS")
                            .uniquenessExpectation(DatascanDataQualitySpecRuleUniquenessExpectationArgs.builder().build())
                            .build(),
                        // The MEAN of "number_of_docks" must lie strictly between 5 and 15.
                        DatascanDataQualitySpecRuleArgs.builder()
                            .column("number_of_docks")
                            .dimension("VALIDITY")
                            .statisticRangeExpectation(DatascanDataQualitySpecRuleStatisticRangeExpectationArgs.builder()
                                .maxValue("15")
                                .minValue("5")
                                .statistic("MEAN")
                                .strictMaxEnabled(true)
                                .strictMinEnabled(true)
                                .build())
                            .build(),
                        // Per-row SQL condition on "footprint_length".
                        DatascanDataQualitySpecRuleArgs.builder()
                            .column("footprint_length")
                            .dimension("VALIDITY")
                            .rowConditionExpectation(DatascanDataQualitySpecRuleRowConditionExpectationArgs.builder()
                                .sqlExpression("footprint_length > 0 AND footprint_length <= 10")
                                .build())
                            .build(),
                        // Table-level SQL condition: the table must not be empty.
                        DatascanDataQualitySpecRuleArgs.builder()
                            .dimension("VALIDITY")
                            .tableConditionExpectation(DatascanDataQualitySpecRuleTableConditionExpectationArgs.builder()
                                .sqlExpression("COUNT(*) > 0")
                                .build())
                            .build())
                    // Scan a 5% sample of the filtered rows.
                    .samplingPercent(5)
                    .build())
                .dataScanId("dataquality-full")
                .description("Example resource - Full Datascan Quality")
                .displayName("Full Datascan Quality")
                // Scheduled run: cron at 01:01 America/New_York, with
                // "modified_date" as the execution field.
                .executionSpec(DatascanExecutionSpecArgs.builder()
                    .field("modified_date")
                    .trigger(DatascanExecutionSpecTriggerArgs.builder()
                        .schedule(DatascanExecutionSpecTriggerScheduleArgs.builder()
                            .cron("TZ=America/New_York 1 1 * * *")
                            .build())
                        .build())
                    .build())
                .labels(Map.of("author", "billing"))
                .location("us-central1")
                .project("my-project-name")
                .build());
    
        }
    }
    
    import pulumi
    import pulumi_gcp as gcp
    
    # The eight data-quality rules for the scan, collected up front so the
    # resource declaration below stays compact.
    quality_rules = [
        # "address" must be non-null, with a 0.99 rule threshold.
        gcp.dataplex.DatascanDataQualitySpecRuleArgs(
            column="address",
            dimension="VALIDITY",
            non_null_expectation=gcp.dataplex.DatascanDataQualitySpecRuleNonNullExpectationArgs(),
            threshold=0.99,
        ),
        # "council_district" must lie in the 1..10 range (strict min).
        gcp.dataplex.DatascanDataQualitySpecRuleArgs(
            column="council_district",
            dimension="VALIDITY",
            ignore_null=True,
            range_expectation=gcp.dataplex.DatascanDataQualitySpecRuleRangeExpectationArgs(
                max_value="10",
                min_value="1",
                strict_max_enabled=False,
                strict_min_enabled=True,
            ),
            threshold=0.9,
        ),
        # "power_type" must match the regular expression.
        gcp.dataplex.DatascanDataQualitySpecRuleArgs(
            column="power_type",
            dimension="VALIDITY",
            ignore_null=False,
            regex_expectation=gcp.dataplex.DatascanDataQualitySpecRuleRegexExpectationArgs(
                regex=".*solar.*",
            ),
        ),
        # "property_type" must be one of a fixed set of values.
        gcp.dataplex.DatascanDataQualitySpecRuleArgs(
            column="property_type",
            dimension="VALIDITY",
            ignore_null=False,
            set_expectation=gcp.dataplex.DatascanDataQualitySpecRuleSetExpectationArgs(
                values=["sidewalk", "parkland"],
            ),
        ),
        # "address" values must be unique across rows.
        gcp.dataplex.DatascanDataQualitySpecRuleArgs(
            column="address",
            dimension="UNIQUENESS",
            uniqueness_expectation=gcp.dataplex.DatascanDataQualitySpecRuleUniquenessExpectationArgs(),
        ),
        # The MEAN of "number_of_docks" must lie strictly between 5 and 15.
        gcp.dataplex.DatascanDataQualitySpecRuleArgs(
            column="number_of_docks",
            dimension="VALIDITY",
            statistic_range_expectation=gcp.dataplex.DatascanDataQualitySpecRuleStatisticRangeExpectationArgs(
                max_value="15",
                min_value="5",
                statistic="MEAN",
                strict_max_enabled=True,
                strict_min_enabled=True,
            ),
        ),
        # Per-row SQL condition on "footprint_length".
        gcp.dataplex.DatascanDataQualitySpecRuleArgs(
            column="footprint_length",
            dimension="VALIDITY",
            row_condition_expectation=gcp.dataplex.DatascanDataQualitySpecRuleRowConditionExpectationArgs(
                sql_expression="footprint_length > 0 AND footprint_length <= 10",
            ),
        ),
        # Table-level SQL condition: the table must not be empty.
        gcp.dataplex.DatascanDataQualitySpecRuleArgs(
            dimension="VALIDITY",
            table_condition_expectation=gcp.dataplex.DatascanDataQualitySpecRuleTableConditionExpectationArgs(
                sql_expression="COUNT(*) > 0",
            ),
        ),
    ]
    
    # A full data-quality scan over a public BigQuery table, running nightly
    # (cron at 01:01 America/New_York) on a 5% sample of the filtered rows.
    full_quality = gcp.dataplex.Datascan(
        "fullQuality",
        data=gcp.dataplex.DatascanDataArgs(
            resource="//bigquery.googleapis.com/projects/bigquery-public-data/datasets/austin_bikeshare/tables/bikeshare_stations",
        ),
        data_quality_spec=gcp.dataplex.DatascanDataQualitySpecArgs(
            row_filter="station_id > 1000",
            rules=quality_rules,
            sampling_percent=5,
        ),
        data_scan_id="dataquality-full",
        description="Example resource - Full Datascan Quality",
        display_name="Full Datascan Quality",
        execution_spec=gcp.dataplex.DatascanExecutionSpecArgs(
            field="modified_date",
            trigger=gcp.dataplex.DatascanExecutionSpecTriggerArgs(
                schedule=gcp.dataplex.DatascanExecutionSpecTriggerScheduleArgs(
                    cron="TZ=America/New_York 1 1 * * *",
                ),
            ),
        ),
        labels={"author": "billing"},
        location="us-central1",
        project="my-project-name",
    )
    
    import * as pulumi from "@pulumi/pulumi";
    import * as gcp from "@pulumi/gcp";
    
    // The eight data-quality rules for the scan, declared up front so the
    // resource declaration below stays compact.
    const qualityRules = [
        // "address" must be non-null, with a 0.99 rule threshold.
        {
            column: "address",
            dimension: "VALIDITY",
            nonNullExpectation: {},
            threshold: 0.99,
        },
        // "council_district" must lie in the 1..10 range (strict min).
        {
            column: "council_district",
            dimension: "VALIDITY",
            ignoreNull: true,
            rangeExpectation: {
                maxValue: "10",
                minValue: "1",
                strictMaxEnabled: false,
                strictMinEnabled: true,
            },
            threshold: 0.9,
        },
        // "power_type" must match the regular expression.
        {
            column: "power_type",
            dimension: "VALIDITY",
            ignoreNull: false,
            regexExpectation: {
                regex: ".*solar.*",
            },
        },
        // "property_type" must be one of a fixed set of values.
        {
            column: "property_type",
            dimension: "VALIDITY",
            ignoreNull: false,
            setExpectation: {
                values: ["sidewalk", "parkland"],
            },
        },
        // "address" values must be unique across rows.
        {
            column: "address",
            dimension: "UNIQUENESS",
            uniquenessExpectation: {},
        },
        // The MEAN of "number_of_docks" must lie strictly between 5 and 15.
        {
            column: "number_of_docks",
            dimension: "VALIDITY",
            statisticRangeExpectation: {
                maxValue: "15",
                minValue: "5",
                statistic: "MEAN",
                strictMaxEnabled: true,
                strictMinEnabled: true,
            },
        },
        // Per-row SQL condition on "footprint_length".
        {
            column: "footprint_length",
            dimension: "VALIDITY",
            rowConditionExpectation: {
                sqlExpression: "footprint_length > 0 AND footprint_length <= 10",
            },
        },
        // Table-level SQL condition: the table must not be empty.
        {
            dimension: "VALIDITY",
            tableConditionExpectation: {
                sqlExpression: "COUNT(*) > 0",
            },
        },
    ];
    
    // A full data-quality scan over a public BigQuery table, running nightly
    // (cron at 01:01 America/New_York) on a 5% sample of the filtered rows.
    const fullQuality = new gcp.dataplex.Datascan("fullQuality", {
        data: {
            resource: "//bigquery.googleapis.com/projects/bigquery-public-data/datasets/austin_bikeshare/tables/bikeshare_stations",
        },
        dataQualitySpec: {
            rowFilter: "station_id > 1000",
            rules: qualityRules,
            samplingPercent: 5,
        },
        dataScanId: "dataquality-full",
        description: "Example resource - Full Datascan Quality",
        displayName: "Full Datascan Quality",
        executionSpec: {
            field: "modified_date",
            trigger: {
                schedule: {
                    cron: "TZ=America/New_York 1 1 * * *",
                },
            },
        },
        labels: {
            author: "billing",
        },
        location: "us-central1",
        project: "my-project-name",
    });
    
    resources:
      fullQuality:
        type: gcp:dataplex:Datascan
        properties:
          # Data source: the BigQuery table to scan, by full resource name.
          data:
            resource: //bigquery.googleapis.com/projects/bigquery-public-data/datasets/austin_bikeshare/tables/bikeshare_stations
          dataQualitySpec:
            # Only rows matching this filter are scanned.
            rowFilter: station_id > 1000
            rules:
              # "address" must be non-null, with a 0.99 rule threshold.
              - column: address
                dimension: VALIDITY
                nonNullExpectation: {}
                threshold: 0.99
              # "council_district" must lie in the 1..10 range (strict min).
              - column: council_district
                dimension: VALIDITY
                ignoreNull: true
                rangeExpectation:
                  maxValue: 10
                  minValue: 1
                  strictMaxEnabled: false
                  strictMinEnabled: true
                threshold: 0.9
              # "power_type" must match the regular expression.
              - column: power_type
                dimension: VALIDITY
                ignoreNull: false
                regexExpectation:
                  regex: .*solar.*
              # "property_type" must be one of a fixed set of values.
              - column: property_type
                dimension: VALIDITY
                ignoreNull: false
                setExpectation:
                  values:
                    - sidewalk
                    - parkland
              # "address" values must be unique across rows.
              - column: address
                dimension: UNIQUENESS
                uniquenessExpectation: {}
              # The MEAN of "number_of_docks" must lie strictly between 5 and 15.
              - column: number_of_docks
                dimension: VALIDITY
                statisticRangeExpectation:
                  maxValue: 15
                  minValue: 5
                  statistic: MEAN
                  strictMaxEnabled: true
                  strictMinEnabled: true
              # Per-row SQL condition on "footprint_length".
              - column: footprint_length
                dimension: VALIDITY
                rowConditionExpectation:
                  sqlExpression: footprint_length > 0 AND footprint_length <= 10
              # Table-level SQL condition: the table must not be empty.
              - dimension: VALIDITY
                tableConditionExpectation:
                  sqlExpression: COUNT(*) > 0
            # Scan a 5% sample of the filtered rows.
            samplingPercent: 5
          dataScanId: dataquality-full
          description: Example resource - Full Datascan Quality
          displayName: Full Datascan Quality
          # Scheduled run: cron at 01:01 America/New_York, with "modified_date"
          # as the execution field.
          executionSpec:
            field: modified_date
            trigger:
              schedule:
                cron: TZ=America/New_York 1 1 * * *
          labels:
            author: billing
          location: us-central1
          project: my-project-name
    

    Create Datascan Resource

    new Datascan(name: string, args: DatascanArgs, opts?: CustomResourceOptions);
    @overload
    def Datascan(resource_name: str,
                 opts: Optional[ResourceOptions] = None,
                 data: Optional[DatascanDataArgs] = None,
                 data_profile_spec: Optional[DatascanDataProfileSpecArgs] = None,
                 data_quality_spec: Optional[DatascanDataQualitySpecArgs] = None,
                 data_scan_id: Optional[str] = None,
                 description: Optional[str] = None,
                 display_name: Optional[str] = None,
                 execution_spec: Optional[DatascanExecutionSpecArgs] = None,
                 labels: Optional[Mapping[str, str]] = None,
                 location: Optional[str] = None,
                 project: Optional[str] = None)
    @overload
    def Datascan(resource_name: str,
                 args: DatascanArgs,
                 opts: Optional[ResourceOptions] = None)
    func NewDatascan(ctx *Context, name string, args DatascanArgs, opts ...ResourceOption) (*Datascan, error)
    public Datascan(string name, DatascanArgs args, CustomResourceOptions? opts = null)
    public Datascan(String name, DatascanArgs args)
    public Datascan(String name, DatascanArgs args, CustomResourceOptions options)
    
    type: gcp:dataplex:Datascan
    properties: # The arguments to resource properties.
    options: # Bag of options to control resource's behavior.
    
    
    name string
    The unique name of the resource.
    args DatascanArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    resource_name str
    The unique name of the resource.
    args DatascanArgs
    The arguments to resource properties.
    opts ResourceOptions
    Bag of options to control resource's behavior.
    ctx Context
    Context object for the current deployment.
    name string
    The unique name of the resource.
    args DatascanArgs
    The arguments to resource properties.
    opts ResourceOption
    Bag of options to control resource's behavior.
    name string
    The unique name of the resource.
    args DatascanArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    name String
    The unique name of the resource.
    args DatascanArgs
    The arguments to resource properties.
    options CustomResourceOptions
    Bag of options to control resource's behavior.

    Datascan Resource Properties

    To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

    Inputs

    The Datascan resource accepts the following input properties:

    Data DatascanData

    The data source for DataScan. Structure is documented below.

    DataScanId string

    DataScan identifier. Must contain only lowercase letters, numbers and hyphens. Must start with a letter. Must end with a number or a letter.

    ExecutionSpec DatascanExecutionSpec

    DataScan execution settings. Structure is documented below.

    Location string

    The location where the data scan should reside.

    DataProfileSpec DatascanDataProfileSpec

    DataProfileScan related setting. Structure is documented below.

    DataQualitySpec DatascanDataQualitySpec

    DataQualityScan related setting. Structure is documented below.

    Description string

    Description of the scan.

    (Optional) Description of the rule. The maximum length is 1,024 characters.

    DisplayName string

    User friendly display name.

    Labels Dictionary<string, string>

    User-defined labels for the scan. A map of key/value pairs.

    Project string

    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.

    Data DatascanDataArgs

    The data source for DataScan. Structure is documented below.

    DataScanId string

    DataScan identifier. Must contain only lowercase letters, numbers and hyphens. Must start with a letter. Must end with a number or a letter.

    ExecutionSpec DatascanExecutionSpecArgs

    DataScan execution settings. Structure is documented below.

    Location string

    The location where the data scan should reside.

    DataProfileSpec DatascanDataProfileSpecArgs

    DataProfileScan related setting. Structure is documented below.

    DataQualitySpec DatascanDataQualitySpecArgs

    DataQualityScan related setting. Structure is documented below.

    Description string

    Description of the scan.

    (Optional) Description of the rule. The maximum length is 1,024 characters.

    DisplayName string

    User friendly display name.

    Labels map[string]string

    User-defined labels for the scan. A map of key/value pairs.

    Project string

    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.

    data DatascanData

    The data source for DataScan. Structure is documented below.

    dataScanId String

    DataScan identifier. Must contain only lowercase letters, numbers and hyphens. Must start with a letter. Must end with a number or a letter.

    executionSpec DatascanExecutionSpec

    DataScan execution settings. Structure is documented below.

    location String

    The location where the data scan should reside.

    dataProfileSpec DatascanDataProfileSpec

    DataProfileScan related setting. Structure is documented below.

    dataQualitySpec DatascanDataQualitySpec

    DataQualityScan related setting. Structure is documented below.

    description String

    Description of the scan.

    (Optional) Description of the rule. The maximum length is 1,024 characters.

    displayName String

    User friendly display name.

    labels Map<String,String>

    User-defined labels for the scan. A map of key/value pairs.

    project String

    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.

    data DatascanData

    The data source for DataScan. Structure is documented below.

    dataScanId string

    DataScan identifier. Must contain only lowercase letters, numbers and hyphens. Must start with a letter. Must end with a number or a letter.

    executionSpec DatascanExecutionSpec

    DataScan execution settings. Structure is documented below.

    location string

    The location where the data scan should reside.

    dataProfileSpec DatascanDataProfileSpec

    DataProfileScan related setting. Structure is documented below.

    dataQualitySpec DatascanDataQualitySpec

    DataQualityScan related setting. Structure is documented below.

    description string

    Description of the scan.

    (Optional) Description of the rule. The maximum length is 1,024 characters.

    displayName string

    User friendly display name.

    labels {[key: string]: string}

    User-defined labels for the scan. A map of key/value pairs.

    project string

    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.

    data DatascanDataArgs

    The data source for DataScan. Structure is documented below.

    data_scan_id str

    DataScan identifier. Must contain only lowercase letters, numbers and hyphens. Must start with a letter. Must end with a number or a letter.

    execution_spec DatascanExecutionSpecArgs

    DataScan execution settings. Structure is documented below.

    location str

    The location where the data scan should reside.

    data_profile_spec DatascanDataProfileSpecArgs

    DataProfileScan related setting. Structure is documented below.

    data_quality_spec DatascanDataQualitySpecArgs

    DataQualityScan related setting. Structure is documented below.

    description str

    Description of the scan.

    (Optional) Description of the rule. The maximum length is 1,024 characters.

    display_name str

    User friendly display name.

    labels Mapping[str, str]

    User-defined labels for the scan. A map of key/value pairs.

    project str

    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.

    data Property Map

    The data source for DataScan. Structure is documented below.

    dataScanId String

    DataScan identifier. Must contain only lowercase letters, numbers and hyphens. Must start with a letter. Must end with a number or a letter.

    executionSpec Property Map

    DataScan execution settings. Structure is documented below.

    location String

    The location where the data scan should reside.

    dataProfileSpec Property Map

    DataProfileScan related setting. Structure is documented below.

    dataQualitySpec Property Map

    DataQualityScan related setting. Structure is documented below.

    description String

    Description of the scan.

    (Optional) Description of the rule. The maximum length is 1,024 characters.

    displayName String

    User friendly display name.

    labels Map<String>

    User-defined labels for the scan. A map of key/value pairs.

    project String

    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.

    Outputs

    All input properties are implicitly available as output properties. Additionally, the Datascan resource produces the following output properties:

    CreateTime string

    The time when the scan was created.

    DataProfileResults List<DatascanDataProfileResult>

    (Deprecated) The result of the data profile scan. Structure is documented below.

    Deprecated:

    data_profile_result is deprecated and will be removed in a future major release.

    DataQualityResults List<DatascanDataQualityResult>

    (Deprecated) The result of the data quality scan. Structure is documented below.

    Deprecated:

    data_quality_result is deprecated and will be removed in a future major release.

    ExecutionStatuses List<DatascanExecutionStatus>

    Status of the data scan execution. Structure is documented below.

    Id string

    The provider-assigned unique ID for this managed resource.

    Name string

    A mutable name for the rule. The name must contain only letters (a-z, A-Z), numbers (0-9), or hyphens (-). The maximum length is 63 characters. Must start with a letter. Must end with a number or a letter.

    State string

    Current state of the DataScan.

    Type string

    The field data type.

    Uid string

    System generated globally unique ID for the scan. This ID will be different if the scan is deleted and re-created with the same name.

    UpdateTime string

    The time when the scan was last updated.

    CreateTime string

    The time when the scan was created.

    DataProfileResults []DatascanDataProfileResult

    (Deprecated) The result of the data profile scan. Structure is documented below.

    Deprecated:

    data_profile_result is deprecated and will be removed in a future major release.

    DataQualityResults []DatascanDataQualityResult

    (Deprecated) The result of the data quality scan. Structure is documented below.

    Deprecated:

    data_quality_result is deprecated and will be removed in a future major release.

    ExecutionStatuses []DatascanExecutionStatus

    Status of the data scan execution. Structure is documented below.

    Id string

    The provider-assigned unique ID for this managed resource.

    Name string

    A mutable name for the rule. The name must contain only letters (a-z, A-Z), numbers (0-9), or hyphens (-). The maximum length is 63 characters. Must start with a letter. Must end with a number or a letter.

    State string

    Current state of the DataScan.

    Type string

    The field data type.

    Uid string

    System generated globally unique ID for the scan. This ID will be different if the scan is deleted and re-created with the same name.

    UpdateTime string

    The time when the scan was last updated.

    createTime String

    The time when the scan was created.

    dataProfileResults List<DatascanDataProfileResult>

    (Deprecated) The result of the data profile scan. Structure is documented below.

    Deprecated:

    data_profile_result is deprecated and will be removed in a future major release.

    dataQualityResults List<DatascanDataQualityResult>

    (Deprecated) The result of the data quality scan. Structure is documented below.

    Deprecated:

    data_quality_result is deprecated and will be removed in a future major release.

    executionStatuses List<DatascanExecutionStatus>

    Status of the data scan execution. Structure is documented below.

    id String

    The provider-assigned unique ID for this managed resource.

    name String

    A mutable name for the rule. The name must contain only letters (a-z, A-Z), numbers (0-9), or hyphens (-). The maximum length is 63 characters. Must start with a letter. Must end with a number or a letter.

    state String

    Current state of the DataScan.

    type String

    The field data type.

    uid String

    System generated globally unique ID for the scan. This ID will be different if the scan is deleted and re-created with the same name.

    updateTime String

    The time when the scan was last updated.

    createTime string

    The time when the scan was created.

    dataProfileResults DatascanDataProfileResult[]

    (Deprecated) The result of the data profile scan. Structure is documented below.

    Deprecated:

    data_profile_result is deprecated and will be removed in a future major release.

    dataQualityResults DatascanDataQualityResult[]

    (Deprecated) The result of the data quality scan. Structure is documented below.

    Deprecated:

    data_quality_result is deprecated and will be removed in a future major release.

    executionStatuses DatascanExecutionStatus[]

    Status of the data scan execution. Structure is documented below.

    id string

    The provider-assigned unique ID for this managed resource.

    name string

    A mutable name for the rule. The name must contain only letters (a-z, A-Z), numbers (0-9), or hyphens (-). The maximum length is 63 characters. Must start with a letter. Must end with a number or a letter.

    state string

    Current state of the DataScan.

    type string

    The field data type.

    uid string

    System generated globally unique ID for the scan. This ID will be different if the scan is deleted and re-created with the same name.

    updateTime string

    The time when the scan was last updated.

    create_time str

    The time when the scan was created.

    data_profile_results Sequence[DatascanDataProfileResult]

    (Deprecated) The result of the data profile scan. Structure is documented below.

    Deprecated:

    data_profile_result is deprecated and will be removed in a future major release.

    data_quality_results Sequence[DatascanDataQualityResult]

    (Deprecated) The result of the data quality scan. Structure is documented below.

    Deprecated:

    data_quality_result is deprecated and will be removed in a future major release.

    execution_statuses Sequence[DatascanExecutionStatus]

    Status of the data scan execution. Structure is documented below.

    id str

    The provider-assigned unique ID for this managed resource.

    name str

    A mutable name for the rule. The name must contain only letters (a-z, A-Z), numbers (0-9), or hyphens (-). The maximum length is 63 characters. Must start with a letter. Must end with a number or a letter.

    state str

    Current state of the DataScan.

    type str

    The field data type.

    uid str

    System generated globally unique ID for the scan. This ID will be different if the scan is deleted and re-created with the same name.

    update_time str

    The time when the scan was last updated.

    createTime String

    The time when the scan was created.

    dataProfileResults List<Property Map>

    (Deprecated) The result of the data profile scan. Structure is documented below.

    Deprecated:

    data_profile_result is deprecated and will be removed in a future major release.

    dataQualityResults List<Property Map>

    (Deprecated) The result of the data quality scan. Structure is documented below.

    Deprecated:

    data_quality_result is deprecated and will be removed in a future major release.

    executionStatuses List<Property Map>

    Status of the data scan execution. Structure is documented below.

    id String

    The provider-assigned unique ID for this managed resource.

    name String

    A mutable name for the rule. The name must contain only letters (a-z, A-Z), numbers (0-9), or hyphens (-). The maximum length is 63 characters. Must start with a letter. Must end with a number or a letter.

    state String

    Current state of the DataScan.

    type String

    The field data type.

    uid String

    System generated globally unique ID for the scan. This ID will be different if the scan is deleted and re-created with the same name.

    updateTime String

    The time when the scan was last updated.

    Look up Existing Datascan Resource

    Get an existing Datascan resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

    public static get(name: string, id: Input<ID>, state?: DatascanState, opts?: CustomResourceOptions): Datascan
    @staticmethod
    def get(resource_name: str,
            id: str,
            opts: Optional[ResourceOptions] = None,
            create_time: Optional[str] = None,
            data: Optional[DatascanDataArgs] = None,
            data_profile_results: Optional[Sequence[DatascanDataProfileResultArgs]] = None,
            data_profile_spec: Optional[DatascanDataProfileSpecArgs] = None,
            data_quality_results: Optional[Sequence[DatascanDataQualityResultArgs]] = None,
            data_quality_spec: Optional[DatascanDataQualitySpecArgs] = None,
            data_scan_id: Optional[str] = None,
            description: Optional[str] = None,
            display_name: Optional[str] = None,
            execution_spec: Optional[DatascanExecutionSpecArgs] = None,
            execution_statuses: Optional[Sequence[DatascanExecutionStatusArgs]] = None,
            labels: Optional[Mapping[str, str]] = None,
            location: Optional[str] = None,
            name: Optional[str] = None,
            project: Optional[str] = None,
            state: Optional[str] = None,
            type: Optional[str] = None,
            uid: Optional[str] = None,
            update_time: Optional[str] = None) -> Datascan
    func GetDatascan(ctx *Context, name string, id IDInput, state *DatascanState, opts ...ResourceOption) (*Datascan, error)
    public static Datascan Get(string name, Input<string> id, DatascanState? state, CustomResourceOptions? opts = null)
    public static Datascan get(String name, Output<String> id, DatascanState state, CustomResourceOptions options)
    Resource lookup is not supported in YAML
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    resource_name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    The following state arguments are supported:
    CreateTime string

    The time when the scan was created.

    Data DatascanData

    The data source for DataScan. Structure is documented below.

    DataProfileResults List<DatascanDataProfileResult>

    (Deprecated) The result of the data profile scan. Structure is documented below.

    Deprecated:

    data_profile_result is deprecated and will be removed in a future major release.

    DataProfileSpec DatascanDataProfileSpec

    DataProfileScan related setting. Structure is documented below.

    DataQualityResults List<DatascanDataQualityResult>

    (Deprecated) The result of the data quality scan. Structure is documented below.

    Deprecated:

    data_quality_result is deprecated and will be removed in a future major release.

    DataQualitySpec DatascanDataQualitySpec

    DataQualityScan related setting. Structure is documented below.

    DataScanId string

    DataScan identifier. Must contain only lowercase letters, numbers and hyphens. Must start with a letter. Must end with a number or a letter.

    Description string

    Description of the scan.

    (Optional) Description of the rule. The maximum length is 1,024 characters.

    DisplayName string

    User friendly display name.

    ExecutionSpec DatascanExecutionSpec

    DataScan execution settings. Structure is documented below.

    ExecutionStatuses List<DatascanExecutionStatus>

    Status of the data scan execution. Structure is documented below.

    Labels Dictionary<string, string>

    User-defined labels for the scan. A list of key->value pairs.

    Location string

    The location where the data scan should reside.

    Name string

    A mutable name for the rule. The name must contain only letters (a-z, A-Z), numbers (0-9), or hyphens (-). The maximum length is 63 characters. Must start with a letter. Must end with a number or a letter.

    Project string

    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.

    State string

    Current state of the DataScan.

    Type string

    The field data type.

    Uid string

    System generated globally unique ID for the scan. This ID will be different if the scan is deleted and re-created with the same name.

    UpdateTime string

    The time when the scan was last updated.

    CreateTime string

    The time when the scan was created.

    Data DatascanDataArgs

    The data source for DataScan. Structure is documented below.

    DataProfileResults []DatascanDataProfileResultArgs

    (Deprecated) The result of the data profile scan. Structure is documented below.

    Deprecated:

    data_profile_result is deprecated and will be removed in a future major release.

    DataProfileSpec DatascanDataProfileSpecArgs

    DataProfileScan related setting. Structure is documented below.

    DataQualityResults []DatascanDataQualityResultArgs

    (Deprecated) The result of the data quality scan. Structure is documented below.

    Deprecated:

    data_quality_result is deprecated and will be removed in a future major release.

    DataQualitySpec DatascanDataQualitySpecArgs

    DataQualityScan related setting. Structure is documented below.

    DataScanId string

    DataScan identifier. Must contain only lowercase letters, numbers and hyphens. Must start with a letter. Must end with a number or a letter.

    Description string

    Description of the scan.

    (Optional) Description of the rule. The maximum length is 1,024 characters.

    DisplayName string

    User friendly display name.

    ExecutionSpec DatascanExecutionSpecArgs

    DataScan execution settings. Structure is documented below.

    ExecutionStatuses []DatascanExecutionStatusArgs

    Status of the data scan execution. Structure is documented below.

    Labels map[string]string

    User-defined labels for the scan. A list of key->value pairs.

    Location string

    The location where the data scan should reside.

    Name string

    A mutable name for the rule. The name must contain only letters (a-z, A-Z), numbers (0-9), or hyphens (-). The maximum length is 63 characters. Must start with a letter. Must end with a number or a letter.

    Project string

    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.

    State string

    Current state of the DataScan.

    Type string

    The field data type.

    Uid string

    System generated globally unique ID for the scan. This ID will be different if the scan is deleted and re-created with the same name.

    UpdateTime string

    The time when the scan was last updated.

    createTime String

    The time when the scan was created.

    data DatascanData

    The data source for DataScan. Structure is documented below.

    dataProfileResults List<DatascanDataProfileResult>

    (Deprecated) The result of the data profile scan. Structure is documented below.

    Deprecated:

    data_profile_result is deprecated and will be removed in a future major release.

    dataProfileSpec DatascanDataProfileSpec

    DataProfileScan related setting. Structure is documented below.

    dataQualityResults List<DatascanDataQualityResult>

    (Deprecated) The result of the data quality scan. Structure is documented below.

    Deprecated:

    data_quality_result is deprecated and will be removed in a future major release.

    dataQualitySpec DatascanDataQualitySpec

    DataQualityScan related setting. Structure is documented below.

    dataScanId String

    DataScan identifier. Must contain only lowercase letters, numbers and hyphens. Must start with a letter. Must end with a number or a letter.

    description String

    Description of the scan.

    (Optional) Description of the rule. The maximum length is 1,024 characters.

    displayName String

    User friendly display name.

    executionSpec DatascanExecutionSpec

    DataScan execution settings. Structure is documented below.

    executionStatuses List<DatascanExecutionStatus>

    Status of the data scan execution. Structure is documented below.

    labels Map<String,String>

    User-defined labels for the scan. A list of key->value pairs.

    location String

    The location where the data scan should reside.

    name String

    A mutable name for the rule. The name must contain only letters (a-z, A-Z), numbers (0-9), or hyphens (-). The maximum length is 63 characters. Must start with a letter. Must end with a number or a letter.

    project String

    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.

    state String

    Current state of the DataScan.

    type String

    The field data type.

    uid String

    System generated globally unique ID for the scan. This ID will be different if the scan is deleted and re-created with the same name.

    updateTime String

    The time when the scan was last updated.

    createTime string

    The time when the scan was created.

    data DatascanData

    The data source for DataScan. Structure is documented below.

    dataProfileResults DatascanDataProfileResult[]

    (Deprecated) The result of the data profile scan. Structure is documented below.

    Deprecated:

    data_profile_result is deprecated and will be removed in a future major release.

    dataProfileSpec DatascanDataProfileSpec

    DataProfileScan related setting. Structure is documented below.

    dataQualityResults DatascanDataQualityResult[]

    (Deprecated) The result of the data quality scan. Structure is documented below.

    Deprecated:

    data_quality_result is deprecated and will be removed in a future major release.

    dataQualitySpec DatascanDataQualitySpec

    DataQualityScan related setting. Structure is documented below.

    dataScanId string

    DataScan identifier. Must contain only lowercase letters, numbers and hyphens. Must start with a letter. Must end with a number or a letter.

    description string

    Description of the scan.

    (Optional) Description of the rule. The maximum length is 1,024 characters.

    displayName string

    User friendly display name.

    executionSpec DatascanExecutionSpec

    DataScan execution settings. Structure is documented below.

    executionStatuses DatascanExecutionStatus[]

    Status of the data scan execution. Structure is documented below.

    labels {[key: string]: string}

    User-defined labels for the scan. A list of key->value pairs.

    location string

    The location where the data scan should reside.

    name string

    A mutable name for the rule. The name must contain only letters (a-z, A-Z), numbers (0-9), or hyphens (-). The maximum length is 63 characters. Must start with a letter. Must end with a number or a letter.

    project string

    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.

    state string

    Current state of the DataScan.

    type string

    The field data type.

    uid string

    System generated globally unique ID for the scan. This ID will be different if the scan is deleted and re-created with the same name.

    updateTime string

    The time when the scan was last updated.

    create_time str

    The time when the scan was created.

    data DatascanDataArgs

    The data source for DataScan. Structure is documented below.

    data_profile_results Sequence[DatascanDataProfileResultArgs]

    (Deprecated) The result of the data profile scan. Structure is documented below.

    Deprecated:

    data_profile_result is deprecated and will be removed in a future major release.

    data_profile_spec DatascanDataProfileSpecArgs

    DataProfileScan related setting. Structure is documented below.

    data_quality_results Sequence[DatascanDataQualityResultArgs]

    (Deprecated) The result of the data quality scan. Structure is documented below.

    Deprecated:

    data_quality_result is deprecated and will be removed in a future major release.

    data_quality_spec DatascanDataQualitySpecArgs

    DataQualityScan related setting. Structure is documented below.

    data_scan_id str

    DataScan identifier. Must contain only lowercase letters, numbers and hyphens. Must start with a letter. Must end with a number or a letter.

    description str

    Description of the scan.

    (Optional) Description of the rule. The maximum length is 1,024 characters.

    display_name str

    User friendly display name.

    execution_spec DatascanExecutionSpecArgs

    DataScan execution settings. Structure is documented below.

    execution_statuses Sequence[DatascanExecutionStatusArgs]

    Status of the data scan execution. Structure is documented below.

    labels Mapping[str, str]

    User-defined labels for the scan. A list of key->value pairs.

    location str

    The location where the data scan should reside.

    name str

    A mutable name for the rule. The name must contain only letters (a-z, A-Z), numbers (0-9), or hyphens (-). The maximum length is 63 characters. Must start with a letter. Must end with a number or a letter.

    project str

    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.

    state str

    Current state of the DataScan.

    type str

    The field data type.

    uid str

    System generated globally unique ID for the scan. This ID will be different if the scan is deleted and re-created with the same name.

    update_time str

    The time when the scan was last updated.

    createTime String

    The time when the scan was created.

    data Property Map

    The data source for DataScan. Structure is documented below.

    dataProfileResults List<Property Map>

    (Deprecated) The result of the data profile scan. Structure is documented below.

    Deprecated:

    data_profile_result is deprecated and will be removed in a future major release.

    dataProfileSpec Property Map

    DataProfileScan related setting. Structure is documented below.

    dataQualityResults List<Property Map>

    (Deprecated) The result of the data quality scan. Structure is documented below.

    Deprecated:

    data_quality_result is deprecated and will be removed in a future major release.

    dataQualitySpec Property Map

    DataQualityScan related setting. Structure is documented below.

    dataScanId String

    DataScan identifier. Must contain only lowercase letters, numbers and hyphens. Must start with a letter. Must end with a number or a letter.

    description String

    Description of the scan.

    (Optional) Description of the rule. The maximum length is 1,024 characters.

    displayName String

    User friendly display name.

    executionSpec Property Map

    DataScan execution settings. Structure is documented below.

    executionStatuses List<Property Map>

    Status of the data scan execution. Structure is documented below.

    labels Map<String>

    User-defined labels for the scan. A list of key->value pairs.

    location String

    The location where the data scan should reside.

    name String

    A mutable name for the rule. The name must contain only letters (a-z, A-Z), numbers (0-9), or hyphens (-). The maximum length is 63 characters. Must start with a letter. Must end with a number or a letter.

    project String

    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.

    state String

    Current state of the DataScan.

    type String

    The field data type.

    uid String

    System generated globally unique ID for the scan. This ID will be different if the scan is deleted and re-created with the same name.

    updateTime String

    The time when the scan was last updated.

    Supporting Types

    DatascanData, DatascanDataArgs

    Entity string

    The Dataplex entity that represents the data source (e.g. BigQuery table) for Datascan.

    Resource string

    The service-qualified full resource name of the cloud resource for a DataScan job to scan against. The field could be: Cloud Storage bucket (for DataDiscoveryScan) or BigQuery table of type "TABLE" (for DataProfileScan/DataQualityScan).

    Entity string

    The Dataplex entity that represents the data source (e.g. BigQuery table) for Datascan.

    Resource string

    The service-qualified full resource name of the cloud resource for a DataScan job to scan against. The field could be: Cloud Storage bucket (for DataDiscoveryScan) or BigQuery table of type "TABLE" (for DataProfileScan/DataQualityScan).

    entity String

    The Dataplex entity that represents the data source (e.g. BigQuery table) for Datascan.

    resource String

    The service-qualified full resource name of the cloud resource for a DataScan job to scan against. The field could be: Cloud Storage bucket (for DataDiscoveryScan) or BigQuery table of type "TABLE" (for DataProfileScan/DataQualityScan).

    entity string

    The Dataplex entity that represents the data source (e.g. BigQuery table) for Datascan.

    resource string

    The service-qualified full resource name of the cloud resource for a DataScan job to scan against. The field could be: Cloud Storage bucket (for DataDiscoveryScan) or BigQuery table of type "TABLE" (for DataProfileScan/DataQualityScan).

    entity str

    The Dataplex entity that represents the data source (e.g. BigQuery table) for Datascan.

    resource str

    The service-qualified full resource name of the cloud resource for a DataScan job to scan against. The field could be: Cloud Storage bucket (for DataDiscoveryScan) or BigQuery table of type "TABLE" (for DataProfileScan/DataQualityScan).

    entity String

    The Dataplex entity that represents the data source (e.g. BigQuery table) for Datascan.

    resource String

    The service-qualified full resource name of the cloud resource for a DataScan job to scan against. The field could be: Cloud Storage bucket (for DataDiscoveryScan) or BigQuery table of type "TABLE" (for DataProfileScan/DataQualityScan).

    DatascanDataProfileResult, DatascanDataProfileResultArgs

    Profiles List<DatascanDataProfileResultProfile>

    Profile information for the corresponding field. Structure is documented below.

    RowCount string

    The count of rows scanned.

    ScannedDatas List<DatascanDataProfileResultScannedData>

    (Output) The data scanned for this result. Structure is documented below.

    Profiles []DatascanDataProfileResultProfile

    Profile information for the corresponding field. Structure is documented below.

    RowCount string

    The count of rows scanned.

    ScannedDatas []DatascanDataProfileResultScannedData

    (Output) The data scanned for this result. Structure is documented below.

    profiles List<DatascanDataProfileResultProfile>

    Profile information for the corresponding field. Structure is documented below.

    rowCount String

    The count of rows scanned.

    scannedDatas List<DatascanDataProfileResultScannedData>

    (Output) The data scanned for this result. Structure is documented below.

    profiles DatascanDataProfileResultProfile[]

    Profile information for the corresponding field. Structure is documented below.

    rowCount string

    The count of rows scanned.

    scannedDatas DatascanDataProfileResultScannedData[]

    (Output) The data scanned for this result. Structure is documented below.

    profiles Sequence[DatascanDataProfileResultProfile]

    Profile information for the corresponding field. Structure is documented below.

    row_count str

    The count of rows scanned.

    scanned_datas Sequence[DatascanDataProfileResultScannedData]

    (Output) The data scanned for this result. Structure is documented below.

    profiles List<Property Map>

    Profile information for the corresponding field. Structure is documented below.

    rowCount String

    The count of rows scanned.

    scannedDatas List<Property Map>

    (Output) The data scanned for this result. Structure is documented below.

    DatascanDataProfileResultProfile, DatascanDataProfileResultProfileArgs

    Fields List<DatascanDataProfileResultProfileField>

    List of fields with structural and profile information for each field. Structure is documented below.

    Fields []DatascanDataProfileResultProfileField

    List of fields with structural and profile information for each field. Structure is documented below.

    fields List<DatascanDataProfileResultProfileField>

    List of fields with structural and profile information for each field. Structure is documented below.

    fields DatascanDataProfileResultProfileField[]

    List of fields with structural and profile information for each field. Structure is documented below.

    fields Sequence[DatascanDataProfileResultProfileField]

    List of fields with structural and profile information for each field. Structure is documented below.

    fields List<Property Map>

    List of fields with structural and profile information for each field. Structure is documented below.

    DatascanDataProfileResultProfileField, DatascanDataProfileResultProfileFieldArgs

    Mode string

    The mode of the field. Possible values include:

    1. REQUIRED, if it is a required field.
    2. NULLABLE, if it is an optional field.
    3. REPEATED, if it is a repeated field.
    Name string

    A mutable name for the rule. The name must contain only letters (a-z, A-Z), numbers (0-9), or hyphens (-). The maximum length is 63 characters. Must start with a letter. Must end with a number or a letter.

    Profile DatascanDataProfileResultProfileFieldProfile

    Profile information for the corresponding field. Structure is documented below.

    Type string

    The field data type.

    Mode string

    The mode of the field. Possible values include:

    1. REQUIRED, if it is a required field.
    2. NULLABLE, if it is an optional field.
    3. REPEATED, if it is a repeated field.
    Name string

    A mutable name for the rule. The name must contain only letters (a-z, A-Z), numbers (0-9), or hyphens (-). The maximum length is 63 characters. Must start with a letter. Must end with a number or a letter.

    Profile DatascanDataProfileResultProfileFieldProfile

    Profile information for the corresponding field. Structure is documented below.

    Type string

    The field data type.

    mode String

    The mode of the field. Possible values include:

    1. REQUIRED, if it is a required field.
    2. NULLABLE, if it is an optional field.
    3. REPEATED, if it is a repeated field.
    name String

    A mutable name for the rule. The name must contain only letters (a-z, A-Z), numbers (0-9), or hyphens (-). The maximum length is 63 characters. Must start with a letter. Must end with a number or a letter.

    profile DatascanDataProfileResultProfileFieldProfile

    Profile information for the corresponding field. Structure is documented below.

    type String

    The field data type.

    mode string

    The mode of the field. Possible values include:

    1. REQUIRED, if it is a required field.
    2. NULLABLE, if it is an optional field.
    3. REPEATED, if it is a repeated field.
    name string

    A mutable name for the rule. The name must contain only letters (a-z, A-Z), numbers (0-9), or hyphens (-). The maximum length is 63 characters. Must start with a letter. Must end with a number or a letter.

    profile DatascanDataProfileResultProfileFieldProfile

    Profile information for the corresponding field. Structure is documented below.

    type string

    The field data type.

    mode str

    The mode of the field. Possible values include:

    1. REQUIRED, if it is a required field.
    2. NULLABLE, if it is an optional field.
    3. REPEATED, if it is a repeated field.
    name str

    A mutable name for the rule. The name must contain only letters (a-z, A-Z), numbers (0-9), or hyphens (-). The maximum length is 63 characters. Must start with a letter. Must end with a number or a letter.

    profile DatascanDataProfileResultProfileFieldProfile

    Profile information for the corresponding field. Structure is documented below.

    type str

    The field data type.

    mode String

    The mode of the field. Possible values include:

    1. REQUIRED, if it is a required field.
    2. NULLABLE, if it is an optional field.
    3. REPEATED, if it is a repeated field.
    name String

    A mutable name for the rule. The name must contain only letters (a-z, A-Z), numbers (0-9), or hyphens (-). The maximum length is 63 characters. Must start with a letter. Must end with a number or a letter.

    profile Property Map

    Profile information for the corresponding field. Structure is documented below.

    type String

    The field data type.

    DatascanDataProfileResultProfileFieldProfile, DatascanDataProfileResultProfileFieldProfileArgs

    DistinctRatio int

    Ratio of rows with distinct values against total scanned rows. Not available for complex non-groupable field type RECORD and fields with REPEATABLE mode.

    DoubleProfiles List<DatascanDataProfileResultProfileFieldProfileDoubleProfile>

    (Output) Double type field information. Structure is documented below.

    IntegerProfiles List<DatascanDataProfileResultProfileFieldProfileIntegerProfile>

    (Output) Integer type field information. Structure is documented below.

    NullRatio int

    (Output) Ratio of rows with null value against total scanned rows.

    StringProfiles List<DatascanDataProfileResultProfileFieldProfileStringProfile>

    (Output) String type field information. Structure is documented below.

    TopNValues DatascanDataProfileResultProfileFieldProfileTopNValues

    The list of top N non-null values and number of times they occur in the scanned data. N is 10 or equal to the number of distinct values in the field, whichever is smaller. Not available for complex non-groupable field type RECORD and fields with REPEATABLE mode. Structure is documented below.

    DistinctRatio int

    Ratio of rows with distinct values against total scanned rows. Not available for complex non-groupable field type RECORD and fields with REPEATABLE mode.

    DoubleProfiles []DatascanDataProfileResultProfileFieldProfileDoubleProfile

    (Output) Double type field information. Structure is documented below.

    IntegerProfiles []DatascanDataProfileResultProfileFieldProfileIntegerProfile

    (Output) Integer type field information. Structure is documented below.

    NullRatio int

    (Output) Ratio of rows with null value against total scanned rows.

    StringProfiles []DatascanDataProfileResultProfileFieldProfileStringProfile

    (Output) String type field information. Structure is documented below.

    TopNValues DatascanDataProfileResultProfileFieldProfileTopNValues

    The list of top N non-null values and number of times they occur in the scanned data. N is 10 or equal to the number of distinct values in the field, whichever is smaller. Not available for complex non-groupable field type RECORD and fields with REPEATABLE mode. Structure is documented below.

    distinctRatio Integer

    Ratio of rows with distinct values against total scanned rows. Not available for complex non-groupable field type RECORD and fields with REPEATABLE mode.

    doubleProfiles List<DatascanDataProfileResultProfileFieldProfileDoubleProfile>

    (Output) Double type field information. Structure is documented below.

    integerProfiles List<DatascanDataProfileResultProfileFieldProfileIntegerProfile>

    (Output) Integer type field information. Structure is documented below.

    nullRatio Integer

    (Output) Ratio of rows with null value against total scanned rows.

    stringProfiles List<DatascanDataProfileResultProfileFieldProfileStringProfile>

    (Output) String type field information. Structure is documented below.

    topNValues DatascanDataProfileResultProfileFieldProfileTopNValues

    The list of top N non-null values and number of times they occur in the scanned data. N is 10 or equal to the number of distinct values in the field, whichever is smaller. Not available for complex non-groupable field type RECORD and fields with REPEATABLE mode. Structure is documented below.

    distinctRatio number

    Ratio of rows with distinct values against total scanned rows. Not available for complex non-groupable field type RECORD and fields with REPEATABLE mode.

    doubleProfiles DatascanDataProfileResultProfileFieldProfileDoubleProfile[]

    (Output) Double type field information. Structure is documented below.

    integerProfiles DatascanDataProfileResultProfileFieldProfileIntegerProfile[]

    (Output) Integer type field information. Structure is documented below.

    nullRatio number

    (Output) Ratio of rows with null value against total scanned rows.

    stringProfiles DatascanDataProfileResultProfileFieldProfileStringProfile[]

    (Output) String type field information. Structure is documented below.

    topNValues DatascanDataProfileResultProfileFieldProfileTopNValues

    The list of top N non-null values and number of times they occur in the scanned data. N is 10 or equal to the number of distinct values in the field, whichever is smaller. Not available for complex non-groupable field type RECORD and fields with REPEATABLE mode. Structure is documented below.

    distinct_ratio int

    Ratio of rows with distinct values against total scanned rows. Not available for complex non-groupable field type RECORD and fields with REPEATABLE mode.

    double_profiles Sequence[DatascanDataProfileResultProfileFieldProfileDoubleProfile]

    (Output) Double type field information. Structure is documented below.

    integer_profiles Sequence[DatascanDataProfileResultProfileFieldProfileIntegerProfile]

    (Output) Integer type field information. Structure is documented below.

    null_ratio int

    (Output) Ratio of rows with null value against total scanned rows.

    string_profiles Sequence[DatascanDataProfileResultProfileFieldProfileStringProfile]

    (Output) String type field information. Structure is documented below.

    top_n_values DatascanDataProfileResultProfileFieldProfileTopNValues

    The list of top N non-null values and number of times they occur in the scanned data. N is 10 or equal to the number of distinct values in the field, whichever is smaller. Not available for complex non-groupable field type RECORD and fields with REPEATABLE mode. Structure is documented below.

    distinctRatio Number

    Ratio of rows with distinct values against total scanned rows. Not available for complex non-groupable field type RECORD and fields with REPEATABLE mode.

    doubleProfiles List<Property Map>

    (Output) Double type field information. Structure is documented below.

    integerProfiles List<Property Map>

    (Output) Integer type field information. Structure is documented below.

    nullRatio Number

    (Output) Ratio of rows with null value against total scanned rows.

    stringProfiles List<Property Map>

    (Output) String type field information. Structure is documented below.

    topNValues Property Map

    The list of top N non-null values and number of times they occur in the scanned data. N is 10 or equal to the number of distinct values in the field, whichever is smaller. Not available for complex non-groupable field type RECORD and fields with REPEATABLE mode. Structure is documented below.

    DatascanDataProfileResultProfileFieldProfileDoubleProfile, DatascanDataProfileResultProfileFieldProfileDoubleProfileArgs

    Average int

    Average of non-null values in the scanned data. NaN, if the field has a NaN.

    Max string

    Maximum of non-null values in the scanned data. NaN, if the field has a NaN.

    Min string

    Minimum of non-null values in the scanned data. NaN, if the field has a NaN.

    Quartiles string

    A quartile divides the number of data points into four parts, or quarters, of more-or-less equal size. Three main quartiles used are: The first quartile (Q1) splits off the lowest 25% of data from the highest 75%. It is also known as the lower or 25th empirical quartile, as 25% of the data is below this point. The second quartile (Q2) is the median of a data set. So, 50% of the data lies below this point. The third quartile (Q3) splits off the highest 25% of data from the lowest 75%. It is known as the upper or 75th empirical quartile, as 75% of the data lies below this point. Here, the quartiles is provided as an ordered list of quartile values for the scanned data, occurring in order Q1, median, Q3.

    StandardDeviation int

    Standard deviation of non-null values in the scanned data. NaN, if the field has a NaN.

    Average int

    Average of non-null values in the scanned data. NaN, if the field has a NaN.

    Max string

    Maximum of non-null values in the scanned data. NaN, if the field has a NaN.

    Min string

    Minimum of non-null values in the scanned data. NaN, if the field has a NaN.

    Quartiles string

    A quartile divides the number of data points into four parts, or quarters, of more-or-less equal size. Three main quartiles used are: The first quartile (Q1) splits off the lowest 25% of data from the highest 75%. It is also known as the lower or 25th empirical quartile, as 25% of the data is below this point. The second quartile (Q2) is the median of a data set. So, 50% of the data lies below this point. The third quartile (Q3) splits off the highest 25% of data from the lowest 75%. It is known as the upper or 75th empirical quartile, as 75% of the data lies below this point. Here, the quartiles is provided as an ordered list of quartile values for the scanned data, occurring in order Q1, median, Q3.

    StandardDeviation int

    Standard deviation of non-null values in the scanned data. NaN, if the field has a NaN.

    average Integer

    Average of non-null values in the scanned data. NaN, if the field has a NaN.

    max String

    Maximum of non-null values in the scanned data. NaN, if the field has a NaN.

    min String

    Minimum of non-null values in the scanned data. NaN, if the field has a NaN.

    quartiles String

    A quartile divides the number of data points into four parts, or quarters, of more-or-less equal size. Three main quartiles used are: The first quartile (Q1) splits off the lowest 25% of data from the highest 75%. It is also known as the lower or 25th empirical quartile, as 25% of the data is below this point. The second quartile (Q2) is the median of a data set. So, 50% of the data lies below this point. The third quartile (Q3) splits off the highest 25% of data from the lowest 75%. It is known as the upper or 75th empirical quartile, as 75% of the data lies below this point. Here, the quartiles is provided as an ordered list of quartile values for the scanned data, occurring in order Q1, median, Q3.

    standardDeviation Integer

    Standard deviation of non-null values in the scanned data. NaN, if the field has a NaN.

    average number

    Average of non-null values in the scanned data. NaN, if the field has a NaN.

    max string

    Maximum of non-null values in the scanned data. NaN, if the field has a NaN.

    min string

    Minimum of non-null values in the scanned data. NaN, if the field has a NaN.

    quartiles string

    A quartile divides the number of data points into four parts, or quarters, of more-or-less equal size. Three main quartiles used are: The first quartile (Q1) splits off the lowest 25% of data from the highest 75%. It is also known as the lower or 25th empirical quartile, as 25% of the data is below this point. The second quartile (Q2) is the median of a data set. So, 50% of the data lies below this point. The third quartile (Q3) splits off the highest 25% of data from the lowest 75%. It is known as the upper or 75th empirical quartile, as 75% of the data lies below this point. Here, the quartiles is provided as an ordered list of quartile values for the scanned data, occurring in order Q1, median, Q3.

    standardDeviation number

    Standard deviation of non-null values in the scanned data. NaN, if the field has a NaN.

    average int

    Average of non-null values in the scanned data. NaN, if the field has a NaN.

    max str

    Maximum of non-null values in the scanned data. NaN, if the field has a NaN.

    min str

    Minimum of non-null values in the scanned data. NaN, if the field has a NaN.

    quartiles str

    A quartile divides the number of data points into four parts, or quarters, of more-or-less equal size. Three main quartiles used are: The first quartile (Q1) splits off the lowest 25% of data from the highest 75%. It is also known as the lower or 25th empirical quartile, as 25% of the data is below this point. The second quartile (Q2) is the median of a data set. So, 50% of the data lies below this point. The third quartile (Q3) splits off the highest 25% of data from the lowest 75%. It is known as the upper or 75th empirical quartile, as 75% of the data lies below this point. Here, the quartiles is provided as an ordered list of quartile values for the scanned data, occurring in order Q1, median, Q3.

    standard_deviation int

    Standard deviation of non-null values in the scanned data. NaN, if the field has a NaN.

    average Number

    Average of non-null values in the scanned data. NaN, if the field has a NaN.

    max String

    Maximum of non-null values in the scanned data. NaN, if the field has a NaN.

    min String

    Minimum of non-null values in the scanned data. NaN, if the field has a NaN.

    quartiles String

    A quartile divides the number of data points into four parts, or quarters, of more-or-less equal size. Three main quartiles used are: The first quartile (Q1) splits off the lowest 25% of data from the highest 75%. It is also known as the lower or 25th empirical quartile, as 25% of the data is below this point. The second quartile (Q2) is the median of a data set. So, 50% of the data lies below this point. The third quartile (Q3) splits off the highest 25% of data from the lowest 75%. It is known as the upper or 75th empirical quartile, as 75% of the data lies below this point. Here, the quartiles is provided as an ordered list of quartile values for the scanned data, occurring in order Q1, median, Q3.

    standardDeviation Number

    Standard deviation of non-null values in the scanned data. NaN, if the field has a NaN.

    DatascanDataProfileResultProfileFieldProfileIntegerProfile, DatascanDataProfileResultProfileFieldProfileIntegerProfileArgs

    Average int

    Average of non-null values in the scanned data. NaN, if the field has a NaN.

    Max string

    Maximum of non-null values in the scanned data. NaN, if the field has a NaN.

    Min string

    Minimum of non-null values in the scanned data. NaN, if the field has a NaN.

    Quartiles string

    A quartile divides the number of data points into four parts, or quarters, of more-or-less equal size. Three main quartiles used are: The first quartile (Q1) splits off the lowest 25% of data from the highest 75%. It is also known as the lower or 25th empirical quartile, as 25% of the data is below this point. The second quartile (Q2) is the median of a data set. So, 50% of the data lies below this point. The third quartile (Q3) splits off the highest 25% of data from the lowest 75%. It is known as the upper or 75th empirical quartile, as 75% of the data lies below this point. Here, the quartiles is provided as an ordered list of quartile values for the scanned data, occurring in order Q1, median, Q3.

    StandardDeviation int

    Standard deviation of non-null values in the scanned data. NaN, if the field has a NaN.

    Average int

    Average of non-null values in the scanned data. NaN, if the field has a NaN.

    Max string

    Maximum of non-null values in the scanned data. NaN, if the field has a NaN.

    Min string

    Minimum of non-null values in the scanned data. NaN, if the field has a NaN.

    Quartiles string

    A quartile divides the number of data points into four parts, or quarters, of more-or-less equal size. Three main quartiles used are: The first quartile (Q1) splits off the lowest 25% of data from the highest 75%. It is also known as the lower or 25th empirical quartile, as 25% of the data is below this point. The second quartile (Q2) is the median of a data set. So, 50% of the data lies below this point. The third quartile (Q3) splits off the highest 25% of data from the lowest 75%. It is known as the upper or 75th empirical quartile, as 75% of the data lies below this point. Here, the quartiles is provided as an ordered list of quartile values for the scanned data, occurring in order Q1, median, Q3.

    StandardDeviation int

    Standard deviation of non-null values in the scanned data. NaN, if the field has a NaN.

    average Integer

    Average of non-null values in the scanned data. NaN, if the field has a NaN.

    max String

    Maximum of non-null values in the scanned data. NaN, if the field has a NaN.

    min String

    Minimum of non-null values in the scanned data. NaN, if the field has a NaN.

    quartiles String

    A quartile divides the number of data points into four parts, or quarters, of more-or-less equal size. Three main quartiles used are: The first quartile (Q1) splits off the lowest 25% of data from the highest 75%. It is also known as the lower or 25th empirical quartile, as 25% of the data is below this point. The second quartile (Q2) is the median of a data set. So, 50% of the data lies below this point. The third quartile (Q3) splits off the highest 25% of data from the lowest 75%. It is known as the upper or 75th empirical quartile, as 75% of the data lies below this point. Here, the quartiles is provided as an ordered list of quartile values for the scanned data, occurring in order Q1, median, Q3.

    standardDeviation Integer

    Standard deviation of non-null values in the scanned data. NaN, if the field has a NaN.

    average number

    Average of non-null values in the scanned data. NaN, if the field has a NaN.

    max string

    Maximum of non-null values in the scanned data. NaN, if the field has a NaN.

    min string

    Minimum of non-null values in the scanned data. NaN, if the field has a NaN.

    quartiles string

    A quartile divides the number of data points into four parts, or quarters, of more-or-less equal size. Three main quartiles used are: The first quartile (Q1) splits off the lowest 25% of data from the highest 75%. It is also known as the lower or 25th empirical quartile, as 25% of the data is below this point. The second quartile (Q2) is the median of a data set. So, 50% of the data lies below this point. The third quartile (Q3) splits off the highest 25% of data from the lowest 75%. It is known as the upper or 75th empirical quartile, as 75% of the data lies below this point. Here, the quartiles is provided as an ordered list of quartile values for the scanned data, occurring in order Q1, median, Q3.

    standardDeviation number

    Standard deviation of non-null values in the scanned data. NaN, if the field has a NaN.

    average int

    Average of non-null values in the scanned data. NaN, if the field has a NaN.

    max str

    Maximum of non-null values in the scanned data. NaN, if the field has a NaN.

    min str

    Minimum of non-null values in the scanned data. NaN, if the field has a NaN.

    quartiles str

    A quartile divides the number of data points into four parts, or quarters, of more-or-less equal size. Three main quartiles used are: The first quartile (Q1) splits off the lowest 25% of data from the highest 75%. It is also known as the lower or 25th empirical quartile, as 25% of the data is below this point. The second quartile (Q2) is the median of a data set. So, 50% of the data lies below this point. The third quartile (Q3) splits off the highest 25% of data from the lowest 75%. It is known as the upper or 75th empirical quartile, as 75% of the data lies below this point. Here, the quartiles is provided as an ordered list of quartile values for the scanned data, occurring in order Q1, median, Q3.

    standard_deviation int

    Standard deviation of non-null values in the scanned data. NaN, if the field has a NaN.

    average Number

    Average of non-null values in the scanned data. NaN, if the field has a NaN.

    max String

    Maximum of non-null values in the scanned data. NaN, if the field has a NaN.

    min String

    Minimum of non-null values in the scanned data. NaN, if the field has a NaN.

    quartiles String

    A quartile divides the number of data points into four parts, or quarters, of more-or-less equal size. Three main quartiles used are: The first quartile (Q1) splits off the lowest 25% of data from the highest 75%. It is also known as the lower or 25th empirical quartile, as 25% of the data is below this point. The second quartile (Q2) is the median of a data set. So, 50% of the data lies below this point. The third quartile (Q3) splits off the highest 25% of data from the lowest 75%. It is known as the upper or 75th empirical quartile, as 75% of the data lies below this point. Here, the quartiles is provided as an ordered list of quartile values for the scanned data, occurring in order Q1, median, Q3.

    standardDeviation Number

    Standard deviation of non-null values in the scanned data. NaN, if the field has a NaN.

    DatascanDataProfileResultProfileFieldProfileStringProfile, DatascanDataProfileResultProfileFieldProfileStringProfileArgs

    AverageLength int

    Average length of non-null values in the scanned data.

    MaxLength string

    Maximum length of non-null values in the scanned data.

    MinLength string

    Minimum length of non-null values in the scanned data.

    AverageLength int

    Average length of non-null values in the scanned data.

    MaxLength string

    Maximum length of non-null values in the scanned data.

    MinLength string

    Minimum length of non-null values in the scanned data.

    averageLength Integer

    Average length of non-null values in the scanned data.

    maxLength String

    Maximum length of non-null values in the scanned data.

    minLength String

    Minimum length of non-null values in the scanned data.

    averageLength number

    Average length of non-null values in the scanned data.

    maxLength string

    Maximum length of non-null values in the scanned data.

    minLength string

    Minimum length of non-null values in the scanned data.

    average_length int

    Average length of non-null values in the scanned data.

    max_length str

    Maximum length of non-null values in the scanned data.

    min_length str

    Minimum length of non-null values in the scanned data.

    averageLength Number

    Average length of non-null values in the scanned data.

    maxLength String

    Maximum length of non-null values in the scanned data.

    minLength String

    Minimum length of non-null values in the scanned data.

    DatascanDataProfileResultProfileFieldProfileTopNValues, DatascanDataProfileResultProfileFieldProfileTopNValuesArgs

    Count string

    Count of the corresponding value in the scanned data.

    Value string

    String value of a top N non-null value.

    Count string

    Count of the corresponding value in the scanned data.

    Value string

    String value of a top N non-null value.

    count String

    Count of the corresponding value in the scanned data.

    value String

    String value of a top N non-null value.

    count string

    Count of the corresponding value in the scanned data.

    value string

    String value of a top N non-null value.

    count str

    Count of the corresponding value in the scanned data.

    value str

    String value of a top N non-null value.

    count String

    Count of the corresponding value in the scanned data.

    value String

    String value of a top N non-null value.

    DatascanDataProfileResultScannedData, DatascanDataProfileResultScannedDataArgs

    IncrementalField DatascanDataProfileResultScannedDataIncrementalField

    The range denoted by values of an incremental field. Structure is documented below.

    IncrementalField DatascanDataProfileResultScannedDataIncrementalField

    The range denoted by values of an incremental field. Structure is documented below.

    incrementalField DatascanDataProfileResultScannedDataIncrementalField

    The range denoted by values of an incremental field. Structure is documented below.

    incrementalField DatascanDataProfileResultScannedDataIncrementalField

    The range denoted by values of an incremental field. Structure is documented below.

    incremental_field DatascanDataProfileResultScannedDataIncrementalField

    The range denoted by values of an incremental field. Structure is documented below.

    incrementalField Property Map

    The range denoted by values of an incremental field. Structure is documented below.

    DatascanDataProfileResultScannedDataIncrementalField, DatascanDataProfileResultScannedDataIncrementalFieldArgs

    End string

    Value that marks the end of the range.

    Field string

    The unnested field (of type Date or Timestamp) that contains values which monotonically increase over time. If not specified, a data scan will run for all data in the table.

    Start string

    Value that marks the start of the range.

    End string

    Value that marks the end of the range.

    Field string

    The unnested field (of type Date or Timestamp) that contains values which monotonically increase over time. If not specified, a data scan will run for all data in the table.

    Start string

    Value that marks the start of the range.

    end String

    Value that marks the end of the range.

    field String

    The unnested field (of type Date or Timestamp) that contains values which monotonically increase over time. If not specified, a data scan will run for all data in the table.

    start String

    Value that marks the start of the range.

    end string

    Value that marks the end of the range.

    field string

    The unnested field (of type Date or Timestamp) that contains values which monotonically increase over time. If not specified, a data scan will run for all data in the table.

    start string

    Value that marks the start of the range.

    end str

    Value that marks the end of the range.

    field str

    The unnested field (of type Date or Timestamp) that contains values which monotonically increase over time. If not specified, a data scan will run for all data in the table.

    start str

    Value that marks the start of the range.

    end String

    Value that marks the end of the range.

    field String

    The unnested field (of type Date or Timestamp) that contains values which monotonically increase over time. If not specified, a data scan will run for all data in the table.

    start String

    Value that marks the start of the range.

    DatascanDataProfileSpec, DatascanDataProfileSpecArgs

    ExcludeFields DatascanDataProfileSpecExcludeFields

    The fields to exclude from data profile. If specified, the fields will be excluded from data profile, regardless of include_fields value. Structure is documented below.

    IncludeFields DatascanDataProfileSpecIncludeFields

    The fields to include in data profile. If not specified, all fields at the time of profile scan job execution are included, except for ones listed in exclude_fields. Structure is documented below.

    PostScanActions DatascanDataProfileSpecPostScanActions

    Actions to take upon job completion. Structure is documented below.

    RowFilter string

    A filter applied to all rows in a single DataScan job. The filter needs to be a valid SQL expression for a WHERE clause in BigQuery standard SQL syntax. Example: col1 >= 0 AND col2 < 10

    SamplingPercent double

    The percentage of the records to be selected from the dataset for DataScan. Value can range between 0.0 and 100.0 with up to 3 significant decimal digits. Sampling is not applied if sampling_percent is not specified, 0 or 100.

    ExcludeFields DatascanDataProfileSpecExcludeFields

    The fields to exclude from data profile. If specified, the fields will be excluded from data profile, regardless of include_fields value. Structure is documented below.

    IncludeFields DatascanDataProfileSpecIncludeFields

    The fields to include in data profile. If not specified, all fields at the time of profile scan job execution are included, except for ones listed in exclude_fields. Structure is documented below.

    PostScanActions DatascanDataProfileSpecPostScanActions

    Actions to take upon job completion. Structure is documented below.

    RowFilter string

    A filter applied to all rows in a single DataScan job. The filter needs to be a valid SQL expression for a WHERE clause in BigQuery standard SQL syntax. Example: col1 >= 0 AND col2 < 10

    SamplingPercent float64

    The percentage of the records to be selected from the dataset for DataScan. Value can range between 0.0 and 100.0 with up to 3 significant decimal digits. Sampling is not applied if sampling_percent is not specified, 0 or 100.

    excludeFields DatascanDataProfileSpecExcludeFields

    The fields to exclude from data profile. If specified, the fields will be excluded from data profile, regardless of include_fields value. Structure is documented below.

    includeFields DatascanDataProfileSpecIncludeFields

    The fields to include in data profile. If not specified, all fields at the time of profile scan job execution are included, except for ones listed in exclude_fields. Structure is documented below.

    postScanActions DatascanDataProfileSpecPostScanActions

    Actions to take upon job completion. Structure is documented below.

    rowFilter String

    A filter applied to all rows in a single DataScan job. The filter needs to be a valid SQL expression for a WHERE clause in BigQuery standard SQL syntax. Example: col1 >= 0 AND col2 < 10

    samplingPercent Double

    The percentage of the records to be selected from the dataset for DataScan. Value can range between 0.0 and 100.0 with up to 3 significant decimal digits. Sampling is not applied if sampling_percent is not specified, 0 or 100.

    excludeFields DatascanDataProfileSpecExcludeFields

    The fields to exclude from data profile. If specified, the fields will be excluded from data profile, regardless of include_fields value. Structure is documented below.

    includeFields DatascanDataProfileSpecIncludeFields

    The fields to include in data profile. If not specified, all fields at the time of profile scan job execution are included, except for ones listed in exclude_fields. Structure is documented below.

    postScanActions DatascanDataProfileSpecPostScanActions

    Actions to take upon job completion. Structure is documented below.

    rowFilter string

    A filter applied to all rows in a single DataScan job. The filter needs to be a valid SQL expression for a WHERE clause in BigQuery standard SQL syntax. Example: col1 >= 0 AND col2 < 10

    samplingPercent number

    The percentage of the records to be selected from the dataset for DataScan. Value can range between 0.0 and 100.0 with up to 3 significant decimal digits. Sampling is not applied if sampling_percent is not specified, 0 or 100.

    exclude_fields DatascanDataProfileSpecExcludeFields

    The fields to exclude from data profile. If specified, the fields will be excluded from data profile, regardless of include_fields value. Structure is documented below.

    include_fields DatascanDataProfileSpecIncludeFields

    The fields to include in data profile. If not specified, all fields at the time of profile scan job execution are included, except for ones listed in exclude_fields. Structure is documented below.

    post_scan_actions DatascanDataProfileSpecPostScanActions

    Actions to take upon job completion. Structure is documented below.

    row_filter str

    A filter applied to all rows in a single DataScan job. The filter needs to be a valid SQL expression for a WHERE clause in BigQuery standard SQL syntax. Example: col1 >= 0 AND col2 < 10

    sampling_percent float

    The percentage of the records to be selected from the dataset for DataScan. Value can range between 0.0 and 100.0 with up to 3 significant decimal digits. Sampling is not applied if sampling_percent is not specified, or is 0 or 100.

    excludeFields Property Map

    The fields to exclude from data profile. If specified, the fields will be excluded from data profile, regardless of include_fields value. Structure is documented below.

    includeFields Property Map

    The fields to include in data profile. If not specified, all fields at the time of profile scan job execution are included, except for ones listed in exclude_fields. Structure is documented below.

    postScanActions Property Map

    Actions to take upon job completion. Structure is documented below.

    rowFilter String

    A filter applied to all rows in a single DataScan job. The filter needs to be a valid SQL expression for a WHERE clause in BigQuery standard SQL syntax. Example: col1 >= 0 AND col2 < 10

    samplingPercent Number

    The percentage of the records to be selected from the dataset for DataScan. Value can range between 0.0 and 100.0 with up to 3 significant decimal digits. Sampling is not applied if sampling_percent is not specified, or is 0 or 100.

    DatascanDataProfileSpecExcludeFields, DatascanDataProfileSpecExcludeFieldsArgs

    FieldNames List<string>

    Expected input is a list of fully qualified names of fields as in the schema. Only top-level field names for nested fields are supported. For instance, if 'x' is of nested field type, listing 'x' is supported but 'x.y.z' is not supported. Here 'y' and 'y.z' are nested fields of 'x'.

    FieldNames []string

    Expected input is a list of fully qualified names of fields as in the schema. Only top-level field names for nested fields are supported. For instance, if 'x' is of nested field type, listing 'x' is supported but 'x.y.z' is not supported. Here 'y' and 'y.z' are nested fields of 'x'.

    fieldNames List<String>

    Expected input is a list of fully qualified names of fields as in the schema. Only top-level field names for nested fields are supported. For instance, if 'x' is of nested field type, listing 'x' is supported but 'x.y.z' is not supported. Here 'y' and 'y.z' are nested fields of 'x'.

    fieldNames string[]

    Expected input is a list of fully qualified names of fields as in the schema. Only top-level field names for nested fields are supported. For instance, if 'x' is of nested field type, listing 'x' is supported but 'x.y.z' is not supported. Here 'y' and 'y.z' are nested fields of 'x'.

    field_names Sequence[str]

    Expected input is a list of fully qualified names of fields as in the schema. Only top-level field names for nested fields are supported. For instance, if 'x' is of nested field type, listing 'x' is supported but 'x.y.z' is not supported. Here 'y' and 'y.z' are nested fields of 'x'.

    fieldNames List<String>

    Expected input is a list of fully qualified names of fields as in the schema. Only top-level field names for nested fields are supported. For instance, if 'x' is of nested field type, listing 'x' is supported but 'x.y.z' is not supported. Here 'y' and 'y.z' are nested fields of 'x'.

    DatascanDataProfileSpecIncludeFields, DatascanDataProfileSpecIncludeFieldsArgs

    FieldNames List<string>

    Expected input is a list of fully qualified names of fields as in the schema. Only top-level field names for nested fields are supported. For instance, if 'x' is of nested field type, listing 'x' is supported but 'x.y.z' is not supported. Here 'y' and 'y.z' are nested fields of 'x'.

    FieldNames []string

    Expected input is a list of fully qualified names of fields as in the schema. Only top-level field names for nested fields are supported. For instance, if 'x' is of nested field type, listing 'x' is supported but 'x.y.z' is not supported. Here 'y' and 'y.z' are nested fields of 'x'.

    fieldNames List<String>

    Expected input is a list of fully qualified names of fields as in the schema. Only top-level field names for nested fields are supported. For instance, if 'x' is of nested field type, listing 'x' is supported but 'x.y.z' is not supported. Here 'y' and 'y.z' are nested fields of 'x'.

    fieldNames string[]

    Expected input is a list of fully qualified names of fields as in the schema. Only top-level field names for nested fields are supported. For instance, if 'x' is of nested field type, listing 'x' is supported but 'x.y.z' is not supported. Here 'y' and 'y.z' are nested fields of 'x'.

    field_names Sequence[str]

    Expected input is a list of fully qualified names of fields as in the schema. Only top-level field names for nested fields are supported. For instance, if 'x' is of nested field type, listing 'x' is supported but 'x.y.z' is not supported. Here 'y' and 'y.z' are nested fields of 'x'.

    fieldNames List<String>

    Expected input is a list of fully qualified names of fields as in the schema. Only top-level field names for nested fields are supported. For instance, if 'x' is of nested field type, listing 'x' is supported but 'x.y.z' is not supported. Here 'y' and 'y.z' are nested fields of 'x'.

    DatascanDataProfileSpecPostScanActions, DatascanDataProfileSpecPostScanActionsArgs

    BigqueryExport DatascanDataProfileSpecPostScanActionsBigqueryExport

    If set, results will be exported to the provided BigQuery table. Structure is documented below.

    BigqueryExport DatascanDataProfileSpecPostScanActionsBigqueryExport

    If set, results will be exported to the provided BigQuery table. Structure is documented below.

    bigqueryExport DatascanDataProfileSpecPostScanActionsBigqueryExport

    If set, results will be exported to the provided BigQuery table. Structure is documented below.

    bigqueryExport DatascanDataProfileSpecPostScanActionsBigqueryExport

    If set, results will be exported to the provided BigQuery table. Structure is documented below.

    bigquery_export DatascanDataProfileSpecPostScanActionsBigqueryExport

    If set, results will be exported to the provided BigQuery table. Structure is documented below.

    bigqueryExport Property Map

    If set, results will be exported to the provided BigQuery table. Structure is documented below.

    DatascanDataProfileSpecPostScanActionsBigqueryExport, DatascanDataProfileSpecPostScanActionsBigqueryExportArgs

    ResultsTable string

    The BigQuery table to export DataProfileScan results to. Format: //bigquery.googleapis.com/projects/PROJECT_ID/datasets/DATASET_ID/tables/TABLE_ID

    ResultsTable string

    The BigQuery table to export DataProfileScan results to. Format: //bigquery.googleapis.com/projects/PROJECT_ID/datasets/DATASET_ID/tables/TABLE_ID

    resultsTable String

    The BigQuery table to export DataProfileScan results to. Format: //bigquery.googleapis.com/projects/PROJECT_ID/datasets/DATASET_ID/tables/TABLE_ID

    resultsTable string

    The BigQuery table to export DataProfileScan results to. Format: //bigquery.googleapis.com/projects/PROJECT_ID/datasets/DATASET_ID/tables/TABLE_ID

    results_table str

    The BigQuery table to export DataProfileScan results to. Format: //bigquery.googleapis.com/projects/PROJECT_ID/datasets/DATASET_ID/tables/TABLE_ID

    resultsTable String

    The BigQuery table to export DataProfileScan results to. Format: //bigquery.googleapis.com/projects/PROJECT_ID/datasets/DATASET_ID/tables/TABLE_ID

    DatascanDataQualityResult, DatascanDataQualityResultArgs

    Dimensions List<DatascanDataQualityResultDimension>

    A list of results at the dimension level. Structure is documented below.

    Passed bool

    (Output) Whether the rule passed or failed.

    RowCount string

    The count of rows scanned.

    Rules List<DatascanDataQualityResultRule>

    The list of rules to evaluate against a data source. At least one rule is required. Structure is documented below.

    ScannedDatas List<DatascanDataQualityResultScannedData>

    (Output) The data scanned for this result. Structure is documented below.

    Dimensions []DatascanDataQualityResultDimension

    A list of results at the dimension level. Structure is documented below.

    Passed bool

    (Output) Whether the rule passed or failed.

    RowCount string

    The count of rows scanned.

    Rules []DatascanDataQualityResultRule

    The list of rules to evaluate against a data source. At least one rule is required. Structure is documented below.

    ScannedDatas []DatascanDataQualityResultScannedData

    (Output) The data scanned for this result. Structure is documented below.

    dimensions List<DatascanDataQualityResultDimension>

    A list of results at the dimension level. Structure is documented below.

    passed Boolean

    (Output) Whether the rule passed or failed.

    rowCount String

    The count of rows scanned.

    rules List<DatascanDataQualityResultRule>

    The list of rules to evaluate against a data source. At least one rule is required. Structure is documented below.

    scannedDatas List<DatascanDataQualityResultScannedData>

    (Output) The data scanned for this result. Structure is documented below.

    dimensions DatascanDataQualityResultDimension[]

    A list of results at the dimension level. Structure is documented below.

    passed boolean

    (Output) Whether the rule passed or failed.

    rowCount string

    The count of rows scanned.

    rules DatascanDataQualityResultRule[]

    The list of rules to evaluate against a data source. At least one rule is required. Structure is documented below.

    scannedDatas DatascanDataQualityResultScannedData[]

    (Output) The data scanned for this result. Structure is documented below.

    dimensions Sequence[DatascanDataQualityResultDimension]

    A list of results at the dimension level. Structure is documented below.

    passed bool

    (Output) Whether the rule passed or failed.

    row_count str

    The count of rows scanned.

    rules Sequence[DatascanDataQualityResultRule]

    The list of rules to evaluate against a data source. At least one rule is required. Structure is documented below.

    scanned_datas Sequence[DatascanDataQualityResultScannedData]

    (Output) The data scanned for this result. Structure is documented below.

    dimensions List<Property Map>

    A list of results at the dimension level. Structure is documented below.

    passed Boolean

    (Output) Whether the rule passed or failed.

    rowCount String

    The count of rows scanned.

    rules List<Property Map>

    The list of rules to evaluate against a data source. At least one rule is required. Structure is documented below.

    scannedDatas List<Property Map>

    (Output) The data scanned for this result. Structure is documented below.

    DatascanDataQualityResultDimension, DatascanDataQualityResultDimensionArgs

    Passed bool

    (Output) Whether the rule passed or failed.

    Passed bool

    (Output) Whether the rule passed or failed.

    passed Boolean

    (Output) Whether the rule passed or failed.

    passed boolean

    (Output) Whether the rule passed or failed.

    passed bool

    (Output) Whether the rule passed or failed.

    passed Boolean

    (Output) Whether the rule passed or failed.

    DatascanDataQualityResultRule, DatascanDataQualityResultRuleArgs

    EvaluatedCount string

    (Output) The number of rows a rule was evaluated against. This field is only valid for ColumnMap type rules. Evaluated count can be configured to either

    1. include all rows (default) - with null rows automatically failing rule evaluation, or
    2. exclude null rows from the evaluatedCount, by setting ignore_nulls = true.
    FailingRowsQuery string

    (Output) The query to find rows that did not pass this rule. Only applies to ColumnMap and RowCondition rules.

    NullCount string

    (Output) The number of rows with null values in the specified column.

    PassRatio int

    (Output) The ratio of passedCount / evaluatedCount. This field is only valid for ColumnMap type rules.

    Passed bool

    (Output) Whether the rule passed or failed.

    PassedCount string

    (Output) The number of rows which passed a rule evaluation. This field is only valid for ColumnMap type rules.

    Rules List<DatascanDataQualityResultRuleRule>

    (Output) The rule specified in the DataQualitySpec, as is. Structure is documented below.

    EvaluatedCount string

    (Output) The number of rows a rule was evaluated against. This field is only valid for ColumnMap type rules. Evaluated count can be configured to either

    1. include all rows (default) - with null rows automatically failing rule evaluation, or
    2. exclude null rows from the evaluatedCount, by setting ignore_nulls = true.
    FailingRowsQuery string

    (Output) The query to find rows that did not pass this rule. Only applies to ColumnMap and RowCondition rules.

    NullCount string

    (Output) The number of rows with null values in the specified column.

    PassRatio int

    (Output) The ratio of passedCount / evaluatedCount. This field is only valid for ColumnMap type rules.

    Passed bool

    (Output) Whether the rule passed or failed.

    PassedCount string

    (Output) The number of rows which passed a rule evaluation. This field is only valid for ColumnMap type rules.

    Rules []DatascanDataQualityResultRuleRule

    (Output) The rule specified in the DataQualitySpec, as is. Structure is documented below.

    evaluatedCount String

    (Output) The number of rows a rule was evaluated against. This field is only valid for ColumnMap type rules. Evaluated count can be configured to either

    1. include all rows (default) - with null rows automatically failing rule evaluation, or
    2. exclude null rows from the evaluatedCount, by setting ignore_nulls = true.
    failingRowsQuery String

    (Output) The query to find rows that did not pass this rule. Only applies to ColumnMap and RowCondition rules.

    nullCount String

    (Output) The number of rows with null values in the specified column.

    passRatio Integer

    (Output) The ratio of passedCount / evaluatedCount. This field is only valid for ColumnMap type rules.

    passed Boolean

    (Output) Whether the rule passed or failed.

    passedCount String

    (Output) The number of rows which passed a rule evaluation. This field is only valid for ColumnMap type rules.

    rules List<DatascanDataQualityResultRuleRule>

    (Output) The rule specified in the DataQualitySpec, as is. Structure is documented below.

    evaluatedCount string

    (Output) The number of rows a rule was evaluated against. This field is only valid for ColumnMap type rules. Evaluated count can be configured to either

    1. include all rows (default) - with null rows automatically failing rule evaluation, or
    2. exclude null rows from the evaluatedCount, by setting ignore_nulls = true.
    failingRowsQuery string

    (Output) The query to find rows that did not pass this rule. Only applies to ColumnMap and RowCondition rules.

    nullCount string

    (Output) The number of rows with null values in the specified column.

    passRatio number

    (Output) The ratio of passedCount / evaluatedCount. This field is only valid for ColumnMap type rules.

    passed boolean

    (Output) Whether the rule passed or failed.

    passedCount string

    (Output) The number of rows which passed a rule evaluation. This field is only valid for ColumnMap type rules.

    rules DatascanDataQualityResultRuleRule[]

    (Output) The rule specified in the DataQualitySpec, as is. Structure is documented below.

    evaluated_count str

    (Output) The number of rows a rule was evaluated against. This field is only valid for ColumnMap type rules. Evaluated count can be configured to either

    1. include all rows (default) - with null rows automatically failing rule evaluation, or
    2. exclude null rows from the evaluatedCount, by setting ignore_nulls = true.
    failing_rows_query str

    (Output) The query to find rows that did not pass this rule. Only applies to ColumnMap and RowCondition rules.

    null_count str

    (Output) The number of rows with null values in the specified column.

    pass_ratio int

    (Output) The ratio of passedCount / evaluatedCount. This field is only valid for ColumnMap type rules.

    passed bool

    (Output) Whether the rule passed or failed.

    passed_count str

    (Output) The number of rows which passed a rule evaluation. This field is only valid for ColumnMap type rules.

    rules Sequence[DatascanDataQualityResultRuleRule]

    (Output) The rule specified in the DataQualitySpec, as is. Structure is documented below.

    evaluatedCount String

    (Output) The number of rows a rule was evaluated against. This field is only valid for ColumnMap type rules. Evaluated count can be configured to either

    1. include all rows (default) - with null rows automatically failing rule evaluation, or
    2. exclude null rows from the evaluatedCount, by setting ignore_nulls = true.
    failingRowsQuery String

    (Output) The query to find rows that did not pass this rule. Only applies to ColumnMap and RowCondition rules.

    nullCount String

    (Output) The number of rows with null values in the specified column.

    passRatio Number

    (Output) The ratio of passedCount / evaluatedCount. This field is only valid for ColumnMap type rules.

    passed Boolean

    (Output) Whether the rule passed or failed.

    passedCount String

    (Output) The number of rows which passed a rule evaluation. This field is only valid for ColumnMap type rules.

    rules List<Property Map>

    (Output) The rule specified in the DataQualitySpec, as is. Structure is documented below.

    DatascanDataQualityResultRuleRule, DatascanDataQualityResultRuleRuleArgs

    Column string

    The unnested column which this rule is evaluated against.

    Dimension string

    The dimension a rule belongs to. Results are also aggregated at the dimension level. Supported dimensions are ["COMPLETENESS", "ACCURACY", "CONSISTENCY", "VALIDITY", "UNIQUENESS", "INTEGRITY"]

    IgnoreNull bool

    Rows with null values will automatically fail a rule, unless ignoreNull is true. In that case, such null rows are trivially considered passing. Only applicable to ColumnMap rules.

    NonNullExpectations List<DatascanDataQualityResultRuleRuleNonNullExpectation>

    ColumnMap rule which evaluates whether each column value is null.

    RangeExpectations List<DatascanDataQualityResultRuleRuleRangeExpectation>

    ColumnMap rule which evaluates whether each column value lies between a specified range. Structure is documented below.

    RegexExpectations List<DatascanDataQualityResultRuleRuleRegexExpectation>

    ColumnMap rule which evaluates whether each column value matches a specified regex. Structure is documented below.

    RowConditionExpectations List<DatascanDataQualityResultRuleRuleRowConditionExpectation>

    Table rule which evaluates whether each row passes the specified condition. Structure is documented below.

    SetExpectations List<DatascanDataQualityResultRuleRuleSetExpectation>

    ColumnMap rule which evaluates whether each column value is contained by a specified set. Structure is documented below.

    StatisticRangeExpectations List<DatascanDataQualityResultRuleRuleStatisticRangeExpectation>

    ColumnAggregate rule which evaluates whether the column aggregate statistic lies between a specified range. Structure is documented below.

    TableConditionExpectations List<DatascanDataQualityResultRuleRuleTableConditionExpectation>

    Table rule which evaluates whether the provided expression is true. Structure is documented below.

    Threshold int

    The minimum ratio of passing_rows / total_rows required to pass this rule, with a range of [0.0, 1.0]. 0 indicates default value (i.e. 1.0).

    UniquenessExpectations List<DatascanDataQualityResultRuleRuleUniquenessExpectation>

    Row-level rule which evaluates whether each column value is unique.

    Column string

    The unnested column which this rule is evaluated against.

    Dimension string

    The dimension a rule belongs to. Results are also aggregated at the dimension level. Supported dimensions are ["COMPLETENESS", "ACCURACY", "CONSISTENCY", "VALIDITY", "UNIQUENESS", "INTEGRITY"]

    IgnoreNull bool

    Rows with null values will automatically fail a rule, unless ignoreNull is true. In that case, such null rows are trivially considered passing. Only applicable to ColumnMap rules.

    NonNullExpectations []DatascanDataQualityResultRuleRuleNonNullExpectation

    ColumnMap rule which evaluates whether each column value is null.

    RangeExpectations []DatascanDataQualityResultRuleRuleRangeExpectation

    ColumnMap rule which evaluates whether each column value lies between a specified range. Structure is documented below.

    RegexExpectations []DatascanDataQualityResultRuleRuleRegexExpectation

    ColumnMap rule which evaluates whether each column value matches a specified regex. Structure is documented below.

    RowConditionExpectations []DatascanDataQualityResultRuleRuleRowConditionExpectation

    Table rule which evaluates whether each row passes the specified condition. Structure is documented below.

    SetExpectations []DatascanDataQualityResultRuleRuleSetExpectation

    ColumnMap rule which evaluates whether each column value is contained by a specified set. Structure is documented below.

    StatisticRangeExpectations []DatascanDataQualityResultRuleRuleStatisticRangeExpectation

    ColumnAggregate rule which evaluates whether the column aggregate statistic lies between a specified range. Structure is documented below.

    TableConditionExpectations []DatascanDataQualityResultRuleRuleTableConditionExpectation

    Table rule which evaluates whether the provided expression is true. Structure is documented below.

    Threshold int

    The minimum ratio of passing_rows / total_rows required to pass this rule, with a range of [0.0, 1.0]. 0 indicates default value (i.e. 1.0).

    UniquenessExpectations []DatascanDataQualityResultRuleRuleUniquenessExpectation

    Row-level rule which evaluates whether each column value is unique.

    column String

    The unnested column which this rule is evaluated against.

    dimension String

    The dimension a rule belongs to. Results are also aggregated at the dimension level. Supported dimensions are ["COMPLETENESS", "ACCURACY", "CONSISTENCY", "VALIDITY", "UNIQUENESS", "INTEGRITY"]

    ignoreNull Boolean

    Rows with null values will automatically fail a rule, unless ignoreNull is true. In that case, such null rows are trivially considered passing. Only applicable to ColumnMap rules.

    nonNullExpectations List<DatascanDataQualityResultRuleRuleNonNullExpectation>

    ColumnMap rule which evaluates whether each column value is null.

    rangeExpectations List<DatascanDataQualityResultRuleRuleRangeExpectation>

    ColumnMap rule which evaluates whether each column value lies between a specified range. Structure is documented below.

    regexExpectations List<DatascanDataQualityResultRuleRuleRegexExpectation>

    ColumnMap rule which evaluates whether each column value matches a specified regex. Structure is documented below.

    rowConditionExpectations List<DatascanDataQualityResultRuleRuleRowConditionExpectation>

    Table rule which evaluates whether each row passes the specified condition. Structure is documented below.

    setExpectations List<DatascanDataQualityResultRuleRuleSetExpectation>

    ColumnMap rule which evaluates whether each column value is contained by a specified set. Structure is documented below.

    statisticRangeExpectations List<DatascanDataQualityResultRuleRuleStatisticRangeExpectation>

    ColumnAggregate rule which evaluates whether the column aggregate statistic lies between a specified range. Structure is documented below.

    tableConditionExpectations List<DatascanDataQualityResultRuleRuleTableConditionExpectation>

    Table rule which evaluates whether the provided expression is true. Structure is documented below.

    threshold Integer

    The minimum ratio of passing_rows / total_rows required to pass this rule, with a range of [0.0, 1.0]. 0 indicates default value (i.e. 1.0).

    uniquenessExpectations List<DatascanDataQualityResultRuleRuleUniquenessExpectation>

    Row-level rule which evaluates whether each column value is unique.

    column string

    The unnested column which this rule is evaluated against.

    dimension string

    The dimension a rule belongs to. Results are also aggregated at the dimension level. Supported dimensions are ["COMPLETENESS", "ACCURACY", "CONSISTENCY", "VALIDITY", "UNIQUENESS", "INTEGRITY"]

    ignoreNull boolean

    Rows with null values will automatically fail a rule, unless ignoreNull is true. In that case, such null rows are trivially considered passing. Only applicable to ColumnMap rules.

    nonNullExpectations DatascanDataQualityResultRuleRuleNonNullExpectation[]

    ColumnMap rule which evaluates whether each column value is null.

    rangeExpectations DatascanDataQualityResultRuleRuleRangeExpectation[]

    ColumnMap rule which evaluates whether each column value lies between a specified range. Structure is documented below.

    regexExpectations DatascanDataQualityResultRuleRuleRegexExpectation[]

    ColumnMap rule which evaluates whether each column value matches a specified regex. Structure is documented below.

    rowConditionExpectations DatascanDataQualityResultRuleRuleRowConditionExpectation[]

    Table rule which evaluates whether each row passes the specified condition. Structure is documented below.

    setExpectations DatascanDataQualityResultRuleRuleSetExpectation[]

    ColumnMap rule which evaluates whether each column value is contained by a specified set. Structure is documented below.

    statisticRangeExpectations DatascanDataQualityResultRuleRuleStatisticRangeExpectation[]

    ColumnAggregate rule which evaluates whether the column aggregate statistic lies between a specified range. Structure is documented below.

    tableConditionExpectations DatascanDataQualityResultRuleRuleTableConditionExpectation[]

    Table rule which evaluates whether the provided expression is true. Structure is documented below.

    threshold number

    The minimum ratio of passing_rows / total_rows required to pass this rule, with a range of [0.0, 1.0]. 0 indicates default value (i.e. 1.0).

    uniquenessExpectations DatascanDataQualityResultRuleRuleUniquenessExpectation[]

    Row-level rule which evaluates whether each column value is unique.

    column str

    The unnested column which this rule is evaluated against.

    dimension str

    The dimension a rule belongs to. Results are also aggregated at the dimension level. Supported dimensions are ["COMPLETENESS", "ACCURACY", "CONSISTENCY", "VALIDITY", "UNIQUENESS", "INTEGRITY"]

    ignore_null bool

    Rows with null values will automatically fail a rule, unless ignoreNull is true. In that case, such null rows are trivially considered passing. Only applicable to ColumnMap rules.

    non_null_expectations Sequence[DatascanDataQualityResultRuleRuleNonNullExpectation]

    ColumnMap rule which evaluates whether each column value is null.

    range_expectations Sequence[DatascanDataQualityResultRuleRuleRangeExpectation]

    ColumnMap rule which evaluates whether each column value lies between a specified range. Structure is documented below.

    regex_expectations Sequence[DatascanDataQualityResultRuleRuleRegexExpectation]

    ColumnMap rule which evaluates whether each column value matches a specified regex. Structure is documented below.

    row_condition_expectations Sequence[DatascanDataQualityResultRuleRuleRowConditionExpectation]

    Table rule which evaluates whether each row passes the specified condition. Structure is documented below.

    set_expectations Sequence[DatascanDataQualityResultRuleRuleSetExpectation]

    ColumnMap rule which evaluates whether each column value is contained by a specified set. Structure is documented below.

    statistic_range_expectations Sequence[DatascanDataQualityResultRuleRuleStatisticRangeExpectation]

    ColumnAggregate rule which evaluates whether the column aggregate statistic lies between a specified range. Structure is documented below.

    table_condition_expectations Sequence[DatascanDataQualityResultRuleRuleTableConditionExpectation]

    Table rule which evaluates whether the provided expression is true. Structure is documented below.

    threshold int

    The minimum ratio of passing_rows / total_rows required to pass this rule, with a range of [0.0, 1.0]. 0 indicates default value (i.e. 1.0).

    uniqueness_expectations Sequence[DatascanDataQualityResultRuleRuleUniquenessExpectation]

    Row-level rule which evaluates whether each column value is unique.

    column String

    The unnested column which this rule is evaluated against.

    dimension String

    The dimension a rule belongs to. Results are also aggregated at the dimension level. Supported dimensions are ["COMPLETENESS", "ACCURACY", "CONSISTENCY", "VALIDITY", "UNIQUENESS", "INTEGRITY"]

    ignoreNull Boolean

    Rows with null values will automatically fail a rule, unless ignoreNull is true. In that case, such null rows are trivially considered passing. Only applicable to ColumnMap rules.

    nonNullExpectations List<Property Map>

    ColumnMap rule which evaluates whether each column value is null.

    rangeExpectations List<Property Map>

    ColumnMap rule which evaluates whether each column value lies between a specified range. Structure is documented below.

    regexExpectations List<Property Map>

    ColumnMap rule which evaluates whether each column value matches a specified regex. Structure is documented below.

    rowConditionExpectations List<Property Map>

    Table rule which evaluates whether each row passes the specified condition. Structure is documented below.

    setExpectations List<Property Map>

    ColumnMap rule which evaluates whether each column value is contained by a specified set. Structure is documented below.

    statisticRangeExpectations List<Property Map>

    ColumnAggregate rule which evaluates whether the column aggregate statistic lies between a specified range. Structure is documented below.

    tableConditionExpectations List<Property Map>

    Table rule which evaluates whether the provided expression is true. Structure is documented below.

    threshold Number

    The minimum ratio of passing_rows / total_rows required to pass this rule, with a range of [0.0, 1.0]. 0 indicates default value (i.e. 1.0).

    uniquenessExpectations List<Property Map>

    Row-level rule which evaluates whether each column value is unique.

    DatascanDataQualityResultRuleRuleRangeExpectation, DatascanDataQualityResultRuleRuleRangeExpectationArgs

    MaxValue string

    The maximum column value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.

    MinValue string

    The minimum column value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.

    StrictMaxEnabled bool

    Whether each value needs to be strictly lesser than ('<') the maximum, or if equality is allowed. Only relevant if a maxValue has been defined. Default = false.

    StrictMinEnabled bool

    Whether each value needs to be strictly greater than ('>') the minimum, or if equality is allowed. Only relevant if a minValue has been defined. Default = false.

    MaxValue string

    The maximum column value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.

    MinValue string

    The minimum column value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.

    StrictMaxEnabled bool

    Whether each value needs to be strictly lesser than ('<') the maximum, or if equality is allowed. Only relevant if a maxValue has been defined. Default = false.

    StrictMinEnabled bool

    Whether each value needs to be strictly greater than ('>') the minimum, or if equality is allowed. Only relevant if a minValue has been defined. Default = false.

    maxValue String

    The maximum column value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.

    minValue String

    The minimum column value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.

    strictMaxEnabled Boolean

    Whether each value needs to be strictly lesser than ('<') the maximum, or if equality is allowed. Only relevant if a maxValue has been defined. Default = false.

    strictMinEnabled Boolean

    Whether each value needs to be strictly greater than ('>') the minimum, or if equality is allowed. Only relevant if a minValue has been defined. Default = false.

    maxValue string

    The maximum column value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.

    minValue string

    The minimum column value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.

    strictMaxEnabled boolean

    Whether each value needs to be strictly lesser than ('<') the maximum, or if equality is allowed. Only relevant if a maxValue has been defined. Default = false.

    strictMinEnabled boolean

    Whether each value needs to be strictly greater than ('>') the minimum, or if equality is allowed. Only relevant if a minValue has been defined. Default = false.

    max_value str

    The maximum column value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.

    min_value str

    The minimum column value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.

    strict_max_enabled bool

    Whether each value needs to be strictly lesser than ('<') the maximum, or if equality is allowed. Only relevant if a maxValue has been defined. Default = false.

    strict_min_enabled bool

    Whether each value needs to be strictly greater than ('>') the minimum, or if equality is allowed. Only relevant if a minValue has been defined. Default = false.

    maxValue String

    The maximum column value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.

    minValue String

    The minimum column value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.

    strictMaxEnabled Boolean

    Whether each value needs to be strictly lesser than ('<') the maximum, or if equality is allowed. Only relevant if a maxValue has been defined. Default = false.

    strictMinEnabled Boolean

    Whether each value needs to be strictly greater than ('>') the minimum, or if equality is allowed. Only relevant if a minValue has been defined. Default = false.

    DatascanDataQualityResultRuleRuleRegexExpectation, DatascanDataQualityResultRuleRuleRegexExpectationArgs

    Regex string

    A regular expression the column value is expected to match.

    Regex string

    A regular expression the column value is expected to match.

    regex String

    A regular expression the column value is expected to match.

    regex string

    A regular expression the column value is expected to match.

    regex str

    A regular expression the column value is expected to match.

    regex String

    A regular expression the column value is expected to match.

    DatascanDataQualityResultRuleRuleRowConditionExpectation, DatascanDataQualityResultRuleRuleRowConditionExpectationArgs

    SqlExpression string

    The SQL expression.

    SqlExpression string

    The SQL expression.

    sqlExpression String

    The SQL expression.

    sqlExpression string

    The SQL expression.

    sql_expression str

    The SQL expression.

    sqlExpression String

    The SQL expression.

    DatascanDataQualityResultRuleRuleSetExpectation, DatascanDataQualityResultRuleRuleSetExpectationArgs

    Values List<string>

    Expected values for the column value.

    Values []string

    Expected values for the column value.

    values List<String>

    Expected values for the column value.

    values string[]

    Expected values for the column value.

    values Sequence[str]

    Expected values for the column value.

    values List<String>

    Expected values for the column value.

    DatascanDataQualityResultRuleRuleStatisticRangeExpectation, DatascanDataQualityResultRuleRuleStatisticRangeExpectationArgs

    MaxValue string

    The maximum column statistic value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.

    MinValue string

    The minimum column statistic value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.

    Statistic string

    Column statistics. Possible values are: STATISTIC_UNDEFINED, MEAN, MIN, MAX.

    StrictMaxEnabled bool

    Whether column statistic needs to be strictly lesser than ('<') the maximum, or if equality is allowed. Only relevant if a maxValue has been defined. Default = false.

    StrictMinEnabled bool

    Whether column statistic needs to be strictly greater than ('>') the minimum, or if equality is allowed. Only relevant if a minValue has been defined. Default = false.

    MaxValue string

    The maximum column statistic value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.

    MinValue string

    The minimum column statistic value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.

    Statistic string

    Column statistics. Possible values are: STATISTIC_UNDEFINED, MEAN, MIN, MAX.

    StrictMaxEnabled bool

    Whether column statistic needs to be strictly lesser than ('<') the maximum, or if equality is allowed. Only relevant if a maxValue has been defined. Default = false.

    StrictMinEnabled bool

    Whether column statistic needs to be strictly greater than ('>') the minimum, or if equality is allowed. Only relevant if a minValue has been defined. Default = false.

    maxValue String

    The maximum column statistic value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.

    minValue String

    The minimum column statistic value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.

    statistic String

    Column statistics. Possible values are: STATISTIC_UNDEFINED, MEAN, MIN, MAX.

    strictMaxEnabled Boolean

    Whether column statistic needs to be strictly lesser than ('<') the maximum, or if equality is allowed. Only relevant if a maxValue has been defined. Default = false.

    strictMinEnabled Boolean

    Whether column statistic needs to be strictly greater than ('>') the minimum, or if equality is allowed. Only relevant if a minValue has been defined. Default = false.

    maxValue string

    The maximum column statistic value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.

    minValue string

    The minimum column statistic value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.

    statistic string

    Column statistics. Possible values are: STATISTIC_UNDEFINED, MEAN, MIN, MAX.

    strictMaxEnabled boolean

    Whether column statistic needs to be strictly lesser than ('<') the maximum, or if equality is allowed. Only relevant if a maxValue has been defined. Default = false.

    strictMinEnabled boolean

    Whether column statistic needs to be strictly greater than ('>') the minimum, or if equality is allowed. Only relevant if a minValue has been defined. Default = false.

    max_value str

    The maximum column statistic value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.

    min_value str

    The minimum column statistic value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.

    statistic str

    Column statistics. Possible values are: STATISTIC_UNDEFINED, MEAN, MIN, MAX.

    strict_max_enabled bool

    Whether column statistic needs to be strictly lesser than ('<') the maximum, or if equality is allowed. Only relevant if a maxValue has been defined. Default = false.

    strict_min_enabled bool

    Whether column statistic needs to be strictly greater than ('>') the minimum, or if equality is allowed. Only relevant if a minValue has been defined. Default = false.

    maxValue String

    The maximum column statistic value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.

    minValue String

    The minimum column statistic value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.

    statistic String

    Column statistics. Possible values are: STATISTIC_UNDEFINED, MEAN, MIN, MAX.

    strictMaxEnabled Boolean

    Whether column statistic needs to be strictly lesser than ('<') the maximum, or if equality is allowed. Only relevant if a maxValue has been defined. Default = false.

    strictMinEnabled Boolean

    Whether column statistic needs to be strictly greater than ('>') the minimum, or if equality is allowed. Only relevant if a minValue has been defined. Default = false.

    DatascanDataQualityResultRuleRuleTableConditionExpectation, DatascanDataQualityResultRuleRuleTableConditionExpectationArgs

    SqlExpression string

    The SQL expression.

    SqlExpression string

    The SQL expression.

    sqlExpression String

    The SQL expression.

    sqlExpression string

    The SQL expression.

    sql_expression str

    The SQL expression.

    sqlExpression String

    The SQL expression.

    DatascanDataQualityResultScannedData, DatascanDataQualityResultScannedDataArgs

    IncrementalField DatascanDataQualityResultScannedDataIncrementalField

    The range denoted by values of an incremental field. Structure is documented below.

    IncrementalField DatascanDataQualityResultScannedDataIncrementalField

    The range denoted by values of an incremental field. Structure is documented below.

    incrementalField DatascanDataQualityResultScannedDataIncrementalField

    The range denoted by values of an incremental field. Structure is documented below.

    incrementalField DatascanDataQualityResultScannedDataIncrementalField

    The range denoted by values of an incremental field. Structure is documented below.

    incremental_field DatascanDataQualityResultScannedDataIncrementalField

    The range denoted by values of an incremental field. Structure is documented below.

    incrementalField Property Map

    The range denoted by values of an incremental field. Structure is documented below.

    DatascanDataQualityResultScannedDataIncrementalField, DatascanDataQualityResultScannedDataIncrementalFieldArgs

    End string

    Value that marks the end of the range.

    Field string

    The unnested field (of type Date or Timestamp) that contains values which monotonically increase over time. If not specified, a data scan will run for all data in the table.

    Start string

    Value that marks the start of the range.

    End string

    Value that marks the end of the range.

    Field string

    The unnested field (of type Date or Timestamp) that contains values which monotonically increase over time. If not specified, a data scan will run for all data in the table.

    Start string

    Value that marks the start of the range.

    end String

    Value that marks the end of the range.

    field String

    The unnested field (of type Date or Timestamp) that contains values which monotonically increase over time. If not specified, a data scan will run for all data in the table.

    start String

    Value that marks the start of the range.

    end string

    Value that marks the end of the range.

    field string

    The unnested field (of type Date or Timestamp) that contains values which monotonically increase over time. If not specified, a data scan will run for all data in the table.

    start string

    Value that marks the start of the range.

    end str

    Value that marks the end of the range.

    field str

    The unnested field (of type Date or Timestamp) that contains values which monotonically increase over time. If not specified, a data scan will run for all data in the table.

    start str

    Value that marks the start of the range.

    end String

    Value that marks the end of the range.

    field String

    The unnested field (of type Date or Timestamp) that contains values which monotonically increase over time. If not specified, a data scan will run for all data in the table.

    start String

    Value that marks the start of the range.

    DatascanDataQualitySpec, DatascanDataQualitySpecArgs

    PostScanActions DatascanDataQualitySpecPostScanActions

    Actions to take upon job completion. Structure is documented below.

    RowFilter string

    A filter applied to all rows in a single DataScan job. The filter needs to be a valid SQL expression for a WHERE clause in BigQuery standard SQL syntax. Example: col1 >= 0 AND col2 < 10

    Rules List<DatascanDataQualitySpecRule>

    The list of rules to evaluate against a data source. At least one rule is required. Structure is documented below.

    SamplingPercent double

    The percentage of the records to be selected from the dataset for DataScan. Value can range between 0.0 and 100.0 with up to 3 significant decimal digits. Sampling is not applied if sampling_percent is not specified, 0 or 100.

    PostScanActions DatascanDataQualitySpecPostScanActions

    Actions to take upon job completion. Structure is documented below.

    RowFilter string

    A filter applied to all rows in a single DataScan job. The filter needs to be a valid SQL expression for a WHERE clause in BigQuery standard SQL syntax. Example: col1 >= 0 AND col2 < 10

    Rules []DatascanDataQualitySpecRule

    The list of rules to evaluate against a data source. At least one rule is required. Structure is documented below.

    SamplingPercent float64

    The percentage of the records to be selected from the dataset for DataScan. Value can range between 0.0 and 100.0 with up to 3 significant decimal digits. Sampling is not applied if sampling_percent is not specified, 0 or 100.

    postScanActions DatascanDataQualitySpecPostScanActions

    Actions to take upon job completion. Structure is documented below.

    rowFilter String

    A filter applied to all rows in a single DataScan job. The filter needs to be a valid SQL expression for a WHERE clause in BigQuery standard SQL syntax. Example: col1 >= 0 AND col2 < 10

    rules List<DatascanDataQualitySpecRule>

    The list of rules to evaluate against a data source. At least one rule is required. Structure is documented below.

    samplingPercent Double

    The percentage of the records to be selected from the dataset for DataScan. Value can range between 0.0 and 100.0 with up to 3 significant decimal digits. Sampling is not applied if sampling_percent is not specified, 0 or 100.

    postScanActions DatascanDataQualitySpecPostScanActions

    Actions to take upon job completion. Structure is documented below.

    rowFilter string

    A filter applied to all rows in a single DataScan job. The filter needs to be a valid SQL expression for a WHERE clause in BigQuery standard SQL syntax. Example: col1 >= 0 AND col2 < 10

    rules DatascanDataQualitySpecRule[]

    The list of rules to evaluate against a data source. At least one rule is required. Structure is documented below.

    samplingPercent number

    The percentage of the records to be selected from the dataset for DataScan. Value can range between 0.0 and 100.0 with up to 3 significant decimal digits. Sampling is not applied if sampling_percent is not specified, 0 or 100.

    post_scan_actions DatascanDataQualitySpecPostScanActions

    Actions to take upon job completion. Structure is documented below.

    row_filter str

    A filter applied to all rows in a single DataScan job. The filter needs to be a valid SQL expression for a WHERE clause in BigQuery standard SQL syntax. Example: col1 >= 0 AND col2 < 10

    rules Sequence[DatascanDataQualitySpecRule]

    The list of rules to evaluate against a data source. At least one rule is required. Structure is documented below.

    sampling_percent float

    The percentage of the records to be selected from the dataset for DataScan. Value can range between 0.0 and 100.0 with up to 3 significant decimal digits. Sampling is not applied if sampling_percent is not specified, 0 or 100.

    postScanActions Property Map

    Actions to take upon job completion. Structure is documented below.

    rowFilter String

    A filter applied to all rows in a single DataScan job. The filter needs to be a valid SQL expression for a WHERE clause in BigQuery standard SQL syntax. Example: col1 >= 0 AND col2 < 10

    rules List<Property Map>

    The list of rules to evaluate against a data source. At least one rule is required. Structure is documented below.

    samplingPercent Number

    The percentage of the records to be selected from the dataset for DataScan. Value can range between 0.0 and 100.0 with up to 3 significant decimal digits. Sampling is not applied if sampling_percent is not specified, 0 or 100.

    DatascanDataQualitySpecPostScanActions, DatascanDataQualitySpecPostScanActionsArgs

    BigqueryExport DatascanDataQualitySpecPostScanActionsBigqueryExport

    If set, results will be exported to the provided BigQuery table. Structure is documented below.

    BigqueryExport DatascanDataQualitySpecPostScanActionsBigqueryExport

    If set, results will be exported to the provided BigQuery table. Structure is documented below.

    bigqueryExport DatascanDataQualitySpecPostScanActionsBigqueryExport

    If set, results will be exported to the provided BigQuery table. Structure is documented below.

    bigqueryExport DatascanDataQualitySpecPostScanActionsBigqueryExport

    If set, results will be exported to the provided BigQuery table. Structure is documented below.

    bigquery_export DatascanDataQualitySpecPostScanActionsBigqueryExport

    If set, results will be exported to the provided BigQuery table. Structure is documented below.

    bigqueryExport Property Map

    If set, results will be exported to the provided BigQuery table. Structure is documented below.

    DatascanDataQualitySpecPostScanActionsBigqueryExport, DatascanDataQualitySpecPostScanActionsBigqueryExportArgs

    ResultsTable string

    The BigQuery table to export DataProfileScan results to. Format: //bigquery.googleapis.com/projects/PROJECT_ID/datasets/DATASET_ID/tables/TABLE_ID

    ResultsTable string

    The BigQuery table to export DataProfileScan results to. Format: //bigquery.googleapis.com/projects/PROJECT_ID/datasets/DATASET_ID/tables/TABLE_ID

    resultsTable String

    The BigQuery table to export DataProfileScan results to. Format: //bigquery.googleapis.com/projects/PROJECT_ID/datasets/DATASET_ID/tables/TABLE_ID

    resultsTable string

    The BigQuery table to export DataProfileScan results to. Format: //bigquery.googleapis.com/projects/PROJECT_ID/datasets/DATASET_ID/tables/TABLE_ID

    results_table str

    The BigQuery table to export DataProfileScan results to. Format: //bigquery.googleapis.com/projects/PROJECT_ID/datasets/DATASET_ID/tables/TABLE_ID

    resultsTable String

    The BigQuery table to export DataProfileScan results to. Format: //bigquery.googleapis.com/projects/PROJECT_ID/datasets/DATASET_ID/tables/TABLE_ID

    DatascanDataQualitySpecRule, DatascanDataQualitySpecRuleArgs

    Dimension string

    The dimension a rule belongs to. Results are also aggregated at the dimension level. Supported dimensions are ["COMPLETENESS", "ACCURACY", "CONSISTENCY", "VALIDITY", "UNIQUENESS", "INTEGRITY"]

    Column string

    The unnested column which this rule is evaluated against.

    Description string

    Description of the rule. The maximum length is 1,024 characters.

    IgnoreNull bool

    Rows with null values will automatically fail a rule, unless ignoreNull is true. In that case, such null rows are trivially considered passing. Only applicable to ColumnMap rules.

    Name string

    A mutable name for the rule. The name must contain only letters (a-z, A-Z), numbers (0-9), or hyphens (-). The maximum length is 63 characters. Must start with a letter. Must end with a number or a letter.

    NonNullExpectation DatascanDataQualitySpecRuleNonNullExpectation

    ColumnMap rule which evaluates whether each column value is null.

    RangeExpectation DatascanDataQualitySpecRuleRangeExpectation

    ColumnMap rule which evaluates whether each column value lies between a specified range. Structure is documented below.

    RegexExpectation DatascanDataQualitySpecRuleRegexExpectation

    ColumnMap rule which evaluates whether each column value matches a specified regex. Structure is documented below.

    RowConditionExpectation DatascanDataQualitySpecRuleRowConditionExpectation

    Table rule which evaluates whether each row passes the specified condition. Structure is documented below.

    SetExpectation DatascanDataQualitySpecRuleSetExpectation

    ColumnMap rule which evaluates whether each column value is contained by a specified set. Structure is documented below.

    StatisticRangeExpectation DatascanDataQualitySpecRuleStatisticRangeExpectation

    ColumnAggregate rule which evaluates whether the column aggregate statistic lies between a specified range. Structure is documented below.

    TableConditionExpectation DatascanDataQualitySpecRuleTableConditionExpectation

    Table rule which evaluates whether the provided expression is true. Structure is documented below.

    Threshold double

    The minimum ratio of passing_rows / total_rows required to pass this rule, with a range of [0.0, 1.0]. 0 indicates default value (i.e. 1.0).

    UniquenessExpectation DatascanDataQualitySpecRuleUniquenessExpectation

    Row-level rule which evaluates whether each column value is unique.

    Dimension string

    The dimension a rule belongs to. Results are also aggregated at the dimension level. Supported dimensions are ["COMPLETENESS", "ACCURACY", "CONSISTENCY", "VALIDITY", "UNIQUENESS", "INTEGRITY"]

    Column string

    The unnested column which this rule is evaluated against.

    Description string

    Description of the rule. The maximum length is 1,024 characters.

    IgnoreNull bool

    Rows with null values will automatically fail a rule, unless ignoreNull is true. In that case, such null rows are trivially considered passing. Only applicable to ColumnMap rules.

    Name string

    A mutable name for the rule. The name must contain only letters (a-z, A-Z), numbers (0-9), or hyphens (-). The maximum length is 63 characters. Must start with a letter. Must end with a number or a letter.

    NonNullExpectation DatascanDataQualitySpecRuleNonNullExpectation

    ColumnMap rule which evaluates whether each column value is null.

    RangeExpectation DatascanDataQualitySpecRuleRangeExpectation

    ColumnMap rule which evaluates whether each column value lies between a specified range. Structure is documented below.

    RegexExpectation DatascanDataQualitySpecRuleRegexExpectation

    ColumnMap rule which evaluates whether each column value matches a specified regex. Structure is documented below.

    RowConditionExpectation DatascanDataQualitySpecRuleRowConditionExpectation

    Table rule which evaluates whether each row passes the specified condition. Structure is documented below.

    SetExpectation DatascanDataQualitySpecRuleSetExpectation

    ColumnMap rule which evaluates whether each column value is contained by a specified set. Structure is documented below.

    StatisticRangeExpectation DatascanDataQualitySpecRuleStatisticRangeExpectation

    ColumnAggregate rule which evaluates whether the column aggregate statistic lies between a specified range. Structure is documented below.

    TableConditionExpectation DatascanDataQualitySpecRuleTableConditionExpectation

    Table rule which evaluates whether the provided expression is true. Structure is documented below.

    Threshold float64

    The minimum ratio of passing_rows / total_rows required to pass this rule, with a range of [0.0, 1.0]. 0 indicates default value (i.e. 1.0).

    UniquenessExpectation DatascanDataQualitySpecRuleUniquenessExpectation

    Row-level rule which evaluates whether each column value is unique.

    dimension String

    The dimension a rule belongs to. Results are also aggregated at the dimension level. Supported dimensions are ["COMPLETENESS", "ACCURACY", "CONSISTENCY", "VALIDITY", "UNIQUENESS", "INTEGRITY"]

    column String

    The unnested column which this rule is evaluated against.

    description String

    Description of the rule. The maximum length is 1,024 characters.

    ignoreNull Boolean

    Rows with null values will automatically fail a rule, unless ignoreNull is true. In that case, such null rows are trivially considered passing. Only applicable to ColumnMap rules.

    name String

    A mutable name for the rule. The name must contain only letters (a-z, A-Z), numbers (0-9), or hyphens (-). The maximum length is 63 characters. Must start with a letter. Must end with a number or a letter.

    nonNullExpectation DatascanDataQualitySpecRuleNonNullExpectation

    ColumnMap rule which evaluates whether each column value is null.

    rangeExpectation DatascanDataQualitySpecRuleRangeExpectation

    ColumnMap rule which evaluates whether each column value lies between a specified range. Structure is documented below.

    regexExpectation DatascanDataQualitySpecRuleRegexExpectation

    ColumnMap rule which evaluates whether each column value matches a specified regex. Structure is documented below.

    rowConditionExpectation DatascanDataQualitySpecRuleRowConditionExpectation

    Table rule which evaluates whether each row passes the specified condition. Structure is documented below.

    setExpectation DatascanDataQualitySpecRuleSetExpectation

    ColumnMap rule which evaluates whether each column value is contained by a specified set. Structure is documented below.

    statisticRangeExpectation DatascanDataQualitySpecRuleStatisticRangeExpectation

    ColumnAggregate rule which evaluates whether the column aggregate statistic lies between a specified range. Structure is documented below.

    tableConditionExpectation DatascanDataQualitySpecRuleTableConditionExpectation

    Table rule which evaluates whether the provided expression is true. Structure is documented below.

    threshold Double

    The minimum ratio of passing_rows / total_rows required to pass this rule, with a range of [0.0, 1.0]. 0 indicates default value (i.e. 1.0).

    uniquenessExpectation DatascanDataQualitySpecRuleUniquenessExpectation

    Row-level rule which evaluates whether each column value is unique.

    dimension string

    The dimension a rule belongs to. Results are also aggregated at the dimension level. Supported dimensions are ["COMPLETENESS", "ACCURACY", "CONSISTENCY", "VALIDITY", "UNIQUENESS", "INTEGRITY"]

    column string

    The unnested column which this rule is evaluated against.

    description string

    Description of the rule. The maximum length is 1,024 characters.

    ignoreNull boolean

    Rows with null values will automatically fail a rule, unless ignoreNull is true. In that case, such null rows are trivially considered passing. Only applicable to ColumnMap rules.

    name string

    A mutable name for the rule. The name must contain only letters (a-z, A-Z), numbers (0-9), or hyphens (-). The maximum length is 63 characters. Must start with a letter. Must end with a number or a letter.

    nonNullExpectation DatascanDataQualitySpecRuleNonNullExpectation

    ColumnMap rule which evaluates whether each column value is null.

    rangeExpectation DatascanDataQualitySpecRuleRangeExpectation

    ColumnMap rule which evaluates whether each column value lies between a specified range. Structure is documented below.

    regexExpectation DatascanDataQualitySpecRuleRegexExpectation

    ColumnMap rule which evaluates whether each column value matches a specified regex. Structure is documented below.

    rowConditionExpectation DatascanDataQualitySpecRuleRowConditionExpectation

    Table rule which evaluates whether each row passes the specified condition. Structure is documented below.

    setExpectation DatascanDataQualitySpecRuleSetExpectation

    ColumnMap rule which evaluates whether each column value is contained by a specified set. Structure is documented below.

    statisticRangeExpectation DatascanDataQualitySpecRuleStatisticRangeExpectation

    ColumnAggregate rule which evaluates whether the column aggregate statistic lies between a specified range. Structure is documented below.

    tableConditionExpectation DatascanDataQualitySpecRuleTableConditionExpectation

    Table rule which evaluates whether the provided expression is true. Structure is documented below.

    threshold number

    The minimum ratio of passing_rows / total_rows required to pass this rule, with a range of [0.0, 1.0]. 0 indicates default value (i.e. 1.0).

    uniquenessExpectation DatascanDataQualitySpecRuleUniquenessExpectation

    Row-level rule which evaluates whether each column value is unique.

    dimension str

    The dimension a rule belongs to. Results are also aggregated at the dimension level. Supported dimensions are ["COMPLETENESS", "ACCURACY", "CONSISTENCY", "VALIDITY", "UNIQUENESS", "INTEGRITY"]

    column str

    The unnested column which this rule is evaluated against.

    description str

    Description of the rule. The maximum length is 1,024 characters.

    ignore_null bool

    Rows with null values will automatically fail a rule, unless ignoreNull is true. In that case, such null rows are trivially considered passing. Only applicable to ColumnMap rules.

    name str

    A mutable name for the rule. The name must contain only letters (a-z, A-Z), numbers (0-9), or hyphens (-). The maximum length is 63 characters. Must start with a letter. Must end with a number or a letter.

    non_null_expectation DatascanDataQualitySpecRuleNonNullExpectation

    ColumnMap rule which evaluates whether each column value is non-null.

    range_expectation DatascanDataQualitySpecRuleRangeExpectation

    ColumnMap rule which evaluates whether each column value lies between a specified range. Structure is documented below.

    regex_expectation DatascanDataQualitySpecRuleRegexExpectation

    ColumnMap rule which evaluates whether each column value matches a specified regex. Structure is documented below.

    row_condition_expectation DatascanDataQualitySpecRuleRowConditionExpectation

    Table rule which evaluates whether each row passes the specified condition. Structure is documented below.

    set_expectation DatascanDataQualitySpecRuleSetExpectation

    ColumnMap rule which evaluates whether each column value is contained by a specified set. Structure is documented below.

    statistic_range_expectation DatascanDataQualitySpecRuleStatisticRangeExpectation

    ColumnAggregate rule which evaluates whether the column aggregate statistic lies between a specified range. Structure is documented below.

    table_condition_expectation DatascanDataQualitySpecRuleTableConditionExpectation

    Table rule which evaluates whether the provided expression is true. Structure is documented below.

    threshold float

    The minimum ratio of passing_rows / total_rows required to pass this rule, with a range of [0.0, 1.0]. 0 indicates default value (i.e. 1.0).

    uniqueness_expectation DatascanDataQualitySpecRuleUniquenessExpectation

    Row-level rule which evaluates whether each column value is unique.

    dimension String

    The dimension a rule belongs to. Results are also aggregated at the dimension level. Supported dimensions are ["COMPLETENESS", "ACCURACY", "CONSISTENCY", "VALIDITY", "UNIQUENESS", "INTEGRITY"]

    column String

    The unnested column which this rule is evaluated against.

    description String

    Description of the rule. The maximum length is 1,024 characters.

    ignoreNull Boolean

    Rows with null values will automatically fail a rule, unless ignoreNull is true. In that case, such null rows are trivially considered passing. Only applicable to ColumnMap rules.

    name String

    A mutable name for the rule. The name must contain only letters (a-z, A-Z), numbers (0-9), or hyphens (-). The maximum length is 63 characters. Must start with a letter. Must end with a number or a letter.

    nonNullExpectation Property Map

    ColumnMap rule which evaluates whether each column value is non-null.

    rangeExpectation Property Map

    ColumnMap rule which evaluates whether each column value lies between a specified range. Structure is documented below.

    regexExpectation Property Map

    ColumnMap rule which evaluates whether each column value matches a specified regex. Structure is documented below.

    rowConditionExpectation Property Map

    Table rule which evaluates whether each row passes the specified condition. Structure is documented below.

    setExpectation Property Map

    ColumnMap rule which evaluates whether each column value is contained by a specified set. Structure is documented below.

    statisticRangeExpectation Property Map

    ColumnAggregate rule which evaluates whether the column aggregate statistic lies between a specified range. Structure is documented below.

    tableConditionExpectation Property Map

    Table rule which evaluates whether the provided expression is true. Structure is documented below.

    threshold Number

    The minimum ratio of passing_rows / total_rows required to pass this rule, with a range of [0.0, 1.0]. 0 indicates default value (i.e. 1.0).

    uniquenessExpectation Property Map

    Row-level rule which evaluates whether each column value is unique.

    DatascanDataQualitySpecRuleRangeExpectation, DatascanDataQualitySpecRuleRangeExpectationArgs

    MaxValue string

    The maximum column value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.

    MinValue string

    The minimum column value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.

    StrictMaxEnabled bool

    Whether each value needs to be strictly lesser than ('<') the maximum, or if equality is allowed. Only relevant if a maxValue has been defined. Default = false.

    StrictMinEnabled bool

    Whether each value needs to be strictly greater than ('>') the minimum, or if equality is allowed. Only relevant if a minValue has been defined. Default = false.

    MaxValue string

    The maximum column value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.

    MinValue string

    The minimum column value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.

    StrictMaxEnabled bool

    Whether each value needs to be strictly lesser than ('<') the maximum, or if equality is allowed. Only relevant if a maxValue has been defined. Default = false.

    StrictMinEnabled bool

    Whether each value needs to be strictly greater than ('>') the minimum, or if equality is allowed. Only relevant if a minValue has been defined. Default = false.

    maxValue String

    The maximum column value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.

    minValue String

    The minimum column value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.

    strictMaxEnabled Boolean

    Whether each value needs to be strictly lesser than ('<') the maximum, or if equality is allowed. Only relevant if a maxValue has been defined. Default = false.

    strictMinEnabled Boolean

    Whether each value needs to be strictly greater than ('>') the minimum, or if equality is allowed. Only relevant if a minValue has been defined. Default = false.

    maxValue string

    The maximum column value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.

    minValue string

    The minimum column value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.

    strictMaxEnabled boolean

    Whether each value needs to be strictly lesser than ('<') the maximum, or if equality is allowed. Only relevant if a maxValue has been defined. Default = false.

    strictMinEnabled boolean

    Whether each value needs to be strictly greater than ('>') the minimum, or if equality is allowed. Only relevant if a minValue has been defined. Default = false.

    max_value str

    The maximum column value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.

    min_value str

    The minimum column value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.

    strict_max_enabled bool

    Whether each value needs to be strictly lesser than ('<') the maximum, or if equality is allowed. Only relevant if a maxValue has been defined. Default = false.

    strict_min_enabled bool

    Whether each value needs to be strictly greater than ('>') the minimum, or if equality is allowed. Only relevant if a minValue has been defined. Default = false.

    maxValue String

    The maximum column value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.

    minValue String

    The minimum column value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.

    strictMaxEnabled Boolean

    Whether each value needs to be strictly lesser than ('<') the maximum, or if equality is allowed. Only relevant if a maxValue has been defined. Default = false.

    strictMinEnabled Boolean

    Whether each value needs to be strictly greater than ('>') the minimum, or if equality is allowed. Only relevant if a minValue has been defined. Default = false.

    DatascanDataQualitySpecRuleRegexExpectation, DatascanDataQualitySpecRuleRegexExpectationArgs

    Regex string

    A regular expression the column value is expected to match.

    Regex string

    A regular expression the column value is expected to match.

    regex String

    A regular expression the column value is expected to match.

    regex string

    A regular expression the column value is expected to match.

    regex str

    A regular expression the column value is expected to match.

    regex String

    A regular expression the column value is expected to match.

    DatascanDataQualitySpecRuleRowConditionExpectation, DatascanDataQualitySpecRuleRowConditionExpectationArgs

    SqlExpression string

    The SQL expression.

    SqlExpression string

    The SQL expression.

    sqlExpression String

    The SQL expression.

    sqlExpression string

    The SQL expression.

    sql_expression str

    The SQL expression.

    sqlExpression String

    The SQL expression.

    DatascanDataQualitySpecRuleSetExpectation, DatascanDataQualitySpecRuleSetExpectationArgs

    Values List<string>

    Expected values for the column value.

    Values []string

    Expected values for the column value.

    values List<String>

    Expected values for the column value.

    values string[]

    Expected values for the column value.

    values Sequence[str]

    Expected values for the column value.

    values List<String>

    Expected values for the column value.

    DatascanDataQualitySpecRuleStatisticRangeExpectation, DatascanDataQualitySpecRuleStatisticRangeExpectationArgs

    Statistic string

    The column statistic to evaluate. Possible values are: STATISTIC_UNDEFINED, MEAN, MIN, MAX.

    MaxValue string

    The maximum column statistic value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.

    MinValue string

    The minimum column statistic value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.

    StrictMaxEnabled bool

    Whether column statistic needs to be strictly lesser than ('<') the maximum, or if equality is allowed. Only relevant if a maxValue has been defined. Default = false.

    StrictMinEnabled bool

    Whether column statistic needs to be strictly greater than ('>') the minimum, or if equality is allowed. Only relevant if a minValue has been defined. Default = false.

    Statistic string

    The column statistic to evaluate. Possible values are: STATISTIC_UNDEFINED, MEAN, MIN, MAX.

    MaxValue string

    The maximum column statistic value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.

    MinValue string

    The minimum column statistic value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.

    StrictMaxEnabled bool

    Whether column statistic needs to be strictly lesser than ('<') the maximum, or if equality is allowed. Only relevant if a maxValue has been defined. Default = false.

    StrictMinEnabled bool

    Whether column statistic needs to be strictly greater than ('>') the minimum, or if equality is allowed. Only relevant if a minValue has been defined. Default = false.

    statistic String

    The column statistic to evaluate. Possible values are: STATISTIC_UNDEFINED, MEAN, MIN, MAX.

    maxValue String

    The maximum column statistic value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.

    minValue String

    The minimum column statistic value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.

    strictMaxEnabled Boolean

    Whether column statistic needs to be strictly lesser than ('<') the maximum, or if equality is allowed. Only relevant if a maxValue has been defined. Default = false.

    strictMinEnabled Boolean

    Whether column statistic needs to be strictly greater than ('>') the minimum, or if equality is allowed. Only relevant if a minValue has been defined. Default = false.

    statistic string

    The column statistic to evaluate. Possible values are: STATISTIC_UNDEFINED, MEAN, MIN, MAX.

    maxValue string

    The maximum column statistic value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.

    minValue string

    The minimum column statistic value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.

    strictMaxEnabled boolean

    Whether column statistic needs to be strictly lesser than ('<') the maximum, or if equality is allowed. Only relevant if a maxValue has been defined. Default = false.

    strictMinEnabled boolean

    Whether column statistic needs to be strictly greater than ('>') the minimum, or if equality is allowed. Only relevant if a minValue has been defined. Default = false.

    statistic str

    The column statistic to evaluate. Possible values are: STATISTIC_UNDEFINED, MEAN, MIN, MAX.

    max_value str

    The maximum column statistic value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.

    min_value str

    The minimum column statistic value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.

    strict_max_enabled bool

    Whether column statistic needs to be strictly lesser than ('<') the maximum, or if equality is allowed. Only relevant if a maxValue has been defined. Default = false.

    strict_min_enabled bool

    Whether column statistic needs to be strictly greater than ('>') the minimum, or if equality is allowed. Only relevant if a minValue has been defined. Default = false.

    statistic String

    The column statistic to evaluate. Possible values are: STATISTIC_UNDEFINED, MEAN, MIN, MAX.

    maxValue String

    The maximum column statistic value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.

    minValue String

    The minimum column statistic value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.

    strictMaxEnabled Boolean

    Whether column statistic needs to be strictly lesser than ('<') the maximum, or if equality is allowed. Only relevant if a maxValue has been defined. Default = false.

    strictMinEnabled Boolean

    Whether column statistic needs to be strictly greater than ('>') the minimum, or if equality is allowed. Only relevant if a minValue has been defined. Default = false.

    DatascanDataQualitySpecRuleTableConditionExpectation, DatascanDataQualitySpecRuleTableConditionExpectationArgs

    SqlExpression string

    The SQL expression.

    SqlExpression string

    The SQL expression.

    sqlExpression String

    The SQL expression.

    sqlExpression string

    The SQL expression.

    sql_expression str

    The SQL expression.

    sqlExpression String

    The SQL expression.

    DatascanExecutionSpec, DatascanExecutionSpecArgs

    Trigger DatascanExecutionSpecTrigger

    Spec related to how often and when a scan should be triggered. Structure is documented below.

    Field string

    The unnested field (of type Date or Timestamp) that contains values which monotonically increase over time. If not specified, a data scan will run for all data in the table.

    Trigger DatascanExecutionSpecTrigger

    Spec related to how often and when a scan should be triggered. Structure is documented below.

    Field string

    The unnested field (of type Date or Timestamp) that contains values which monotonically increase over time. If not specified, a data scan will run for all data in the table.

    trigger DatascanExecutionSpecTrigger

    Spec related to how often and when a scan should be triggered. Structure is documented below.

    field String

    The unnested field (of type Date or Timestamp) that contains values which monotonically increase over time. If not specified, a data scan will run for all data in the table.

    trigger DatascanExecutionSpecTrigger

    Spec related to how often and when a scan should be triggered. Structure is documented below.

    field string

    The unnested field (of type Date or Timestamp) that contains values which monotonically increase over time. If not specified, a data scan will run for all data in the table.

    trigger DatascanExecutionSpecTrigger

    Spec related to how often and when a scan should be triggered. Structure is documented below.

    field str

    The unnested field (of type Date or Timestamp) that contains values which monotonically increase over time. If not specified, a data scan will run for all data in the table.

    trigger Property Map

    Spec related to how often and when a scan should be triggered. Structure is documented below.

    field String

    The unnested field (of type Date or Timestamp) that contains values which monotonically increase over time. If not specified, a data scan will run for all data in the table.

    DatascanExecutionSpecTrigger, DatascanExecutionSpecTriggerArgs

    OnDemand DatascanExecutionSpecTriggerOnDemand

    The scan runs once via dataScans.run API.

    Schedule DatascanExecutionSpecTriggerSchedule

    The scan is scheduled to run periodically. Structure is documented below.

    OnDemand DatascanExecutionSpecTriggerOnDemand

    The scan runs once via dataScans.run API.

    Schedule DatascanExecutionSpecTriggerSchedule

    The scan is scheduled to run periodically. Structure is documented below.

    onDemand DatascanExecutionSpecTriggerOnDemand

    The scan runs once via dataScans.run API.

    schedule DatascanExecutionSpecTriggerSchedule

    The scan is scheduled to run periodically. Structure is documented below.

    onDemand DatascanExecutionSpecTriggerOnDemand

    The scan runs once via dataScans.run API.

    schedule DatascanExecutionSpecTriggerSchedule

    The scan is scheduled to run periodically. Structure is documented below.

    on_demand DatascanExecutionSpecTriggerOnDemand

    The scan runs once via dataScans.run API.

    schedule DatascanExecutionSpecTriggerSchedule

    The scan is scheduled to run periodically. Structure is documented below.

    onDemand Property Map

    The scan runs once via dataScans.run API.

    schedule Property Map

    The scan is scheduled to run periodically. Structure is documented below.

    DatascanExecutionSpecTriggerSchedule, DatascanExecutionSpecTriggerScheduleArgs

    Cron string

    Cron schedule for running scans periodically. This field is required for Schedule scans.


    Cron string

    Cron schedule for running scans periodically. This field is required for Schedule scans.


    cron String

    Cron schedule for running scans periodically. This field is required for Schedule scans.


    cron string

    Cron schedule for running scans periodically. This field is required for Schedule scans.


    cron str

    Cron schedule for running scans periodically. This field is required for Schedule scans.


    cron String

    Cron schedule for running scans periodically. This field is required for Schedule scans.


    DatascanExecutionStatus, DatascanExecutionStatusArgs

    LatestJobEndTime string

    (Output) The time when the latest DataScanJob ended.

    LatestJobStartTime string

    (Output) The time when the latest DataScanJob started.

    LatestJobEndTime string

    (Output) The time when the latest DataScanJob ended.

    LatestJobStartTime string

    (Output) The time when the latest DataScanJob started.

    latestJobEndTime String

    (Output) The time when the latest DataScanJob ended.

    latestJobStartTime String

    (Output) The time when the latest DataScanJob started.

    latestJobEndTime string

    (Output) The time when the latest DataScanJob ended.

    latestJobStartTime string

    (Output) The time when the latest DataScanJob started.

    latest_job_end_time str

    (Output) The time when the latest DataScanJob ended.

    latest_job_start_time str

    (Output) The time when the latest DataScanJob started.

    latestJobEndTime String

    (Output) The time when the latest DataScanJob ended.

    latestJobStartTime String

    (Output) The time when the latest DataScanJob started.

    Import

    Datascan can be imported using any of these accepted formats:

     $ pulumi import gcp:dataplex/datascan:Datascan default projects/{{project}}/locations/{{location}}/dataScans/{{data_scan_id}}
    
     $ pulumi import gcp:dataplex/datascan:Datascan default {{project}}/{{location}}/{{data_scan_id}}
    
     $ pulumi import gcp:dataplex/datascan:Datascan default {{location}}/{{data_scan_id}}
    
     $ pulumi import gcp:dataplex/datascan:Datascan default {{data_scan_id}}
    

    Package Details

    Repository
    Google Cloud (GCP) Classic pulumi/pulumi-gcp
    License
    Apache-2.0
    Notes

    This Pulumi package is based on the google-beta Terraform Provider.

    gcp logo
    Google Cloud Classic v6.67.0 published on Wednesday, Sep 27, 2023 by Pulumi