Google Cloud Classic v8.10.0 published on Wednesday, Nov 20, 2024 by Pulumi

gcp.bigquery.Table

    Creates a table resource in a dataset for Google BigQuery. For more information see the official documentation and API.

    Note: On newer versions of the provider, you must explicitly set deletion_protection=false (and run pulumi up to write the field to state) in order to destroy an instance. It is recommended to not set this field (or set it to true) until you’re ready to destroy.
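    As a minimal sketch (the dataset and table IDs below are placeholders, not part of the examples that follow), a table intended to remain destroyable can set deletion_protection explicitly in Python:

    import pulumi
    import pulumi_gcp as gcp

    # Placeholder dataset/table IDs. Setting deletion_protection=False lets a later
    # `pulumi destroy` (or an update that would delete the table) succeed instead of failing.
    scratch = gcp.bigquery.Table("scratch",
        dataset_id="foo",
        table_id="scratch_table",
        deletion_protection=False)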

    Example Usage

    import * as pulumi from "@pulumi/pulumi";
    import * as gcp from "@pulumi/gcp";
    
    const _default = new gcp.bigquery.Dataset("default", {
        datasetId: "foo",
        friendlyName: "test",
        description: "This is a test description",
        location: "EU",
        defaultTableExpirationMs: 3600000,
        labels: {
            env: "default",
        },
    });
    const defaultTable = new gcp.bigquery.Table("default", {
        datasetId: _default.datasetId,
        tableId: "bar",
        timePartitioning: {
            type: "DAY",
        },
        labels: {
            env: "default",
        },
        schema: `[
      {
        "name": "permalink",
        "type": "STRING",
        "mode": "NULLABLE",
        "description": "The Permalink"
      },
      {
        "name": "state",
        "type": "STRING",
        "mode": "NULLABLE",
        "description": "State where the head office is located"
      }
    ]
    `,
    });
    const sheet = new gcp.bigquery.Table("sheet", {
        datasetId: _default.datasetId,
        tableId: "sheet",
        externalDataConfiguration: {
            autodetect: true,
            sourceFormat: "GOOGLE_SHEETS",
            googleSheetsOptions: {
                skipLeadingRows: 1,
            },
            sourceUris: ["https://docs.google.com/spreadsheets/d/123456789012345"],
        },
    });
    
    import pulumi
    import pulumi_gcp as gcp
    
    default = gcp.bigquery.Dataset("default",
        dataset_id="foo",
        friendly_name="test",
        description="This is a test description",
        location="EU",
        default_table_expiration_ms=3600000,
        labels={
            "env": "default",
        })
    default_table = gcp.bigquery.Table("default",
        dataset_id=default.dataset_id,
        table_id="bar",
        time_partitioning={
            "type": "DAY",
        },
        labels={
            "env": "default",
        },
        schema="""[
      {
        "name": "permalink",
        "type": "STRING",
        "mode": "NULLABLE",
        "description": "The Permalink"
      },
      {
        "name": "state",
        "type": "STRING",
        "mode": "NULLABLE",
        "description": "State where the head office is located"
      }
    ]
    """)
    sheet = gcp.bigquery.Table("sheet",
        dataset_id=default.dataset_id,
        table_id="sheet",
        external_data_configuration={
            "autodetect": True,
            "source_format": "GOOGLE_SHEETS",
            "google_sheets_options": {
                "skip_leading_rows": 1,
            },
            "source_uris": ["https://docs.google.com/spreadsheets/d/123456789012345"],
        })
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/bigquery"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_default, err := bigquery.NewDataset(ctx, "default", &bigquery.DatasetArgs{
    			DatasetId:                pulumi.String("foo"),
    			FriendlyName:             pulumi.String("test"),
    			Description:              pulumi.String("This is a test description"),
    			Location:                 pulumi.String("EU"),
    			DefaultTableExpirationMs: pulumi.Int(3600000),
    			Labels: pulumi.StringMap{
    				"env": pulumi.String("default"),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		_, err = bigquery.NewTable(ctx, "default", &bigquery.TableArgs{
    			DatasetId: _default.DatasetId,
    			TableId:   pulumi.String("bar"),
    			TimePartitioning: &bigquery.TableTimePartitioningArgs{
    				Type: pulumi.String("DAY"),
    			},
    			Labels: pulumi.StringMap{
    				"env": pulumi.String("default"),
    			},
    			Schema: pulumi.String(`[
      {
        "name": "permalink",
        "type": "STRING",
        "mode": "NULLABLE",
        "description": "The Permalink"
      },
      {
        "name": "state",
        "type": "STRING",
        "mode": "NULLABLE",
        "description": "State where the head office is located"
      }
    ]
    `),
    		})
    		if err != nil {
    			return err
    		}
    		_, err = bigquery.NewTable(ctx, "sheet", &bigquery.TableArgs{
    			DatasetId: _default.DatasetId,
    			TableId:   pulumi.String("sheet"),
    			ExternalDataConfiguration: &bigquery.TableExternalDataConfigurationArgs{
    				Autodetect:   pulumi.Bool(true),
    				SourceFormat: pulumi.String("GOOGLE_SHEETS"),
    				GoogleSheetsOptions: &bigquery.TableExternalDataConfigurationGoogleSheetsOptionsArgs{
    					SkipLeadingRows: pulumi.Int(1),
    				},
    				SourceUris: pulumi.StringArray{
    					pulumi.String("https://docs.google.com/spreadsheets/d/123456789012345"),
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Gcp = Pulumi.Gcp;
    
    return await Deployment.RunAsync(() => 
    {
        var @default = new Gcp.BigQuery.Dataset("default", new()
        {
            DatasetId = "foo",
            FriendlyName = "test",
            Description = "This is a test description",
            Location = "EU",
            DefaultTableExpirationMs = 3600000,
            Labels = 
            {
                { "env", "default" },
            },
        });
    
        var defaultTable = new Gcp.BigQuery.Table("default", new()
        {
            DatasetId = @default.DatasetId,
            TableId = "bar",
            TimePartitioning = new Gcp.BigQuery.Inputs.TableTimePartitioningArgs
            {
                Type = "DAY",
            },
            Labels = 
            {
                { "env", "default" },
            },
            Schema = @"[
      {
        ""name"": ""permalink"",
        ""type"": ""STRING"",
        ""mode"": ""NULLABLE"",
        ""description"": ""The Permalink""
      },
      {
        ""name"": ""state"",
        ""type"": ""STRING"",
        ""mode"": ""NULLABLE"",
        ""description"": ""State where the head office is located""
      }
    ]
    ",
        });
    
        var sheet = new Gcp.BigQuery.Table("sheet", new()
        {
            DatasetId = @default.DatasetId,
            TableId = "sheet",
            ExternalDataConfiguration = new Gcp.BigQuery.Inputs.TableExternalDataConfigurationArgs
            {
                Autodetect = true,
                SourceFormat = "GOOGLE_SHEETS",
                GoogleSheetsOptions = new Gcp.BigQuery.Inputs.TableExternalDataConfigurationGoogleSheetsOptionsArgs
                {
                    SkipLeadingRows = 1,
                },
                SourceUris = new[]
                {
                    "https://docs.google.com/spreadsheets/d/123456789012345",
                },
            },
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.gcp.bigquery.Dataset;
    import com.pulumi.gcp.bigquery.DatasetArgs;
    import com.pulumi.gcp.bigquery.Table;
    import com.pulumi.gcp.bigquery.TableArgs;
    import com.pulumi.gcp.bigquery.inputs.TableTimePartitioningArgs;
    import com.pulumi.gcp.bigquery.inputs.TableExternalDataConfigurationArgs;
    import com.pulumi.gcp.bigquery.inputs.TableExternalDataConfigurationGoogleSheetsOptionsArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var default_ = new Dataset("default", DatasetArgs.builder()
                .datasetId("foo")
                .friendlyName("test")
                .description("This is a test description")
                .location("EU")
                .defaultTableExpirationMs(3600000)
                .labels(Map.of("env", "default"))
                .build());
    
            var defaultTable = new Table("defaultTable", TableArgs.builder()
                .datasetId(default_.datasetId())
                .tableId("bar")
                .timePartitioning(TableTimePartitioningArgs.builder()
                    .type("DAY")
                    .build())
                .labels(Map.of("env", "default"))
                .schema("""
    [
      {
        "name": "permalink",
        "type": "STRING",
        "mode": "NULLABLE",
        "description": "The Permalink"
      },
      {
        "name": "state",
        "type": "STRING",
        "mode": "NULLABLE",
        "description": "State where the head office is located"
      }
    ]
                """)
                .build());
    
            var sheet = new Table("sheet", TableArgs.builder()
                .datasetId(default_.datasetId())
                .tableId("sheet")
                .externalDataConfiguration(TableExternalDataConfigurationArgs.builder()
                    .autodetect(true)
                    .sourceFormat("GOOGLE_SHEETS")
                    .googleSheetsOptions(TableExternalDataConfigurationGoogleSheetsOptionsArgs.builder()
                        .skipLeadingRows(1)
                        .build())
                    .sourceUris("https://docs.google.com/spreadsheets/d/123456789012345")
                    .build())
                .build());
    
        }
    }
    
    resources:
      default:
        type: gcp:bigquery:Dataset
        properties:
          datasetId: foo
          friendlyName: test
          description: This is a test description
          location: EU
          defaultTableExpirationMs: 3600000
          labels:
            env: default
      defaultTable:
        type: gcp:bigquery:Table
        name: default
        properties:
          datasetId: ${default.datasetId}
          tableId: bar
          timePartitioning:
            type: DAY
          labels:
            env: default
          schema: |
            [
              {
                "name": "permalink",
                "type": "STRING",
                "mode": "NULLABLE",
                "description": "The Permalink"
              },
              {
                "name": "state",
                "type": "STRING",
                "mode": "NULLABLE",
                "description": "State where the head office is located"
              }
            ]        
      sheet:
        type: gcp:bigquery:Table
        properties:
          datasetId: ${default.datasetId}
          tableId: sheet
          externalDataConfiguration:
            autodetect: true
            sourceFormat: GOOGLE_SHEETS
            googleSheetsOptions:
              skipLeadingRows: 1
            sourceUris:
              - https://docs.google.com/spreadsheets/d/123456789012345
    

    Create Table Resource

    Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.

    Constructor syntax

    new Table(name: string, args: TableArgs, opts?: CustomResourceOptions);
    @overload
    def Table(resource_name: str,
              args: TableArgs,
              opts: Optional[ResourceOptions] = None)
    
    @overload
    def Table(resource_name: str,
              opts: Optional[ResourceOptions] = None,
              dataset_id: Optional[str] = None,
              table_id: Optional[str] = None,
              materialized_view: Optional[TableMaterializedViewArgs] = None,
              max_staleness: Optional[str] = None,
              description: Optional[str] = None,
              encryption_configuration: Optional[TableEncryptionConfigurationArgs] = None,
              expiration_time: Optional[int] = None,
              external_data_configuration: Optional[TableExternalDataConfigurationArgs] = None,
              friendly_name: Optional[str] = None,
              labels: Optional[Mapping[str, str]] = None,
              biglake_configuration: Optional[TableBiglakeConfigurationArgs] = None,
              deletion_protection: Optional[bool] = None,
              project: Optional[str] = None,
              range_partitioning: Optional[TableRangePartitioningArgs] = None,
              require_partition_filter: Optional[bool] = None,
              resource_tags: Optional[Mapping[str, str]] = None,
              schema: Optional[str] = None,
              table_constraints: Optional[TableTableConstraintsArgs] = None,
              clusterings: Optional[Sequence[str]] = None,
              table_replication_info: Optional[TableTableReplicationInfoArgs] = None,
              time_partitioning: Optional[TableTimePartitioningArgs] = None,
              view: Optional[TableViewArgs] = None)
    func NewTable(ctx *Context, name string, args TableArgs, opts ...ResourceOption) (*Table, error)
    public Table(string name, TableArgs args, CustomResourceOptions? opts = null)
    public Table(String name, TableArgs args)
    public Table(String name, TableArgs args, CustomResourceOptions options)
    
    type: gcp:bigquery:Table
    properties: # The arguments to resource properties.
    options: # Bag of options to control resource's behavior.
    
    

    Parameters

    name string
    The unique name of the resource.
    args TableArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    resource_name str
    The unique name of the resource.
    args TableArgs
    The arguments to resource properties.
    opts ResourceOptions
    Bag of options to control resource's behavior.
    ctx Context
    Context object for the current deployment.
    name string
    The unique name of the resource.
    args TableArgs
    The arguments to resource properties.
    opts ResourceOption
    Bag of options to control resource's behavior.
    name string
    The unique name of the resource.
    args TableArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    name String
    The unique name of the resource.
    args TableArgs
    The arguments to resource properties.
    options CustomResourceOptions
    Bag of options to control resource's behavior.

    Constructor example

    The following reference example uses placeholder values for all input properties.

    var gcpTableResource = new Gcp.BigQuery.Table("gcpTableResource", new()
    {
        DatasetId = "string",
        TableId = "string",
        MaterializedView = new Gcp.BigQuery.Inputs.TableMaterializedViewArgs
        {
            Query = "string",
            AllowNonIncrementalDefinition = false,
            EnableRefresh = false,
            RefreshIntervalMs = 0,
        },
        MaxStaleness = "string",
        Description = "string",
        EncryptionConfiguration = new Gcp.BigQuery.Inputs.TableEncryptionConfigurationArgs
        {
            KmsKeyName = "string",
            KmsKeyVersion = "string",
        },
        ExpirationTime = 0,
        ExternalDataConfiguration = new Gcp.BigQuery.Inputs.TableExternalDataConfigurationArgs
        {
            Autodetect = false,
            SourceUris = new[]
            {
                "string",
            },
            JsonExtension = "string",
            JsonOptions = new Gcp.BigQuery.Inputs.TableExternalDataConfigurationJsonOptionsArgs
            {
                Encoding = "string",
            },
            ConnectionId = "string",
            CsvOptions = new Gcp.BigQuery.Inputs.TableExternalDataConfigurationCsvOptionsArgs
            {
                Quote = "string",
                AllowJaggedRows = false,
                AllowQuotedNewlines = false,
                Encoding = "string",
                FieldDelimiter = "string",
                SkipLeadingRows = 0,
            },
            FileSetSpecType = "string",
            GoogleSheetsOptions = new Gcp.BigQuery.Inputs.TableExternalDataConfigurationGoogleSheetsOptionsArgs
            {
                Range = "string",
                SkipLeadingRows = 0,
            },
            HivePartitioningOptions = new Gcp.BigQuery.Inputs.TableExternalDataConfigurationHivePartitioningOptionsArgs
            {
                Mode = "string",
                RequirePartitionFilter = false,
                SourceUriPrefix = "string",
            },
            IgnoreUnknownValues = false,
            BigtableOptions = new Gcp.BigQuery.Inputs.TableExternalDataConfigurationBigtableOptionsArgs
            {
                ColumnFamilies = new[]
                {
                    new Gcp.BigQuery.Inputs.TableExternalDataConfigurationBigtableOptionsColumnFamilyArgs
                    {
                        Columns = new[]
                        {
                            new Gcp.BigQuery.Inputs.TableExternalDataConfigurationBigtableOptionsColumnFamilyColumnArgs
                            {
                                Encoding = "string",
                                FieldName = "string",
                                OnlyReadLatest = false,
                                QualifierEncoded = "string",
                                QualifierString = "string",
                                Type = "string",
                            },
                        },
                        Encoding = "string",
                        FamilyId = "string",
                        OnlyReadLatest = false,
                        Type = "string",
                    },
                },
                IgnoreUnspecifiedColumnFamilies = false,
                OutputColumnFamiliesAsJson = false,
                ReadRowkeyAsString = false,
            },
            Compression = "string",
            MaxBadRecords = 0,
            MetadataCacheMode = "string",
            ObjectMetadata = "string",
            ParquetOptions = new Gcp.BigQuery.Inputs.TableExternalDataConfigurationParquetOptionsArgs
            {
                EnableListInference = false,
                EnumAsString = false,
            },
            ReferenceFileSchemaUri = "string",
            Schema = "string",
            SourceFormat = "string",
            AvroOptions = new Gcp.BigQuery.Inputs.TableExternalDataConfigurationAvroOptionsArgs
            {
                UseAvroLogicalTypes = false,
            },
        },
        FriendlyName = "string",
        Labels = 
        {
            { "string", "string" },
        },
        BiglakeConfiguration = new Gcp.BigQuery.Inputs.TableBiglakeConfigurationArgs
        {
            ConnectionId = "string",
            FileFormat = "string",
            StorageUri = "string",
            TableFormat = "string",
        },
        DeletionProtection = false,
        Project = "string",
        RangePartitioning = new Gcp.BigQuery.Inputs.TableRangePartitioningArgs
        {
            Field = "string",
            Range = new Gcp.BigQuery.Inputs.TableRangePartitioningRangeArgs
            {
                End = 0,
                Interval = 0,
                Start = 0,
            },
        },
        RequirePartitionFilter = false,
        ResourceTags = 
        {
            { "string", "string" },
        },
        Schema = "string",
        TableConstraints = new Gcp.BigQuery.Inputs.TableTableConstraintsArgs
        {
            ForeignKeys = new[]
            {
                new Gcp.BigQuery.Inputs.TableTableConstraintsForeignKeyArgs
                {
                    ColumnReferences = new Gcp.BigQuery.Inputs.TableTableConstraintsForeignKeyColumnReferencesArgs
                    {
                        ReferencedColumn = "string",
                        ReferencingColumn = "string",
                    },
                    ReferencedTable = new Gcp.BigQuery.Inputs.TableTableConstraintsForeignKeyReferencedTableArgs
                    {
                        DatasetId = "string",
                        ProjectId = "string",
                        TableId = "string",
                    },
                    Name = "string",
                },
            },
            PrimaryKey = new Gcp.BigQuery.Inputs.TableTableConstraintsPrimaryKeyArgs
            {
                Columns = new[]
                {
                    "string",
                },
            },
        },
        Clusterings = new[]
        {
            "string",
        },
        TableReplicationInfo = new Gcp.BigQuery.Inputs.TableTableReplicationInfoArgs
        {
            SourceDatasetId = "string",
            SourceProjectId = "string",
            SourceTableId = "string",
            ReplicationIntervalMs = 0,
        },
        TimePartitioning = new Gcp.BigQuery.Inputs.TableTimePartitioningArgs
        {
            Type = "string",
            ExpirationMs = 0,
            Field = "string",
        },
        View = new Gcp.BigQuery.Inputs.TableViewArgs
        {
            Query = "string",
            UseLegacySql = false,
        },
    });
    
    example, err := bigquery.NewTable(ctx, "gcpTableResource", &bigquery.TableArgs{
    	DatasetId: pulumi.String("string"),
    	TableId:   pulumi.String("string"),
    	MaterializedView: &bigquery.TableMaterializedViewArgs{
    		Query:                         pulumi.String("string"),
    		AllowNonIncrementalDefinition: pulumi.Bool(false),
    		EnableRefresh:                 pulumi.Bool(false),
    		RefreshIntervalMs:             pulumi.Int(0),
    	},
    	MaxStaleness: pulumi.String("string"),
    	Description:  pulumi.String("string"),
    	EncryptionConfiguration: &bigquery.TableEncryptionConfigurationArgs{
    		KmsKeyName:    pulumi.String("string"),
    		KmsKeyVersion: pulumi.String("string"),
    	},
    	ExpirationTime: pulumi.Int(0),
    	ExternalDataConfiguration: &bigquery.TableExternalDataConfigurationArgs{
    		Autodetect: pulumi.Bool(false),
    		SourceUris: pulumi.StringArray{
    			pulumi.String("string"),
    		},
    		JsonExtension: pulumi.String("string"),
    		JsonOptions: &bigquery.TableExternalDataConfigurationJsonOptionsArgs{
    			Encoding: pulumi.String("string"),
    		},
    		ConnectionId: pulumi.String("string"),
    		CsvOptions: &bigquery.TableExternalDataConfigurationCsvOptionsArgs{
    			Quote:               pulumi.String("string"),
    			AllowJaggedRows:     pulumi.Bool(false),
    			AllowQuotedNewlines: pulumi.Bool(false),
    			Encoding:            pulumi.String("string"),
    			FieldDelimiter:      pulumi.String("string"),
    			SkipLeadingRows:     pulumi.Int(0),
    		},
    		FileSetSpecType: pulumi.String("string"),
    		GoogleSheetsOptions: &bigquery.TableExternalDataConfigurationGoogleSheetsOptionsArgs{
    			Range:           pulumi.String("string"),
    			SkipLeadingRows: pulumi.Int(0),
    		},
    		HivePartitioningOptions: &bigquery.TableExternalDataConfigurationHivePartitioningOptionsArgs{
    			Mode:                   pulumi.String("string"),
    			RequirePartitionFilter: pulumi.Bool(false),
    			SourceUriPrefix:        pulumi.String("string"),
    		},
    		IgnoreUnknownValues: pulumi.Bool(false),
    		BigtableOptions: &bigquery.TableExternalDataConfigurationBigtableOptionsArgs{
    			ColumnFamilies: bigquery.TableExternalDataConfigurationBigtableOptionsColumnFamilyArray{
    				&bigquery.TableExternalDataConfigurationBigtableOptionsColumnFamilyArgs{
    					Columns: bigquery.TableExternalDataConfigurationBigtableOptionsColumnFamilyColumnArray{
    						&bigquery.TableExternalDataConfigurationBigtableOptionsColumnFamilyColumnArgs{
    							Encoding:         pulumi.String("string"),
    							FieldName:        pulumi.String("string"),
    							OnlyReadLatest:   pulumi.Bool(false),
    							QualifierEncoded: pulumi.String("string"),
    							QualifierString:  pulumi.String("string"),
    							Type:             pulumi.String("string"),
    						},
    					},
    					Encoding:       pulumi.String("string"),
    					FamilyId:       pulumi.String("string"),
    					OnlyReadLatest: pulumi.Bool(false),
    					Type:           pulumi.String("string"),
    				},
    			},
    			IgnoreUnspecifiedColumnFamilies: pulumi.Bool(false),
    			OutputColumnFamiliesAsJson:      pulumi.Bool(false),
    			ReadRowkeyAsString:              pulumi.Bool(false),
    		},
    		Compression:       pulumi.String("string"),
    		MaxBadRecords:     pulumi.Int(0),
    		MetadataCacheMode: pulumi.String("string"),
    		ObjectMetadata:    pulumi.String("string"),
    		ParquetOptions: &bigquery.TableExternalDataConfigurationParquetOptionsArgs{
    			EnableListInference: pulumi.Bool(false),
    			EnumAsString:        pulumi.Bool(false),
    		},
    		ReferenceFileSchemaUri: pulumi.String("string"),
    		Schema:                 pulumi.String("string"),
    		SourceFormat:           pulumi.String("string"),
    		AvroOptions: &bigquery.TableExternalDataConfigurationAvroOptionsArgs{
    			UseAvroLogicalTypes: pulumi.Bool(false),
    		},
    	},
    	FriendlyName: pulumi.String("string"),
    	Labels: pulumi.StringMap{
    		"string": pulumi.String("string"),
    	},
    	BiglakeConfiguration: &bigquery.TableBiglakeConfigurationArgs{
    		ConnectionId: pulumi.String("string"),
    		FileFormat:   pulumi.String("string"),
    		StorageUri:   pulumi.String("string"),
    		TableFormat:  pulumi.String("string"),
    	},
    	DeletionProtection: pulumi.Bool(false),
    	Project:            pulumi.String("string"),
    	RangePartitioning: &bigquery.TableRangePartitioningArgs{
    		Field: pulumi.String("string"),
    		Range: &bigquery.TableRangePartitioningRangeArgs{
    			End:      pulumi.Int(0),
    			Interval: pulumi.Int(0),
    			Start:    pulumi.Int(0),
    		},
    	},
    	RequirePartitionFilter: pulumi.Bool(false),
    	ResourceTags: pulumi.StringMap{
    		"string": pulumi.String("string"),
    	},
    	Schema: pulumi.String("string"),
    	TableConstraints: &bigquery.TableTableConstraintsArgs{
    		ForeignKeys: bigquery.TableTableConstraintsForeignKeyArray{
    			&bigquery.TableTableConstraintsForeignKeyArgs{
    				ColumnReferences: &bigquery.TableTableConstraintsForeignKeyColumnReferencesArgs{
    					ReferencedColumn:  pulumi.String("string"),
    					ReferencingColumn: pulumi.String("string"),
    				},
    				ReferencedTable: &bigquery.TableTableConstraintsForeignKeyReferencedTableArgs{
    					DatasetId: pulumi.String("string"),
    					ProjectId: pulumi.String("string"),
    					TableId:   pulumi.String("string"),
    				},
    				Name: pulumi.String("string"),
    			},
    		},
    		PrimaryKey: &bigquery.TableTableConstraintsPrimaryKeyArgs{
    			Columns: pulumi.StringArray{
    				pulumi.String("string"),
    			},
    		},
    	},
    	Clusterings: pulumi.StringArray{
    		pulumi.String("string"),
    	},
    	TableReplicationInfo: &bigquery.TableTableReplicationInfoArgs{
    		SourceDatasetId:       pulumi.String("string"),
    		SourceProjectId:       pulumi.String("string"),
    		SourceTableId:         pulumi.String("string"),
    		ReplicationIntervalMs: pulumi.Int(0),
    	},
    	TimePartitioning: &bigquery.TableTimePartitioningArgs{
    		Type:         pulumi.String("string"),
    		ExpirationMs: pulumi.Int(0),
    		Field:        pulumi.String("string"),
    	},
    	View: &bigquery.TableViewArgs{
    		Query:        pulumi.String("string"),
    		UseLegacySql: pulumi.Bool(false),
    	},
    })
    
    var gcpTableResource = new Table("gcpTableResource", TableArgs.builder()
        .datasetId("string")
        .tableId("string")
        .materializedView(TableMaterializedViewArgs.builder()
            .query("string")
            .allowNonIncrementalDefinition(false)
            .enableRefresh(false)
            .refreshIntervalMs(0)
            .build())
        .maxStaleness("string")
        .description("string")
        .encryptionConfiguration(TableEncryptionConfigurationArgs.builder()
            .kmsKeyName("string")
            .kmsKeyVersion("string")
            .build())
        .expirationTime(0)
        .externalDataConfiguration(TableExternalDataConfigurationArgs.builder()
            .autodetect(false)
            .sourceUris("string")
            .jsonExtension("string")
            .jsonOptions(TableExternalDataConfigurationJsonOptionsArgs.builder()
                .encoding("string")
                .build())
            .connectionId("string")
            .csvOptions(TableExternalDataConfigurationCsvOptionsArgs.builder()
                .quote("string")
                .allowJaggedRows(false)
                .allowQuotedNewlines(false)
                .encoding("string")
                .fieldDelimiter("string")
                .skipLeadingRows(0)
                .build())
            .fileSetSpecType("string")
            .googleSheetsOptions(TableExternalDataConfigurationGoogleSheetsOptionsArgs.builder()
                .range("string")
                .skipLeadingRows(0)
                .build())
            .hivePartitioningOptions(TableExternalDataConfigurationHivePartitioningOptionsArgs.builder()
                .mode("string")
                .requirePartitionFilter(false)
                .sourceUriPrefix("string")
                .build())
            .ignoreUnknownValues(false)
            .bigtableOptions(TableExternalDataConfigurationBigtableOptionsArgs.builder()
                .columnFamilies(TableExternalDataConfigurationBigtableOptionsColumnFamilyArgs.builder()
                    .columns(TableExternalDataConfigurationBigtableOptionsColumnFamilyColumnArgs.builder()
                        .encoding("string")
                        .fieldName("string")
                        .onlyReadLatest(false)
                        .qualifierEncoded("string")
                        .qualifierString("string")
                        .type("string")
                        .build())
                    .encoding("string")
                    .familyId("string")
                    .onlyReadLatest(false)
                    .type("string")
                    .build())
                .ignoreUnspecifiedColumnFamilies(false)
                .outputColumnFamiliesAsJson(false)
                .readRowkeyAsString(false)
                .build())
            .compression("string")
            .maxBadRecords(0)
            .metadataCacheMode("string")
            .objectMetadata("string")
            .parquetOptions(TableExternalDataConfigurationParquetOptionsArgs.builder()
                .enableListInference(false)
                .enumAsString(false)
                .build())
            .referenceFileSchemaUri("string")
            .schema("string")
            .sourceFormat("string")
            .avroOptions(TableExternalDataConfigurationAvroOptionsArgs.builder()
                .useAvroLogicalTypes(false)
                .build())
            .build())
        .friendlyName("string")
        .labels(Map.of("string", "string"))
        .biglakeConfiguration(TableBiglakeConfigurationArgs.builder()
            .connectionId("string")
            .fileFormat("string")
            .storageUri("string")
            .tableFormat("string")
            .build())
        .deletionProtection(false)
        .project("string")
        .rangePartitioning(TableRangePartitioningArgs.builder()
            .field("string")
            .range(TableRangePartitioningRangeArgs.builder()
                .end(0)
                .interval(0)
                .start(0)
                .build())
            .build())
        .requirePartitionFilter(false)
        .resourceTags(Map.of("string", "string"))
        .schema("string")
        .tableConstraints(TableTableConstraintsArgs.builder()
            .foreignKeys(TableTableConstraintsForeignKeyArgs.builder()
                .columnReferences(TableTableConstraintsForeignKeyColumnReferencesArgs.builder()
                    .referencedColumn("string")
                    .referencingColumn("string")
                    .build())
                .referencedTable(TableTableConstraintsForeignKeyReferencedTableArgs.builder()
                    .datasetId("string")
                    .projectId("string")
                    .tableId("string")
                    .build())
                .name("string")
                .build())
            .primaryKey(TableTableConstraintsPrimaryKeyArgs.builder()
                .columns("string")
                .build())
            .build())
        .clusterings("string")
        .tableReplicationInfo(TableTableReplicationInfoArgs.builder()
            .sourceDatasetId("string")
            .sourceProjectId("string")
            .sourceTableId("string")
            .replicationIntervalMs(0)
            .build())
        .timePartitioning(TableTimePartitioningArgs.builder()
            .type("string")
            .expirationMs(0)
            .field("string")
            .build())
        .view(TableViewArgs.builder()
            .query("string")
            .useLegacySql(false)
            .build())
        .build());
    
    gcp_table_resource = gcp.bigquery.Table("gcpTableResource",
        dataset_id="string",
        table_id="string",
        materialized_view={
            "query": "string",
            "allow_non_incremental_definition": False,
            "enable_refresh": False,
            "refresh_interval_ms": 0,
        },
        max_staleness="string",
        description="string",
        encryption_configuration={
            "kms_key_name": "string",
            "kms_key_version": "string",
        },
        expiration_time=0,
        external_data_configuration={
            "autodetect": False,
            "source_uris": ["string"],
            "json_extension": "string",
            "json_options": {
                "encoding": "string",
            },
            "connection_id": "string",
            "csv_options": {
                "quote": "string",
                "allow_jagged_rows": False,
                "allow_quoted_newlines": False,
                "encoding": "string",
                "field_delimiter": "string",
                "skip_leading_rows": 0,
            },
            "file_set_spec_type": "string",
            "google_sheets_options": {
                "range": "string",
                "skip_leading_rows": 0,
            },
            "hive_partitioning_options": {
                "mode": "string",
                "require_partition_filter": False,
                "source_uri_prefix": "string",
            },
            "ignore_unknown_values": False,
            "bigtable_options": {
                "column_families": [{
                    "columns": [{
                        "encoding": "string",
                        "field_name": "string",
                        "only_read_latest": False,
                        "qualifier_encoded": "string",
                        "qualifier_string": "string",
                        "type": "string",
                    }],
                    "encoding": "string",
                    "family_id": "string",
                    "only_read_latest": False,
                    "type": "string",
                }],
                "ignore_unspecified_column_families": False,
                "output_column_families_as_json": False,
                "read_rowkey_as_string": False,
            },
            "compression": "string",
            "max_bad_records": 0,
            "metadata_cache_mode": "string",
            "object_metadata": "string",
            "parquet_options": {
                "enable_list_inference": False,
                "enum_as_string": False,
            },
            "reference_file_schema_uri": "string",
            "schema": "string",
            "source_format": "string",
            "avro_options": {
                "use_avro_logical_types": False,
            },
        },
        friendly_name="string",
        labels={
            "string": "string",
        },
        biglake_configuration={
            "connection_id": "string",
            "file_format": "string",
            "storage_uri": "string",
            "table_format": "string",
        },
        deletion_protection=False,
        project="string",
        range_partitioning={
            "field": "string",
            "range": {
                "end": 0,
                "interval": 0,
                "start": 0,
            },
        },
        require_partition_filter=False,
        resource_tags={
            "string": "string",
        },
        schema="string",
        table_constraints={
            "foreign_keys": [{
                "column_references": {
                    "referenced_column": "string",
                    "referencing_column": "string",
                },
                "referenced_table": {
                    "dataset_id": "string",
                    "project_id": "string",
                    "table_id": "string",
                },
                "name": "string",
            }],
            "primary_key": {
                "columns": ["string"],
            },
        },
        clusterings=["string"],
        table_replication_info={
            "source_dataset_id": "string",
            "source_project_id": "string",
            "source_table_id": "string",
            "replication_interval_ms": 0,
        },
        time_partitioning={
            "type": "string",
            "expiration_ms": 0,
            "field": "string",
        },
        view={
            "query": "string",
            "use_legacy_sql": False,
        })
    
    const gcpTableResource = new gcp.bigquery.Table("gcpTableResource", {
        datasetId: "string",
        tableId: "string",
        materializedView: {
            query: "string",
            allowNonIncrementalDefinition: false,
            enableRefresh: false,
            refreshIntervalMs: 0,
        },
        maxStaleness: "string",
        description: "string",
        encryptionConfiguration: {
            kmsKeyName: "string",
            kmsKeyVersion: "string",
        },
        expirationTime: 0,
        externalDataConfiguration: {
            autodetect: false,
            sourceUris: ["string"],
            jsonExtension: "string",
            jsonOptions: {
                encoding: "string",
            },
            connectionId: "string",
            csvOptions: {
                quote: "string",
                allowJaggedRows: false,
                allowQuotedNewlines: false,
                encoding: "string",
                fieldDelimiter: "string",
                skipLeadingRows: 0,
            },
            fileSetSpecType: "string",
            googleSheetsOptions: {
                range: "string",
                skipLeadingRows: 0,
            },
            hivePartitioningOptions: {
                mode: "string",
                requirePartitionFilter: false,
                sourceUriPrefix: "string",
            },
            ignoreUnknownValues: false,
            bigtableOptions: {
                columnFamilies: [{
                    columns: [{
                        encoding: "string",
                        fieldName: "string",
                        onlyReadLatest: false,
                        qualifierEncoded: "string",
                        qualifierString: "string",
                        type: "string",
                    }],
                    encoding: "string",
                    familyId: "string",
                    onlyReadLatest: false,
                    type: "string",
                }],
                ignoreUnspecifiedColumnFamilies: false,
                outputColumnFamiliesAsJson: false,
                readRowkeyAsString: false,
            },
            compression: "string",
            maxBadRecords: 0,
            metadataCacheMode: "string",
            objectMetadata: "string",
            parquetOptions: {
                enableListInference: false,
                enumAsString: false,
            },
            referenceFileSchemaUri: "string",
            schema: "string",
            sourceFormat: "string",
            avroOptions: {
                useAvroLogicalTypes: false,
            },
        },
        friendlyName: "string",
        labels: {
            string: "string",
        },
        biglakeConfiguration: {
            connectionId: "string",
            fileFormat: "string",
            storageUri: "string",
            tableFormat: "string",
        },
        deletionProtection: false,
        project: "string",
        rangePartitioning: {
            field: "string",
            range: {
                end: 0,
                interval: 0,
                start: 0,
            },
        },
        requirePartitionFilter: false,
        resourceTags: {
            string: "string",
        },
        schema: "string",
        tableConstraints: {
            foreignKeys: [{
                columnReferences: {
                    referencedColumn: "string",
                    referencingColumn: "string",
                },
                referencedTable: {
                    datasetId: "string",
                    projectId: "string",
                    tableId: "string",
                },
                name: "string",
            }],
            primaryKey: {
                columns: ["string"],
            },
        },
        clusterings: ["string"],
        tableReplicationInfo: {
            sourceDatasetId: "string",
            sourceProjectId: "string",
            sourceTableId: "string",
            replicationIntervalMs: 0,
        },
        timePartitioning: {
            type: "string",
            expirationMs: 0,
            field: "string",
        },
        view: {
            query: "string",
            useLegacySql: false,
        },
    });
    
    type: gcp:bigquery:Table
    properties:
        biglakeConfiguration:
            connectionId: string
            fileFormat: string
            storageUri: string
            tableFormat: string
        clusterings:
            - string
        datasetId: string
        deletionProtection: false
        description: string
        encryptionConfiguration:
            kmsKeyName: string
            kmsKeyVersion: string
        expirationTime: 0
        externalDataConfiguration:
            autodetect: false
            avroOptions:
                useAvroLogicalTypes: false
            bigtableOptions:
                columnFamilies:
                    - columns:
                        - encoding: string
                          fieldName: string
                          onlyReadLatest: false
                          qualifierEncoded: string
                          qualifierString: string
                          type: string
                      encoding: string
                      familyId: string
                      onlyReadLatest: false
                      type: string
                ignoreUnspecifiedColumnFamilies: false
                outputColumnFamiliesAsJson: false
                readRowkeyAsString: false
            compression: string
            connectionId: string
            csvOptions:
                allowJaggedRows: false
                allowQuotedNewlines: false
                encoding: string
                fieldDelimiter: string
                quote: string
                skipLeadingRows: 0
            fileSetSpecType: string
            googleSheetsOptions:
                range: string
                skipLeadingRows: 0
            hivePartitioningOptions:
                mode: string
                requirePartitionFilter: false
                sourceUriPrefix: string
            ignoreUnknownValues: false
            jsonExtension: string
            jsonOptions:
                encoding: string
            maxBadRecords: 0
            metadataCacheMode: string
            objectMetadata: string
            parquetOptions:
                enableListInference: false
                enumAsString: false
            referenceFileSchemaUri: string
            schema: string
            sourceFormat: string
            sourceUris:
                - string
        friendlyName: string
        labels:
            string: string
        materializedView:
            allowNonIncrementalDefinition: false
            enableRefresh: false
            query: string
            refreshIntervalMs: 0
        maxStaleness: string
        project: string
        rangePartitioning:
            field: string
            range:
                end: 0
                interval: 0
                start: 0
        requirePartitionFilter: false
        resourceTags:
            string: string
        schema: string
        tableConstraints:
            foreignKeys:
                - columnReferences:
                    referencedColumn: string
                    referencingColumn: string
                  name: string
                  referencedTable:
                    datasetId: string
                    projectId: string
                    tableId: string
            primaryKey:
                columns:
                    - string
        tableId: string
        tableReplicationInfo:
            replicationIntervalMs: 0
            sourceDatasetId: string
            sourceProjectId: string
            sourceTableId: string
        timePartitioning:
            expirationMs: 0
            field: string
            type: string
        view:
            query: string
            useLegacySql: false
    

    Table Resource Properties

    To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

    Inputs

    In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
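    For example, a minimal sketch (dataset and table IDs are placeholders) showing the two equivalent forms for the time_partitioning input:

    import pulumi_gcp as gcp

    # 1) Typed argument class
    t1 = gcp.bigquery.Table("t1",
        dataset_id="foo",
        table_id="t1",
        time_partitioning=gcp.bigquery.TableTimePartitioningArgs(type="DAY"))

    # 2) Equivalent dictionary literal with the same keys
    t2 = gcp.bigquery.Table("t2",
        dataset_id="foo",
        table_id="t2",
        time_partitioning={"type": "DAY"})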

    The Table resource accepts the following input properties:

    DatasetId string
    The dataset ID to create the table in. Changing this forces a new resource to be created.
    TableId string
    A unique ID for the resource. Changing this forces a new resource to be created.
    BiglakeConfiguration TableBiglakeConfiguration
    Specifies the configuration of a BigLake managed table. Structure is documented below.
    Clusterings List<string>
    Specifies column names to use for data clustering. Up to four top-level columns are allowed, and should be specified in descending priority order.
    DeletionProtection bool
    Whether or not to allow the provider to destroy the instance. Unless this field is set to false in state, a destroy or update that would delete the instance will fail.
    Description string
    The field description.
    EncryptionConfiguration TableEncryptionConfiguration
    Specifies how the table should be encrypted. If left blank, the table will be encrypted with a Google-managed key; that process is transparent to the user. Structure is documented below.
    ExpirationTime int
    The time when this table expires, in milliseconds since the epoch. If not present, the table will persist indefinitely. Expired tables will be deleted and their storage reclaimed.
    ExternalDataConfiguration TableExternalDataConfiguration
    Describes the data format, location, and other properties of a table stored outside of BigQuery. By defining these properties, the data source can then be queried as if it were a standard BigQuery table. Structure is documented below.
    FriendlyName string
    A descriptive name for the table.
    Labels Dictionary<string, string>

    A mapping of labels to assign to the resource.

    Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.

    MaterializedView TableMaterializedView
    If specified, configures this table as a materialized view. Structure is documented below.
    MaxStaleness string
    The maximum staleness of data that could be returned when the table (or stale MV) is queried. Staleness encoded as a string encoding of SQL IntervalValue type.
    Project string
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    RangePartitioning TableRangePartitioning
    If specified, configures range-based partitioning for this table. Structure is documented below.
    RequirePartitionFilter bool
    If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified.
    ResourceTags Dictionary<string, string>
    The tags attached to this table. Tag keys are globally unique. Tag key is expected to be in the namespaced format, for example "123456789012/environment" where 123456789012 is the ID of the parent organization or project resource for this tag key. Tag value is expected to be the short name, for example "Production".
    Schema string
    A JSON schema for the table.
    TableConstraints TableTableConstraints
    Defines the primary key and foreign keys. Structure is documented below.
    TableReplicationInfo TableTableReplicationInfo
    Replication info of a table created using "AS REPLICA" DDL like: CREATE MATERIALIZED VIEW mv1 AS REPLICA OF src_mv. Structure is documented below.
    TimePartitioning TableTimePartitioning
    If specified, configures time-based partitioning for this table. Structure is documented below.
    View TableView
    If specified, configures this table as a view. Structure is documented below.
    DatasetId string
    The dataset ID to create the table in. Changing this forces a new resource to be created.
    TableId string
    A unique ID for the resource. Changing this forces a new resource to be created.
    BiglakeConfiguration TableBiglakeConfigurationArgs
    Specifies the configuration of a BigLake managed table. Structure is documented below.
    Clusterings []string
    Specifies column names to use for data clustering. Up to four top-level columns are allowed, and should be specified in descending priority order.
    DeletionProtection bool
    Whether or not to allow the provider to destroy the instance. Unless this field is set to false in state, a destroy or update that would delete the instance will fail.
    Description string
    The field description.
    EncryptionConfiguration TableEncryptionConfigurationArgs
    Specifies how the table should be encrypted. If left blank, the table will be encrypted with a Google-managed key; that process is transparent to the user. Structure is documented below.
    ExpirationTime int
    The time when this table expires, in milliseconds since the epoch. If not present, the table will persist indefinitely. Expired tables will be deleted and their storage reclaimed.
    ExternalDataConfiguration TableExternalDataConfigurationArgs
    Describes the data format, location, and other properties of a table stored outside of BigQuery. By defining these properties, the data source can then be queried as if it were a standard BigQuery table. Structure is documented below.
    FriendlyName string
    A descriptive name for the table.
    Labels map[string]string

    A mapping of labels to assign to the resource.

    Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.

    MaterializedView TableMaterializedViewArgs
    If specified, configures this table as a materialized view. Structure is documented below.
    MaxStaleness string
    The maximum staleness of data that could be returned when the table (or stale MV) is queried. Staleness encoded as a string encoding of SQL IntervalValue type.
    Project string
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    RangePartitioning TableRangePartitioningArgs
    If specified, configures range-based partitioning for this table. Structure is documented below.
    RequirePartitionFilter bool
    If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified.
    ResourceTags map[string]string
    The tags attached to this table. Tag keys are globally unique. Tag key is expected to be in the namespaced format, for example "123456789012/environment" where 123456789012 is the ID of the parent organization or project resource for this tag key. Tag value is expected to be the short name, for example "Production".
    Schema string
    A JSON schema for the table.
    TableConstraints TableTableConstraintsArgs
    Defines the primary key and foreign keys. Structure is documented below.
    TableReplicationInfo TableTableReplicationInfoArgs
    Replication info of a table created using "AS REPLICA" DDL like: CREATE MATERIALIZED VIEW mv1 AS REPLICA OF src_mv. Structure is documented below.
    TimePartitioning TableTimePartitioningArgs
    If specified, configures time-based partitioning for this table. Structure is documented below.
    View TableViewArgs
    If specified, configures this table as a view. Structure is documented below.
    datasetId String
    The dataset ID to create the table in. Changing this forces a new resource to be created.
    tableId String
    A unique ID for the resource. Changing this forces a new resource to be created.
    biglakeConfiguration TableBiglakeConfiguration
    Specifies the configuration of a BigLake managed table. Structure is documented below.
    clusterings List<String>
    Specifies column names to use for data clustering. Up to four top-level columns are allowed, and should be specified in descending priority order.
    deletionProtection Boolean
    Whether or not to allow the provider to destroy the instance. Unless this field is set to false in state, a destroy or update that would delete the instance will fail.
    description String
    The field description.
    encryptionConfiguration TableEncryptionConfiguration
    Specifies how the table should be encrypted. If left blank, the table will be encrypted with a Google-managed key; that process is transparent to the user. Structure is documented below.
    expirationTime Integer
    The time when this table expires, in milliseconds since the epoch. If not present, the table will persist indefinitely. Expired tables will be deleted and their storage reclaimed.
    externalDataConfiguration TableExternalDataConfiguration
    Describes the data format, location, and other properties of a table stored outside of BigQuery. By defining these properties, the data source can then be queried as if it were a standard BigQuery table. Structure is documented below.
    friendlyName String
    A descriptive name for the table.
    labels Map<String,String>

    A mapping of labels to assign to the resource.

    Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.

    materializedView TableMaterializedView
    If specified, configures this table as a materialized view. Structure is documented below.
    maxStaleness String
    The maximum staleness of data that could be returned when the table (or stale MV) is queried. Staleness encoded as a string encoding of SQL IntervalValue type.
    project String
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    rangePartitioning TableRangePartitioning
    If specified, configures range-based partitioning for this table. Structure is documented below.
    requirePartitionFilter Boolean
    If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified.
    resourceTags Map<String,String>
    The tags attached to this table. Tag keys are globally unique. Tag key is expected to be in the namespaced format, for example "123456789012/environment" where 123456789012 is the ID of the parent organization or project resource for this tag key. Tag value is expected to be the short name, for example "Production".
    schema String
    A JSON schema for the table.
    tableConstraints TableTableConstraints
    Defines the primary key and foreign keys. Structure is documented below.
    tableReplicationInfo TableTableReplicationInfo
    Replication info of a table created using "AS REPLICA" DDL like: CREATE MATERIALIZED VIEW mv1 AS REPLICA OF src_mv. Structure is documented below.
    timePartitioning TableTimePartitioning
    If specified, configures time-based partitioning for this table. Structure is documented below.
    view TableView
    If specified, configures this table as a view. Structure is documented below.
    datasetId string
    The dataset ID to create the table in. Changing this forces a new resource to be created.
    tableId string
    A unique ID for the resource. Changing this forces a new resource to be created.
    biglakeConfiguration TableBiglakeConfiguration
    Specifies the configuration of a BigLake managed table. Structure is documented below.
    clusterings string[]
    Specifies column names to use for data clustering. Up to four top-level columns are allowed, and should be specified in descending priority order.
    deletionProtection boolean
    Whether or not to allow the provider to destroy the instance. Unless this field is set to false in state, a pulumi destroy or pulumi update that would delete the instance will fail.
    description string
    The field description.
    encryptionConfiguration TableEncryptionConfiguration
    Specifies how the table should be encrypted. If left blank, the table will be encrypted with a Google-managed key; that process is transparent to the user. Structure is documented below.
    expirationTime number
    The time when this table expires, in milliseconds since the epoch. If not present, the table will persist indefinitely. Expired tables will be deleted and their storage reclaimed.
    externalDataConfiguration TableExternalDataConfiguration
    Describes the data format, location, and other properties of a table stored outside of BigQuery. By defining these properties, the data source can then be queried as if it were a standard BigQuery table. Structure is documented below.
    friendlyName string
    A descriptive name for the table.
    labels {[key: string]: string}

    A mapping of labels to assign to the resource.

    Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.

    materializedView TableMaterializedView
    If specified, configures this table as a materialized view. Structure is documented below.
    maxStaleness string
    The maximum staleness of data that could be returned when the table (or a stale materialized view) is queried. Staleness is encoded as a string in the SQL IntervalValue format.
    project string
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    rangePartitioning TableRangePartitioning
    If specified, configures range-based partitioning for this table. Structure is documented below.
    requirePartitionFilter boolean
    If set to true, queries over this table must specify a partition filter that can be used for partition elimination.
    resourceTags {[key: string]: string}
    The tags attached to this table. Tag keys are globally unique. Tag key is expected to be in the namespaced format, for example "123456789012/environment" where 123456789012 is the ID of the parent organization or project resource for this tag key. Tag value is expected to be the short name, for example "Production".
    schema string
    A JSON schema for the table.
    tableConstraints TableTableConstraints
    Defines the primary key and foreign keys. Structure is documented below.
    tableReplicationInfo TableTableReplicationInfo
    Replication info of a table created using "AS REPLICA" DDL like: CREATE MATERIALIZED VIEW mv1 AS REPLICA OF src_mv. Structure is documented below.
    timePartitioning TableTimePartitioning
    If specified, configures time-based partitioning for this table. Structure is documented below.
    view TableView
    If specified, configures this table as a view. Structure is documented below.
    dataset_id str
    The dataset ID to create the table in. Changing this forces a new resource to be created.
    table_id str
    A unique ID for the resource. Changing this forces a new resource to be created.
    biglake_configuration TableBiglakeConfigurationArgs
    Specifies the configuration of a BigLake managed table. Structure is documented below.
    clusterings Sequence[str]
    Specifies column names to use for data clustering. Up to four top-level columns are allowed, and should be specified in descending priority order.
    deletion_protection bool
    Whether or not to allow the provider to destroy the instance. Unless this field is set to false in state, a pulumi destroy or pulumi update that would delete the instance will fail.
    description str
    The field description.
    encryption_configuration TableEncryptionConfigurationArgs
    Specifies how the table should be encrypted. If left blank, the table will be encrypted with a Google-managed key; that process is transparent to the user. Structure is documented below.
    expiration_time int
    The time when this table expires, in milliseconds since the epoch. If not present, the table will persist indefinitely. Expired tables will be deleted and their storage reclaimed.
    external_data_configuration TableExternalDataConfigurationArgs
    Describes the data format, location, and other properties of a table stored outside of BigQuery. By defining these properties, the data source can then be queried as if it were a standard BigQuery table. Structure is documented below.
    friendly_name str
    A descriptive name for the table.
    labels Mapping[str, str]

    A mapping of labels to assign to the resource.

    Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.

    materialized_view TableMaterializedViewArgs
    If specified, configures this table as a materialized view. Structure is documented below.
    max_staleness str
    The maximum staleness of data that could be returned when the table (or a stale materialized view) is queried. Staleness is encoded as a string in the SQL IntervalValue format.
    project str
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    range_partitioning TableRangePartitioningArgs
    If specified, configures range-based partitioning for this table. Structure is documented below.
    require_partition_filter bool
    If set to true, queries over this table must specify a partition filter that can be used for partition elimination.
    resource_tags Mapping[str, str]
    The tags attached to this table. Tag keys are globally unique. Tag key is expected to be in the namespaced format, for example "123456789012/environment" where 123456789012 is the ID of the parent organization or project resource for this tag key. Tag value is expected to be the short name, for example "Production".
    schema str
    A JSON schema for the table.
    table_constraints TableTableConstraintsArgs
    Defines the primary key and foreign keys. Structure is documented below.
    table_replication_info TableTableReplicationInfoArgs
    Replication info of a table created using "AS REPLICA" DDL like: CREATE MATERIALIZED VIEW mv1 AS REPLICA OF src_mv. Structure is documented below.
    time_partitioning TableTimePartitioningArgs
    If specified, configures time-based partitioning for this table. Structure is documented below.
    view TableViewArgs
    If specified, configures this table as a view. Structure is documented below.
    datasetId String
    The dataset ID to create the table in. Changing this forces a new resource to be created.
    tableId String
    A unique ID for the resource. Changing this forces a new resource to be created.
    biglakeConfiguration Property Map
    Specifies the configuration of a BigLake managed table. Structure is documented below.
    clusterings List<String>
    Specifies column names to use for data clustering. Up to four top-level columns are allowed, and should be specified in descending priority order.
    deletionProtection Boolean
    Whether or not to allow the provider to destroy the instance. Unless this field is set to false in state, a pulumi destroy or pulumi update that would delete the instance will fail.
    description String
    The field description.
    encryptionConfiguration Property Map
    Specifies how the table should be encrypted. If left blank, the table will be encrypted with a Google-managed key; that process is transparent to the user. Structure is documented below.
    expirationTime Number
    The time when this table expires, in milliseconds since the epoch. If not present, the table will persist indefinitely. Expired tables will be deleted and their storage reclaimed.
    externalDataConfiguration Property Map
    Describes the data format, location, and other properties of a table stored outside of BigQuery. By defining these properties, the data source can then be queried as if it were a standard BigQuery table. Structure is documented below.
    friendlyName String
    A descriptive name for the table.
    labels Map<String>

    A mapping of labels to assign to the resource.

    Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.

    materializedView Property Map
    If specified, configures this table as a materialized view. Structure is documented below.
    maxStaleness String
    The maximum staleness of data that could be returned when the table (or a stale materialized view) is queried. Staleness is encoded as a string in the SQL IntervalValue format.
    project String
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    rangePartitioning Property Map
    If specified, configures range-based partitioning for this table. Structure is documented below.
    requirePartitionFilter Boolean
    If set to true, queries over this table must specify a partition filter that can be used for partition elimination.
    resourceTags Map<String>
    The tags attached to this table. Tag keys are globally unique. Tag key is expected to be in the namespaced format, for example "123456789012/environment" where 123456789012 is the ID of the parent organization or project resource for this tag key. Tag value is expected to be the short name, for example "Production".
    schema String
    A JSON schema for the table.
    tableConstraints Property Map
    Defines the primary key and foreign keys. Structure is documented below.
    tableReplicationInfo Property Map
    Replication info of a table created using "AS REPLICA" DDL like: CREATE MATERIALIZED VIEW mv1 AS REPLICA OF src_mv. Structure is documented below.
    timePartitioning Property Map
    If specified, configures time-based partitioning for this table. Structure is documented below.
    view Property Map
    If specified, configures this table as a view. Structure is documented below.
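    Several of the inputs above can be combined in a single program. Below is a minimal TypeScript sketch exercising clustering, integer-range partitioning, a required partition filter, resource tags, and deletion protection disabled; all resource names, IDs, and tag values are illustrative assumptions, not values from this page:

    import * as pulumi from "@pulumi/pulumi";
    import * as gcp from "@pulumi/gcp";

    // Dataset to hold the table (name and location are assumptions).
    const salesDataset = new gcp.bigquery.Dataset("sales", {
        datasetId: "sales",
        location: "US",
    });

    // Table exercising several of the inputs documented above.
    const salesTable = new gcp.bigquery.Table("sales", {
        datasetId: salesDataset.datasetId,
        tableId: "orders_by_region",
        // Allow `pulumi destroy` / `pulumi update` to delete the table.
        deletionProtection: false,
        clusterings: ["region", "customer_id"],
        rangePartitioning: {
            field: "region_code",
            range: {
                start: 0,
                end: 100,
                interval: 10,
            },
        },
        requirePartitionFilter: true,
        resourceTags: {
            // Namespaced tag key ("<parent id>/<key short name>") mapped to a tag value short name.
            "123456789012/environment": "production",
        },
        labels: {
            team: "analytics",
        },
        schema: JSON.stringify([
            { name: "region_code", type: "INTEGER", mode: "REQUIRED" },
            { name: "region", type: "STRING", mode: "NULLABLE" },
            { name: "customer_id", type: "STRING", mode: "NULLABLE" },
            { name: "amount", type: "NUMERIC", mode: "NULLABLE" },
        ]),
    });

    // Computed properties (see Outputs below) can be exported as stack outputs.
    export const salesTableSelfLink = salesTable.selfLink;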

    Outputs

    All input properties are implicitly available as output properties. Additionally, the Table resource produces the following output properties:

    CreationTime int
    The time when this table was created, in milliseconds since the epoch.
    EffectiveLabels Dictionary<string, string>

    All labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients, and services.

    schema - (Optional) A JSON schema for the table.

    Note: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. If the API returns a different value for the same schema, e.g. it switches the order of values or replaces a STRUCT field type with a RECORD field type, we currently cannot suppress the recurring diff this causes. As a workaround, we recommend using the schema as returned by the API.

    Note: If you use external_data_configuration documented below and do not set external_data_configuration.connection_id, schemas must be specified with external_data_configuration.schema. Otherwise, schemas must be specified with this top-level field.

    Etag string
    A hash of the resource.
    Id string
    The provider-assigned unique ID for this managed resource.
    LastModifiedTime int
    The time when this table was last modified, in milliseconds since the epoch.
    Location string
    The geographic location where the table resides. This value is inherited from the dataset.
    NumBytes int
    The size of this table in bytes, excluding any data in the streaming buffer.
    NumLongTermBytes int
    The number of bytes in the table that are considered "long-term storage".
    NumRows int
    The number of rows of data in this table, excluding any data in the streaming buffer.
    PulumiLabels Dictionary<string, string>
    The combination of labels configured directly on the resource and default labels configured on the provider.
    SelfLink string
    The URI of the created resource.
    Type string
    Describes the table type.
    CreationTime int
    The time when this table was created, in milliseconds since the epoch.
    EffectiveLabels map[string]string

    All labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients, and services.

    schema - (Optional) A JSON schema for the table.

    Note: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. If the API returns a different value for the same schema, e.g. it switches the order of values or replaces a STRUCT field type with a RECORD field type, we currently cannot suppress the recurring diff this causes. As a workaround, we recommend using the schema as returned by the API.

    Note: If you use external_data_configuration documented below and do not set external_data_configuration.connection_id, schemas must be specified with external_data_configuration.schema. Otherwise, schemas must be specified with this top-level field.

    Etag string
    A hash of the resource.
    Id string
    The provider-assigned unique ID for this managed resource.
    LastModifiedTime int
    The time when this table was last modified, in milliseconds since the epoch.
    Location string
    The geographic location where the table resides. This value is inherited from the dataset.
    NumBytes int
    The size of this table in bytes, excluding any data in the streaming buffer.
    NumLongTermBytes int
    The number of bytes in the table that are considered "long-term storage".
    NumRows int
    The number of rows of data in this table, excluding any data in the streaming buffer.
    PulumiLabels map[string]string
    The combination of labels configured directly on the resource and default labels configured on the provider.
    SelfLink string
    The URI of the created resource.
    Type string
    Describes the table type.
    creationTime Integer
    The time when this table was created, in milliseconds since the epoch.
    effectiveLabels Map<String,String>

    All labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients, and services.

    schema - (Optional) A JSON schema for the table.

    Note: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. If the API returns a different value for the same schema, e.g. it switches the order of values or replaces a STRUCT field type with a RECORD field type, we currently cannot suppress the recurring diff this causes. As a workaround, we recommend using the schema as returned by the API.

    Note: If you use external_data_configuration documented below and do not set external_data_configuration.connection_id, schemas must be specified with external_data_configuration.schema. Otherwise, schemas must be specified with this top-level field.

    etag String
    A hash of the resource.
    id String
    The provider-assigned unique ID for this managed resource.
    lastModifiedTime Integer
    The time when this table was last modified, in milliseconds since the epoch.
    location String
    The geographic location where the table resides. This value is inherited from the dataset.
    numBytes Integer
    The size of this table in bytes, excluding any data in the streaming buffer.
    numLongTermBytes Integer
    The number of bytes in the table that are considered "long-term storage".
    numRows Integer
    The number of rows of data in this table, excluding any data in the streaming buffer.
    pulumiLabels Map<String,String>
    The combination of labels configured directly on the resource and default labels configured on the provider.
    selfLink String
    The URI of the created resource.
    type String
    Describes the table type.
    creationTime number
    The time when this table was created, in milliseconds since the epoch.
    effectiveLabels {[key: string]: string}

    All labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients, and services.

    schema - (Optional) A JSON schema for the table.

    Note: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. If the API returns a different value for the same schema, e.g. it switches the order of values or replaces a STRUCT field type with a RECORD field type, we currently cannot suppress the recurring diff this causes. As a workaround, we recommend using the schema as returned by the API.

    Note: If you use external_data_configuration documented below and do not set external_data_configuration.connection_id, schemas must be specified with external_data_configuration.schema. Otherwise, schemas must be specified with this top-level field.

    etag string
    A hash of the resource.
    id string
    The provider-assigned unique ID for this managed resource.
    lastModifiedTime number
    The time when this table was last modified, in milliseconds since the epoch.
    location string
    The geographic location where the table resides. This value is inherited from the dataset.
    numBytes number
    The size of this table in bytes, excluding any data in the streaming buffer.
    numLongTermBytes number
    The number of bytes in the table that are considered "long-term storage".
    numRows number
    The number of rows of data in this table, excluding any data in the streaming buffer.
    pulumiLabels {[key: string]: string}
    The combination of labels configured directly on the resource and default labels configured on the provider.
    selfLink string
    The URI of the created resource.
    type string
    Describes the table type.
    creation_time int
    The time when this table was created, in milliseconds since the epoch.
    effective_labels Mapping[str, str]

    All labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients, and services.

    schema - (Optional) A JSON schema for the table.

    Note: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. If the API returns a different value for the same schema, e.g. it switches the order of values or replaces a STRUCT field type with a RECORD field type, we currently cannot suppress the recurring diff this causes. As a workaround, we recommend using the schema as returned by the API.

    Note: If you use external_data_configuration documented below and do not set external_data_configuration.connection_id, schemas must be specified with external_data_configuration.schema. Otherwise, schemas must be specified with this top-level field.

    etag str
    A hash of the resource.
    id str
    The provider-assigned unique ID for this managed resource.
    last_modified_time int
    The time when this table was last modified, in milliseconds since the epoch.
    location str
    The geographic location where the table resides. This value is inherited from the dataset.
    num_bytes int
    The size of this table in bytes, excluding any data in the streaming buffer.
    num_long_term_bytes int
    The number of bytes in the table that are considered "long-term storage".
    num_rows int
    The number of rows of data in this table, excluding any data in the streaming buffer.
    pulumi_labels Mapping[str, str]
    The combination of labels configured directly on the resource and default labels configured on the provider.
    self_link str
    The URI of the created resource.
    type str
    Describes the table type.
    creationTime Number
    The time when this table was created, in milliseconds since the epoch.
    effectiveLabels Map<String>

    All labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients, and services.

    schema - (Optional) A JSON schema for the table.

    Note: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. If the API returns a different value for the same schema, e.g. it switches the order of values or replaces a STRUCT field type with a RECORD field type, we currently cannot suppress the recurring diff this causes. As a workaround, we recommend using the schema as returned by the API.

    Note: If you use external_data_configuration documented below and do not set external_data_configuration.connection_id, schemas must be specified with external_data_configuration.schema. Otherwise, schemas must be specified with this top-level field.

    etag String
    A hash of the resource.
    id String
    The provider-assigned unique ID for this managed resource.
    lastModifiedTime Number
    The time when this table was last modified, in milliseconds since the epoch.
    location String
    The geographic location where the table resides. This value is inherited from the dataset.
    numBytes Number
    The size of this table in bytes, excluding any data in the streaming buffer.
    numLongTermBytes Number
    The number of bytes in the table that are considered "long-term storage".
    numRows Number
    The number of rows of data in this table, excluding any data in the streaming buffer.
    pulumiLabels Map<String>
    The combination of labels configured directly on the resource and default labels configured on the provider.
    selfLink String
    The URI of the created resource.
    type String
    Describes the table type.
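    The schema notes repeated above have a practical consequence for external tables: when external_data_configuration is used without a connection_id, the schema must be declared inside that block rather than on the table's top-level schema field. A minimal TypeScript sketch, with a hypothetical bucket and dataset:

    import * as gcp from "@pulumi/gcp";

    const logsDataset = new gcp.bigquery.Dataset("logs", { datasetId: "logs" });

    const eventsExternal = new gcp.bigquery.Table("events_external", {
        datasetId: logsDataset.datasetId,
        tableId: "events_external",
        deletionProtection: false,
        externalDataConfiguration: {
            autodetect: false,
            sourceFormat: "NEWLINE_DELIMITED_JSON",
            sourceUris: ["gs://my-log-bucket/events/*.json"],
            // No connectionId is set, so the schema lives here instead of on the
            // table's top-level `schema` input (see the notes above).
            schema: JSON.stringify([
                { name: "event_id", type: "STRING", mode: "REQUIRED" },
                { name: "occurred_at", type: "TIMESTAMP", mode: "NULLABLE" },
            ]),
        },
    });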

    Look up Existing Table Resource

    Get an existing Table resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

    public static get(name: string, id: Input<ID>, state?: TableState, opts?: CustomResourceOptions): Table
    @staticmethod
    def get(resource_name: str,
            id: str,
            opts: Optional[ResourceOptions] = None,
            biglake_configuration: Optional[TableBiglakeConfigurationArgs] = None,
            clusterings: Optional[Sequence[str]] = None,
            creation_time: Optional[int] = None,
            dataset_id: Optional[str] = None,
            deletion_protection: Optional[bool] = None,
            description: Optional[str] = None,
            effective_labels: Optional[Mapping[str, str]] = None,
            encryption_configuration: Optional[TableEncryptionConfigurationArgs] = None,
            etag: Optional[str] = None,
            expiration_time: Optional[int] = None,
            external_data_configuration: Optional[TableExternalDataConfigurationArgs] = None,
            friendly_name: Optional[str] = None,
            labels: Optional[Mapping[str, str]] = None,
            last_modified_time: Optional[int] = None,
            location: Optional[str] = None,
            materialized_view: Optional[TableMaterializedViewArgs] = None,
            max_staleness: Optional[str] = None,
            num_bytes: Optional[int] = None,
            num_long_term_bytes: Optional[int] = None,
            num_rows: Optional[int] = None,
            project: Optional[str] = None,
            pulumi_labels: Optional[Mapping[str, str]] = None,
            range_partitioning: Optional[TableRangePartitioningArgs] = None,
            require_partition_filter: Optional[bool] = None,
            resource_tags: Optional[Mapping[str, str]] = None,
            schema: Optional[str] = None,
            self_link: Optional[str] = None,
            table_constraints: Optional[TableTableConstraintsArgs] = None,
            table_id: Optional[str] = None,
            table_replication_info: Optional[TableTableReplicationInfoArgs] = None,
            time_partitioning: Optional[TableTimePartitioningArgs] = None,
            type: Optional[str] = None,
            view: Optional[TableViewArgs] = None) -> Table
    func GetTable(ctx *Context, name string, id IDInput, state *TableState, opts ...ResourceOption) (*Table, error)
    public static Table Get(string name, Input<string> id, TableState? state, CustomResourceOptions? opts = null)
    public static Table get(String name, Output<String> id, TableState state, CustomResourceOptions options)
    Resource lookup is not supported in YAML
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    resource_name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
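    For example, looking up an existing table from TypeScript and reading its outputs might look like the sketch below; the project, dataset, and table names are assumptions, as is the exact ID format shown:

    import * as gcp from "@pulumi/gcp";

    // Look up a table that already exists without taking over its lifecycle.
    const existing = gcp.bigquery.Table.get(
        "existing-table",
        "projects/my-project/datasets/foo/tables/bar",
    );

    // Outputs of the looked-up resource behave like those of a created one.
    export const existingType = existing.type;
    export const existingNumRows = existing.numRows;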
    The following state arguments are supported:
    BiglakeConfiguration TableBiglakeConfiguration
    Specifies the configuration of a BigLake managed table. Structure is documented below.
    Clusterings List<string>
    Specifies column names to use for data clustering. Up to four top-level columns are allowed, and should be specified in descending priority order.
    CreationTime int
    The time when this table was created, in milliseconds since the epoch.
    DatasetId string
    The dataset ID to create the table in. Changing this forces a new resource to be created.
    DeletionProtection bool
    Whether or not to allow the provider to destroy the instance. Unless this field is set to false in state, a pulumi destroy or pulumi update that would delete the instance will fail.
    Description string
    The field description.
    EffectiveLabels Dictionary<string, string>

    All labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients, and services.

    schema - (Optional) A JSON schema for the table.

    Note: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. If the API returns a different value for the same schema, e.g. it switches the order of values or replaces a STRUCT field type with a RECORD field type, we currently cannot suppress the recurring diff this causes. As a workaround, we recommend using the schema as returned by the API.

    Note: If you use external_data_configuration documented below and do not set external_data_configuration.connection_id, schemas must be specified with external_data_configuration.schema. Otherwise, schemas must be specified with this top-level field.

    EncryptionConfiguration TableEncryptionConfiguration
    Specifies how the table should be encrypted. If left blank, the table will be encrypted with a Google-managed key; that process is transparent to the user. Structure is documented below.
    Etag string
    A hash of the resource.
    ExpirationTime int
    The time when this table expires, in milliseconds since the epoch. If not present, the table will persist indefinitely. Expired tables will be deleted and their storage reclaimed.
    ExternalDataConfiguration TableExternalDataConfiguration
    Describes the data format, location, and other properties of a table stored outside of BigQuery. By defining these properties, the data source can then be queried as if it were a standard BigQuery table. Structure is documented below.
    FriendlyName string
    A descriptive name for the table.
    Labels Dictionary<string, string>

    A mapping of labels to assign to the resource.

    Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.

    LastModifiedTime int
    The time when this table was last modified, in milliseconds since the epoch.
    Location string
    The geographic location where the table resides. This value is inherited from the dataset.
    MaterializedView TableMaterializedView
    If specified, configures this table as a materialized view. Structure is documented below.
    MaxStaleness string
    The maximum staleness of data that could be returned when the table (or a stale materialized view) is queried. Staleness is encoded as a string in the SQL IntervalValue format.
    NumBytes int
    The size of this table in bytes, excluding any data in the streaming buffer.
    NumLongTermBytes int
    The number of bytes in the table that are considered "long-term storage".
    NumRows int
    The number of rows of data in this table, excluding any data in the streaming buffer.
    Project string
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    PulumiLabels Dictionary<string, string>
    The combination of labels configured directly on the resource and default labels configured on the provider.
    RangePartitioning TableRangePartitioning
    If specified, configures range-based partitioning for this table. Structure is documented below.
    RequirePartitionFilter bool
    If set to true, queries over this table must specify a partition filter that can be used for partition elimination.
    ResourceTags Dictionary<string, string>
    The tags attached to this table. Tag keys are globally unique. Tag key is expected to be in the namespaced format, for example "123456789012/environment" where 123456789012 is the ID of the parent organization or project resource for this tag key. Tag value is expected to be the short name, for example "Production".
    Schema string
    A JSON schema for the table.
    SelfLink string
    The URI of the created resource.
    TableConstraints TableTableConstraints
    Defines the primary key and foreign keys. Structure is documented below.
    TableId string
    A unique ID for the resource. Changing this forces a new resource to be created.
    TableReplicationInfo TableTableReplicationInfo
    Replication info of a table created using "AS REPLICA" DDL like: CREATE MATERIALIZED VIEW mv1 AS REPLICA OF src_mv. Structure is documented below.
    TimePartitioning TableTimePartitioning
    If specified, configures time-based partitioning for this table. Structure is documented below.
    Type string
    Describes the table type.
    View TableView
    If specified, configures this table as a view. Structure is documented below.
    BiglakeConfiguration TableBiglakeConfigurationArgs
    Specifies the configuration of a BigLake managed table. Structure is documented below.
    Clusterings []string
    Specifies column names to use for data clustering. Up to four top-level columns are allowed, and should be specified in descending priority order.
    CreationTime int
    The time when this table was created, in milliseconds since the epoch.
    DatasetId string
    The dataset ID to create the table in. Changing this forces a new resource to be created.
    DeletionProtection bool
    Whether or not to allow the provider to destroy the instance. Unless this field is set to false in state, a pulumi destroy or pulumi update that would delete the instance will fail.
    Description string
    The field description.
    EffectiveLabels map[string]string

    All labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients, and services.

    schema - (Optional) A JSON schema for the table.

    Note: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. If the API returns a different value for the same schema, e.g. it switches the order of values or replaces a STRUCT field type with a RECORD field type, we currently cannot suppress the recurring diff this causes. As a workaround, we recommend using the schema as returned by the API.

    Note: If you use external_data_configuration documented below and do not set external_data_configuration.connection_id, schemas must be specified with external_data_configuration.schema. Otherwise, schemas must be specified with this top-level field.

    EncryptionConfiguration TableEncryptionConfigurationArgs
    Specifies how the table should be encrypted. If left blank, the table will be encrypted with a Google-managed key; that process is transparent to the user. Structure is documented below.
    Etag string
    A hash of the resource.
    ExpirationTime int
    The time when this table expires, in milliseconds since the epoch. If not present, the table will persist indefinitely. Expired tables will be deleted and their storage reclaimed.
    ExternalDataConfiguration TableExternalDataConfigurationArgs
    Describes the data format, location, and other properties of a table stored outside of BigQuery. By defining these properties, the data source can then be queried as if it were a standard BigQuery table. Structure is documented below.
    FriendlyName string
    A descriptive name for the table.
    Labels map[string]string

    A mapping of labels to assign to the resource.

    Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.

    LastModifiedTime int
    The time when this table was last modified, in milliseconds since the epoch.
    Location string
    The geographic location where the table resides. This value is inherited from the dataset.
    MaterializedView TableMaterializedViewArgs
    If specified, configures this table as a materialized view. Structure is documented below.
    MaxStaleness string
    The maximum staleness of data that could be returned when the table (or a stale materialized view) is queried. Staleness is encoded as a string in the SQL IntervalValue format.
    NumBytes int
    The size of this table in bytes, excluding any data in the streaming buffer.
    NumLongTermBytes int
    The number of bytes in the table that are considered "long-term storage".
    NumRows int
    The number of rows of data in this table, excluding any data in the streaming buffer.
    Project string
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    PulumiLabels map[string]string
    The combination of labels configured directly on the resource and default labels configured on the provider.
    RangePartitioning TableRangePartitioningArgs
    If specified, configures range-based partitioning for this table. Structure is documented below.
    RequirePartitionFilter bool
    If set to true, queries over this table must specify a partition filter that can be used for partition elimination.
    ResourceTags map[string]string
    The tags attached to this table. Tag keys are globally unique. Tag key is expected to be in the namespaced format, for example "123456789012/environment" where 123456789012 is the ID of the parent organization or project resource for this tag key. Tag value is expected to be the short name, for example "Production".
    Schema string
    A JSON schema for the table.
    SelfLink string
    The URI of the created resource.
    TableConstraints TableTableConstraintsArgs
    Defines the primary key and foreign keys. Structure is documented below.
    TableId string
    A unique ID for the resource. Changing this forces a new resource to be created.
    TableReplicationInfo TableTableReplicationInfoArgs
    Replication info of a table created using "AS REPLICA" DDL like: CREATE MATERIALIZED VIEW mv1 AS REPLICA OF src_mv. Structure is documented below.
    TimePartitioning TableTimePartitioningArgs
    If specified, configures time-based partitioning for this table. Structure is documented below.
    Type string
    Describes the table type.
    View TableViewArgs
    If specified, configures this table as a view. Structure is documented below.
    biglakeConfiguration TableBiglakeConfiguration
    Specifies the configuration of a BigLake managed table. Structure is documented below.
    clusterings List<String>
    Specifies column names to use for data clustering. Up to four top-level columns are allowed, and should be specified in descending priority order.
    creationTime Integer
    The time when this table was created, in milliseconds since the epoch.
    datasetId String
    The dataset ID to create the table in. Changing this forces a new resource to be created.
    deletionProtection Boolean
    Whether or not to allow the provider to destroy the instance. Unless this field is set to false in state, a pulumi destroy or pulumi update that would delete the instance will fail.
    description String
    The field description.
    effectiveLabels Map<String,String>

    All labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients, and services.

    schema - (Optional) A JSON schema for the table.

    Note: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. If the API returns a different value for the same schema, e.g. it switches the order of values or replaces a STRUCT field type with a RECORD field type, we currently cannot suppress the recurring diff this causes. As a workaround, we recommend using the schema as returned by the API.

    Note: If you use external_data_configuration documented below and do not set external_data_configuration.connection_id, schemas must be specified with external_data_configuration.schema. Otherwise, schemas must be specified with this top-level field.

    encryptionConfiguration TableEncryptionConfiguration
    Specifies how the table should be encrypted. If left blank, the table will be encrypted with a Google-managed key; that process is transparent to the user. Structure is documented below.
    etag String
    A hash of the resource.
    expirationTime Integer
    The time when this table expires, in milliseconds since the epoch. If not present, the table will persist indefinitely. Expired tables will be deleted and their storage reclaimed.
    externalDataConfiguration TableExternalDataConfiguration
    Describes the data format, location, and other properties of a table stored outside of BigQuery. By defining these properties, the data source can then be queried as if it were a standard BigQuery table. Structure is documented below.
    friendlyName String
    A descriptive name for the table.
    labels Map<String,String>

    A mapping of labels to assign to the resource.

    Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.

    lastModifiedTime Integer
    The time when this table was last modified, in milliseconds since the epoch.
    location String
    The geographic location where the table resides. This value is inherited from the dataset.
    materializedView TableMaterializedView
    If specified, configures this table as a materialized view. Structure is documented below.
    maxStaleness String
    The maximum staleness of data that could be returned when the table (or a stale materialized view) is queried. Staleness is encoded as a string in the SQL IntervalValue format.
    numBytes Integer
    The size of this table in bytes, excluding any data in the streaming buffer.
    numLongTermBytes Integer
    The number of bytes in the table that are considered "long-term storage".
    numRows Integer
    The number of rows of data in this table, excluding any data in the streaming buffer.
    project String
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    pulumiLabels Map<String,String>
    The combination of labels configured directly on the resource and default labels configured on the provider.
    rangePartitioning TableRangePartitioning
    If specified, configures range-based partitioning for this table. Structure is documented below.
    requirePartitionFilter Boolean
    If set to true, queries over this table must specify a partition filter that can be used for partition elimination.
    resourceTags Map<String,String>
    The tags attached to this table. Tag keys are globally unique. Tag key is expected to be in the namespaced format, for example "123456789012/environment" where 123456789012 is the ID of the parent organization or project resource for this tag key. Tag value is expected to be the short name, for example "Production".
    schema String
    A JSON schema for the table.
    selfLink String
    The URI of the created resource.
    tableConstraints TableTableConstraints
    Defines the primary key and foreign keys. Structure is documented below.
    tableId String
    A unique ID for the resource. Changing this forces a new resource to be created.
    tableReplicationInfo TableTableReplicationInfo
    Replication info of a table created using "AS REPLICA" DDL like: CREATE MATERIALIZED VIEW mv1 AS REPLICA OF src_mv. Structure is documented below.
    timePartitioning TableTimePartitioning
    If specified, configures time-based partitioning for this table. Structure is documented below.
    type String
    Describes the table type.
    view TableView
    If specified, configures this table as a view. Structure is documented below.
    biglakeConfiguration TableBiglakeConfiguration
    Specifies the configuration of a BigLake managed table. Structure is documented below.
    clusterings string[]
    Specifies column names to use for data clustering. Up to four top-level columns are allowed, and should be specified in descending priority order.
    creationTime number
    The time when this table was created, in milliseconds since the epoch.
    datasetId string
    The dataset ID to create the table in. Changing this forces a new resource to be created.
    deletionProtection boolean
    Whether or not to allow the provider to destroy the instance. Unless this field is set to false in state, a pulumi destroy or pulumi update that would delete the instance will fail.
    description string
    The field description.
    effectiveLabels {[key: string]: string}

    All labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients, and services.

    schema - (Optional) A JSON schema for the table.

    Note: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. If the API returns a different value for the same schema, e.g. it switches the order of values or replaces a STRUCT field type with a RECORD field type, we currently cannot suppress the recurring diff this causes. As a workaround, we recommend using the schema as returned by the API.

    Note: If you use external_data_configuration documented below and do not set external_data_configuration.connection_id, schemas must be specified with external_data_configuration.schema. Otherwise, schemas must be specified with this top-level field.

    encryptionConfiguration TableEncryptionConfiguration
    Specifies how the table should be encrypted. If left blank, the table will be encrypted with a Google-managed key; that process is transparent to the user. Structure is documented below.
    etag string
    A hash of the resource.
    expirationTime number
    The time when this table expires, in milliseconds since the epoch. If not present, the table will persist indefinitely. Expired tables will be deleted and their storage reclaimed.
    externalDataConfiguration TableExternalDataConfiguration
    Describes the data format, location, and other properties of a table stored outside of BigQuery. By defining these properties, the data source can then be queried as if it were a standard BigQuery table. Structure is documented below.
    friendlyName string
    A descriptive name for the table.
    labels {[key: string]: string}

    A mapping of labels to assign to the resource.

    Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.

    lastModifiedTime number
    The time when this table was last modified, in milliseconds since the epoch.
    location string
    The geographic location where the table resides. This value is inherited from the dataset.
    materializedView TableMaterializedView
    If specified, configures this table as a materialized view. Structure is documented below.
    maxStaleness string
    The maximum staleness of data that could be returned when the table (or a stale materialized view) is queried. Staleness is encoded as a string in the SQL IntervalValue format.
    numBytes number
    The size of this table in bytes, excluding any data in the streaming buffer.
    numLongTermBytes number
    The number of bytes in the table that are considered "long-term storage".
    numRows number
    The number of rows of data in this table, excluding any data in the streaming buffer.
    project string
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    pulumiLabels {[key: string]: string}
    The combination of labels configured directly on the resource and default labels configured on the provider.
    rangePartitioning TableRangePartitioning
    If specified, configures range-based partitioning for this table. Structure is documented below.
    requirePartitionFilter boolean
    If set to true, queries over this table must specify a partition filter that can be used for partition elimination.
    resourceTags {[key: string]: string}
    The tags attached to this table. Tag keys are globally unique. Tag key is expected to be in the namespaced format, for example "123456789012/environment" where 123456789012 is the ID of the parent organization or project resource for this tag key. Tag value is expected to be the short name, for example "Production".
    schema string
    A JSON schema for the table.
    selfLink string
    The URI of the created resource.
    tableConstraints TableTableConstraints
    Defines the primary key and foreign keys. Structure is documented below.
    tableId string
    A unique ID for the resource. Changing this forces a new resource to be created.
    tableReplicationInfo TableTableReplicationInfo
    Replication info of a table created using "AS REPLICA" DDL like: CREATE MATERIALIZED VIEW mv1 AS REPLICA OF src_mv. Structure is documented below.
    timePartitioning TableTimePartitioning
    If specified, configures time-based partitioning for this table. Structure is documented below.
    type string
    Describes the table type.
    view TableView
    If specified, configures this table as a view. Structure is documented below.
    biglake_configuration TableBiglakeConfigurationArgs
    Specifies the configuration of a BigLake managed table. Structure is documented below.
    clusterings Sequence[str]
    Specifies column names to use for data clustering. Up to four top-level columns are allowed, and should be specified in descending priority order.
    creation_time int
    The time when this table was created, in milliseconds since the epoch.
    dataset_id str
    The dataset ID to create the table in. Changing this forces a new resource to be created.
    deletion_protection bool
    Whether or not to allow the provider to destroy the instance. Unless this field is set to false in state, a pulumi destroy or pulumi update that would delete the instance will fail.
    description str
    The field description.
    effective_labels Mapping[str, str]

    All labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients, and services.

    schema - (Optional) A JSON schema for the table.

    Note: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. If the API returns a different value for the same schema, e.g. it switches the order of values or replaces a STRUCT field type with a RECORD field type, we currently cannot suppress the recurring diff this causes. As a workaround, we recommend using the schema as returned by the API.

    Note: If you use external_data_configuration documented below and do not set external_data_configuration.connection_id, schemas must be specified with external_data_configuration.schema. Otherwise, schemas must be specified with this top-level field.

    encryption_configuration TableEncryptionConfigurationArgs
    Specifies how the table should be encrypted. If left blank, the table will be encrypted with a Google-managed key; that process is transparent to the user. Structure is documented below.
    etag str
    A hash of the resource.
    expiration_time int
    The time when this table expires, in milliseconds since the epoch. If not present, the table will persist indefinitely. Expired tables will be deleted and their storage reclaimed.
    external_data_configuration TableExternalDataConfigurationArgs
    Describes the data format, location, and other properties of a table stored outside of BigQuery. By defining these properties, the data source can then be queried as if it were a standard BigQuery table. Structure is documented below.
    friendly_name str
    A descriptive name for the table.
    labels Mapping[str, str]

    A mapping of labels to assign to the resource.

    Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.

    last_modified_time int
    The time when this table was last modified, in milliseconds since the epoch.
    location str
    The geographic location where the table resides. This value is inherited from the dataset.
    materialized_view TableMaterializedViewArgs
    If specified, configures this table as a materialized view. Structure is documented below.
    max_staleness str
    The maximum staleness of data that could be returned when the table (or a stale materialized view) is queried. Staleness is encoded as a string in the SQL IntervalValue format.
    num_bytes int
    The size of this table in bytes, excluding any data in the streaming buffer.
    num_long_term_bytes int
    The number of bytes in the table that are considered "long-term storage".
    num_rows int
    The number of rows of data in this table, excluding any data in the streaming buffer.
    project str
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    pulumi_labels Mapping[str, str]
    The combination of labels configured directly on the resource and default labels configured on the provider.
    range_partitioning TableRangePartitioningArgs
    If specified, configures range-based partitioning for this table. Structure is documented below.
    require_partition_filter bool
    If set to true, queries over this table must specify a partition filter that can be used for partition elimination.
    resource_tags Mapping[str, str]
    The tags attached to this table. Tag keys are globally unique. Tag key is expected to be in the namespaced format, for example "123456789012/environment" where 123456789012 is the ID of the parent organization or project resource for this tag key. Tag value is expected to be the short name, for example "Production".
    schema str
    A JSON schema for the table.
    self_link str
    The URI of the created resource.
    table_constraints TableTableConstraintsArgs
    Defines the primary key and foreign keys. Structure is documented below.
    table_id str
    A unique ID for the resource. Changing this forces a new resource to be created.
    table_replication_info TableTableReplicationInfoArgs
    Replication info of a table created using "AS REPLICA" DDL like: CREATE MATERIALIZED VIEW mv1 AS REPLICA OF src_mv. Structure is documented below.
    time_partitioning TableTimePartitioningArgs
    If specified, configures time-based partitioning for this table. Structure is documented below.
    type str
    Describes the table type.
    view TableViewArgs
    If specified, configures this table as a view. Structure is documented below.
    biglakeConfiguration Property Map
    Specifies the configuration of a BigLake managed table. Structure is documented below.
    clusterings List<String>
    Specifies column names to use for data clustering. Up to four top-level columns are allowed, and should be specified in descending priority order.
    creationTime Number
    The time when this table was created, in milliseconds since the epoch.
    datasetId String
    The dataset ID to create the table in. Changing this forces a new resource to be created.
    deletionProtection Boolean
    Whether or not to allow the provider to destroy the instance. Unless this field is set to false in state, a pulumi destroy or pulumi update that would delete the instance will fail.
    description String
    The field description.
    effectiveLabels Map<String>

    All labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients, and services.

    schema - (Optional) A JSON schema for the table.

    Note: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. If the API returns a different value for the same schema, e.g. it switches the order of values or replaces a STRUCT field type with a RECORD field type, we currently cannot suppress the recurring diff this causes. As a workaround, we recommend using the schema as returned by the API.

    Note: If you use external_data_configuration documented below and do not set external_data_configuration.connection_id, schemas must be specified with external_data_configuration.schema. Otherwise, schemas must be specified with this top-level field.

    encryptionConfiguration Property Map
    Specifies how the table should be encrypted. If left blank, the table will be encrypted with a Google-managed key; that process is transparent to the user. Structure is documented below.
    etag String
    A hash of the resource.
    expirationTime Number
    The time when this table expires, in milliseconds since the epoch. If not present, the table will persist indefinitely. Expired tables will be deleted and their storage reclaimed.
    externalDataConfiguration Property Map
    Describes the data format, location, and other properties of a table stored outside of BigQuery. By defining these properties, the data source can then be queried as if it were a standard BigQuery table. Structure is documented below.
    friendlyName String
    A descriptive name for the table.
    labels Map<String>

    A mapping of labels to assign to the resource.

    Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.

    lastModifiedTime Number
    The time when this table was last modified, in milliseconds since the epoch.
    location String
    The geographic location where the table resides. This value is inherited from the dataset.
    materializedView Property Map
    If specified, configures this table as a materialized view. Structure is documented below.
    maxStaleness String
    The maximum staleness of data that could be returned when the table (or stale materialized view) is queried. Staleness is encoded as a string representation of the SQL IntervalValue type.
    numBytes Number
    The size of this table in bytes, excluding any data in the streaming buffer.
    numLongTermBytes Number
    The number of bytes in the table that are considered "long-term storage".
    numRows Number
    The number of rows of data in this table, excluding any data in the streaming buffer.
    project String
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    pulumiLabels Map<String>
    The combination of labels configured directly on the resource and default labels configured on the provider.
    rangePartitioning Property Map
    If specified, configures range-based partitioning for this table. Structure is documented below.
    requirePartitionFilter Boolean
    If set to true, queries over this table require a partition filter (usable for partition elimination) to be specified.
    resourceTags Map<String>
    The tags attached to this table. Tag keys are globally unique. Tag key is expected to be in the namespaced format, for example "123456789012/environment" where 123456789012 is the ID of the parent organization or project resource for this tag key. Tag value is expected to be the short name, for example "Production".
    schema String
    A JSON schema for the table.
    selfLink String
    The URI of the created resource.
    tableConstraints Property Map
    Defines the primary key and foreign keys. Structure is documented below.
    tableId String
    A unique ID for the resource. Changing this forces a new resource to be created.
    tableReplicationInfo Property Map
    Replication info of a table created using "AS REPLICA" DDL like: CREATE MATERIALIZED VIEW mv1 AS REPLICA OF src_mv. Structure is documented below.
    timePartitioning Property Map
    If specified, configures time-based partitioning for this table. Structure is documented below.
    type String
    Describes the table type.
    view Property Map
    If specified, configures this table as a view. Structure is documented below.
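
    The interplay between the top-level schema field and external_data_configuration.connection_id noted above is easiest to see in code. The following TypeScript sketch is illustrative only and is not part of the generated reference: the dataset, connection ID, and bucket path are hypothetical placeholders.

    import * as gcp from "@pulumi/gcp";

    const dataset = new gcp.bigquery.Dataset("dataset", {
        datasetId: "example_dataset", // hypothetical dataset
        location: "US",
    });

    const externalTable = new gcp.bigquery.Table("external", {
        datasetId: dataset.datasetId,
        tableId: "external_with_connection",
        deletionProtection: false, // allow `pulumi destroy` to remove the table
        // Because connectionId is set below, the schema must be given here,
        // not under externalDataConfiguration.schema.
        schema: JSON.stringify([
            { name: "id", type: "INTEGER", mode: "NULLABLE" },
            { name: "payload", type: "STRING", mode: "NULLABLE" },
        ]),
        externalDataConfiguration: {
            autodetect: false,
            sourceFormat: "NEWLINE_DELIMITED_JSON",
            connectionId: "my-project.us.my-connection", // hypothetical connection
            sourceUris: ["gs://my-bucket/path_to_table/*.json"],
        },
    });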

    Supporting Types

    TableBiglakeConfiguration, TableBiglakeConfigurationArgs

    ConnectionId string
    The connection specifying the credentials to be used to read and write to external storage, such as Cloud Storage. The connection_id can have the form "<project_id>.<location_id>.<connection_id>" or "projects/<project_id>/locations/<location_id>/connections/<connection_id>".
    FileFormat string
    The file format the table data is stored in.
    StorageUri string
    The fully qualified location prefix of the external folder where table data is stored. The '*' wildcard character is not allowed. The URI should be in the format "gs://bucket/path_to_table/"
    TableFormat string
    The table format the metadata only snapshots are stored in.
    ConnectionId string
    The connection specifying the credentials to be used to read and write to external storage, such as Cloud Storage. The connection_id can have the form "<project_id>.<location_id>.<connection_id>" or "projects/<project_id>/locations/<location_id>/connections/<connection_id>".
    FileFormat string
    The file format the table data is stored in.
    StorageUri string
    The fully qualified location prefix of the external folder where table data is stored. The '*' wildcard character is not allowed. The URI should be in the format "gs://bucket/path_to_table/"
    TableFormat string
    The table format the metadata only snapshots are stored in.
    connectionId String
    The connection specifying the credentials to be used to read and write to external storage, such as Cloud Storage. The connection_id can have the form "<project_id>.<location_id>.<connection_id>" or "projects/<project_id>/locations/<location_id>/connections/<connection_id>".
    fileFormat String
    The file format the table data is stored in.
    storageUri String
    The fully qualified location prefix of the external folder where table data is stored. The '*' wildcard character is not allowed. The URI should be in the format "gs://bucket/path_to_table/"
    tableFormat String
    The table format the metadata only snapshots are stored in.
    connectionId string
    The connection specifying the credentials to be used to read and write to external storage, such as Cloud Storage. The connection_id can have the form "<project_id>.<location_id>.<connection_id>" or "projects/<project_id>/locations/<location_id>/connections/<connection_id>".
    fileFormat string
    The file format the table data is stored in.
    storageUri string
    The fully qualified location prefix of the external folder where table data is stored. The '*' wildcard character is not allowed. The URI should be in the format "gs://bucket/path_to_table/"
    tableFormat string
    The table format the metadata only snapshots are stored in.
    connection_id str
    The connection specifying the credentials to be used to read and write to external storage, such as Cloud Storage. The connection_id can have the form "<project_id>.<location_id>.<connection_id>" or "projects/<project_id>/locations/<location_id>/connections/<connection_id>".
    file_format str
    The file format the table data is stored in.
    storage_uri str
    The fully qualified location prefix of the external folder where table data is stored. The '*' wildcard character is not allowed. The URI should be in the format "gs://bucket/path_to_table/"
    table_format str
    The table format the metadata only snapshots are stored in.
    connectionId String
    The connection specifying the credentials to be used to read and write to external storage, such as Cloud Storage. The connection_id can have the form "<project_id>.<location_id>.<connection_id>" or "projects/<project_id>/locations/<location_id>/connections/<connection_id>".
    fileFormat String
    The file format the table data is stored in.
    storageUri String
    The fully qualified location prefix of the external folder where table data is stored. The '*' wildcard character is not allowed. The URI should be in the format "gs://bucket/path_to_table/"
    tableFormat String
    The table format the metadata only snapshots are stored in.
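
    As a hedged illustration of the fields above, the following TypeScript sketch creates a BigLake managed table; the connection, bucket, and dataset names are hypothetical placeholders and the chosen formats are only examples.

    import * as gcp from "@pulumi/gcp";

    const biglakeTable = new gcp.bigquery.Table("biglake", {
        datasetId: "example_dataset", // hypothetical existing dataset
        tableId: "biglake_managed",
        deletionProtection: false,
        schema: JSON.stringify([
            { name: "id", type: "INTEGER", mode: "NULLABLE" },
        ]),
        biglakeConfiguration: {
            connectionId: "projects/my-project/locations/us/connections/my-connection",
            storageUri: "gs://my-bucket/biglake_managed/", // no '*' wildcard allowed
            fileFormat: "PARQUET",
            tableFormat: "ICEBERG",
        },
    });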

    TableEncryptionConfiguration, TableEncryptionConfigurationArgs

    KmsKeyName string
    The self link or full name of a key which should be used to encrypt this table. Note that the default bigquery service account will need to have encrypt/decrypt permissions on this key - you may want to see the gcp.bigquery.getDefaultServiceAccount datasource and the gcp.kms.CryptoKeyIAMBinding resource.
    KmsKeyVersion string
    The self link or full name of the kms key version used to encrypt this table.
    KmsKeyName string
    The self link or full name of a key which should be used to encrypt this table. Note that the default bigquery service account will need to have encrypt/decrypt permissions on this key - you may want to see the gcp.bigquery.getDefaultServiceAccount datasource and the gcp.kms.CryptoKeyIAMBinding resource.
    KmsKeyVersion string
    The self link or full name of the kms key version used to encrypt this table.
    kmsKeyName String
    The self link or full name of a key which should be used to encrypt this table. Note that the default bigquery service account will need to have encrypt/decrypt permissions on this key - you may want to see the gcp.bigquery.getDefaultServiceAccount datasource and the gcp.kms.CryptoKeyIAMBinding resource.
    kmsKeyVersion String
    The self link or full name of the kms key version used to encrypt this table.
    kmsKeyName string
    The self link or full name of a key which should be used to encrypt this table. Note that the default bigquery service account will need to have encrypt/decrypt permissions on this key - you may want to see the gcp.bigquery.getDefaultServiceAccount datasource and the gcp.kms.CryptoKeyIAMBinding resource.
    kmsKeyVersion string
    The self link or full name of the kms key version used to encrypt this table.
    kms_key_name str
    The self link or full name of a key which should be used to encrypt this table. Note that the default bigquery service account will need to have encrypt/decrypt permissions on this key - you may want to see the gcp.bigquery.getDefaultServiceAccount datasource and the gcp.kms.CryptoKeyIAMBinding resource.
    kms_key_version str
    The self link or full name of the kms key version used to encrypt this table.
    kmsKeyName String
    The self link or full name of a key which should be used to encrypt this table. Note that the default bigquery service account will need to have encrypt/decrypt permissions on this key - you may want to see the gcp.bigquery.getDefaultServiceAccount datasource and the gcp.kms.CryptoKeyIAMBinding resource.
    kmsKeyVersion String
    The self link or full name of the kms key version used to encrypt this table.
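
    The description of kmsKeyName above points at gcp.bigquery.getDefaultServiceAccount and gcp.kms.CryptoKeyIAMBinding; a minimal TypeScript sketch combining them might look as follows. The project, key ring, key, and dataset names are hypothetical placeholders.

    import * as gcp from "@pulumi/gcp";

    const kmsKeyName =
        "projects/my-project/locations/us/keyRings/my-ring/cryptoKeys/my-key"; // hypothetical key

    // Grant the default BigQuery service account permission to use the key.
    const bqServiceAccount = gcp.bigquery.getDefaultServiceAccount({});
    const keyAccess = new gcp.kms.CryptoKeyIAMBinding("bq-key-access", {
        cryptoKeyId: kmsKeyName,
        role: "roles/cloudkms.cryptoKeyEncrypterDecrypter",
        members: [bqServiceAccount.then(sa => `serviceAccount:${sa.email}`)],
    });

    const encryptedTable = new gcp.bigquery.Table("encrypted", {
        datasetId: "example_dataset", // hypothetical existing dataset
        tableId: "encrypted_table",
        deletionProtection: false,
        encryptionConfiguration: {
            kmsKeyName: kmsKeyName,
        },
    }, { dependsOn: [keyAccess] });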

    TableExternalDataConfiguration, TableExternalDataConfigurationArgs

    Autodetect bool
    Let BigQuery try to autodetect the schema and format of the table.
    SourceUris List<string>
    A list of the fully-qualified URIs that point to your data in Google Cloud.
    AvroOptions TableExternalDataConfigurationAvroOptions
    Additional options if source_format is set to "AVRO". Structure is documented below.
    BigtableOptions TableExternalDataConfigurationBigtableOptions
    Additional properties to set if source_format is set to "BIGTABLE". Structure is documented below.
    Compression string
    The compression type of the data source. Valid values are "NONE" or "GZIP".
    ConnectionId string

    The connection specifying the credentials to be used to read external storage, such as Azure Blob, Cloud Storage, or S3. The connection_id can have the form {{project}}.{{location}}.{{connection_id}} or projects/{{project}}/locations/{{location}}/connections/{{connection_id}}.

    ~>NOTE: If you set external_data_configuration.connection_id, the table schema must be specified using the top-level schema field documented above.

    CsvOptions TableExternalDataConfigurationCsvOptions
    Additional properties to set if source_format is set to "CSV". Structure is documented below.
    FileSetSpecType string
    Specifies how source URIs are interpreted for constructing the file set to load. By default source URIs are expanded against the underlying storage. Other options include specifying manifest files. Only applicable to object storage systems. Docs
    GoogleSheetsOptions TableExternalDataConfigurationGoogleSheetsOptions
    Additional options if source_format is set to "GOOGLE_SHEETS". Structure is documented below.
    HivePartitioningOptions TableExternalDataConfigurationHivePartitioningOptions
    When set, configures hive partitioning support. Not all storage formats support hive partitioning -- requesting hive partitioning on an unsupported format will lead to an error, as will providing an invalid specification. Structure is documented below.
    IgnoreUnknownValues bool
    Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false.
    JsonExtension string
    Used to indicate that a JSON variant, rather than normal JSON, is being used as the sourceFormat. This should only be used in combination with the JSON source format. Valid values are: GEOJSON.
    JsonOptions TableExternalDataConfigurationJsonOptions
    Additional properties to set if source_format is set to "JSON". Structure is documented below.
    MaxBadRecords int
    The maximum number of bad records that BigQuery can ignore when reading data.
    MetadataCacheMode string
    Metadata Cache Mode for the table. Set this to enable caching of metadata from external data source. Valid values are AUTOMATIC and MANUAL.
    ObjectMetadata string
    Object Metadata is used to create Object Tables. Object Tables contain a listing of objects (with their metadata) found at the sourceUris. If object_metadata is set, source_format should be omitted.
    ParquetOptions TableExternalDataConfigurationParquetOptions
    Additional properties to set if source_format is set to "PARQUET". Structure is documented below.
    ReferenceFileSchemaUri string
    When creating an external table, the user can provide a reference file with the table schema. This is enabled for the following formats: AVRO, PARQUET, ORC.
    Schema string

    A JSON schema for the external table. Schema is required for CSV and JSON formats if autodetect is not on. Schema is disallowed for Google Cloud Bigtable, Cloud Datastore backups, Avro, Iceberg, ORC, and Parquet formats. ~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. Furthermore, drift for this field cannot be detected because BigQuery only uses this schema to compute the effective schema for the table; therefore, any changes to the configured value will force the table to be recreated. This schema is effectively only applied when creating a table from an external data source; after creation, the computed schema will be stored in google_bigquery_table.schema

    ~>NOTE: If you set external_data_configuration.connection_id, the table schema must be specified using the top-level schema field documented above.

    SourceFormat string
    The data format. Please see sourceFormat under ExternalDataConfiguration in BigQuery's public API documentation for supported formats. To use "GOOGLE_SHEETS", the scopes must include "https://www.googleapis.com/auth/drive.readonly".
    Autodetect bool
    Let BigQuery try to autodetect the schema and format of the table.
    SourceUris []string
    A list of the fully-qualified URIs that point to your data in Google Cloud.
    AvroOptions TableExternalDataConfigurationAvroOptions
    Additional options if source_format is set to "AVRO". Structure is documented below.
    BigtableOptions TableExternalDataConfigurationBigtableOptions
    Additional properties to set if source_format is set to "BIGTABLE". Structure is documented below.
    Compression string
    The compression type of the data source. Valid values are "NONE" or "GZIP".
    ConnectionId string

    The connection specifying the credentials to be used to read external storage, such as Azure Blob, Cloud Storage, or S3. The connection_id can have the form {{project}}.{{location}}.{{connection_id}} or projects/{{project}}/locations/{{location}}/connections/{{connection_id}}.

    ~>NOTE: If you set external_data_configuration.connection_id, the table schema must be specified using the top-level schema field documented above.

    CsvOptions TableExternalDataConfigurationCsvOptions
    Additional properties to set if source_format is set to "CSV". Structure is documented below.
    FileSetSpecType string
    Specifies how source URIs are interpreted for constructing the file set to load. By default source URIs are expanded against the underlying storage. Other options include specifying manifest files. Only applicable to object storage systems. Docs
    GoogleSheetsOptions TableExternalDataConfigurationGoogleSheetsOptions
    Additional options if source_format is set to "GOOGLE_SHEETS". Structure is documented below.
    HivePartitioningOptions TableExternalDataConfigurationHivePartitioningOptions
    When set, configures hive partitioning support. Not all storage formats support hive partitioning -- requesting hive partitioning on an unsupported format will lead to an error, as will providing an invalid specification. Structure is documented below.
    IgnoreUnknownValues bool
    Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false.
    JsonExtension string
    Used to indicate that a JSON variant, rather than normal JSON, is being used as the sourceFormat. This should only be used in combination with the JSON source format. Valid values are: GEOJSON.
    JsonOptions TableExternalDataConfigurationJsonOptions
    Additional properties to set if source_format is set to "JSON". Structure is documented below.
    MaxBadRecords int
    The maximum number of bad records that BigQuery can ignore when reading data.
    MetadataCacheMode string
    Metadata Cache Mode for the table. Set this to enable caching of metadata from external data source. Valid values are AUTOMATIC and MANUAL.
    ObjectMetadata string
    Object Metadata is used to create Object Tables. Object Tables contain a listing of objects (with their metadata) found at the sourceUris. If object_metadata is set, source_format should be omitted.
    ParquetOptions TableExternalDataConfigurationParquetOptions
    Additional properties to set if source_format is set to "PARQUET". Structure is documented below.
    ReferenceFileSchemaUri string
    When creating an external table, the user can provide a reference file with the table schema. This is enabled for the following formats: AVRO, PARQUET, ORC.
    Schema string

    A JSON schema for the external table. Schema is required for CSV and JSON formats if autodetect is not on. Schema is disallowed for Google Cloud Bigtable, Cloud Datastore backups, Avro, Iceberg, ORC, and Parquet formats. ~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. Furthermore, drift for this field cannot be detected because BigQuery only uses this schema to compute the effective schema for the table; therefore, any changes to the configured value will force the table to be recreated. This schema is effectively only applied when creating a table from an external data source; after creation, the computed schema will be stored in google_bigquery_table.schema

    ~>NOTE: If you set external_data_configuration.connection_id, the table schema must be specified using the top-level schema field documented above.

    SourceFormat string
    The data format. Please see sourceFormat under ExternalDataConfiguration in BigQuery's public API documentation for supported formats. To use "GOOGLE_SHEETS", the scopes must include "https://www.googleapis.com/auth/drive.readonly".
    autodetect Boolean
    Let BigQuery try to autodetect the schema and format of the table.
    sourceUris List<String>
    A list of the fully-qualified URIs that point to your data in Google Cloud.
    avroOptions TableExternalDataConfigurationAvroOptions
    Additional options if source_format is set to "AVRO". Structure is documented below.
    bigtableOptions TableExternalDataConfigurationBigtableOptions
    Additional properties to set if source_format is set to "BIGTABLE". Structure is documented below.
    compression String
    The compression type of the data source. Valid values are "NONE" or "GZIP".
    connectionId String

    The connection specifying the credentials to be used to read external storage, such as Azure Blob, Cloud Storage, or S3. The connection_id can have the form {{project}}.{{location}}.{{connection_id}} or projects/{{project}}/locations/{{location}}/connections/{{connection_id}}.

    ~>NOTE: If you set external_data_configuration.connection_id, the table schema must be specified using the top-level schema field documented above.

    csvOptions TableExternalDataConfigurationCsvOptions
    Additional properties to set if source_format is set to "CSV". Structure is documented below.
    fileSetSpecType String
    Specifies how source URIs are interpreted for constructing the file set to load. By default source URIs are expanded against the underlying storage. Other options include specifying manifest files. Only applicable to object storage systems. Docs
    googleSheetsOptions TableExternalDataConfigurationGoogleSheetsOptions
    Additional options if source_format is set to "GOOGLE_SHEETS". Structure is documented below.
    hivePartitioningOptions TableExternalDataConfigurationHivePartitioningOptions
    When set, configures hive partitioning support. Not all storage formats support hive partitioning -- requesting hive partitioning on an unsupported format will lead to an error, as will providing an invalid specification. Structure is documented below.
    ignoreUnknownValues Boolean
    Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false.
    jsonExtension String
    Used to indicate that a JSON variant, rather than normal JSON, is being used as the sourceFormat. This should only be used in combination with the JSON source format. Valid values are: GEOJSON.
    jsonOptions TableExternalDataConfigurationJsonOptions
    Additional properties to set if source_format is set to "JSON". Structure is documented below.
    maxBadRecords Integer
    The maximum number of bad records that BigQuery can ignore when reading data.
    metadataCacheMode String
    Metadata Cache Mode for the table. Set this to enable caching of metadata from external data source. Valid values are AUTOMATIC and MANUAL.
    objectMetadata String
    Object Metadata is used to create Object Tables. Object Tables contain a listing of objects (with their metadata) found at the sourceUris. If object_metadata is set, source_format should be omitted.
    parquetOptions TableExternalDataConfigurationParquetOptions
    Additional properties to set if source_format is set to "PARQUET". Structure is documented below.
    referenceFileSchemaUri String
    When creating an external table, the user can provide a reference file with the table schema. This is enabled for the following formats: AVRO, PARQUET, ORC.
    schema String

    A JSON schema for the external table. Schema is required for CSV and JSON formats if autodetect is not on. Schema is disallowed for Google Cloud Bigtable, Cloud Datastore backups, Avro, Iceberg, ORC, and Parquet formats. ~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. Furthermore, drift for this field cannot be detected because BigQuery only uses this schema to compute the effective schema for the table; therefore, any changes to the configured value will force the table to be recreated. This schema is effectively only applied when creating a table from an external data source; after creation, the computed schema will be stored in google_bigquery_table.schema

    ~>NOTE: If you set external_data_configuration.connection_id, the table schema must be specified using the top-level schema field documented above.

    sourceFormat String
    The data format. Please see sourceFormat under ExternalDataConfiguration in BigQuery's public API documentation for supported formats. To use "GOOGLE_SHEETS", the scopes must include "https://www.googleapis.com/auth/drive.readonly".
    autodetect boolean
    Let BigQuery try to autodetect the schema and format of the table.
    sourceUris string[]
    A list of the fully-qualified URIs that point to your data in Google Cloud.
    avroOptions TableExternalDataConfigurationAvroOptions
    Additional options if source_format is set to "AVRO". Structure is documented below.
    bigtableOptions TableExternalDataConfigurationBigtableOptions
    Additional properties to set if source_format is set to "BIGTABLE". Structure is documented below.
    compression string
    The compression type of the data source. Valid values are "NONE" or "GZIP".
    connectionId string

    The connection specifying the credentials to be used to read external storage, such as Azure Blob, Cloud Storage, or S3. The connection_id can have the form {{project}}.{{location}}.{{connection_id}} or projects/{{project}}/locations/{{location}}/connections/{{connection_id}}.

    ~>NOTE: If you set external_data_configuration.connection_id, the table schema must be specified using the top-level schema field documented above.

    csvOptions TableExternalDataConfigurationCsvOptions
    Additional properties to set if source_format is set to "CSV". Structure is documented below.
    fileSetSpecType string
    Specifies how source URIs are interpreted for constructing the file set to load. By default source URIs are expanded against the underlying storage. Other options include specifying manifest files. Only applicable to object storage systems. Docs
    googleSheetsOptions TableExternalDataConfigurationGoogleSheetsOptions
    Additional options if source_format is set to "GOOGLE_SHEETS". Structure is documented below.
    hivePartitioningOptions TableExternalDataConfigurationHivePartitioningOptions
    When set, configures hive partitioning support. Not all storage formats support hive partitioning -- requesting hive partitioning on an unsupported format will lead to an error, as will providing an invalid specification. Structure is documented below.
    ignoreUnknownValues boolean
    Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false.
    jsonExtension string
    Used to indicate that a JSON variant, rather than normal JSON, is being used as the sourceFormat. This should only be used in combination with the JSON source format. Valid values are: GEOJSON.
    jsonOptions TableExternalDataConfigurationJsonOptions
    Additional properties to set if source_format is set to "JSON". Structure is documented below.
    maxBadRecords number
    The maximum number of bad records that BigQuery can ignore when reading data.
    metadataCacheMode string
    Metadata Cache Mode for the table. Set this to enable caching of metadata from external data source. Valid values are AUTOMATIC and MANUAL.
    objectMetadata string
    Object Metadata is used to create Object Tables. Object Tables contain a listing of objects (with their metadata) found at the sourceUris. If object_metadata is set, source_format should be omitted.
    parquetOptions TableExternalDataConfigurationParquetOptions
    Additional properties to set if source_format is set to "PARQUET". Structure is documented below.
    referenceFileSchemaUri string
    When creating an external table, the user can provide a reference file with the table schema. This is enabled for the following formats: AVRO, PARQUET, ORC.
    schema string

    A JSON schema for the external table. Schema is required for CSV and JSON formats if autodetect is not on. Schema is disallowed for Google Cloud Bigtable, Cloud Datastore backups, Avro, Iceberg, ORC, and Parquet formats. ~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. Furthermore, drift for this field cannot be detected because BigQuery only uses this schema to compute the effective schema for the table; therefore, any changes to the configured value will force the table to be recreated. This schema is effectively only applied when creating a table from an external data source; after creation, the computed schema will be stored in google_bigquery_table.schema

    ~>NOTE: If you set external_data_configuration.connection_id, the table schema must be specified using the top-level schema field documented above.

    sourceFormat string
    The data format. Please see sourceFormat under ExternalDataConfiguration in BigQuery's public API documentation for supported formats. To use "GOOGLE_SHEETS", the scopes must include "https://www.googleapis.com/auth/drive.readonly".
    autodetect bool
    Let BigQuery try to autodetect the schema and format of the table.
    source_uris Sequence[str]
    A list of the fully-qualified URIs that point to your data in Google Cloud.
    avro_options TableExternalDataConfigurationAvroOptions
    Additional options if source_format is set to "AVRO". Structure is documented below.
    bigtable_options TableExternalDataConfigurationBigtableOptions
    Additional properties to set if source_format is set to "BIGTABLE". Structure is documented below.
    compression str
    The compression type of the data source. Valid values are "NONE" or "GZIP".
    connection_id str

    The connection specifying the credentials to be used to read external storage, such as Azure Blob, Cloud Storage, or S3. The connection_id can have the form {{project}}.{{location}}.{{connection_id}} or projects/{{project}}/locations/{{location}}/connections/{{connection_id}}.

    ~>NOTE: If you set external_data_configuration.connection_id, the table schema must be specified using the top-level schema field documented above.

    csv_options TableExternalDataConfigurationCsvOptions
    Additional properties to set if source_format is set to "CSV". Structure is documented below.
    file_set_spec_type str
    Specifies how source URIs are interpreted for constructing the file set to load. By default source URIs are expanded against the underlying storage. Other options include specifying manifest files. Only applicable to object storage systems. Docs
    google_sheets_options TableExternalDataConfigurationGoogleSheetsOptions
    Additional options if source_format is set to "GOOGLE_SHEETS". Structure is documented below.
    hive_partitioning_options TableExternalDataConfigurationHivePartitioningOptions
    When set, configures hive partitioning support. Not all storage formats support hive partitioning -- requesting hive partitioning on an unsupported format will lead to an error, as will providing an invalid specification. Structure is documented below.
    ignore_unknown_values bool
    Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false.
    json_extension str
    Used to indicate that a JSON variant, rather than normal JSON, is being used as the sourceFormat. This should only be used in combination with the JSON source format. Valid values are: GEOJSON.
    json_options TableExternalDataConfigurationJsonOptions
    Additional properties to set if source_format is set to "JSON". Structure is documented below.
    max_bad_records int
    The maximum number of bad records that BigQuery can ignore when reading data.
    metadata_cache_mode str
    Metadata Cache Mode for the table. Set this to enable caching of metadata from external data source. Valid values are AUTOMATIC and MANUAL.
    object_metadata str
    Object Metadata is used to create Object Tables. Object Tables contain a listing of objects (with their metadata) found at the sourceUris. If object_metadata is set, source_format should be omitted.
    parquet_options TableExternalDataConfigurationParquetOptions
    Additional properties to set if source_format is set to "PARQUET". Structure is documented below.
    reference_file_schema_uri str
    When creating an external table, the user can provide a reference file with the table schema. This is enabled for the following formats: AVRO, PARQUET, ORC.
    schema str

    A JSON schema for the external table. Schema is required for CSV and JSON formats if autodetect is not on. Schema is disallowed for Google Cloud Bigtable, Cloud Datastore backups, Avro, Iceberg, ORC, and Parquet formats. ~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. Furthermore, drift for this field cannot be detected because BigQuery only uses this schema to compute the effective schema for the table; therefore, any changes to the configured value will force the table to be recreated. This schema is effectively only applied when creating a table from an external data source; after creation, the computed schema will be stored in google_bigquery_table.schema

    ~>NOTE: If you set external_data_configuration.connection_id, the table schema must be specified using the top-level schema field documented above.

    source_format str
    The data format. Please see sourceFormat under ExternalDataConfiguration in BigQuery's public API documentation for supported formats. To use "GOOGLE_SHEETS", the scopes must include "https://www.googleapis.com/auth/drive.readonly".
    autodetect Boolean
    Let BigQuery try to autodetect the schema and format of the table.
    sourceUris List<String>
    A list of the fully-qualified URIs that point to your data in Google Cloud.
    avroOptions Property Map
    Additional options if source_format is set to "AVRO". Structure is documented below.
    bigtableOptions Property Map
    Additional properties to set if source_format is set to "BIGTABLE". Structure is documented below.
    compression String
    The compression type of the data source. Valid values are "NONE" or "GZIP".
    connectionId String

    The connection specifying the credentials to be used to read external storage, such as Azure Blob, Cloud Storage, or S3. The connection_id can have the form {{project}}.{{location}}.{{connection_id}} or projects/{{project}}/locations/{{location}}/connections/{{connection_id}}.

    ~>NOTE: If you set external_data_configuration.connection_id, the table schema must be specified using the top-level schema field documented above.

    csvOptions Property Map
    Additional properties to set if source_format is set to "CSV". Structure is documented below.
    fileSetSpecType String
    Specifies how source URIs are interpreted for constructing the file set to load. By default source URIs are expanded against the underlying storage. Other options include specifying manifest files. Only applicable to object storage systems. Docs
    googleSheetsOptions Property Map
    Additional options if source_format is set to "GOOGLE_SHEETS". Structure is documented below.
    hivePartitioningOptions Property Map
    When set, configures hive partitioning support. Not all storage formats support hive partitioning -- requesting hive partitioning on an unsupported format will lead to an error, as will providing an invalid specification. Structure is documented below.
    ignoreUnknownValues Boolean
    Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false.
    jsonExtension String
    Used to indicate that a JSON variant, rather than normal JSON, is being used as the sourceFormat. This should only be used in combination with the JSON source format. Valid values are: GEOJSON.
    jsonOptions Property Map
    Additional properties to set if source_format is set to "JSON". Structure is documented below.
    maxBadRecords Number
    The maximum number of bad records that BigQuery can ignore when reading data.
    metadataCacheMode String
    Metadata Cache Mode for the table. Set this to enable caching of metadata from external data source. Valid values are AUTOMATIC and MANUAL.
    objectMetadata String
    Object Metadata is used to create Object Tables. Object Tables contain a listing of objects (with their metadata) found at the sourceUris. If object_metadata is set, source_format should be omitted.
    parquetOptions Property Map
    Additional properties to set if source_format is set to "PARQUET". Structure is documented below.
    referenceFileSchemaUri String
    When creating an external table, the user can provide a reference file with the table schema. This is enabled for the following formats: AVRO, PARQUET, ORC.
    schema String

    A JSON schema for the external table. Schema is required for CSV and JSON formats if autodetect is not on. Schema is disallowed for Google Cloud Bigtable, Cloud Datastore backups, Avro, Iceberg, ORC, and Parquet formats. ~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. Furthermore, drift for this field cannot be detected because BigQuery only uses this schema to compute the effective schema for the table; therefore, any changes to the configured value will force the table to be recreated. This schema is effectively only applied when creating a table from an external data source; after creation, the computed schema will be stored in google_bigquery_table.schema

    ~>NOTE: If you set external_data_configuration.connection_id, the table schema must be specified using the top-level schema field documented above.

    sourceFormat String
    The data format. Please see sourceFormat under ExternalDataConfiguration in BigQuery's public API documentation for supported formats. To use "GOOGLE_SHEETS", the scopes must include "https://www.googleapis.com/auth/drive.readonly".
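
    For reference, a hive-partitioned CSV external table using several of the options above could be sketched like this; the bucket layout and dataset are hypothetical placeholders.

    import * as gcp from "@pulumi/gcp";

    const csvExternal = new gcp.bigquery.Table("csv-external", {
        datasetId: "example_dataset", // hypothetical existing dataset
        tableId: "events_external",
        deletionProtection: false,
        externalDataConfiguration: {
            autodetect: true,
            sourceFormat: "CSV",
            compression: "GZIP",
            ignoreUnknownValues: true,
            maxBadRecords: 10,
            csvOptions: {
                quote: "\"",
                skipLeadingRows: 1,
            },
            hivePartitioningOptions: {
                mode: "AUTO",
                sourceUriPrefix: "gs://my-bucket/events/", // e.g. gs://my-bucket/events/dt=2024-01-01/
            },
            sourceUris: ["gs://my-bucket/events/*"],
        },
    });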

    TableExternalDataConfigurationAvroOptions, TableExternalDataConfigurationAvroOptionsArgs

    UseAvroLogicalTypes bool
    If set to true, logical types are interpreted as the corresponding BigQuery data type (for example, TIMESTAMP) instead of the raw type (for example, INTEGER).
    UseAvroLogicalTypes bool
    If set to true, logical types are interpreted as the corresponding BigQuery data type (for example, TIMESTAMP) instead of the raw type (for example, INTEGER).
    useAvroLogicalTypes Boolean
    If set to true, logical types are interpreted as the corresponding BigQuery data type (for example, TIMESTAMP) instead of the raw type (for example, INTEGER).
    useAvroLogicalTypes boolean
    If set to true, logical types are interpreted as the corresponding BigQuery data type (for example, TIMESTAMP) instead of the raw type (for example, INTEGER).
    use_avro_logical_types bool
    If set to true, logical types are interpreted as the corresponding BigQuery data type (for example, TIMESTAMP) instead of the raw type (for example, INTEGER).
    useAvroLogicalTypes Boolean
    If set to true, logical types are interpreted as the corresponding BigQuery data type (for example, TIMESTAMP) instead of the raw type (for example, INTEGER).
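
    A short, hedged sketch of an Avro external table with this option enabled (bucket path and dataset are hypothetical placeholders):

    import * as gcp from "@pulumi/gcp";

    const avroExternal = new gcp.bigquery.Table("avro-external", {
        datasetId: "example_dataset", // hypothetical existing dataset
        tableId: "avro_external",
        deletionProtection: false,
        externalDataConfiguration: {
            autodetect: true,
            sourceFormat: "AVRO",
            avroOptions: {
                useAvroLogicalTypes: true, // e.g. timestamp-micros becomes TIMESTAMP
            },
            sourceUris: ["gs://my-bucket/avro/*.avro"],
        },
    });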

    TableExternalDataConfigurationBigtableOptions, TableExternalDataConfigurationBigtableOptionsArgs

    ColumnFamilies List<TableExternalDataConfigurationBigtableOptionsColumnFamily>
    A list of column families to expose in the table schema along with their types. This list restricts the column families that can be referenced in queries and specifies their value types. You can use this list to do type conversions - see the 'type' field for more details. If you leave this list empty, all column families are present in the table schema and their values are read as BYTES. During a query only the column families referenced in that query are read from Bigtable. Structure is documented below.
    IgnoreUnspecifiedColumnFamilies bool
    If this field is true, column families that are not specified in the columnFamilies list are not exposed in the table schema. Otherwise, they are read with BYTES type values. The default value is false.
    OutputColumnFamiliesAsJson bool
    If this field is true, each column family will be read as a single JSON column. Otherwise, they are read as a repeated cell structure containing timestamp/value tuples. The default value is false.
    ReadRowkeyAsString bool
    If this field is true, the rowkey column families will be read and converted to string. Otherwise, they are read with BYTES type values and users need to manually cast them with CAST if necessary. The default value is false.
    ColumnFamilies []TableExternalDataConfigurationBigtableOptionsColumnFamily
    A list of column families to expose in the table schema along with their types. This list restricts the column families that can be referenced in queries and specifies their value types. You can use this list to do type conversions - see the 'type' field for more details. If you leave this list empty, all column families are present in the table schema and their values are read as BYTES. During a query only the column families referenced in that query are read from Bigtable. Structure is documented below.
    IgnoreUnspecifiedColumnFamilies bool
    If this field is true, column families that are not specified in the columnFamilies list are not exposed in the table schema. Otherwise, they are read with BYTES type values. The default value is false.
    OutputColumnFamiliesAsJson bool
    If this field is true, each column family will be read as a single JSON column. Otherwise, they are read as a repeated cell structure containing timestamp/value tuples. The default value is false.
    ReadRowkeyAsString bool
    If this field is true, the rowkey column families will be read and converted to string. Otherwise, they are read with BYTES type values and users need to manually cast them with CAST if necessary. The default value is false.
    columnFamilies List<TableExternalDataConfigurationBigtableOptionsColumnFamily>
    A list of column families to expose in the table schema along with their types. This list restricts the column families that can be referenced in queries and specifies their value types. You can use this list to do type conversions - see the 'type' field for more details. If you leave this list empty, all column families are present in the table schema and their values are read as BYTES. During a query only the column families referenced in that query are read from Bigtable. Structure is documented below.
    ignoreUnspecifiedColumnFamilies Boolean
    If this field is true, column families that are not specified in the columnFamilies list are not exposed in the table schema. Otherwise, they are read with BYTES type values. The default value is false.
    outputColumnFamiliesAsJson Boolean
    If this field is true, each column family will be read as a single JSON column. Otherwise, they are read as a repeated cell structure containing timestamp/value tuples. The default value is false.
    readRowkeyAsString Boolean
    If this field is true, the rowkey column families will be read and converted to string. Otherwise, they are read with BYTES type values and users need to manually cast them with CAST if necessary. The default value is false.
    columnFamilies TableExternalDataConfigurationBigtableOptionsColumnFamily[]
    A list of column families to expose in the table schema along with their types. This list restricts the column families that can be referenced in queries and specifies their value types. You can use this list to do type conversions - see the 'type' field for more details. If you leave this list empty, all column families are present in the table schema and their values are read as BYTES. During a query only the column families referenced in that query are read from Bigtable. Structure is documented below.
    ignoreUnspecifiedColumnFamilies boolean
    If this field is true, column families that are not specified in the columnFamilies list are not exposed in the table schema. Otherwise, they are read with BYTES type values. The default value is false.
    outputColumnFamiliesAsJson boolean
    If this field is true, each column family will be read as a single JSON column. Otherwise, they are read as a repeated cell structure containing timestamp/value tuples. The default value is false.
    readRowkeyAsString boolean
    If this field is true, the rowkey column families will be read and converted to string. Otherwise, they are read with BYTES type values and users need to manually cast them with CAST if necessary. The default value is false.
    column_families Sequence[TableExternalDataConfigurationBigtableOptionsColumnFamily]
    A list of column families to expose in the table schema along with their types. This list restricts the column families that can be referenced in queries and specifies their value types. You can use this list to do type conversions - see the 'type' field for more details. If you leave this list empty, all column families are present in the table schema and their values are read as BYTES. During a query only the column families referenced in that query are read from Bigtable. Structure is documented below.
    ignore_unspecified_column_families bool
    If this field is true, column families that are not specified in the columnFamilies list are not exposed in the table schema. Otherwise, they are read with BYTES type values. The default value is false.
    output_column_families_as_json bool
    If this field is true, each column family will be read as a single JSON column. Otherwise, they are read as a repeated cell structure containing timestamp/value tuples. The default value is false.
    read_rowkey_as_string bool
    If this field is true, the rowkey column families will be read and converted to string. Otherwise, they are read with BYTES type values and users need to manually cast them with CAST if necessary. The default value is false.
    columnFamilies List<Property Map>
    A list of column families to expose in the table schema along with their types. This list restricts the column families that can be referenced in queries and specifies their value types. You can use this list to do type conversions - see the 'type' field for more details. If you leave this list empty, all column families are present in the table schema and their values are read as BYTES. During a query only the column families referenced in that query are read from Bigtable. Structure is documented below.
    ignoreUnspecifiedColumnFamilies Boolean
    If this field is true, column families that are not specified in the columnFamilies list are not exposed in the table schema. Otherwise, they are read with BYTES type values. The default value is false.
    outputColumnFamiliesAsJson Boolean
    If this field is true, each column family will be read as a single JSON column. Otherwise, they are read as a repeated cell structure containing timestamp/value tuples. The default value is false.
    readRowkeyAsString Boolean
    If this field is true, the rowkey column families will be read and converted to string. Otherwise, they are read with BYTES type values and users need to manually cast them with CAST if necessary. The default value is false.

    TableExternalDataConfigurationBigtableOptionsColumnFamily, TableExternalDataConfigurationBigtableOptionsColumnFamilyArgs

    Columns List<TableExternalDataConfigurationBigtableOptionsColumnFamilyColumn>
    A list of columns that should be exposed as individual fields as opposed to a list of (column name, value) pairs. All columns whose qualifier matches a qualifier in this list can be accessed as individual fields; other columns can be accessed as a list through the column field. Structure is documented below.
    Encoding string
    The encoding of the values when the type is not STRING. Acceptable encoding values are: TEXT - indicates values are alphanumeric text strings. BINARY - indicates values are encoded using HBase Bytes.toBytes family of functions. This can be overridden for a specific column by listing that column in 'columns' and specifying an encoding for it.
    FamilyId string
    Identifier of the column family.
    OnlyReadLatest bool
    If this is set, only the latest version of each value is exposed for all columns in this column family. This can be overridden for a specific column by listing that column in 'columns' and specifying a different setting for that column.
    Type string
    The type to convert the value in cells of this column family. The values are expected to be encoded using HBase Bytes.toBytes function when using the BINARY encoding value. Following BigQuery types are allowed (case-sensitive): "BYTES", "STRING", "INTEGER", "FLOAT", "BOOLEAN", "JSON". Default type is BYTES. This can be overridden for a specific column by listing that column in 'columns' and specifying a type for it.
    Columns []TableExternalDataConfigurationBigtableOptionsColumnFamilyColumn
    A list of columns that should be exposed as individual fields as opposed to a list of (column name, value) pairs. All columns whose qualifier matches a qualifier in this list can be accessed as individual fields; other columns can be accessed as a list through the column field. Structure is documented below.
    Encoding string
    The encoding of the values when the type is not STRING. Acceptable encoding values are: TEXT - indicates values are alphanumeric text strings. BINARY - indicates values are encoded using HBase Bytes.toBytes family of functions. This can be overridden for a specific column by listing that column in 'columns' and specifying an encoding for it.
    FamilyId string
    Identifier of the column family.
    OnlyReadLatest bool
    If this is set, only the latest version of each value is exposed for all columns in this column family. This can be overridden for a specific column by listing that column in 'columns' and specifying a different setting for that column.
    Type string
    The type to convert the value in cells of this column family. The values are expected to be encoded using HBase Bytes.toBytes function when using the BINARY encoding value. Following BigQuery types are allowed (case-sensitive): "BYTES", "STRING", "INTEGER", "FLOAT", "BOOLEAN", "JSON". Default type is BYTES. This can be overridden for a specific column by listing that column in 'columns' and specifying a type for it.
    columns List<TableExternalDataConfigurationBigtableOptionsColumnFamilyColumn>
    A list of columns that should be exposed as individual fields as opposed to a list of (column name, value) pairs. All columns whose qualifier matches a qualifier in this list can be accessed as individual fields; other columns can be accessed as a list through the column field. Structure is documented below.
    encoding String
    The encoding of the values when the type is not STRING. Acceptable encoding values are: TEXT - indicates values are alphanumeric text strings. BINARY - indicates values are encoded using HBase Bytes.toBytes family of functions. This can be overridden for a specific column by listing that column in 'columns' and specifying an encoding for it.
    familyId String
    Identifier of the column family.
    onlyReadLatest Boolean
    If this is set, only the latest version of each value is exposed for all columns in this column family. This can be overridden for a specific column by listing that column in 'columns' and specifying a different setting for that column.
    type String
    The type to convert the value in cells of this column family. The values are expected to be encoded using HBase Bytes.toBytes function when using the BINARY encoding value. Following BigQuery types are allowed (case-sensitive): "BYTES", "STRING", "INTEGER", "FLOAT", "BOOLEAN", "JSON". Default type is BYTES. This can be overridden for a specific column by listing that column in 'columns' and specifying a type for it.
    columns TableExternalDataConfigurationBigtableOptionsColumnFamilyColumn[]
    A list of columns that should be exposed as individual fields as opposed to a list of (column name, value) pairs. All columns whose qualifier matches a qualifier in this list can be accessed as individual fields; other columns can be accessed as a list through the column field. Structure is documented below.
    encoding string
    The encoding of the values when the type is not STRING. Acceptable encoding values are: TEXT - indicates values are alphanumeric text strings. BINARY - indicates values are encoded using HBase Bytes.toBytes family of functions. This can be overridden for a specific column by listing that column in 'columns' and specifying an encoding for it.
    familyId string
    Identifier of the column family.
    onlyReadLatest boolean
    If this is set, only the latest version of each value is exposed for all columns in this column family. This can be overridden for a specific column by listing that column in 'columns' and specifying a different setting for that column.
    type string
    The type to convert the value in cells of this column family. The values are expected to be encoded using HBase Bytes.toBytes function when using the BINARY encoding value. Following BigQuery types are allowed (case-sensitive): "BYTES", "STRING", "INTEGER", "FLOAT", "BOOLEAN", "JSON". Default type is BYTES. This can be overridden for a specific column by listing that column in 'columns' and specifying a type for it.
    columns Sequence[TableExternalDataConfigurationBigtableOptionsColumnFamilyColumn]
    A list of columns that should be exposed as individual fields as opposed to a list of (column name, value) pairs. All columns whose qualifier matches a qualifier in this list can be accessed as individual fields; other columns can be accessed as a list through the column field. Structure is documented below.
    encoding str
    The encoding of the values when the type is not STRING. Acceptable encoding values are: TEXT - indicates values are alphanumeric text strings. BINARY - indicates values are encoded using HBase Bytes.toBytes family of functions. This can be overridden for a specific column by listing that column in 'columns' and specifying an encoding for it.
    family_id str
    Identifier of the column family.
    only_read_latest bool
    If this is set, only the latest version of each value is exposed for all columns in this column family. This can be overridden for a specific column by listing that column in 'columns' and specifying a different setting for that column.
    type str
    The type to convert the value in cells of this column family. The values are expected to be encoded using HBase Bytes.toBytes function when using the BINARY encoding value. Following BigQuery types are allowed (case-sensitive): "BYTES", "STRING", "INTEGER", "FLOAT", "BOOLEAN", "JSON". Default type is BYTES. This can be overridden for a specific column by listing that column in 'columns' and specifying a type for it.
    columns List<Property Map>
    A list of columns that should be exposed as individual fields as opposed to a list of (column name, value) pairs. All columns whose qualifier matches a qualifier in this list can be accessed as individual fields; other columns can be accessed as a list through the column field. Structure is documented below.
    encoding String
    The encoding of the values when the type is not STRING. Acceptable encoding values are: TEXT - indicates values are alphanumeric text strings. BINARY - indicates values are encoded using HBase Bytes.toBytes family of functions. This can be overridden for a specific column by listing that column in 'columns' and specifying an encoding for it.
    familyId String
    Identifier of the column family.
    onlyReadLatest Boolean
    If this is set, only the latest version of each value is exposed for all columns in this column family. This can be overridden for a specific column by listing that column in 'columns' and specifying a different setting for that column.
    type String
    The type to convert the value in cells of this column family. The values are expected to be encoded using HBase Bytes.toBytes function when using the BINARY encoding value. Following BigQuery types are allowed (case-sensitive): "BYTES", "STRING", "INTEGER", "FLOAT", "BOOLEAN", "JSON". Default type is BYTES. This can be overridden for a specific column by listing that column in 'columns' and specifying a type for it.
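
    Putting the Bigtable option types above together, a hedged TypeScript sketch of an external table over Bigtable might look like this; the Bigtable URI, dataset, column family, and qualifier are hypothetical placeholders.

    import * as gcp from "@pulumi/gcp";

    const bigtableExternal = new gcp.bigquery.Table("bigtable-external", {
        datasetId: "example_dataset", // hypothetical existing dataset
        tableId: "bigtable_external",
        deletionProtection: false,
        externalDataConfiguration: {
            autodetect: false,
            sourceFormat: "BIGTABLE",
            sourceUris: [
                "https://googleapis.com/bigtable/projects/my-project/instances/my-instance/tables/my-table",
            ],
            bigtableOptions: {
                ignoreUnspecifiedColumnFamilies: true,
                readRowkeyAsString: true,
                columnFamilies: [{
                    familyId: "stats",
                    type: "STRING",
                    onlyReadLatest: true,
                    columns: [{
                        qualifierString: "views",
                        type: "INTEGER", // overrides the family-level STRING type for this column
                    }],
                }],
            },
        },
    });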

    TableExternalDataConfigurationBigtableOptionsColumnFamilyColumn, TableExternalDataConfigurationBigtableOptionsColumnFamilyColumnArgs

    Encoding string
    The encoding of the values when the type is not STRING. Acceptable encoding values are: TEXT - indicates values are alphanumeric text strings. BINARY - indicates values are encoded using HBase Bytes.toBytes family of functions. 'encoding' can also be set at the column family level. However, the setting at this level takes precedence if 'encoding' is set at both levels.
    FieldName string
    If the qualifier is not a valid BigQuery field identifier i.e. does not match [a-zA-Z][a-zA-Z0-9_]*, a valid identifier must be provided as the column field name and is used as field name in queries.
    OnlyReadLatest bool
    If this is set, only the latest version of the value in this column is exposed. 'onlyReadLatest' can also be set at the column family level. However, the setting at this level takes precedence if 'onlyReadLatest' is set at both levels.
    QualifierEncoded string
    Qualifier of the column. Columns in the parent column family that have this exact qualifier are exposed as a field. If the qualifier is a valid UTF-8 string, it can be specified in the qualifierString field. Otherwise, a base-64 encoded value must be set in qualifierEncoded. The column field name is the same as the column qualifier. However, if the qualifier is not a valid BigQuery field identifier, i.e. does not match [a-zA-Z][a-zA-Z0-9_]*, a valid identifier must be provided as fieldName.
    QualifierString string
    Qualifier string.
    Type string
    The type to convert the value in cells of this column to. The values are expected to be encoded using the HBase Bytes.toBytes function when using the BINARY encoding value. The following BigQuery types are allowed (case-sensitive): "BYTES", "STRING", "INTEGER", "FLOAT", "BOOLEAN", "JSON". The default type is "BYTES". 'type' can also be set at the column family level. However, the setting at this level takes precedence if 'type' is set at both levels.
    Encoding string
    The encoding of the values when the type is not STRING. Acceptable encoding values are: TEXT - indicates values are alphanumeric text strings. BINARY - indicates values are encoded using HBase Bytes.toBytes family of functions. 'encoding' can also be set at the column family level. However, the setting at this level takes precedence if 'encoding' is set at both levels.
    FieldName string
    If the qualifier is not a valid BigQuery field identifier i.e. does not match [a-zA-Z][a-zA-Z0-9_]*, a valid identifier must be provided as the column field name and is used as field name in queries.
    OnlyReadLatest bool
    If this is set, only the latest version of the value in this column is exposed. 'onlyReadLatest' can also be set at the column family level. However, the setting at this level takes precedence if 'onlyReadLatest' is set at both levels.
    QualifierEncoded string
    Qualifier of the column. Columns in the parent column family that have this exact qualifier are exposed as a field. If the qualifier is a valid UTF-8 string, it can be specified in the qualifierString field. Otherwise, a base-64 encoded value must be set in qualifierEncoded. The column field name is the same as the column qualifier. However, if the qualifier is not a valid BigQuery field identifier, i.e. does not match [a-zA-Z][a-zA-Z0-9_]*, a valid identifier must be provided as fieldName.
    QualifierString string
    Qualifier string.
    Type string
    The type to convert the value in cells of this column to. The values are expected to be encoded using the HBase Bytes.toBytes function when using the BINARY encoding value. The following BigQuery types are allowed (case-sensitive): "BYTES", "STRING", "INTEGER", "FLOAT", "BOOLEAN", "JSON". The default type is "BYTES". 'type' can also be set at the column family level. However, the setting at this level takes precedence if 'type' is set at both levels.
    encoding String
    The encoding of the values when the type is not STRING. Acceptable encoding values are: TEXT - indicates values are alphanumeric text strings. BINARY - indicates values are encoded using HBase Bytes.toBytes family of functions. 'encoding' can also be set at the column family level. However, the setting at this level takes precedence if 'encoding' is set at both levels.
    fieldName String
    If the qualifier is not a valid BigQuery field identifier i.e. does not match [a-zA-Z][a-zA-Z0-9_]*, a valid identifier must be provided as the column field name and is used as field name in queries.
    onlyReadLatest Boolean
    If this is set, only the latest version of the value in this column is exposed. 'onlyReadLatest' can also be set at the column family level. However, the setting at this level takes precedence if 'onlyReadLatest' is set at both levels.
    qualifierEncoded String
    Qualifier of the column. Columns in the parent column family that have this exact qualifier are exposed as a field. If the qualifier is a valid UTF-8 string, it can be specified in the qualifierString field. Otherwise, a base-64 encoded value must be set in qualifierEncoded. The column field name is the same as the column qualifier. However, if the qualifier is not a valid BigQuery field identifier, i.e. does not match [a-zA-Z][a-zA-Z0-9_]*, a valid identifier must be provided as fieldName.
    qualifierString String
    Qualifier string.
    type String
    The type to convert the value in cells of this column to. The values are expected to be encoded using the HBase Bytes.toBytes function when using the BINARY encoding value. The following BigQuery types are allowed (case-sensitive): "BYTES", "STRING", "INTEGER", "FLOAT", "BOOLEAN", "JSON". The default type is "BYTES". 'type' can also be set at the column family level. However, the setting at this level takes precedence if 'type' is set at both levels.
    encoding string
    The encoding of the values when the type is not STRING. Acceptable encoding values are: TEXT - indicates values are alphanumeric text strings. BINARY - indicates values are encoded using HBase Bytes.toBytes family of functions. 'encoding' can also be set at the column family level. However, the setting at this level takes precedence if 'encoding' is set at both levels.
    fieldName string
    If the qualifier is not a valid BigQuery field identifier i.e. does not match [a-zA-Z][a-zA-Z0-9_]*, a valid identifier must be provided as the column field name and is used as field name in queries.
    onlyReadLatest boolean
    If this is set, only the latest version of the value in this column is exposed. 'onlyReadLatest' can also be set at the column family level. However, the setting at this level takes precedence if 'onlyReadLatest' is set at both levels.
    qualifierEncoded string
    Qualifier of the column. Columns in the parent column family that have this exact qualifier are exposed as a field. If the qualifier is a valid UTF-8 string, it can be specified in the qualifierString field. Otherwise, a base-64 encoded value must be set in qualifierEncoded. The column field name is the same as the column qualifier. However, if the qualifier is not a valid BigQuery field identifier, i.e. does not match [a-zA-Z][a-zA-Z0-9_]*, a valid identifier must be provided as fieldName.
    qualifierString string
    Qualifier string.
    type string
    The type to convert the value in cells of this column to. The values are expected to be encoded using the HBase Bytes.toBytes function when using the BINARY encoding value. The following BigQuery types are allowed (case-sensitive): "BYTES", "STRING", "INTEGER", "FLOAT", "BOOLEAN", "JSON". The default type is "BYTES". 'type' can also be set at the column family level. However, the setting at this level takes precedence if 'type' is set at both levels.
    encoding str
    The encoding of the values when the type is not STRING. Acceptable encoding values are: TEXT - indicates values are alphanumeric text strings. BINARY - indicates values are encoded using HBase Bytes.toBytes family of functions. 'encoding' can also be set at the column family level. However, the setting at this level takes precedence if 'encoding' is set at both levels.
    field_name str
    If the qualifier is not a valid BigQuery field identifier i.e. does not match [a-zA-Z][a-zA-Z0-9_]*, a valid identifier must be provided as the column field name and is used as field name in queries.
    only_read_latest bool
    If this is set, only the latest version of the value in this column is exposed. 'onlyReadLatest' can also be set at the column family level. However, the setting at this level takes precedence if 'onlyReadLatest' is set at both levels.
    qualifier_encoded str
    Qualifier of the column. Columns in the parent column family that have this exact qualifier are exposed as a field. If the qualifier is a valid UTF-8 string, it can be specified in the qualifierString field. Otherwise, a base-64 encoded value must be set in qualifierEncoded. The column field name is the same as the column qualifier. However, if the qualifier is not a valid BigQuery field identifier, i.e. does not match [a-zA-Z][a-zA-Z0-9_]*, a valid identifier must be provided as fieldName.
    qualifier_string str
    Qualifier string.
    type str
    The type to convert the value in cells of this column to. The values are expected to be encoded using the HBase Bytes.toBytes function when using the BINARY encoding value. The following BigQuery types are allowed (case-sensitive): "BYTES", "STRING", "INTEGER", "FLOAT", "BOOLEAN", "JSON". The default type is "BYTES". 'type' can also be set at the column family level. However, the setting at this level takes precedence if 'type' is set at both levels.
    encoding String
    The encoding of the values when the type is not STRING. Acceptable encoding values are: TEXT - indicates values are alphanumeric text strings. BINARY - indicates values are encoded using HBase Bytes.toBytes family of functions. 'encoding' can also be set at the column family level. However, the setting at this level takes precedence if 'encoding' is set at both levels.
    fieldName String
    If the qualifier is not a valid BigQuery field identifier i.e. does not match [a-zA-Z][a-zA-Z0-9_]*, a valid identifier must be provided as the column field name and is used as field name in queries.
    onlyReadLatest Boolean
    If this is set, only the latest version of the value in this column is exposed. 'onlyReadLatest' can also be set at the column family level. However, the setting at this level takes precedence if 'onlyReadLatest' is set at both levels.
    qualifierEncoded String
    Qualifier of the column. Columns in the parent column family that have this exact qualifier are exposed as a field. If the qualifier is a valid UTF-8 string, it can be specified in the qualifierString field. Otherwise, a base-64 encoded value must be set in qualifierEncoded. The column field name is the same as the column qualifier. However, if the qualifier is not a valid BigQuery field identifier, i.e. does not match [a-zA-Z][a-zA-Z0-9_]*, a valid identifier must be provided as fieldName.
    qualifierString String
    Qualifier string.
    type String
    The type to convert the value in cells of this column to. The values are expected to be encoded using the HBase Bytes.toBytes function when using the BINARY encoding value. The following BigQuery types are allowed (case-sensitive): "BYTES", "STRING", "INTEGER", "FLOAT", "BOOLEAN", "JSON". The default type is "BYTES". 'type' can also be set at the column family level. However, the setting at this level takes precedence if 'type' is set at both levels.
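
    For illustration, an external table backed by a Bigtable table might map a single column family and column like this. This is a minimal sketch: the project, instance, table, dataset, and qualifier names are all placeholders, and the source URI shown follows the usual BigQuery Bigtable URI convention rather than anything specific to this document.

    import * as gcp from "@pulumi/gcp";

    // Sketch only: exposes one Bigtable column family ("stats") and maps one column to a field.
    const bigtableExternal = new gcp.bigquery.Table("bigtable_external", {
        datasetId: "my_dataset",          // assumed to already exist
        tableId: "bigtable_external",
        deletionProtection: false,
        externalDataConfiguration: {
            autodetect: false,
            sourceFormat: "BIGTABLE",
            sourceUris: ["https://googleapis.com/bigtable/projects/my-project/instances/my-instance/tables/my-table"],
            bigtableOptions: {
                columnFamilies: [{
                    familyId: "stats",
                    encoding: "TEXT",
                    onlyReadLatest: true,
                    columns: [{
                        qualifierString: "total-views",
                        fieldName: "total_views",  // the qualifier is not a valid field identifier, so map it
                        type: "INTEGER",
                    }],
                }],
            },
        },
    });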

    TableExternalDataConfigurationCsvOptions, TableExternalDataConfigurationCsvOptionsArgs

    Quote string
    The value that is used to quote data sections in a CSV file. If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set the allow_quoted_newlines property to true. The API-side default is ", specified in the provider escaped as \". Due to limitations with default values, this value is required to be explicitly set.
    AllowJaggedRows bool
    Indicates if BigQuery should accept rows that are missing trailing optional columns.
    AllowQuotedNewlines bool
    Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false.
    Encoding string
    The character encoding of the data. The supported values are UTF-8 or ISO-8859-1.
    FieldDelimiter string
    The separator for fields in a CSV file.
    SkipLeadingRows int
    The number of rows at the top of a CSV file that BigQuery will skip when reading the data.
    Quote string
    The value that is used to quote data sections in a CSV file. If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set the allow_quoted_newlines property to true. The API-side default is ", specified in the provider escaped as \". Due to limitations with default values, this value is required to be explicitly set.
    AllowJaggedRows bool
    Indicates if BigQuery should accept rows that are missing trailing optional columns.
    AllowQuotedNewlines bool
    Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false.
    Encoding string
    The character encoding of the data. The supported values are UTF-8 or ISO-8859-1.
    FieldDelimiter string
    The separator for fields in a CSV file.
    SkipLeadingRows int
    The number of rows at the top of a CSV file that BigQuery will skip when reading the data.
    quote String
    The value that is used to quote data sections in a CSV file. If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set the allow_quoted_newlines property to true. The API-side default is ", specified in the provider escaped as \". Due to limitations with default values, this value is required to be explicitly set.
    allowJaggedRows Boolean
    Indicates if BigQuery should accept rows that are missing trailing optional columns.
    allowQuotedNewlines Boolean
    Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false.
    encoding String
    The character encoding of the data. The supported values are UTF-8 or ISO-8859-1.
    fieldDelimiter String
    The separator for fields in a CSV file.
    skipLeadingRows Integer
    The number of rows at the top of a CSV file that BigQuery will skip when reading the data.
    quote string
    The value that is used to quote data sections in a CSV file. If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set the allow_quoted_newlines property to true. The API-side default is ", specified in the provider escaped as \". Due to limitations with default values, this value is required to be explicitly set.
    allowJaggedRows boolean
    Indicates if BigQuery should accept rows that are missing trailing optional columns.
    allowQuotedNewlines boolean
    Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false.
    encoding string
    The character encoding of the data. The supported values are UTF-8 or ISO-8859-1.
    fieldDelimiter string
    The separator for fields in a CSV file.
    skipLeadingRows number
    The number of rows at the top of a CSV file that BigQuery will skip when reading the data.
    quote str
    The value that is used to quote data sections in a CSV file. If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set the allow_quoted_newlines property to true. The API-side default is ", specified in the provider escaped as \". Due to limitations with default values, this value is required to be explicitly set.
    allow_jagged_rows bool
    Indicates if BigQuery should accept rows that are missing trailing optional columns.
    allow_quoted_newlines bool
    Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false.
    encoding str
    The character encoding of the data. The supported values are UTF-8 or ISO-8859-1.
    field_delimiter str
    The separator for fields in a CSV file.
    skip_leading_rows int
    The number of rows at the top of a CSV file that BigQuery will skip when reading the data.
    quote String
    The value that is used to quote data sections in a CSV file. If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set the allow_quoted_newlines property to true. The API-side default is ", specified in the provider escaped as \". Due to limitations with default values, this value is required to be explicitly set.
    allowJaggedRows Boolean
    Indicates if BigQuery should accept rows that are missing trailing optional columns.
    allowQuotedNewlines Boolean
    Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false.
    encoding String
    The character encoding of the data. The supported values are UTF-8 or ISO-8859-1.
    fieldDelimiter String
    The separator for fields in a CSV file.
    skipLeadingRows Number
    The number of rows at the top of a CSV file that BigQuery will skip when reading the data.
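
    For example, an external table over CSV files might set these options as follows. The bucket path and dataset are placeholders, and the dataset is assumed to already exist.

    import * as gcp from "@pulumi/gcp";

    // Sketch only: CSV files in GCS exposed as an external table.
    const csvExternal = new gcp.bigquery.Table("csv_external", {
        datasetId: "my_dataset",
        tableId: "csv_external",
        deletionProtection: false,
        externalDataConfiguration: {
            autodetect: true,
            sourceFormat: "CSV",
            sourceUris: ["gs://my-bucket/exports/*.csv"],
            csvOptions: {
                quote: "\"",              // must be set explicitly; matches the API-side default
                skipLeadingRows: 1,       // skip the header row
                fieldDelimiter: ",",
                allowQuotedNewlines: false,
            },
        },
    });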

    TableExternalDataConfigurationGoogleSheetsOptions, TableExternalDataConfigurationGoogleSheetsOptionsArgs

    Range string
    Range of a sheet to query from. Only used when non-empty. At least one of range or skip_leading_rows must be set. Typical format: "sheet_name!top_left_cell_id:bottom_right_cell_id" For example: "sheet1!A1:B20"
    SkipLeadingRows int
    The number of rows at the top of the sheet that BigQuery will skip when reading the data. At least one of range or skip_leading_rows must be set.
    Range string
    Range of a sheet to query from. Only used when non-empty. At least one of range or skip_leading_rows must be set. Typical format: "sheet_name!top_left_cell_id:bottom_right_cell_id" For example: "sheet1!A1:B20"
    SkipLeadingRows int
    The number of rows at the top of the sheet that BigQuery will skip when reading the data. At least one of range or skip_leading_rows must be set.
    range String
    Range of a sheet to query from. Only used when non-empty. At least one of range or skip_leading_rows must be set. Typical format: "sheet_name!top_left_cell_id:bottom_right_cell_id" For example: "sheet1!A1:B20"
    skipLeadingRows Integer
    The number of rows at the top of the sheet that BigQuery will skip when reading the data. At least one of range or skip_leading_rows must be set.
    range string
    Range of a sheet to query from. Only used when non-empty. At least one of range or skip_leading_rows must be set. Typical format: "sheet_name!top_left_cell_id:bottom_right_cell_id" For example: "sheet1!A1:B20"
    skipLeadingRows number
    The number of rows at the top of the sheet that BigQuery will skip when reading the data. At least one of range or skip_leading_rows must be set.
    range str
    Range of a sheet to query from. Only used when non-empty. At least one of range or skip_leading_rows must be set. Typical format: "sheet_name!top_left_cell_id:bottom_right_cell_id" For example: "sheet1!A1:B20"
    skip_leading_rows int
    The number of rows at the top of the sheet that BigQuery will skip when reading the data. At least one of range or skip_leading_rows must be set.
    range String
    Range of a sheet to query from. Only used when non-empty. At least one of range or skip_leading_rows must be set. Typical format: "sheet_name!top_left_cell_id:bottom_right_cell_id" For example: "sheet1!A1:B20"
    skipLeadingRows Number
    The number of rows at the top of the sheet that BigQuery will skip when reading the data. At least one of range or skip_leading_rows must be set.

    TableExternalDataConfigurationHivePartitioningOptions, TableExternalDataConfigurationHivePartitioningOptionsArgs

    Mode string
    When set, what mode of hive partitioning to use when reading data. The following modes are supported.

    • AUTO: automatically infer partition key name(s) and type(s).
    • STRINGS: automatically infer partition key name(s). All types are strings. Not all storage formats support hive partitioning. Requesting hive partitioning on an unsupported format will lead to an error. Currently supported formats are: JSON, CSV, ORC, Avro and Parquet.
    • CUSTOM: when set to CUSTOM, you must encode the partition key schema within the source_uri_prefix by setting source_uri_prefix to gs://bucket/path_to_table/{key1:TYPE1}/{key2:TYPE2}/{key3:TYPE3}.
    RequirePartitionFilter bool
    If set to true, queries over this table must specify a partition filter that can be used for partition elimination.
    SourceUriPrefix string
    When hive partition detection is requested, a common prefix for all source URIs must be provided. The prefix must end immediately before the partition key encoding begins. For example, consider files following this data layout: gs://bucket/path_to_table/dt=2019-06-01/country=USA/id=7/file.avro and gs://bucket/path_to_table/dt=2019-05-31/country=CA/id=3/file.avro. When hive partitioning is requested with either AUTO or STRINGS detection, the common prefix can be either of gs://bucket/path_to_table or gs://bucket/path_to_table/. Note that when mode is set to CUSTOM, you must encode the partition key schema within the source_uri_prefix by setting source_uri_prefix to gs://bucket/path_to_table/{key1:TYPE1}/{key2:TYPE2}/{key3:TYPE3}.
    Mode string
    When set, what mode of hive partitioning to use when reading data. The following modes are supported.

    • AUTO: automatically infer partition key name(s) and type(s).
    • STRINGS: automatically infer partition key name(s). All types are strings. Not all storage formats support hive partitioning. Requesting hive partitioning on an unsupported format will lead to an error. Currently supported formats are: JSON, CSV, ORC, Avro and Parquet.
    • CUSTOM: when set to CUSTOM, you must encode the partition key schema within the source_uri_prefix by setting source_uri_prefix to gs://bucket/path_to_table/{key1:TYPE1}/{key2:TYPE2}/{key3:TYPE3}.
    RequirePartitionFilter bool
    If set to true, queries over this table must specify a partition filter that can be used for partition elimination.
    SourceUriPrefix string
    When hive partition detection is requested, a common prefix for all source URIs must be provided. The prefix must end immediately before the partition key encoding begins. For example, consider files following this data layout: gs://bucket/path_to_table/dt=2019-06-01/country=USA/id=7/file.avro and gs://bucket/path_to_table/dt=2019-05-31/country=CA/id=3/file.avro. When hive partitioning is requested with either AUTO or STRINGS detection, the common prefix can be either of gs://bucket/path_to_table or gs://bucket/path_to_table/. Note that when mode is set to CUSTOM, you must encode the partition key schema within the source_uri_prefix by setting source_uri_prefix to gs://bucket/path_to_table/{key1:TYPE1}/{key2:TYPE2}/{key3:TYPE3}.
    mode String
    When set, what mode of hive partitioning to use when reading data. The following modes are supported.

    • AUTO: automatically infer partition key name(s) and type(s).
    • STRINGS: automatically infer partition key name(s). All types are strings. Not all storage formats support hive partitioning. Requesting hive partitioning on an unsupported format will lead to an error. Currently supported formats are: JSON, CSV, ORC, Avro and Parquet.
    • CUSTOM: when set to CUSTOM, you must encode the partition key schema within the source_uri_prefix by setting source_uri_prefix to gs://bucket/path_to_table/{key1:TYPE1}/{key2:TYPE2}/{key3:TYPE3}.
    requirePartitionFilter Boolean
    If set to true, queries over this table must specify a partition filter that can be used for partition elimination.
    sourceUriPrefix String
    When hive partition detection is requested, a common prefix for all source URIs must be provided. The prefix must end immediately before the partition key encoding begins. For example, consider files following this data layout: gs://bucket/path_to_table/dt=2019-06-01/country=USA/id=7/file.avro and gs://bucket/path_to_table/dt=2019-05-31/country=CA/id=3/file.avro. When hive partitioning is requested with either AUTO or STRINGS detection, the common prefix can be either of gs://bucket/path_to_table or gs://bucket/path_to_table/. Note that when mode is set to CUSTOM, you must encode the partition key schema within the source_uri_prefix by setting source_uri_prefix to gs://bucket/path_to_table/{key1:TYPE1}/{key2:TYPE2}/{key3:TYPE3}.
    mode string
    When set, what mode of hive partitioning to use when reading data. The following modes are supported.

    • AUTO: automatically infer partition key name(s) and type(s).
    • STRINGS: automatically infer partition key name(s). All types are strings. Not all storage formats support hive partitioning. Requesting hive partitioning on an unsupported format will lead to an error. Currently supported formats are: JSON, CSV, ORC, Avro and Parquet.
    • CUSTOM: when set to CUSTOM, you must encode the partition key schema within the source_uri_prefix by setting source_uri_prefix to gs://bucket/path_to_table/{key1:TYPE1}/{key2:TYPE2}/{key3:TYPE3}.
    requirePartitionFilter boolean
    If set to true, queries over this table must specify a partition filter that can be used for partition elimination.
    sourceUriPrefix string
    When hive partition detection is requested, a common prefix for all source URIs must be provided. The prefix must end immediately before the partition key encoding begins. For example, consider files following this data layout: gs://bucket/path_to_table/dt=2019-06-01/country=USA/id=7/file.avro and gs://bucket/path_to_table/dt=2019-05-31/country=CA/id=3/file.avro. When hive partitioning is requested with either AUTO or STRINGS detection, the common prefix can be either of gs://bucket/path_to_table or gs://bucket/path_to_table/. Note that when mode is set to CUSTOM, you must encode the partition key schema within the source_uri_prefix by setting source_uri_prefix to gs://bucket/path_to_table/{key1:TYPE1}/{key2:TYPE2}/{key3:TYPE3}.
    mode str
    When set, what mode of hive partitioning to use when reading data. The following modes are supported.

    • AUTO: automatically infer partition key name(s) and type(s).
    • STRINGS: automatically infer partition key name(s). All types are strings. Not all storage formats support hive partitioning. Requesting hive partitioning on an unsupported format will lead to an error. Currently supported formats are: JSON, CSV, ORC, Avro and Parquet.
    • CUSTOM: when set to CUSTOM, you must encode the partition key schema within the source_uri_prefix by setting source_uri_prefix to gs://bucket/path_to_table/{key1:TYPE1}/{key2:TYPE2}/{key3:TYPE3}.
    require_partition_filter bool
    If set to true, queries over this table must specify a partition filter that can be used for partition elimination.
    source_uri_prefix str
    When hive partition detection is requested, a common prefix for all source URIs must be provided. The prefix must end immediately before the partition key encoding begins. For example, consider files following this data layout: gs://bucket/path_to_table/dt=2019-06-01/country=USA/id=7/file.avro and gs://bucket/path_to_table/dt=2019-05-31/country=CA/id=3/file.avro. When hive partitioning is requested with either AUTO or STRINGS detection, the common prefix can be either of gs://bucket/path_to_table or gs://bucket/path_to_table/. Note that when mode is set to CUSTOM, you must encode the partition key schema within the source_uri_prefix by setting source_uri_prefix to gs://bucket/path_to_table/{key1:TYPE1}/{key2:TYPE2}/{key3:TYPE3}.
    mode String
    When set, what mode of hive partitioning to use when reading data. The following modes are supported.

    • AUTO: automatically infer partition key name(s) and type(s).
    • STRINGS: automatically infer partition key name(s). All types are strings. Not all storage formats support hive partitioning. Requesting hive partitioning on an unsupported format will lead to an error. Currently supported formats are: JSON, CSV, ORC, Avro and Parquet.
    • CUSTOM: when set to CUSTOM, you must encode the partition key schema within the source_uri_prefix by setting source_uri_prefix to gs://bucket/path_to_table/{key1:TYPE1}/{key2:TYPE2}/{key3:TYPE3}.
    requirePartitionFilter Boolean
    If set to true, queries over this table must specify a partition filter that can be used for partition elimination.
    sourceUriPrefix String
    When hive partition detection is requested, a common prefix for all source URIs must be provided. The prefix must end immediately before the partition key encoding begins. For example, consider files following this data layout: gs://bucket/path_to_table/dt=2019-06-01/country=USA/id=7/file.avro and gs://bucket/path_to_table/dt=2019-05-31/country=CA/id=3/file.avro. When hive partitioning is requested with either AUTO or STRINGS detection, the common prefix can be either of gs://bucket/path_to_table or gs://bucket/path_to_table/. Note that when mode is set to CUSTOM, you must encode the partition key schema within the source_uri_prefix by setting source_uri_prefix to gs://bucket/path_to_table/{key1:TYPE1}/{key2:TYPE2}/{key3:TYPE3}.
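
    As a sketch (the bucket, partition key names, and dataset are placeholders), CUSTOM-mode hive partitioning encodes the partition key schema in the source URI prefix:

    import * as gcp from "@pulumi/gcp";

    // Sketch only: hive-partitioned Parquet files laid out as .../dt=YYYY-MM-DD/country=XX/...
    const hivePartitioned = new gcp.bigquery.Table("hive_partitioned", {
        datasetId: "my_dataset",
        tableId: "hive_partitioned",
        deletionProtection: false,
        externalDataConfiguration: {
            autodetect: true,
            sourceFormat: "PARQUET",
            sourceUris: ["gs://my-bucket/path_to_table/*"],
            hivePartitioningOptions: {
                mode: "CUSTOM",
                // With CUSTOM, the partition key schema is encoded in the prefix itself.
                sourceUriPrefix: "gs://my-bucket/path_to_table/{dt:DATE}/{country:STRING}",
                requirePartitionFilter: true,
            },
        },
    });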

    TableExternalDataConfigurationJsonOptions, TableExternalDataConfigurationJsonOptionsArgs

    Encoding string
    The character encoding of the data. The supported values are UTF-8, UTF-16BE, UTF-16LE, UTF-32BE, and UTF-32LE. The default value is UTF-8.
    Encoding string
    The character encoding of the data. The supported values are UTF-8, UTF-16BE, UTF-16LE, UTF-32BE, and UTF-32LE. The default value is UTF-8.
    encoding String
    The character encoding of the data. The supported values are UTF-8, UTF-16BE, UTF-16LE, UTF-32BE, and UTF-32LE. The default value is UTF-8.
    encoding string
    The character encoding of the data. The supported values are UTF-8, UTF-16BE, UTF-16LE, UTF-32BE, and UTF-32LE. The default value is UTF-8.
    encoding str
    The character encoding of the data. The supported values are UTF-8, UTF-16BE, UTF-16LE, UTF-32BE, and UTF-32LE. The default value is UTF-8.
    encoding String
    The character encoding of the data. The supported values are UTF-8, UTF-16BE, UTF-16LE, UTF-32BE, and UTF-32LE. The default value is UTF-8.
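
    For example (the bucket path and dataset are placeholders), newline-delimited JSON data with an explicit character encoding might be configured as:

    import * as gcp from "@pulumi/gcp";

    // Sketch only: newline-delimited JSON files that are not UTF-8 encoded.
    const jsonExternal = new gcp.bigquery.Table("json_external", {
        datasetId: "my_dataset",
        tableId: "json_external",
        deletionProtection: false,
        externalDataConfiguration: {
            autodetect: true,
            sourceFormat: "NEWLINE_DELIMITED_JSON",
            sourceUris: ["gs://my-bucket/logs/*.json"],
            jsonOptions: {
                encoding: "UTF-16LE",   // the default is UTF-8
            },
        },
    });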

    TableExternalDataConfigurationParquetOptions, TableExternalDataConfigurationParquetOptionsArgs

    EnableListInference bool
    Indicates whether to use schema inference specifically for Parquet LIST logical type.
    EnumAsString bool
    Indicates whether to infer Parquet ENUM logical type as STRING instead of BYTES by default.
    EnableListInference bool
    Indicates whether to use schema inference specifically for Parquet LIST logical type.
    EnumAsString bool
    Indicates whether to infer Parquet ENUM logical type as STRING instead of BYTES by default.
    enableListInference Boolean
    Indicates whether to use schema inference specifically for Parquet LIST logical type.
    enumAsString Boolean
    Indicates whether to infer Parquet ENUM logical type as STRING instead of BYTES by default.
    enableListInference boolean
    Indicates whether to use schema inference specifically for Parquet LIST logical type.
    enumAsString boolean
    Indicates whether to infer Parquet ENUM logical type as STRING instead of BYTES by default.
    enable_list_inference bool
    Indicates whether to use schema inference specifically for Parquet LIST logical type.
    enum_as_string bool
    Indicates whether to infer Parquet ENUM logical type as STRING instead of BYTES by default.
    enableListInference Boolean
    Indicates whether to use schema inference specifically for Parquet LIST logical type.
    enumAsString Boolean
    Indicates whether to infer Parquet ENUM logical type as STRING instead of BYTES by default.
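
    For example (the bucket path and dataset are placeholders), both Parquet inference options might be enabled like this:

    import * as gcp from "@pulumi/gcp";

    // Sketch only: Parquet files with LIST inference and ENUM-as-STRING enabled.
    const parquetExternal = new gcp.bigquery.Table("parquet_external", {
        datasetId: "my_dataset",
        tableId: "parquet_external",
        deletionProtection: false,
        externalDataConfiguration: {
            autodetect: true,
            sourceFormat: "PARQUET",
            sourceUris: ["gs://my-bucket/events/*.parquet"],
            parquetOptions: {
                enableListInference: true,
                enumAsString: true,
            },
        },
    });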

    TableMaterializedView, TableMaterializedViewArgs

    Query string
    A query whose result is persisted.
    AllowNonIncrementalDefinition bool
    Allow a non-incremental materialized view definition. The default value is false.
    EnableRefresh bool
    Specifies whether to use BigQuery's automatic refresh for this materialized view when the base table is updated. The default value is true.
    RefreshIntervalMs int
    The maximum frequency at which this materialized view will be refreshed. The default value is 1800000 (30 minutes).
    Query string
    A query whose result is persisted.
    AllowNonIncrementalDefinition bool
    Allow a non-incremental materialized view definition. The default value is false.
    EnableRefresh bool
    Specifies whether to use BigQuery's automatic refresh for this materialized view when the base table is updated. The default value is true.
    RefreshIntervalMs int
    The maximum frequency at which this materialized view will be refreshed. The default value is 1800000 (30 minutes).
    query String
    A query whose result is persisted.
    allowNonIncrementalDefinition Boolean
    Allow a non-incremental materialized view definition. The default value is false.
    enableRefresh Boolean
    Specifies whether to use BigQuery's automatic refresh for this materialized view when the base table is updated. The default value is true.
    refreshIntervalMs Integer
    The maximum frequency at which this materialized view will be refreshed. The default value is 1800000 (30 minutes).
    query string
    A query whose result is persisted.
    allowNonIncrementalDefinition boolean
    Allow a non-incremental materialized view definition. The default value is false.
    enableRefresh boolean
    Specifies whether to use BigQuery's automatic refresh for this materialized view when the base table is updated. The default value is true.
    refreshIntervalMs number
    The maximum frequency at which this materialized view will be refreshed. The default value is 1800000 (30 minutes).
    query str
    A query whose result is persisted.
    allow_non_incremental_definition bool
    Allow a non-incremental materialized view definition. The default value is false.
    enable_refresh bool
    Specifies whether to use BigQuery's automatic refresh for this materialized view when the base table is updated. The default value is true.
    refresh_interval_ms int
    The maximum frequency at which this materialized view will be refreshed. The default value is 1800000 (30 minutes).
    query String
    A query whose result is persisted.
    allowNonIncrementalDefinition Boolean
    Allow a non-incremental materialized view definition. The default value is false.
    enableRefresh Boolean
    Specifies whether to use BigQuery's automatic refresh for this materialized view when the base table is updated. The default value is true.
    refreshIntervalMs Number
    The maximum frequency at which this materialized view will be refreshed. The default value is 1800000 (30 minutes).
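
    As a sketch (the base table my_dataset.page_views and the dataset itself are hypothetical), a materialized view that refreshes at most once per hour might look like:

    import * as gcp from "@pulumi/gcp";

    // Sketch only: materialized view over a hypothetical base table, refreshed at most hourly.
    const dailyViews = new gcp.bigquery.Table("daily_views", {
        datasetId: "my_dataset",
        tableId: "daily_views",
        deletionProtection: false,
        materializedView: {
            query: "SELECT DATE(view_time) AS day, COUNT(*) AS views FROM my_dataset.page_views GROUP BY day",
            enableRefresh: true,
            refreshIntervalMs: 3600000,   // 1 hour; the default is 1800000 (30 minutes)
        },
    });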

    TableRangePartitioning, TableRangePartitioningArgs

    Field string
    The field used to determine how to create a range-based partition.
    Range TableRangePartitioningRange
    Information required to partition based on ranges. Structure is documented below.
    Field string
    The field used to determine how to create a range-based partition.
    Range TableRangePartitioningRange
    Information required to partition based on ranges. Structure is documented below.
    field String
    The field used to determine how to create a range-based partition.
    range TableRangePartitioningRange
    Information required to partition based on ranges. Structure is documented below.
    field string
    The field used to determine how to create a range-based partition.
    range TableRangePartitioningRange
    Information required to partition based on ranges. Structure is documented below.
    field str
    The field used to determine how to create a range-based partition.
    range TableRangePartitioningRange
    Information required to partition based on ranges. Structure is documented below.
    field String
    The field used to determine how to create a range-based partition.
    range Property Map
    Information required to partition based on ranges. Structure is documented below.

    TableRangePartitioningRange, TableRangePartitioningRangeArgs

    End int
    End of the range partitioning, exclusive.
    Interval int
    The width of each range within the partition.
    Start int
    Start of the range partitioning, inclusive.
    End int
    End of the range partitioning, exclusive.
    Interval int
    The width of each range within the partition.
    Start int
    Start of the range partitioning, inclusive.
    end Integer
    End of the range partitioning, exclusive.
    interval Integer
    The width of each range within the partition.
    start Integer
    Start of the range partitioning, inclusive.
    end number
    End of the range partitioning, exclusive.
    interval number
    The width of each range within the partition.
    start number
    Start of the range partitioning, inclusive.
    end int
    End of the range partitioning, exclusive.
    interval int
    The width of each range within the partition.
    start int
    Start of the range partitioning, inclusive.
    end Number
    End of the range partitioning, exclusive.
    interval Number
    The width of each range within the partition.
    start Number
    Start of the range partitioning, inclusive.
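
    Putting the two blocks above together, an integer-range-partitioned table on a hypothetical customer_id column might be declared like this (partitions of width 100 covering ids 0 through 9999; the dataset and schema are placeholders):

    import * as gcp from "@pulumi/gcp";

    // Sketch only: range-partitioned table; the schema and range bounds are illustrative.
    const rangePartitioned = new gcp.bigquery.Table("range_partitioned", {
        datasetId: "my_dataset",
        tableId: "range_partitioned",
        deletionProtection: false,
        schema: JSON.stringify([
            { name: "customer_id", type: "INTEGER", mode: "REQUIRED" },
            { name: "amount", type: "NUMERIC", mode: "NULLABLE" },
        ]),
        rangePartitioning: {
            field: "customer_id",
            range: {
                start: 0,        // inclusive
                end: 10000,      // exclusive
                interval: 100,   // width of each range
            },
        },
    });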

    TableTableConstraints, TableTableConstraintsArgs

    ForeignKeys List<TableTableConstraintsForeignKey>
    Present only if the table has a foreign key. The foreign key is not enforced. Structure is documented below.
    PrimaryKey TableTableConstraintsPrimaryKey
    Represents the primary key constraint on a table's columns. Present only if the table has a primary key. The primary key is not enforced. Structure is documented below.
    ForeignKeys []TableTableConstraintsForeignKey
    Present only if the table has a foreign key. The foreign key is not enforced. Structure is documented below.
    PrimaryKey TableTableConstraintsPrimaryKey
    Represents the primary key constraint on a table's columns. Present only if the table has a primary key. The primary key is not enforced. Structure is documented below.
    foreignKeys List<TableTableConstraintsForeignKey>
    Present only if the table has a foreign key. The foreign key is not enforced. Structure is documented below.
    primaryKey TableTableConstraintsPrimaryKey
    Represents the primary key constraint on a table's columns. Present only if the table has a primary key. The primary key is not enforced. Structure is documented below.
    foreignKeys TableTableConstraintsForeignKey[]
    Present only if the table has a foreign key. The foreign key is not enforced. Structure is documented below.
    primaryKey TableTableConstraintsPrimaryKey
    Represents the primary key constraint on a table's columns. Present only if the table has a primary key. The primary key is not enforced. Structure is documented below.
    foreign_keys Sequence[TableTableConstraintsForeignKey]
    Present only if the table has a foreign key. The foreign key is not enforced. Structure is documented below.
    primary_key TableTableConstraintsPrimaryKey
    Represents the primary key constraint on a table's columns. Present only if the table has a primary key. The primary key is not enforced. Structure is documented below.
    foreignKeys List<Property Map>
    Present only if the table has a foreign key. The foreign key is not enforced. Structure is documented below.
    primaryKey Property Map
    Represents the primary key constraint on a table's columns. Present only if the table has a primary key. The primary key is not enforced. Structure is documented below.

    TableTableConstraintsForeignKey, TableTableConstraintsForeignKeyArgs

    ColumnReferences TableTableConstraintsForeignKeyColumnReferences
    The pair of the foreign key column and primary key column. Structure is documented below.
    ReferencedTable TableTableConstraintsForeignKeyReferencedTable
    The table that holds the primary key and is referenced by this foreign key. Structure is documented below.
    Name string
    Set only if the foreign key constraint is named.
    ColumnReferences TableTableConstraintsForeignKeyColumnReferences
    The pair of the foreign key column and primary key column. Structure is documented below.
    ReferencedTable TableTableConstraintsForeignKeyReferencedTable
    The table that holds the primary key and is referenced by this foreign key. Structure is documented below.
    Name string
    Set only if the foreign key constraint is named.
    columnReferences TableTableConstraintsForeignKeyColumnReferences
    The pair of the foreign key column and primary key column. Structure is documented below.
    referencedTable TableTableConstraintsForeignKeyReferencedTable
    The table that holds the primary key and is referenced by this foreign key. Structure is documented below.
    name String
    Set only if the foreign key constraint is named.
    columnReferences TableTableConstraintsForeignKeyColumnReferences
    The pair of the foreign key column and primary key column. Structure is documented below.
    referencedTable TableTableConstraintsForeignKeyReferencedTable
    The table that holds the primary key and is referenced by this foreign key. Structure is documented below.
    name string
    Set only if the foreign key constraint is named.
    column_references TableTableConstraintsForeignKeyColumnReferences
    The pair of the foreign key column and primary key column. Structure is documented below.
    referenced_table TableTableConstraintsForeignKeyReferencedTable
    The table that holds the primary key and is referenced by this foreign key. Structure is documented below.
    name str
    Set only if the foreign key constraint is named.
    columnReferences Property Map
    The pair of the foreign key column and primary key column. Structure is documented below.
    referencedTable Property Map
    The table that holds the primary key and is referenced by this foreign key. Structure is documented below.
    name String
    Set only if the foreign key constraint is named.

    TableTableConstraintsForeignKeyColumnReferences, TableTableConstraintsForeignKeyColumnReferencesArgs

    ReferencedColumn string
    The column in the primary key that is referenced by the referencingColumn.
    ReferencingColumn string
    The column that composes the foreign key.
    ReferencedColumn string
    The column in the primary key that is referenced by the referencingColumn.
    ReferencingColumn string
    The column that composes the foreign key.
    referencedColumn String
    The column in the primary key that is referenced by the referencingColumn.
    referencingColumn String
    The column that composes the foreign key.
    referencedColumn string
    The column in the primary key that is referenced by the referencingColumn.
    referencingColumn string
    The column that composes the foreign key.
    referenced_column str
    The column in the primary key that is referenced by the referencingColumn.
    referencing_column str
    The column that composes the foreign key.
    referencedColumn String
    The column in the primary key that is referenced by the referencingColumn.
    referencingColumn String
    The column that composes the foreign key.

    TableTableConstraintsForeignKeyReferencedTable, TableTableConstraintsForeignKeyReferencedTableArgs

    DatasetId string
    The ID of the dataset containing this table.
    ProjectId string
    The ID of the project containing this table.
    TableId string
    The ID of the table. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters. Certain operations allow suffixing of the table ID with a partition decorator, such as sample_table$20190123.
    DatasetId string
    The ID of the dataset containing this table.
    ProjectId string
    The ID of the project containing this table.
    TableId string
    The ID of the table. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters. Certain operations allow suffixing of the table ID with a partition decorator, such as sample_table$20190123.
    datasetId String
    The ID of the dataset containing this table.
    projectId String
    The ID of the project containing this table.
    tableId String
    The ID of the table. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters. Certain operations allow suffixing of the table ID with a partition decorator, such as sample_table$20190123.
    datasetId string
    The ID of the dataset containing this table.
    projectId string
    The ID of the project containing this table.
    tableId string
    The ID of the table. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters. Certain operations allow suffixing of the table ID with a partition decorator, such as sample_table$20190123.
    dataset_id str
    The ID of the dataset containing this table.
    project_id str
    The ID of the project containing this table.
    table_id str
    The ID of the table. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters. Certain operations allow suffixing of the table ID with a partition decorator, such as sample_table$20190123.
    datasetId String
    The ID of the dataset containing this table.
    projectId String
    The ID of the project containing this table.
    tableId String
    The ID of the table. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters. Certain operations allow suffixing of the table ID with a partition decorator, such as sample_table$20190123.

    TableTableConstraintsPrimaryKey, TableTableConstraintsPrimaryKeyArgs

    Columns List<string>
    The columns that compose the primary key constraint.
    Columns []string
    The columns that compose the primary key constraint.
    columns List<String>
    The columns that compose the primary key constraint.
    columns string[]
    The columns that compose the primary key constraint.
    columns Sequence[str]
    The columns that compose the primary key constraint.
    columns List<String>
    The columns that compose the primary key constraint.
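
    Combining the constraint blocks above, a table with a primary key and a foreign key referencing a hypothetical customers table might look like this. All project, dataset, table, and column names are placeholders, and neither constraint is enforced by BigQuery.

    import * as gcp from "@pulumi/gcp";

    // Sketch only: unenforced primary-key and foreign-key metadata on an orders table.
    const orders = new gcp.bigquery.Table("orders", {
        datasetId: "my_dataset",
        tableId: "orders",
        deletionProtection: false,
        schema: JSON.stringify([
            { name: "order_id", type: "INTEGER", mode: "REQUIRED" },
            { name: "customer_id", type: "INTEGER", mode: "REQUIRED" },
        ]),
        tableConstraints: {
            primaryKey: {
                columns: ["order_id"],
            },
            foreignKeys: [{
                name: "fk_orders_customer",
                referencedTable: {
                    projectId: "my-project",
                    datasetId: "my_dataset",
                    tableId: "customers",
                },
                columnReferences: {
                    referencingColumn: "customer_id",
                    referencedColumn: "customer_id",
                },
            }],
        },
    });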

    TableTableReplicationInfo, TableTableReplicationInfoArgs

    SourceDatasetId string
    The ID of the source dataset.
    SourceProjectId string
    The ID of the source project.
    SourceTableId string
    The ID of the source materialized view.
    ReplicationIntervalMs int
    The interval at which the source materialized view is polled for updates. The default is 300000.
    SourceDatasetId string
    The ID of the source dataset.
    SourceProjectId string
    The ID of the source project.
    SourceTableId string
    The ID of the source materialized view.
    ReplicationIntervalMs int
    The interval at which the source materialized view is polled for updates. The default is 300000.
    sourceDatasetId String
    The ID of the source dataset.
    sourceProjectId String
    The ID of the source project.
    sourceTableId String
    The ID of the source materialized view.
    replicationIntervalMs Integer
    The interval at which the source materialized view is polled for updates. The default is 300000.
    sourceDatasetId string
    The ID of the source dataset.
    sourceProjectId string
    The ID of the source project.
    sourceTableId string
    The ID of the source materialized view.
    replicationIntervalMs number
    The interval at which the source materialized view is polled for updates. The default is 300000.
    source_dataset_id str
    The ID of the source dataset.
    source_project_id str
    The ID of the source project.
    source_table_id str
    The ID of the source materialized view.
    replication_interval_ms int
    The interval at which the source materialized view is polled for updates. The default is 300000.
    sourceDatasetId String
    The ID of the source dataset.
    sourceProjectId String
    The ID of the source project.
    sourceTableId String
    The ID of the source materialized view.
    replicationIntervalMs Number
    The interval at which the source materialized view is polled for updates. The default is 300000.
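
    For example (all source identifiers are placeholders), a replica of a materialized view that lives in another project might be declared as:

    import * as gcp from "@pulumi/gcp";

    // Sketch only: replica that polls the source materialized view every 10 minutes.
    const replica = new gcp.bigquery.Table("mv_replica", {
        datasetId: "my_dataset",
        tableId: "mv_replica",
        deletionProtection: false,
        tableReplicationInfo: {
            sourceProjectId: "source-project",
            sourceDatasetId: "source_dataset",
            sourceTableId: "source_materialized_view",
            replicationIntervalMs: 600000,   // the default is 300000 (5 minutes)
        },
    });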

    TableTimePartitioning, TableTimePartitioningArgs

    Type string
    The supported types are DAY, HOUR, MONTH, and YEAR, which will generate one partition per day, hour, month, and year, respectively.
    ExpirationMs int
    Number of milliseconds for which to keep the storage for a partition.
    Field string
    The field used to determine how to create a time-based partition. If time-based partitioning is enabled without this value, the table is partitioned based on the load time.
    RequirePartitionFilter bool
    If set to true, queries over this table must specify a partition filter that can be used for partition elimination. require_partition_filter is deprecated and will be removed in a future major release. Use the top-level field with the same name instead.

    Deprecated: This field is deprecated and will be removed in a future major release; please use the top level field with the same name instead.

    Type string
    The supported types are DAY, HOUR, MONTH, and YEAR, which will generate one partition per day, hour, month, and year, respectively.
    ExpirationMs int
    Number of milliseconds for which to keep the storage for a partition.
    Field string
    The field used to determine how to create a time-based partition. If time-based partitioning is enabled without this value, the table is partitioned based on the load time.
    RequirePartitionFilter bool
    If set to true, queries over this table must specify a partition filter that can be used for partition elimination. require_partition_filter is deprecated and will be removed in a future major release. Use the top-level field with the same name instead.

    Deprecated: This field is deprecated and will be removed in a future major release; please use the top level field with the same name instead.

    type String
    The supported types are DAY, HOUR, MONTH, and YEAR, which will generate one partition per day, hour, month, and year, respectively.
    expirationMs Integer
    Number of milliseconds for which to keep the storage for a partition.
    field String
    The field used to determine how to create a time-based partition. If time-based partitioning is enabled without this value, the table is partitioned based on the load time.
    requirePartitionFilter Boolean
    If set to true, queries over this table must specify a partition filter that can be used for partition elimination. require_partition_filter is deprecated and will be removed in a future major release. Use the top-level field with the same name instead.

    Deprecated: This field is deprecated and will be removed in a future major release; please use the top level field with the same name instead.

    type string
    The supported types are DAY, HOUR, MONTH, and YEAR, which will generate one partition per day, hour, month, and year, respectively.
    expirationMs number
    Number of milliseconds for which to keep the storage for a partition.
    field string
    The field used to determine how to create a time-based partition. If time-based partitioning is enabled without this value, the table is partitioned based on the load time.
    requirePartitionFilter boolean
    If set to true, queries over this table must specify a partition filter that can be used for partition elimination. require_partition_filter is deprecated and will be removed in a future major release. Use the top-level field with the same name instead.

    Deprecated: This field is deprecated and will be removed in a future major release; please use the top level field with the same name instead.

    type str
    The supported types are DAY, HOUR, MONTH, and YEAR, which will generate one partition per day, hour, month, and year, respectively.
    expiration_ms int
    Number of milliseconds for which to keep the storage for a partition.
    field str
    The field used to determine how to create a time-based partition. If time-based partitioning is enabled without this value, the table is partitioned based on the load time.
    require_partition_filter bool
    If set to true, queries over this table must specify a partition filter that can be used for partition elimination. require_partition_filter is deprecated and will be removed in a future major release. Use the top-level field with the same name instead.

    Deprecated: This field is deprecated and will be removed in a future major release; please use the top level field with the same name instead.

    type String
    The supported types are DAY, HOUR, MONTH, and YEAR, which will generate one partition per day, hour, month, and year, respectively.
    expirationMs Number
    Number of milliseconds for which to keep the storage for a partition.
    field String
    The field used to determine how to create a time-based partition. If time-based partitioning is enabled without this value, the table is partitioned based on the load time.
    requirePartitionFilter Boolean
    If set to true, queries over this table must specify a partition filter that can be used for partition elimination. require_partition_filter is deprecated and will be removed in a future major release. Use the top-level field with the same name instead.

    Deprecated: This field is deprecated and will be removed in a future major release; please use the top level field with the same name instead.
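
    As a sketch (the event_time column, schema, and dataset are placeholders), column-based monthly partitioning with a roughly 90-day partition expiration and a required partition filter might look like:

    import * as gcp from "@pulumi/gcp";

    // Sketch only: partitions by month on event_time; partitions expire after ~90 days.
    const events = new gcp.bigquery.Table("events", {
        datasetId: "my_dataset",
        tableId: "events",
        deletionProtection: false,
        schema: JSON.stringify([
            { name: "event_time", type: "TIMESTAMP", mode: "REQUIRED" },
            { name: "payload", type: "STRING", mode: "NULLABLE" },
        ]),
        timePartitioning: {
            type: "MONTH",
            field: "event_time",
            expirationMs: 7776000000,   // 90 days in milliseconds
        },
        requirePartitionFilter: true,   // set at the top level; the nested field is deprecated
    });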

    TableView, TableViewArgs

    Query string
    A query that BigQuery executes when the view is referenced.
    UseLegacySql bool
    Specifies whether to use BigQuery's legacy SQL for this view. The default value is true. If set to false, the view will use BigQuery's standard SQL.
    Query string
    A query that BigQuery executes when the view is referenced.
    UseLegacySql bool
    Specifies whether to use BigQuery's legacy SQL for this view. The default value is true. If set to false, the view will use BigQuery's standard SQL.
    query String
    A query that BigQuery executes when the view is referenced.
    useLegacySql Boolean
    Specifies whether to use BigQuery's legacy SQL for this view. The default value is true. If set to false, the view will use BigQuery's standard SQL.
    query string
    A query that BigQuery executes when the view is referenced.
    useLegacySql boolean
    Specifies whether to use BigQuery's legacy SQL for this view. The default value is true. If set to false, the view will use BigQuery's standard SQL.
    query str
    A query that BigQuery executes when the view is referenced.
    use_legacy_sql bool
    Specifies whether to use BigQuery's legacy SQL for this view. The default value is true. If set to false, the view will use BigQuery's standard SQL.
    query String
    A query that BigQuery executes when the view is referenced.
    useLegacySql Boolean
    Specifies whether to use BigQuery's legacy SQL for this view. The default value is true. If set to false, the view will use BigQuery's standard SQL.
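
    For example (the base table my_dataset.events is hypothetical), a standard-SQL view over recent rows might be declared as:

    import * as gcp from "@pulumi/gcp";

    // Sketch only: standard-SQL view restricted to the last 7 days of a hypothetical table.
    const recentEvents = new gcp.bigquery.Table("recent_events", {
        datasetId: "my_dataset",
        tableId: "recent_events",
        deletionProtection: false,
        view: {
            query: "SELECT * FROM my_dataset.events WHERE event_time > TIMESTAMP_SUB(CURRENT_TIMESTAMP(), INTERVAL 7 DAY)",
            useLegacySql: false,   // the default is true; standard SQL requires false
        },
    });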

    Import

    BigQuery tables can be imported using any of these accepted formats:

    • projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}}

    • {{project}}/{{dataset_id}}/{{table_id}}

    • {{dataset_id}}/{{table_id}}

    When using the pulumi import command, BigQuery tables can be imported using one of the formats above. For example:

    $ pulumi import gcp:bigquery/table:Table default projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}}
    
    $ pulumi import gcp:bigquery/table:Table default {{project}}/{{dataset_id}}/{{table_id}}
    
    $ pulumi import gcp:bigquery/table:Table default {{dataset_id}}/{{table_id}}
    

    To learn more about importing existing cloud resources, see Importing resources.

    Package Details

    Repository
    Google Cloud (GCP) Classic pulumi/pulumi-gcp
    License
    Apache-2.0
    Notes
    This Pulumi package is based on the google-beta Terraform Provider.