1. Packages
  2. Google Cloud (GCP) Classic
  3. API Docs
  4. bigquery
  5. Table
Google Cloud Classic v7.16.0 published on Wednesday, Mar 27, 2024 by Pulumi

gcp.bigquery.Table

Explore with Pulumi AI

gcp logo
Google Cloud Classic v7.16.0 published on Wednesday, Mar 27, 2024 by Pulumi

    Creates a table resource in a dataset for Google BigQuery. For more information see the official documentation and API.

    Note: On newer versions of the provider, you must explicitly set deletion_protection=false (and run pulumi update to write the field to state) in order to destroy an instance. It is recommended to not set this field (or set it to true) until you’re ready to destroy.

    Example Usage

    // Example: create a BigQuery dataset, a native day-partitioned table with an
    // explicit JSON schema, and an external table backed by a Google Sheet.
    import * as pulumi from "@pulumi/pulumi";
    import * as gcp from "@pulumi/gcp";
    
    // Dataset that holds both tables. Named `_default` because `default` is a
    // reserved word in JavaScript/TypeScript.
    const _default = new gcp.bigquery.Dataset("default", {
        datasetId: "foo",
        friendlyName: "test",
        description: "This is a test description",
        location: "EU",
        // Default table expiration: 3,600,000 ms (1 hour).
        defaultTableExpirationMs: 3600000,
        labels: {
            env: "default",
        },
    });
    // Native table, partitioned by day, with a fixed two-column schema
    // supplied as a JSON string.
    const defaultTable = new gcp.bigquery.Table("default", {
        datasetId: _default.datasetId,
        tableId: "bar",
        timePartitioning: {
            type: "DAY",
        },
        labels: {
            env: "default",
        },
        schema: `[
      {
        "name": "permalink",
        "type": "STRING",
        "mode": "NULLABLE",
        "description": "The Permalink"
      },
      {
        "name": "state",
        "type": "STRING",
        "mode": "NULLABLE",
        "description": "State where the head office is located"
      }
    ]
    `,
    });
    // External table: queries read directly from a Google Sheet, skipping the
    // first (header) row and autodetecting the schema.
    const sheet = new gcp.bigquery.Table("sheet", {
        datasetId: _default.datasetId,
        tableId: "sheet",
        externalDataConfiguration: {
            autodetect: true,
            sourceFormat: "GOOGLE_SHEETS",
            googleSheetsOptions: {
                skipLeadingRows: 1,
            },
            sourceUris: ["https://docs.google.com/spreadsheets/d/123456789012345"],
        },
    });
    
    # Example: create a BigQuery dataset, a native day-partitioned table with an
    # explicit JSON schema, and an external table backed by a Google Sheet.
    import pulumi
    import pulumi_gcp as gcp
    
    # Dataset that holds both tables.
    # default_table_expiration_ms=3600000 means tables default to a 1-hour expiry.
    default = gcp.bigquery.Dataset("default",
        dataset_id="foo",
        friendly_name="test",
        description="This is a test description",
        location="EU",
        default_table_expiration_ms=3600000,
        labels={
            "env": "default",
        })
    # Native table, partitioned by day, with a fixed two-column schema supplied
    # as a JSON string.
    default_table = gcp.bigquery.Table("default",
        dataset_id=default.dataset_id,
        table_id="bar",
        time_partitioning=gcp.bigquery.TableTimePartitioningArgs(
            type="DAY",
        ),
        labels={
            "env": "default",
        },
        schema="""[
      {
        "name": "permalink",
        "type": "STRING",
        "mode": "NULLABLE",
        "description": "The Permalink"
      },
      {
        "name": "state",
        "type": "STRING",
        "mode": "NULLABLE",
        "description": "State where the head office is located"
      }
    ]
    """)
    # External table: queries read directly from a Google Sheet, skipping the
    # first (header) row and autodetecting the schema.
    sheet = gcp.bigquery.Table("sheet",
        dataset_id=default.dataset_id,
        table_id="sheet",
        external_data_configuration=gcp.bigquery.TableExternalDataConfigurationArgs(
            autodetect=True,
            source_format="GOOGLE_SHEETS",
            google_sheets_options=gcp.bigquery.TableExternalDataConfigurationGoogleSheetsOptionsArgs(
                skip_leading_rows=1,
            ),
            source_uris=["https://docs.google.com/spreadsheets/d/123456789012345"],
        ))
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/bigquery"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := bigquery.NewDataset(ctx, "default", &bigquery.DatasetArgs{
    			DatasetId:                pulumi.String("foo"),
    			FriendlyName:             pulumi.String("test"),
    			Description:              pulumi.String("This is a test description"),
    			Location:                 pulumi.String("EU"),
    			DefaultTableExpirationMs: pulumi.Int(3600000),
    			Labels: pulumi.StringMap{
    				"env": pulumi.String("default"),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		_, err = bigquery.NewTable(ctx, "default", &bigquery.TableArgs{
    			DatasetId: _default.DatasetId,
    			TableId:   pulumi.String("bar"),
    			TimePartitioning: &bigquery.TableTimePartitioningArgs{
    				Type: pulumi.String("DAY"),
    			},
    			Labels: pulumi.StringMap{
    				"env": pulumi.String("default"),
    			},
    			Schema: pulumi.String(`[
      {
        "name": "permalink",
        "type": "STRING",
        "mode": "NULLABLE",
        "description": "The Permalink"
      },
      {
        "name": "state",
        "type": "STRING",
        "mode": "NULLABLE",
        "description": "State where the head office is located"
      }
    ]
    `),
    		})
    		if err != nil {
    			return err
    		}
    		_, err = bigquery.NewTable(ctx, "sheet", &bigquery.TableArgs{
    			DatasetId: _default.DatasetId,
    			TableId:   pulumi.String("sheet"),
    			ExternalDataConfiguration: &bigquery.TableExternalDataConfigurationArgs{
    				Autodetect:   pulumi.Bool(true),
    				SourceFormat: pulumi.String("GOOGLE_SHEETS"),
    				GoogleSheetsOptions: &bigquery.TableExternalDataConfigurationGoogleSheetsOptionsArgs{
    					SkipLeadingRows: pulumi.Int(1),
    				},
    				SourceUris: pulumi.StringArray{
    					pulumi.String("https://docs.google.com/spreadsheets/d/123456789012345"),
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    // Example: create a BigQuery dataset, a native day-partitioned table with an
    // explicit JSON schema, and an external table backed by a Google Sheet.
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Gcp = Pulumi.Gcp;
    
    return await Deployment.RunAsync(() => 
    {
        // Dataset that holds both tables. @default escapes the C# keyword.
        var @default = new Gcp.BigQuery.Dataset("default", new()
        {
            DatasetId = "foo",
            FriendlyName = "test",
            Description = "This is a test description",
            Location = "EU",
            // Default table expiration: 3,600,000 ms (1 hour).
            DefaultTableExpirationMs = 3600000,
            Labels = 
            {
                { "env", "default" },
            },
        });
    
        // Native table, partitioned by day, with a fixed two-column schema
        // supplied as a JSON string.
        var defaultTable = new Gcp.BigQuery.Table("default", new()
        {
            DatasetId = @default.DatasetId,
            TableId = "bar",
            TimePartitioning = new Gcp.BigQuery.Inputs.TableTimePartitioningArgs
            {
                Type = "DAY",
            },
            Labels = 
            {
                { "env", "default" },
            },
            Schema = @"[
      {
        ""name"": ""permalink"",
        ""type"": ""STRING"",
        ""mode"": ""NULLABLE"",
        ""description"": ""The Permalink""
      },
      {
        ""name"": ""state"",
        ""type"": ""STRING"",
        ""mode"": ""NULLABLE"",
        ""description"": ""State where the head office is located""
      }
    ]
    ",
        });
    
        // External table: queries read directly from a Google Sheet, skipping
        // the first (header) row and autodetecting the schema.
        var sheet = new Gcp.BigQuery.Table("sheet", new()
        {
            DatasetId = @default.DatasetId,
            TableId = "sheet",
            ExternalDataConfiguration = new Gcp.BigQuery.Inputs.TableExternalDataConfigurationArgs
            {
                Autodetect = true,
                SourceFormat = "GOOGLE_SHEETS",
                GoogleSheetsOptions = new Gcp.BigQuery.Inputs.TableExternalDataConfigurationGoogleSheetsOptionsArgs
                {
                    SkipLeadingRows = 1,
                },
                SourceUris = new[]
                {
                    "https://docs.google.com/spreadsheets/d/123456789012345",
                },
            },
        });
    
    });
    
    // Example: create a BigQuery dataset, a native day-partitioned table with an
    // explicit JSON schema, and an external table backed by a Google Sheet.
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.gcp.bigquery.Dataset;
    import com.pulumi.gcp.bigquery.DatasetArgs;
    import com.pulumi.gcp.bigquery.Table;
    import com.pulumi.gcp.bigquery.TableArgs;
    import com.pulumi.gcp.bigquery.inputs.TableTimePartitioningArgs;
    import com.pulumi.gcp.bigquery.inputs.TableExternalDataConfigurationArgs;
    import com.pulumi.gcp.bigquery.inputs.TableExternalDataConfigurationGoogleSheetsOptionsArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            // Dataset that holds both tables. Named default_ because "default"
            // is a reserved word in Java.
            var default_ = new Dataset("default", DatasetArgs.builder()        
                .datasetId("foo")
                .friendlyName("test")
                .description("This is a test description")
                .location("EU")
                // Default table expiration: 3,600,000 ms (1 hour).
                .defaultTableExpirationMs(3600000)
                .labels(Map.of("env", "default"))
                .build());
    
            // Native table, partitioned by day, with a fixed two-column schema
            // supplied as a JSON text block.
            var defaultTable = new Table("defaultTable", TableArgs.builder()        
                .datasetId(default_.datasetId())
                .tableId("bar")
                .timePartitioning(TableTimePartitioningArgs.builder()
                    .type("DAY")
                    .build())
                .labels(Map.of("env", "default"))
                .schema("""
    [
      {
        "name": "permalink",
        "type": "STRING",
        "mode": "NULLABLE",
        "description": "The Permalink"
      },
      {
        "name": "state",
        "type": "STRING",
        "mode": "NULLABLE",
        "description": "State where the head office is located"
      }
    ]
                """)
                .build());
    
            // External table: queries read directly from a Google Sheet,
            // skipping the first (header) row and autodetecting the schema.
            var sheet = new Table("sheet", TableArgs.builder()        
                .datasetId(default_.datasetId())
                .tableId("sheet")
                .externalDataConfiguration(TableExternalDataConfigurationArgs.builder()
                    .autodetect(true)
                    .sourceFormat("GOOGLE_SHEETS")
                    .googleSheetsOptions(TableExternalDataConfigurationGoogleSheetsOptionsArgs.builder()
                        .skipLeadingRows(1)
                        .build())
                    .sourceUris("https://docs.google.com/spreadsheets/d/123456789012345")
                    .build())
                .build());
    
        }
    }
    
    # Example: a dataset, a native day-partitioned table with an explicit JSON
    # schema, and an external table backed by a Google Sheet.
    resources:
      # Dataset that holds both tables.
      default:
        type: gcp:bigquery:Dataset
        properties:
          datasetId: foo
          friendlyName: test
          description: This is a test description
          location: EU
          # 3,600,000 ms (1 hour), rendered here in scientific notation.
          defaultTableExpirationMs: 3.6e+06
          labels:
            env: default
      # Native table, partitioned by day, with a fixed two-column schema
      # supplied as a JSON string.
      defaultTable:
        type: gcp:bigquery:Table
        name: default
        properties:
          datasetId: ${default.datasetId}
          tableId: bar
          timePartitioning:
            type: DAY
          labels:
            env: default
          schema: |
            [
              {
                "name": "permalink",
                "type": "STRING",
                "mode": "NULLABLE",
                "description": "The Permalink"
              },
              {
                "name": "state",
                "type": "STRING",
                "mode": "NULLABLE",
                "description": "State where the head office is located"
              }
            ]        
      # External table: queries read directly from a Google Sheet, skipping
      # the first (header) row and autodetecting the schema.
      sheet:
        type: gcp:bigquery:Table
        properties:
          datasetId: ${default.datasetId}
          tableId: sheet
          externalDataConfiguration:
            autodetect: true
            sourceFormat: GOOGLE_SHEETS
            googleSheetsOptions:
              skipLeadingRows: 1
            sourceUris:
              - https://docs.google.com/spreadsheets/d/123456789012345
    

    Create Table Resource

    new Table(name: string, args: TableArgs, opts?: CustomResourceOptions);
    @overload
    def Table(resource_name: str,
              opts: Optional[ResourceOptions] = None,
              clusterings: Optional[Sequence[str]] = None,
              dataset_id: Optional[str] = None,
              deletion_protection: Optional[bool] = None,
              description: Optional[str] = None,
              encryption_configuration: Optional[TableEncryptionConfigurationArgs] = None,
              expiration_time: Optional[int] = None,
              external_data_configuration: Optional[TableExternalDataConfigurationArgs] = None,
              friendly_name: Optional[str] = None,
              labels: Optional[Mapping[str, str]] = None,
              materialized_view: Optional[TableMaterializedViewArgs] = None,
              max_staleness: Optional[str] = None,
              project: Optional[str] = None,
              range_partitioning: Optional[TableRangePartitioningArgs] = None,
              require_partition_filter: Optional[bool] = None,
              schema: Optional[str] = None,
              table_constraints: Optional[TableTableConstraintsArgs] = None,
              table_id: Optional[str] = None,
              table_replication_info: Optional[TableTableReplicationInfoArgs] = None,
              time_partitioning: Optional[TableTimePartitioningArgs] = None,
              view: Optional[TableViewArgs] = None)
    @overload
    def Table(resource_name: str,
              args: TableArgs,
              opts: Optional[ResourceOptions] = None)
    func NewTable(ctx *Context, name string, args TableArgs, opts ...ResourceOption) (*Table, error)
    public Table(string name, TableArgs args, CustomResourceOptions? opts = null)
    public Table(String name, TableArgs args)
    public Table(String name, TableArgs args, CustomResourceOptions options)
    
    type: gcp:bigquery:Table
    properties: # The arguments to resource properties.
    options: # Bag of options to control resource's behavior.
    
    
    name string
    The unique name of the resource.
    args TableArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    resource_name str
    The unique name of the resource.
    args TableArgs
    The arguments to resource properties.
    opts ResourceOptions
    Bag of options to control resource's behavior.
    ctx Context
    Context object for the current deployment.
    name string
    The unique name of the resource.
    args TableArgs
    The arguments to resource properties.
    opts ResourceOption
    Bag of options to control resource's behavior.
    name string
    The unique name of the resource.
    args TableArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    name String
    The unique name of the resource.
    args TableArgs
    The arguments to resource properties.
    options CustomResourceOptions
    Bag of options to control resource's behavior.

    Table Resource Properties

    To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

    Inputs

    The Table resource accepts the following input properties:

    DatasetId string
    The dataset ID to create the table in. Changing this forces a new resource to be created.
    TableId string
    A unique ID for the resource. Changing this forces a new resource to be created.
    Clusterings List<string>
    Specifies column names to use for data clustering. Up to four top-level columns are allowed, and should be specified in descending priority order.
    DeletionProtection bool
    Whether or not to allow the provider to destroy the instance. Unless this field is set to false in state, a `pulumi destroy` or `pulumi up` that would delete the instance will fail.
    Description string
    The field description.
    EncryptionConfiguration TableEncryptionConfiguration
    Specifies how the table should be encrypted. If left blank, the table will be encrypted with a Google-managed key; that process is transparent to the user. Structure is documented below.
    ExpirationTime int
    The time when this table expires, in milliseconds since the epoch. If not present, the table will persist indefinitely. Expired tables will be deleted and their storage reclaimed.
    ExternalDataConfiguration TableExternalDataConfiguration
    Describes the data format, location, and other properties of a table stored outside of BigQuery. By defining these properties, the data source can then be queried as if it were a standard BigQuery table. Structure is documented below.
    FriendlyName string
    A descriptive name for the table.
    Labels Dictionary<string, string>

    A mapping of labels to assign to the resource.

    Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.

    MaterializedView TableMaterializedView
    If specified, configures this table as a materialized view. Structure is documented below.
    MaxStaleness string
    The maximum staleness of data that could be returned when the table (or stale MV) is queried. Staleness encoded as a string encoding of SQL IntervalValue type.
    Project string
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    RangePartitioning TableRangePartitioning
    If specified, configures range-based partitioning for this table. Structure is documented below.
    RequirePartitionFilter bool
    If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified.
    Schema string

    A JSON schema for the external table. Schema is required for CSV and JSON formats if autodetect is not on. Schema is disallowed for Google Cloud Bigtable, Cloud Datastore backups, Avro, Iceberg, ORC and Parquet formats. ~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. Furthermore drift for this field cannot be detected because BigQuery only uses this schema to compute the effective schema for the table, therefore any changes on the configured value will force the table to be recreated. This schema is effectively only applied when creating a table from an external datasource, after creation the computed schema will be stored in google_bigquery_table.schema

    ~>NOTE: If you set external_data_configuration.connection_id, the table schema must be specified using the top-level schema field documented above.

    TableConstraints TableTableConstraints
    Defines the primary key and foreign keys. Structure is documented below.
    TableReplicationInfo TableTableReplicationInfo
    Replication info of a table created using "AS REPLICA" DDL like: "CREATE MATERIALIZED VIEW mv1 AS REPLICA OF src_mv".
    TimePartitioning TableTimePartitioning
    If specified, configures time-based partitioning for this table. Structure is documented below.
    View TableView
    If specified, configures this table as a view. Structure is documented below.
    DatasetId string
    The dataset ID to create the table in. Changing this forces a new resource to be created.
    TableId string
    A unique ID for the resource. Changing this forces a new resource to be created.
    Clusterings []string
    Specifies column names to use for data clustering. Up to four top-level columns are allowed, and should be specified in descending priority order.
    DeletionProtection bool
    Whether or not to allow the provider to destroy the instance. Unless this field is set to false in state, a `pulumi destroy` or `pulumi up` that would delete the instance will fail.
    Description string
    The field description.
    EncryptionConfiguration TableEncryptionConfigurationArgs
    Specifies how the table should be encrypted. If left blank, the table will be encrypted with a Google-managed key; that process is transparent to the user. Structure is documented below.
    ExpirationTime int
    The time when this table expires, in milliseconds since the epoch. If not present, the table will persist indefinitely. Expired tables will be deleted and their storage reclaimed.
    ExternalDataConfiguration TableExternalDataConfigurationArgs
    Describes the data format, location, and other properties of a table stored outside of BigQuery. By defining these properties, the data source can then be queried as if it were a standard BigQuery table. Structure is documented below.
    FriendlyName string
    A descriptive name for the table.
    Labels map[string]string

    A mapping of labels to assign to the resource.

    Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.

    MaterializedView TableMaterializedViewArgs
    If specified, configures this table as a materialized view. Structure is documented below.
    MaxStaleness string
    The maximum staleness of data that could be returned when the table (or stale MV) is queried. Staleness encoded as a string encoding of SQL IntervalValue type.
    Project string
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    RangePartitioning TableRangePartitioningArgs
    If specified, configures range-based partitioning for this table. Structure is documented below.
    RequirePartitionFilter bool
    If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified.
    Schema string

    A JSON schema for the external table. Schema is required for CSV and JSON formats if autodetect is not on. Schema is disallowed for Google Cloud Bigtable, Cloud Datastore backups, Avro, Iceberg, ORC and Parquet formats. ~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. Furthermore drift for this field cannot be detected because BigQuery only uses this schema to compute the effective schema for the table, therefore any changes on the configured value will force the table to be recreated. This schema is effectively only applied when creating a table from an external datasource, after creation the computed schema will be stored in google_bigquery_table.schema

    ~>NOTE: If you set external_data_configuration.connection_id, the table schema must be specified using the top-level schema field documented above.

    TableConstraints TableTableConstraintsArgs
    Defines the primary key and foreign keys. Structure is documented below.
    TableReplicationInfo TableTableReplicationInfoArgs
    Replication info of a table created using "AS REPLICA" DDL like: "CREATE MATERIALIZED VIEW mv1 AS REPLICA OF src_mv".
    TimePartitioning TableTimePartitioningArgs
    If specified, configures time-based partitioning for this table. Structure is documented below.
    View TableViewArgs
    If specified, configures this table as a view. Structure is documented below.
    datasetId String
    The dataset ID to create the table in. Changing this forces a new resource to be created.
    tableId String
    A unique ID for the resource. Changing this forces a new resource to be created.
    clusterings List<String>
    Specifies column names to use for data clustering. Up to four top-level columns are allowed, and should be specified in descending priority order.
    deletionProtection Boolean
    Whether or not to allow the provider to destroy the instance. Unless this field is set to false in state, a `pulumi destroy` or `pulumi up` that would delete the instance will fail.
    description String
    The field description.
    encryptionConfiguration TableEncryptionConfiguration
    Specifies how the table should be encrypted. If left blank, the table will be encrypted with a Google-managed key; that process is transparent to the user. Structure is documented below.
    expirationTime Integer
    The time when this table expires, in milliseconds since the epoch. If not present, the table will persist indefinitely. Expired tables will be deleted and their storage reclaimed.
    externalDataConfiguration TableExternalDataConfiguration
    Describes the data format, location, and other properties of a table stored outside of BigQuery. By defining these properties, the data source can then be queried as if it were a standard BigQuery table. Structure is documented below.
    friendlyName String
    A descriptive name for the table.
    labels Map<String,String>

    A mapping of labels to assign to the resource.

    Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.

    materializedView TableMaterializedView
    If specified, configures this table as a materialized view. Structure is documented below.
    maxStaleness String
    The maximum staleness of data that could be returned when the table (or stale MV) is queried. Staleness encoded as a string encoding of SQL IntervalValue type.
    project String
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    rangePartitioning TableRangePartitioning
    If specified, configures range-based partitioning for this table. Structure is documented below.
    requirePartitionFilter Boolean
    If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified.
    schema String

    A JSON schema for the external table. Schema is required for CSV and JSON formats if autodetect is not on. Schema is disallowed for Google Cloud Bigtable, Cloud Datastore backups, Avro, Iceberg, ORC and Parquet formats. ~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. Furthermore drift for this field cannot be detected because BigQuery only uses this schema to compute the effective schema for the table, therefore any changes on the configured value will force the table to be recreated. This schema is effectively only applied when creating a table from an external datasource, after creation the computed schema will be stored in google_bigquery_table.schema

    ~>NOTE: If you set external_data_configuration.connection_id, the table schema must be specified using the top-level schema field documented above.

    tableConstraints TableTableConstraints
    Defines the primary key and foreign keys. Structure is documented below.
    tableReplicationInfo TableTableReplicationInfo
    Replication info of a table created using "AS REPLICA" DDL like: "CREATE MATERIALIZED VIEW mv1 AS REPLICA OF src_mv".
    timePartitioning TableTimePartitioning
    If specified, configures time-based partitioning for this table. Structure is documented below.
    view TableView
    If specified, configures this table as a view. Structure is documented below.
    datasetId string
    The dataset ID to create the table in. Changing this forces a new resource to be created.
    tableId string
    A unique ID for the resource. Changing this forces a new resource to be created.
    clusterings string[]
    Specifies column names to use for data clustering. Up to four top-level columns are allowed, and should be specified in descending priority order.
    deletionProtection boolean
    Whether or not to allow the provider to destroy the instance. Unless this field is set to false in state, a `pulumi destroy` or `pulumi up` that would delete the instance will fail.
    description string
    The field description.
    encryptionConfiguration TableEncryptionConfiguration
    Specifies how the table should be encrypted. If left blank, the table will be encrypted with a Google-managed key; that process is transparent to the user. Structure is documented below.
    expirationTime number
    The time when this table expires, in milliseconds since the epoch. If not present, the table will persist indefinitely. Expired tables will be deleted and their storage reclaimed.
    externalDataConfiguration TableExternalDataConfiguration
    Describes the data format, location, and other properties of a table stored outside of BigQuery. By defining these properties, the data source can then be queried as if it were a standard BigQuery table. Structure is documented below.
    friendlyName string
    A descriptive name for the table.
    labels {[key: string]: string}

    A mapping of labels to assign to the resource.

    Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.

    materializedView TableMaterializedView
    If specified, configures this table as a materialized view. Structure is documented below.
    maxStaleness string
    The maximum staleness of data that could be returned when the table (or stale MV) is queried. Staleness encoded as a string encoding of SQL IntervalValue type.
    project string
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    rangePartitioning TableRangePartitioning
    If specified, configures range-based partitioning for this table. Structure is documented below.
    requirePartitionFilter boolean
    If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified.
    schema string

    A JSON schema for the external table. Schema is required for CSV and JSON formats if autodetect is not on. Schema is disallowed for Google Cloud Bigtable, Cloud Datastore backups, Avro, Iceberg, ORC and Parquet formats. ~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. Furthermore drift for this field cannot be detected because BigQuery only uses this schema to compute the effective schema for the table, therefore any changes on the configured value will force the table to be recreated. This schema is effectively only applied when creating a table from an external datasource, after creation the computed schema will be stored in google_bigquery_table.schema

    ~>NOTE: If you set external_data_configuration.connection_id, the table schema must be specified using the top-level schema field documented above.

    tableConstraints TableTableConstraints
    Defines the primary key and foreign keys. Structure is documented below.
    tableReplicationInfo TableTableReplicationInfo
    Replication info of a table created using "AS REPLICA" DDL like: "CREATE MATERIALIZED VIEW mv1 AS REPLICA OF src_mv".
    timePartitioning TableTimePartitioning
    If specified, configures time-based partitioning for this table. Structure is documented below.
    view TableView
    If specified, configures this table as a view. Structure is documented below.
    dataset_id str
    The dataset ID to create the table in. Changing this forces a new resource to be created.
    table_id str
    A unique ID for the resource. Changing this forces a new resource to be created.
    clusterings Sequence[str]
    Specifies column names to use for data clustering. Up to four top-level columns are allowed, and should be specified in descending priority order.
    deletion_protection bool
    Whether or not to allow the provider to destroy the instance. Unless this field is set to false in state, a `pulumi destroy` or `pulumi up` that would delete the instance will fail.
    description str
    The field description.
    encryption_configuration TableEncryptionConfigurationArgs
    Specifies how the table should be encrypted. If left blank, the table will be encrypted with a Google-managed key; that process is transparent to the user. Structure is documented below.
    expiration_time int
    The time when this table expires, in milliseconds since the epoch. If not present, the table will persist indefinitely. Expired tables will be deleted and their storage reclaimed.
    external_data_configuration TableExternalDataConfigurationArgs
    Describes the data format, location, and other properties of a table stored outside of BigQuery. By defining these properties, the data source can then be queried as if it were a standard BigQuery table. Structure is documented below.
    friendly_name str
    A descriptive name for the table.
    labels Mapping[str, str]

    A mapping of labels to assign to the resource.

    Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.

    materialized_view TableMaterializedViewArgs
    If specified, configures this table as a materialized view. Structure is documented below.
    max_staleness str
    The maximum staleness of data that could be returned when the table (or stale MV) is queried. Staleness encoded as a string encoding of SQL IntervalValue type.
    project str
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    range_partitioning TableRangePartitioningArgs
    If specified, configures range-based partitioning for this table. Structure is documented below.
    require_partition_filter bool
    If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified.
    schema str

    A JSON schema for the external table. Schema is required for CSV and JSON formats if autodetect is not on. Schema is disallowed for Google Cloud Bigtable, Cloud Datastore backups, Avro, Iceberg, ORC and Parquet formats. Note: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. Furthermore, drift for this field cannot be detected because BigQuery only uses this schema to compute the effective schema for the table; therefore any changes on the configured value will force the table to be recreated. This schema is effectively only applied when creating a table from an external datasource; after creation the computed schema will be stored in google_bigquery_table.schema

    Note: If you set external_data_configuration.connection_id, the table schema must be specified using the top-level schema field documented above.

    table_constraints TableTableConstraintsArgs
    Defines the primary key and foreign keys. Structure is documented below.
    table_replication_info TableTableReplicationInfoArgs
    Replication info of a table created using "AS REPLICA" DDL like: "CREATE MATERIALIZED VIEW mv1 AS REPLICA OF src_mv".
    time_partitioning TableTimePartitioningArgs
    If specified, configures time-based partitioning for this table. Structure is documented below.
    view TableViewArgs
    If specified, configures this table as a view. Structure is documented below.
    datasetId String
    The dataset ID to create the table in. Changing this forces a new resource to be created.
    tableId String
    A unique ID for the resource. Changing this forces a new resource to be created.
    clusterings List<String>
    Specifies column names to use for data clustering. Up to four top-level columns are allowed, and should be specified in descending priority order.
    deletionProtection Boolean
    Whether or not to allow the provider to destroy the instance. Unless this field is set to false in state, a `pulumi destroy` or `pulumi up` that would delete the instance will fail.
    description String
    The field description.
    encryptionConfiguration Property Map
    Specifies how the table should be encrypted. If left blank, the table will be encrypted with a Google-managed key; that process is transparent to the user. Structure is documented below.
    expirationTime Number
    The time when this table expires, in milliseconds since the epoch. If not present, the table will persist indefinitely. Expired tables will be deleted and their storage reclaimed.
    externalDataConfiguration Property Map
    Describes the data format, location, and other properties of a table stored outside of BigQuery. By defining these properties, the data source can then be queried as if it were a standard BigQuery table. Structure is documented below.
    friendlyName String
    A descriptive name for the table.
    labels Map<String>

    A mapping of labels to assign to the resource.

    Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.

    materializedView Property Map
    If specified, configures this table as a materialized view. Structure is documented below.
    maxStaleness String
    The maximum staleness of data that could be returned when the table (or stale MV) is queried. Staleness encoded as a string encoding of SQL IntervalValue type.
    project String
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    rangePartitioning Property Map
    If specified, configures range-based partitioning for this table. Structure is documented below.
    requirePartitionFilter Boolean
    If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified.
    schema String

    A JSON schema for the external table. Schema is required for CSV and JSON formats if autodetect is not on. Schema is disallowed for Google Cloud Bigtable, Cloud Datastore backups, Avro, Iceberg, ORC and Parquet formats. Note: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. Furthermore, drift for this field cannot be detected because BigQuery only uses this schema to compute the effective schema for the table; therefore any changes on the configured value will force the table to be recreated. This schema is effectively only applied when creating a table from an external datasource; after creation the computed schema will be stored in google_bigquery_table.schema

    Note: If you set external_data_configuration.connection_id, the table schema must be specified using the top-level schema field documented above.

    tableConstraints Property Map
    Defines the primary key and foreign keys. Structure is documented below.
    tableReplicationInfo Property Map
    Replication info of a table created using "AS REPLICA" DDL like: "CREATE MATERIALIZED VIEW mv1 AS REPLICA OF src_mv".
    timePartitioning Property Map
    If specified, configures time-based partitioning for this table. Structure is documented below.
    view Property Map
    If specified, configures this table as a view. Structure is documented below.

    Outputs

    All input properties are implicitly available as output properties. Additionally, the Table resource produces the following output properties:

    CreationTime int
    The time when this table was created, in milliseconds since the epoch.
    EffectiveLabels Dictionary<string, string>

    All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.

    • schema - (Optional) A JSON schema for the table.

    ~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. If the API returns a different value for the same schema, e.g. it switched the order of values or replaced STRUCT field type with RECORD field type, we currently cannot suppress the recurring diff this causes. As a workaround, we recommend using the schema as returned by the API.

    ~>NOTE: If you use external_data_configuration documented below and do not set external_data_configuration.connection_id, schemas must be specified with external_data_configuration.schema. Otherwise, schemas must be specified with this top-level field.

    Etag string
    A hash of the resource.
    Id string
    The provider-assigned unique ID for this managed resource.
    LastModifiedTime int
    The time when this table was last modified, in milliseconds since the epoch.
    Location string
    The geographic location where the table resides. This value is inherited from the dataset.
    NumBytes int
    The size of this table in bytes, excluding any data in the streaming buffer.
    NumLongTermBytes int
    The number of bytes in the table that are considered "long-term storage".
    NumRows int
    The number of rows of data in this table, excluding any data in the streaming buffer.
    PulumiLabels Dictionary<string, string>
    The combination of labels configured directly on the resource and default labels configured on the provider.
    SelfLink string
    The URI of the created resource.
    Type string
    The supported types are DAY, HOUR, MONTH, and YEAR, which will generate one partition per day, hour, month, and year, respectively.
    CreationTime int
    The time when this table was created, in milliseconds since the epoch.
    EffectiveLabels map[string]string

    All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.

    • schema - (Optional) A JSON schema for the table.

    ~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. If the API returns a different value for the same schema, e.g. it switched the order of values or replaced STRUCT field type with RECORD field type, we currently cannot suppress the recurring diff this causes. As a workaround, we recommend using the schema as returned by the API.

    ~>NOTE: If you use external_data_configuration documented below and do not set external_data_configuration.connection_id, schemas must be specified with external_data_configuration.schema. Otherwise, schemas must be specified with this top-level field.

    Etag string
    A hash of the resource.
    Id string
    The provider-assigned unique ID for this managed resource.
    LastModifiedTime int
    The time when this table was last modified, in milliseconds since the epoch.
    Location string
    The geographic location where the table resides. This value is inherited from the dataset.
    NumBytes int
    The size of this table in bytes, excluding any data in the streaming buffer.
    NumLongTermBytes int
    The number of bytes in the table that are considered "long-term storage".
    NumRows int
    The number of rows of data in this table, excluding any data in the streaming buffer.
    PulumiLabels map[string]string
    The combination of labels configured directly on the resource and default labels configured on the provider.
    SelfLink string
    The URI of the created resource.
    Type string
    The supported types are DAY, HOUR, MONTH, and YEAR, which will generate one partition per day, hour, month, and year, respectively.
    creationTime Integer
    The time when this table was created, in milliseconds since the epoch.
    effectiveLabels Map<String,String>

    All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.

    • schema - (Optional) A JSON schema for the table.

    ~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. If the API returns a different value for the same schema, e.g. it switched the order of values or replaced STRUCT field type with RECORD field type, we currently cannot suppress the recurring diff this causes. As a workaround, we recommend using the schema as returned by the API.

    ~>NOTE: If you use external_data_configuration documented below and do not set external_data_configuration.connection_id, schemas must be specified with external_data_configuration.schema. Otherwise, schemas must be specified with this top-level field.

    etag String
    A hash of the resource.
    id String
    The provider-assigned unique ID for this managed resource.
    lastModifiedTime Integer
    The time when this table was last modified, in milliseconds since the epoch.
    location String
    The geographic location where the table resides. This value is inherited from the dataset.
    numBytes Integer
    The size of this table in bytes, excluding any data in the streaming buffer.
    numLongTermBytes Integer
    The number of bytes in the table that are considered "long-term storage".
    numRows Integer
    The number of rows of data in this table, excluding any data in the streaming buffer.
    pulumiLabels Map<String,String>
    The combination of labels configured directly on the resource and default labels configured on the provider.
    selfLink String
    The URI of the created resource.
    type String
    The supported types are DAY, HOUR, MONTH, and YEAR, which will generate one partition per day, hour, month, and year, respectively.
    creationTime number
    The time when this table was created, in milliseconds since the epoch.
    effectiveLabels {[key: string]: string}

    All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.

    • schema - (Optional) A JSON schema for the table.

    ~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. If the API returns a different value for the same schema, e.g. it switched the order of values or replaced STRUCT field type with RECORD field type, we currently cannot suppress the recurring diff this causes. As a workaround, we recommend using the schema as returned by the API.

    ~>NOTE: If you use external_data_configuration documented below and do not set external_data_configuration.connection_id, schemas must be specified with external_data_configuration.schema. Otherwise, schemas must be specified with this top-level field.

    etag string
    A hash of the resource.
    id string
    The provider-assigned unique ID for this managed resource.
    lastModifiedTime number
    The time when this table was last modified, in milliseconds since the epoch.
    location string
    The geographic location where the table resides. This value is inherited from the dataset.
    numBytes number
    The size of this table in bytes, excluding any data in the streaming buffer.
    numLongTermBytes number
    The number of bytes in the table that are considered "long-term storage".
    numRows number
    The number of rows of data in this table, excluding any data in the streaming buffer.
    pulumiLabels {[key: string]: string}
    The combination of labels configured directly on the resource and default labels configured on the provider.
    selfLink string
    The URI of the created resource.
    type string
    The supported types are DAY, HOUR, MONTH, and YEAR, which will generate one partition per day, hour, month, and year, respectively.
    creation_time int
    The time when this table was created, in milliseconds since the epoch.
    effective_labels Mapping[str, str]

    All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.

    • schema - (Optional) A JSON schema for the table.

    ~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. If the API returns a different value for the same schema, e.g. it switched the order of values or replaced STRUCT field type with RECORD field type, we currently cannot suppress the recurring diff this causes. As a workaround, we recommend using the schema as returned by the API.

    ~>NOTE: If you use external_data_configuration documented below and do not set external_data_configuration.connection_id, schemas must be specified with external_data_configuration.schema. Otherwise, schemas must be specified with this top-level field.

    etag str
    A hash of the resource.
    id str
    The provider-assigned unique ID for this managed resource.
    last_modified_time int
    The time when this table was last modified, in milliseconds since the epoch.
    location str
    The geographic location where the table resides. This value is inherited from the dataset.
    num_bytes int
    The size of this table in bytes, excluding any data in the streaming buffer.
    num_long_term_bytes int
    The number of bytes in the table that are considered "long-term storage".
    num_rows int
    The number of rows of data in this table, excluding any data in the streaming buffer.
    pulumi_labels Mapping[str, str]
    The combination of labels configured directly on the resource and default labels configured on the provider.
    self_link str
    The URI of the created resource.
    type str
    The supported types are DAY, HOUR, MONTH, and YEAR, which will generate one partition per day, hour, month, and year, respectively.
    creationTime Number
    The time when this table was created, in milliseconds since the epoch.
    effectiveLabels Map<String>

    All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.

    • schema - (Optional) A JSON schema for the table.

    ~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. If the API returns a different value for the same schema, e.g. it switched the order of values or replaced STRUCT field type with RECORD field type, we currently cannot suppress the recurring diff this causes. As a workaround, we recommend using the schema as returned by the API.

    ~>NOTE: If you use external_data_configuration documented below and do not set external_data_configuration.connection_id, schemas must be specified with external_data_configuration.schema. Otherwise, schemas must be specified with this top-level field.

    etag String
    A hash of the resource.
    id String
    The provider-assigned unique ID for this managed resource.
    lastModifiedTime Number
    The time when this table was last modified, in milliseconds since the epoch.
    location String
    The geographic location where the table resides. This value is inherited from the dataset.
    numBytes Number
    The size of this table in bytes, excluding any data in the streaming buffer.
    numLongTermBytes Number
    The number of bytes in the table that are considered "long-term storage".
    numRows Number
    The number of rows of data in this table, excluding any data in the streaming buffer.
    pulumiLabels Map<String>
    The combination of labels configured directly on the resource and default labels configured on the provider.
    selfLink String
    The URI of the created resource.
    type String
    The supported types are DAY, HOUR, MONTH, and YEAR, which will generate one partition per day, hour, month, and year, respectively.

    Look up Existing Table Resource

    Get an existing Table resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

    public static get(name: string, id: Input<ID>, state?: TableState, opts?: CustomResourceOptions): Table
    @staticmethod
    def get(resource_name: str,
            id: str,
            opts: Optional[ResourceOptions] = None,
            clusterings: Optional[Sequence[str]] = None,
            creation_time: Optional[int] = None,
            dataset_id: Optional[str] = None,
            deletion_protection: Optional[bool] = None,
            description: Optional[str] = None,
            effective_labels: Optional[Mapping[str, str]] = None,
            encryption_configuration: Optional[TableEncryptionConfigurationArgs] = None,
            etag: Optional[str] = None,
            expiration_time: Optional[int] = None,
            external_data_configuration: Optional[TableExternalDataConfigurationArgs] = None,
            friendly_name: Optional[str] = None,
            labels: Optional[Mapping[str, str]] = None,
            last_modified_time: Optional[int] = None,
            location: Optional[str] = None,
            materialized_view: Optional[TableMaterializedViewArgs] = None,
            max_staleness: Optional[str] = None,
            num_bytes: Optional[int] = None,
            num_long_term_bytes: Optional[int] = None,
            num_rows: Optional[int] = None,
            project: Optional[str] = None,
            pulumi_labels: Optional[Mapping[str, str]] = None,
            range_partitioning: Optional[TableRangePartitioningArgs] = None,
            require_partition_filter: Optional[bool] = None,
            schema: Optional[str] = None,
            self_link: Optional[str] = None,
            table_constraints: Optional[TableTableConstraintsArgs] = None,
            table_id: Optional[str] = None,
            table_replication_info: Optional[TableTableReplicationInfoArgs] = None,
            time_partitioning: Optional[TableTimePartitioningArgs] = None,
            type: Optional[str] = None,
            view: Optional[TableViewArgs] = None) -> Table
    func GetTable(ctx *Context, name string, id IDInput, state *TableState, opts ...ResourceOption) (*Table, error)
    public static Table Get(string name, Input<string> id, TableState? state, CustomResourceOptions? opts = null)
    public static Table get(String name, Output<String> id, TableState state, CustomResourceOptions options)
    Resource lookup is not supported in YAML
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    resource_name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    The following state arguments are supported:
    Clusterings List<string>
    Specifies column names to use for data clustering. Up to four top-level columns are allowed, and should be specified in descending priority order.
    CreationTime int
    The time when this table was created, in milliseconds since the epoch.
    DatasetId string
    The dataset ID to create the table in. Changing this forces a new resource to be created.
    DeletionProtection bool
    Whether or not to allow the provider to destroy the instance. Unless this field is set to false in state, a `pulumi destroy` or `pulumi up` that would delete the instance will fail.
    Description string
    The field description.
    EffectiveLabels Dictionary<string, string>

    All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.

    • schema - (Optional) A JSON schema for the table.

    ~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. If the API returns a different value for the same schema, e.g. it switched the order of values or replaced STRUCT field type with RECORD field type, we currently cannot suppress the recurring diff this causes. As a workaround, we recommend using the schema as returned by the API.

    ~>NOTE: If you use external_data_configuration documented below and do not set external_data_configuration.connection_id, schemas must be specified with external_data_configuration.schema. Otherwise, schemas must be specified with this top-level field.

    EncryptionConfiguration TableEncryptionConfiguration
    Specifies how the table should be encrypted. If left blank, the table will be encrypted with a Google-managed key; that process is transparent to the user. Structure is documented below.
    Etag string
    A hash of the resource.
    ExpirationTime int
    The time when this table expires, in milliseconds since the epoch. If not present, the table will persist indefinitely. Expired tables will be deleted and their storage reclaimed.
    ExternalDataConfiguration TableExternalDataConfiguration
    Describes the data format, location, and other properties of a table stored outside of BigQuery. By defining these properties, the data source can then be queried as if it were a standard BigQuery table. Structure is documented below.
    FriendlyName string
    A descriptive name for the table.
    Labels Dictionary<string, string>

    A mapping of labels to assign to the resource.

    Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.

    LastModifiedTime int
    The time when this table was last modified, in milliseconds since the epoch.
    Location string
    The geographic location where the table resides. This value is inherited from the dataset.
    MaterializedView TableMaterializedView
    If specified, configures this table as a materialized view. Structure is documented below.
    MaxStaleness string
    The maximum staleness of data that could be returned when the table (or stale MV) is queried. Staleness encoded as a string encoding of SQL IntervalValue type.
    NumBytes int
    The size of this table in bytes, excluding any data in the streaming buffer.
    NumLongTermBytes int
    The number of bytes in the table that are considered "long-term storage".
    NumRows int
    The number of rows of data in this table, excluding any data in the streaming buffer.
    Project string
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    PulumiLabels Dictionary<string, string>
    The combination of labels configured directly on the resource and default labels configured on the provider.
    RangePartitioning TableRangePartitioning
    If specified, configures range-based partitioning for this table. Structure is documented below.
    RequirePartitionFilter bool
    If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified.
    Schema string

    A JSON schema for the external table. Schema is required for CSV and JSON formats if autodetect is not on. Schema is disallowed for Google Cloud Bigtable, Cloud Datastore backups, Avro, Iceberg, ORC and Parquet formats. Note: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. Furthermore, drift for this field cannot be detected because BigQuery only uses this schema to compute the effective schema for the table; therefore any changes on the configured value will force the table to be recreated. This schema is effectively only applied when creating a table from an external datasource; after creation the computed schema will be stored in google_bigquery_table.schema

    Note: If you set external_data_configuration.connection_id, the table schema must be specified using the top-level schema field documented above.

    SelfLink string
    The URI of the created resource.
    TableConstraints TableTableConstraints
    Defines the primary key and foreign keys. Structure is documented below.
    TableId string
    A unique ID for the resource. Changing this forces a new resource to be created.
    TableReplicationInfo TableTableReplicationInfo
    Replication info of a table created using "AS REPLICA" DDL like: "CREATE MATERIALIZED VIEW mv1 AS REPLICA OF src_mv".
    TimePartitioning TableTimePartitioning
    If specified, configures time-based partitioning for this table. Structure is documented below.
    Type string
    The supported types are DAY, HOUR, MONTH, and YEAR, which will generate one partition per day, hour, month, and year, respectively.
    View TableView
    If specified, configures this table as a view. Structure is documented below.
    Clusterings []string
    Specifies column names to use for data clustering. Up to four top-level columns are allowed, and should be specified in descending priority order.
    CreationTime int
    The time when this table was created, in milliseconds since the epoch.
    DatasetId string
    The dataset ID to create the table in. Changing this forces a new resource to be created.
    DeletionProtection bool
    Whether or not to allow the provider to destroy the instance. Unless this field is set to false in state, a `pulumi destroy` or `pulumi up` that would delete the instance will fail.
    Description string
    The field description.
    EffectiveLabels map[string]string

    All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.

    • schema - (Optional) A JSON schema for the table.

    ~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. If the API returns a different value for the same schema, e.g. it switched the order of values or replaced STRUCT field type with RECORD field type, we currently cannot suppress the recurring diff this causes. As a workaround, we recommend using the schema as returned by the API.

    ~>NOTE: If you use external_data_configuration documented below and do not set external_data_configuration.connection_id, schemas must be specified with external_data_configuration.schema. Otherwise, schemas must be specified with this top-level field.

    EncryptionConfiguration TableEncryptionConfigurationArgs
    Specifies how the table should be encrypted. If left blank, the table will be encrypted with a Google-managed key; that process is transparent to the user. Structure is documented below.
    Etag string
    A hash of the resource.
    ExpirationTime int
    The time when this table expires, in milliseconds since the epoch. If not present, the table will persist indefinitely. Expired tables will be deleted and their storage reclaimed.
    ExternalDataConfiguration TableExternalDataConfigurationArgs
    Describes the data format, location, and other properties of a table stored outside of BigQuery. By defining these properties, the data source can then be queried as if it were a standard BigQuery table. Structure is documented below.
    FriendlyName string
    A descriptive name for the table.
    Labels map[string]string

    A mapping of labels to assign to the resource.

    Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.

    LastModifiedTime int
    The time when this table was last modified, in milliseconds since the epoch.
    Location string
    The geographic location where the table resides. This value is inherited from the dataset.
    MaterializedView TableMaterializedViewArgs
    If specified, configures this table as a materialized view. Structure is documented below.
    MaxStaleness string
    The maximum staleness of data that could be returned when the table (or stale MV) is queried. Staleness encoded as a string encoding of SQL IntervalValue type.
    NumBytes int
    The size of this table in bytes, excluding any data in the streaming buffer.
    NumLongTermBytes int
    The number of bytes in the table that are considered "long-term storage".
    NumRows int
    The number of rows of data in this table, excluding any data in the streaming buffer.
    Project string
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    PulumiLabels map[string]string
    The combination of labels configured directly on the resource and default labels configured on the provider.
    RangePartitioning TableRangePartitioningArgs
    If specified, configures range-based partitioning for this table. Structure is documented below.
    RequirePartitionFilter bool
    If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified.
    Schema string

    A JSON schema for the external table. Schema is required for CSV and JSON formats if autodetect is not on. Schema is disallowed for Google Cloud Bigtable, Cloud Datastore backups, Avro, Iceberg, ORC and Parquet formats. ~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. Furthermore, drift for this field cannot be detected because BigQuery only uses this schema to compute the effective schema for the table, therefore any changes on the configured value will force the table to be recreated. This schema is effectively only applied when creating a table from an external data source; after creation the computed schema will be stored in google_bigquery_table.schema

    ~>NOTE: If you set external_data_configuration.connection_id, the table schema must be specified using the top-level schema field documented above.

    SelfLink string
    The URI of the created resource.
    TableConstraints TableTableConstraintsArgs
    Defines the primary key and foreign keys. Structure is documented below.
    TableId string
    A unique ID for the resource. Changing this forces a new resource to be created.
    TableReplicationInfo TableTableReplicationInfoArgs
    Replication info of a table created using "AS REPLICA" DDL like: "CREATE MATERIALIZED VIEW mv1 AS REPLICA OF src_mv".
    TimePartitioning TableTimePartitioningArgs
    If specified, configures time-based partitioning for this table. Structure is documented below.
    Type string
    The supported types are DAY, HOUR, MONTH, and YEAR, which will generate one partition per day, hour, month, and year, respectively.
    View TableViewArgs
    If specified, configures this table as a view. Structure is documented below.
    clusterings List<String>
    Specifies column names to use for data clustering. Up to four top-level columns are allowed, and should be specified in descending priority order.
    creationTime Integer
    The time when this table was created, in milliseconds since the epoch.
    datasetId String
    The dataset ID to create the table in. Changing this forces a new resource to be created.
    deletionProtection Boolean
    Whether or not to allow the provider to destroy the instance. Unless this field is set to false in state, a pulumi destroy or pulumi update that would delete the instance will fail.
    description String
    The field description.
    effectiveLabels Map<String,String>

    All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.

    • schema - (Optional) A JSON schema for the table.

    ~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. If the API returns a different value for the same schema, e.g. it switched the order of values or replaced STRUCT field type with RECORD field type, we currently cannot suppress the recurring diff this causes. As a workaround, we recommend using the schema as returned by the API.

    ~>NOTE: If you use external_data_configuration documented below and do not set external_data_configuration.connection_id, schemas must be specified with external_data_configuration.schema. Otherwise, schemas must be specified with this top-level field.

    encryptionConfiguration TableEncryptionConfiguration
    Specifies how the table should be encrypted. If left blank, the table will be encrypted with a Google-managed key; that process is transparent to the user. Structure is documented below.
    etag String
    A hash of the resource.
    expirationTime Integer
    The time when this table expires, in milliseconds since the epoch. If not present, the table will persist indefinitely. Expired tables will be deleted and their storage reclaimed.
    externalDataConfiguration TableExternalDataConfiguration
    Describes the data format, location, and other properties of a table stored outside of BigQuery. By defining these properties, the data source can then be queried as if it were a standard BigQuery table. Structure is documented below.
    friendlyName String
    A descriptive name for the table.
    labels Map<String,String>

    A mapping of labels to assign to the resource.

    Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.

    lastModifiedTime Integer
    The time when this table was last modified, in milliseconds since the epoch.
    location String
    The geographic location where the table resides. This value is inherited from the dataset.
    materializedView TableMaterializedView
    If specified, configures this table as a materialized view. Structure is documented below.
    maxStaleness String
    The maximum staleness of data that could be returned when the table (or stale MV) is queried. Staleness encoded as a string encoding of SQL IntervalValue type.
    numBytes Integer
    The size of this table in bytes, excluding any data in the streaming buffer.
    numLongTermBytes Integer
    The number of bytes in the table that are considered "long-term storage".
    numRows Integer
    The number of rows of data in this table, excluding any data in the streaming buffer.
    project String
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    pulumiLabels Map<String,String>
    The combination of labels configured directly on the resource and default labels configured on the provider.
    rangePartitioning TableRangePartitioning
    If specified, configures range-based partitioning for this table. Structure is documented below.
    requirePartitionFilter Boolean
    If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified.
    schema String

    A JSON schema for the external table. Schema is required for CSV and JSON formats if autodetect is not on. Schema is disallowed for Google Cloud Bigtable, Cloud Datastore backups, Avro, Iceberg, ORC and Parquet formats. ~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. Furthermore, drift for this field cannot be detected because BigQuery only uses this schema to compute the effective schema for the table, therefore any changes on the configured value will force the table to be recreated. This schema is effectively only applied when creating a table from an external data source; after creation the computed schema will be stored in google_bigquery_table.schema

    ~>NOTE: If you set external_data_configuration.connection_id, the table schema must be specified using the top-level schema field documented above.

    selfLink String
    The URI of the created resource.
    tableConstraints TableTableConstraints
    Defines the primary key and foreign keys. Structure is documented below.
    tableId String
    A unique ID for the resource. Changing this forces a new resource to be created.
    tableReplicationInfo TableTableReplicationInfo
    Replication info of a table created using "AS REPLICA" DDL like: "CREATE MATERIALIZED VIEW mv1 AS REPLICA OF src_mv".
    timePartitioning TableTimePartitioning
    If specified, configures time-based partitioning for this table. Structure is documented below.
    type String
    The supported types are DAY, HOUR, MONTH, and YEAR, which will generate one partition per day, hour, month, and year, respectively.
    view TableView
    If specified, configures this table as a view. Structure is documented below.
    clusterings string[]
    Specifies column names to use for data clustering. Up to four top-level columns are allowed, and should be specified in descending priority order.
    creationTime number
    The time when this table was created, in milliseconds since the epoch.
    datasetId string
    The dataset ID to create the table in. Changing this forces a new resource to be created.
    deletionProtection boolean
    Whether or not to allow the provider to destroy the instance. Unless this field is set to false in state, a pulumi destroy or pulumi update that would delete the instance will fail.
    description string
    The field description.
    effectiveLabels {[key: string]: string}

    All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.

    • schema - (Optional) A JSON schema for the table.

    ~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. If the API returns a different value for the same schema, e.g. it switched the order of values or replaced STRUCT field type with RECORD field type, we currently cannot suppress the recurring diff this causes. As a workaround, we recommend using the schema as returned by the API.

    ~>NOTE: If you use external_data_configuration documented below and do not set external_data_configuration.connection_id, schemas must be specified with external_data_configuration.schema. Otherwise, schemas must be specified with this top-level field.

    encryptionConfiguration TableEncryptionConfiguration
    Specifies how the table should be encrypted. If left blank, the table will be encrypted with a Google-managed key; that process is transparent to the user. Structure is documented below.
    etag string
    A hash of the resource.
    expirationTime number
    The time when this table expires, in milliseconds since the epoch. If not present, the table will persist indefinitely. Expired tables will be deleted and their storage reclaimed.
    externalDataConfiguration TableExternalDataConfiguration
    Describes the data format, location, and other properties of a table stored outside of BigQuery. By defining these properties, the data source can then be queried as if it were a standard BigQuery table. Structure is documented below.
    friendlyName string
    A descriptive name for the table.
    labels {[key: string]: string}

    A mapping of labels to assign to the resource.

    Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.

    lastModifiedTime number
    The time when this table was last modified, in milliseconds since the epoch.
    location string
    The geographic location where the table resides. This value is inherited from the dataset.
    materializedView TableMaterializedView
    If specified, configures this table as a materialized view. Structure is documented below.
    maxStaleness string
    The maximum staleness of data that could be returned when the table (or stale MV) is queried. Staleness encoded as a string encoding of SQL IntervalValue type.
    numBytes number
    The size of this table in bytes, excluding any data in the streaming buffer.
    numLongTermBytes number
    The number of bytes in the table that are considered "long-term storage".
    numRows number
    The number of rows of data in this table, excluding any data in the streaming buffer.
    project string
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    pulumiLabels {[key: string]: string}
    The combination of labels configured directly on the resource and default labels configured on the provider.
    rangePartitioning TableRangePartitioning
    If specified, configures range-based partitioning for this table. Structure is documented below.
    requirePartitionFilter boolean
    If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified.
    schema string

    A JSON schema for the external table. Schema is required for CSV and JSON formats if autodetect is not on. Schema is disallowed for Google Cloud Bigtable, Cloud Datastore backups, Avro, Iceberg, ORC and Parquet formats. ~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. Furthermore, drift for this field cannot be detected because BigQuery only uses this schema to compute the effective schema for the table, therefore any changes on the configured value will force the table to be recreated. This schema is effectively only applied when creating a table from an external data source; after creation the computed schema will be stored in google_bigquery_table.schema

    ~>NOTE: If you set external_data_configuration.connection_id, the table schema must be specified using the top-level schema field documented above.

    selfLink string
    The URI of the created resource.
    tableConstraints TableTableConstraints
    Defines the primary key and foreign keys. Structure is documented below.
    tableId string
    A unique ID for the resource. Changing this forces a new resource to be created.
    tableReplicationInfo TableTableReplicationInfo
    Replication info of a table created using "AS REPLICA" DDL like: "CREATE MATERIALIZED VIEW mv1 AS REPLICA OF src_mv".
    timePartitioning TableTimePartitioning
    If specified, configures time-based partitioning for this table. Structure is documented below.
    type string
    The supported types are DAY, HOUR, MONTH, and YEAR, which will generate one partition per day, hour, month, and year, respectively.
    view TableView
    If specified, configures this table as a view. Structure is documented below.
    clusterings Sequence[str]
    Specifies column names to use for data clustering. Up to four top-level columns are allowed, and should be specified in descending priority order.
    creation_time int
    The time when this table was created, in milliseconds since the epoch.
    dataset_id str
    The dataset ID to create the table in. Changing this forces a new resource to be created.
    deletion_protection bool
    Whether or not to allow the provider to destroy the instance. Unless this field is set to false in state, a pulumi destroy or pulumi update that would delete the instance will fail.
    description str
    The field description.
    effective_labels Mapping[str, str]

    All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.

    • schema - (Optional) A JSON schema for the table.

    ~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. If the API returns a different value for the same schema, e.g. it switched the order of values or replaced STRUCT field type with RECORD field type, we currently cannot suppress the recurring diff this causes. As a workaround, we recommend using the schema as returned by the API.

    ~>NOTE: If you use external_data_configuration documented below and do not set external_data_configuration.connection_id, schemas must be specified with external_data_configuration.schema. Otherwise, schemas must be specified with this top-level field.

    encryption_configuration TableEncryptionConfigurationArgs
    Specifies how the table should be encrypted. If left blank, the table will be encrypted with a Google-managed key; that process is transparent to the user. Structure is documented below.
    etag str
    A hash of the resource.
    expiration_time int
    The time when this table expires, in milliseconds since the epoch. If not present, the table will persist indefinitely. Expired tables will be deleted and their storage reclaimed.
    external_data_configuration TableExternalDataConfigurationArgs
    Describes the data format, location, and other properties of a table stored outside of BigQuery. By defining these properties, the data source can then be queried as if it were a standard BigQuery table. Structure is documented below.
    friendly_name str
    A descriptive name for the table.
    labels Mapping[str, str]

    A mapping of labels to assign to the resource.

    Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.

    last_modified_time int
    The time when this table was last modified, in milliseconds since the epoch.
    location str
    The geographic location where the table resides. This value is inherited from the dataset.
    materialized_view TableMaterializedViewArgs
    If specified, configures this table as a materialized view. Structure is documented below.
    max_staleness str
    The maximum staleness of data that could be returned when the table (or stale MV) is queried. Staleness encoded as a string encoding of SQL IntervalValue type.
    num_bytes int
    The size of this table in bytes, excluding any data in the streaming buffer.
    num_long_term_bytes int
    The number of bytes in the table that are considered "long-term storage".
    num_rows int
    The number of rows of data in this table, excluding any data in the streaming buffer.
    project str
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    pulumi_labels Mapping[str, str]
    The combination of labels configured directly on the resource and default labels configured on the provider.
    range_partitioning TableRangePartitioningArgs
    If specified, configures range-based partitioning for this table. Structure is documented below.
    require_partition_filter bool
    If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified.
    schema str

    A JSON schema for the external table. Schema is required for CSV and JSON formats if autodetect is not on. Schema is disallowed for Google Cloud Bigtable, Cloud Datastore backups, Avro, Iceberg, ORC and Parquet formats. ~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. Furthermore, drift for this field cannot be detected because BigQuery only uses this schema to compute the effective schema for the table, therefore any changes on the configured value will force the table to be recreated. This schema is effectively only applied when creating a table from an external data source; after creation the computed schema will be stored in google_bigquery_table.schema

    ~>NOTE: If you set external_data_configuration.connection_id, the table schema must be specified using the top-level schema field documented above.

    self_link str
    The URI of the created resource.
    table_constraints TableTableConstraintsArgs
    Defines the primary key and foreign keys. Structure is documented below.
    table_id str
    A unique ID for the resource. Changing this forces a new resource to be created.
    table_replication_info TableTableReplicationInfoArgs
    Replication info of a table created using "AS REPLICA" DDL like: "CREATE MATERIALIZED VIEW mv1 AS REPLICA OF src_mv".
    time_partitioning TableTimePartitioningArgs
    If specified, configures time-based partitioning for this table. Structure is documented below.
    type str
    The supported types are DAY, HOUR, MONTH, and YEAR, which will generate one partition per day, hour, month, and year, respectively.
    view TableViewArgs
    If specified, configures this table as a view. Structure is documented below.
    clusterings List<String>
    Specifies column names to use for data clustering. Up to four top-level columns are allowed, and should be specified in descending priority order.
    creationTime Number
    The time when this table was created, in milliseconds since the epoch.
    datasetId String
    The dataset ID to create the table in. Changing this forces a new resource to be created.
    deletionProtection Boolean
    Whether or not to allow the provider to destroy the instance. Unless this field is set to false in state, a pulumi destroy or pulumi update that would delete the instance will fail.
    description String
    The field description.
    effectiveLabels Map<String>

    All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.

    • schema - (Optional) A JSON schema for the table.

    ~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. If the API returns a different value for the same schema, e.g. it switched the order of values or replaced STRUCT field type with RECORD field type, we currently cannot suppress the recurring diff this causes. As a workaround, we recommend using the schema as returned by the API.

    ~>NOTE: If you use external_data_configuration documented below and do not set external_data_configuration.connection_id, schemas must be specified with external_data_configuration.schema. Otherwise, schemas must be specified with this top-level field.

    encryptionConfiguration Property Map
    Specifies how the table should be encrypted. If left blank, the table will be encrypted with a Google-managed key; that process is transparent to the user. Structure is documented below.
    etag String
    A hash of the resource.
    expirationTime Number
    The time when this table expires, in milliseconds since the epoch. If not present, the table will persist indefinitely. Expired tables will be deleted and their storage reclaimed.
    externalDataConfiguration Property Map
    Describes the data format, location, and other properties of a table stored outside of BigQuery. By defining these properties, the data source can then be queried as if it were a standard BigQuery table. Structure is documented below.
    friendlyName String
    A descriptive name for the table.
    labels Map<String>

    A mapping of labels to assign to the resource.

    Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.

    lastModifiedTime Number
    The time when this table was last modified, in milliseconds since the epoch.
    location String
    The geographic location where the table resides. This value is inherited from the dataset.
    materializedView Property Map
    If specified, configures this table as a materialized view. Structure is documented below.
    maxStaleness String
    The maximum staleness of data that could be returned when the table (or stale MV) is queried. Staleness encoded as a string encoding of SQL IntervalValue type.
    numBytes Number
    The size of this table in bytes, excluding any data in the streaming buffer.
    numLongTermBytes Number
    The number of bytes in the table that are considered "long-term storage".
    numRows Number
    The number of rows of data in this table, excluding any data in the streaming buffer.
    project String
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    pulumiLabels Map<String>
    The combination of labels configured directly on the resource and default labels configured on the provider.
    rangePartitioning Property Map
    If specified, configures range-based partitioning for this table. Structure is documented below.
    requirePartitionFilter Boolean
    If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified.
    schema String

    A JSON schema for the external table. Schema is required for CSV and JSON formats if autodetect is not on. Schema is disallowed for Google Cloud Bigtable, Cloud Datastore backups, Avro, Iceberg, ORC and Parquet formats. ~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. Furthermore, drift for this field cannot be detected because BigQuery only uses this schema to compute the effective schema for the table, therefore any changes on the configured value will force the table to be recreated. This schema is effectively only applied when creating a table from an external data source; after creation the computed schema will be stored in google_bigquery_table.schema

    ~>NOTE: If you set external_data_configuration.connection_id, the table schema must be specified using the top-level schema field documented above.

    selfLink String
    The URI of the created resource.
    tableConstraints Property Map
    Defines the primary key and foreign keys. Structure is documented below.
    tableId String
    A unique ID for the resource. Changing this forces a new resource to be created.
    tableReplicationInfo Property Map
    Replication info of a table created using "AS REPLICA" DDL like: "CREATE MATERIALIZED VIEW mv1 AS REPLICA OF src_mv".
    timePartitioning Property Map
    If specified, configures time-based partitioning for this table. Structure is documented below.
    type String
    The supported types are DAY, HOUR, MONTH, and YEAR, which will generate one partition per day, hour, month, and year, respectively.
    view Property Map
    If specified, configures this table as a view. Structure is documented below.

    Supporting Types

    TableEncryptionConfiguration, TableEncryptionConfigurationArgs

    KmsKeyName string
    The self link or full name of a key which should be used to encrypt this table. Note that the default bigquery service account will need to have encrypt/decrypt permissions on this key - you may want to see the gcp.bigquery.getDefaultServiceAccount datasource and the gcp.kms.CryptoKeyIAMBinding resource.
    KmsKeyVersion string
    The self link or full name of the kms key version used to encrypt this table.
    KmsKeyName string
    The self link or full name of a key which should be used to encrypt this table. Note that the default bigquery service account will need to have encrypt/decrypt permissions on this key - you may want to see the gcp.bigquery.getDefaultServiceAccount datasource and the gcp.kms.CryptoKeyIAMBinding resource.
    KmsKeyVersion string
    The self link or full name of the kms key version used to encrypt this table.
    kmsKeyName String
    The self link or full name of a key which should be used to encrypt this table. Note that the default bigquery service account will need to have encrypt/decrypt permissions on this key - you may want to see the gcp.bigquery.getDefaultServiceAccount datasource and the gcp.kms.CryptoKeyIAMBinding resource.
    kmsKeyVersion String
    The self link or full name of the kms key version used to encrypt this table.
    kmsKeyName string
    The self link or full name of a key which should be used to encrypt this table. Note that the default bigquery service account will need to have encrypt/decrypt permissions on this key - you may want to see the gcp.bigquery.getDefaultServiceAccount datasource and the gcp.kms.CryptoKeyIAMBinding resource.
    kmsKeyVersion string
    The self link or full name of the kms key version used to encrypt this table.
    kms_key_name str
    The self link or full name of a key which should be used to encrypt this table. Note that the default bigquery service account will need to have encrypt/decrypt permissions on this key - you may want to see the gcp.bigquery.getDefaultServiceAccount datasource and the gcp.kms.CryptoKeyIAMBinding resource.
    kms_key_version str
    The self link or full name of the kms key version used to encrypt this table.
    kmsKeyName String
    The self link or full name of a key which should be used to encrypt this table. Note that the default bigquery service account will need to have encrypt/decrypt permissions on this key - you may want to see the gcp.bigquery.getDefaultServiceAccount datasource and the gcp.kms.CryptoKeyIAMBinding resource.
    kmsKeyVersion String
    The self link or full name of the kms key version used to encrypt this table.

    TableExternalDataConfiguration, TableExternalDataConfigurationArgs

    Autodetect bool
    Let BigQuery try to autodetect the schema and format of the table.
    SourceUris List<string>
    A list of the fully-qualified URIs that point to your data in Google Cloud.
    AvroOptions TableExternalDataConfigurationAvroOptions
    Additional options if source_format is set to "AVRO". Structure is documented below.
    Compression string
    The compression type of the data source. Valid values are "NONE" or "GZIP".
    ConnectionId string

    The connection specifying the credentials to be used to read external storage, such as Azure Blob, Cloud Storage, or S3. The connection_id can have the form {{project}}.{{location}}.{{connection_id}} or projects/{{project}}/locations/{{location}}/connections/{{connection_id}}.

    ~>NOTE: If you set external_data_configuration.connection_id, the table schema must be specified using the top-level schema field documented above.

    CsvOptions TableExternalDataConfigurationCsvOptions
    Additional properties to set if source_format is set to "CSV". Structure is documented below.
    FileSetSpecType string
    Specifies how source URIs are interpreted for constructing the file set to load. By default source URIs are expanded against the underlying storage. Other options include specifying manifest files. Only applicable to object storage systems. Docs
    GoogleSheetsOptions TableExternalDataConfigurationGoogleSheetsOptions
    Additional options if source_format is set to "GOOGLE_SHEETS". Structure is documented below.
    HivePartitioningOptions TableExternalDataConfigurationHivePartitioningOptions
    When set, configures hive partitioning support. Not all storage formats support hive partitioning -- requesting hive partitioning on an unsupported format will lead to an error, as will providing an invalid specification. Structure is documented below.
    IgnoreUnknownValues bool
    Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false.
    JsonOptions TableExternalDataConfigurationJsonOptions
    Additional properties to set if source_format is set to "JSON". Structure is documented below.
    MaxBadRecords int
    The maximum number of bad records that BigQuery can ignore when reading data.
    MetadataCacheMode string
    Metadata Cache Mode for the table. Set this to enable caching of metadata from external data source. Valid values are AUTOMATIC and MANUAL.
    ObjectMetadata string
    Object Metadata is used to create Object Tables. Object Tables contain a listing of objects (with their metadata) found at the sourceUris. If object_metadata is set, source_format should be omitted.
    ParquetOptions TableExternalDataConfigurationParquetOptions
    Additional properties to set if source_format is set to "PARQUET". Structure is documented below.
    ReferenceFileSchemaUri string
    When creating an external table, the user can provide a reference file with the table schema. This is enabled for the following formats: AVRO, PARQUET, ORC.
    Schema string

    A JSON schema for the external table. Schema is required for CSV and JSON formats if autodetect is not on. Schema is disallowed for Google Cloud Bigtable, Cloud Datastore backups, Avro, Iceberg, ORC and Parquet formats. ~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. Furthermore, drift for this field cannot be detected because BigQuery only uses this schema to compute the effective schema for the table; therefore any changes on the configured value will force the table to be recreated. This schema is effectively only applied when creating a table from an external datasource; after creation the computed schema will be stored in google_bigquery_table.schema

    ~>NOTE: If you set external_data_configuration.connection_id, the table schema must be specified using the top-level schema field documented above.

    SourceFormat string
    The data format. Please see sourceFormat under ExternalDataConfiguration in BigQuery's public API documentation for supported formats. To use "GOOGLE_SHEETS" the scopes must include "https://www.googleapis.com/auth/drive.readonly".
    Autodetect bool
    Let BigQuery try to autodetect the schema and format of the table.
    SourceUris []string
    A list of the fully-qualified URIs that point to your data in Google Cloud.
    AvroOptions TableExternalDataConfigurationAvroOptions
    Additional options if source_format is set to "AVRO". Structure is documented below.
    Compression string
    The compression type of the data source. Valid values are "NONE" or "GZIP".
    ConnectionId string

    The connection specifying the credentials to be used to read external storage, such as Azure Blob, Cloud Storage, or S3. The connection_id can have the form {{project}}.{{location}}.{{connection_id}} or projects/{{project}}/locations/{{location}}/connections/{{connection_id}}.

    ~>NOTE: If you set external_data_configuration.connection_id, the table schema must be specified using the top-level schema field documented above.

    CsvOptions TableExternalDataConfigurationCsvOptions
    Additional properties to set if source_format is set to "CSV". Structure is documented below.
    FileSetSpecType string
    Specifies how source URIs are interpreted for constructing the file set to load. By default source URIs are expanded against the underlying storage. Other options include specifying manifest files. Only applicable to object storage systems. Docs
    GoogleSheetsOptions TableExternalDataConfigurationGoogleSheetsOptions
    Additional options if source_format is set to "GOOGLE_SHEETS". Structure is documented below.
    HivePartitioningOptions TableExternalDataConfigurationHivePartitioningOptions
    When set, configures hive partitioning support. Not all storage formats support hive partitioning -- requesting hive partitioning on an unsupported format will lead to an error, as will providing an invalid specification. Structure is documented below.
    IgnoreUnknownValues bool
    Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false.
    JsonOptions TableExternalDataConfigurationJsonOptions
    Additional properties to set if source_format is set to "JSON". Structure is documented below.
    MaxBadRecords int
    The maximum number of bad records that BigQuery can ignore when reading data.
    MetadataCacheMode string
    Metadata Cache Mode for the table. Set this to enable caching of metadata from external data source. Valid values are AUTOMATIC and MANUAL.
    ObjectMetadata string
    Object Metadata is used to create Object Tables. Object Tables contain a listing of objects (with their metadata) found at the sourceUris. If object_metadata is set, source_format should be omitted.
    ParquetOptions TableExternalDataConfigurationParquetOptions
    Additional properties to set if source_format is set to "PARQUET". Structure is documented below.
    ReferenceFileSchemaUri string
    When creating an external table, the user can provide a reference file with the table schema. This is enabled for the following formats: AVRO, PARQUET, ORC.
    Schema string

    A JSON schema for the external table. Schema is required for CSV and JSON formats if autodetect is not on. Schema is disallowed for Google Cloud Bigtable, Cloud Datastore backups, Avro, Iceberg, ORC and Parquet formats. ~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. Furthermore, drift for this field cannot be detected because BigQuery only uses this schema to compute the effective schema for the table; therefore any changes on the configured value will force the table to be recreated. This schema is effectively only applied when creating a table from an external datasource; after creation the computed schema will be stored in google_bigquery_table.schema

    ~>NOTE: If you set external_data_configuration.connection_id, the table schema must be specified using the top-level schema field documented above.

    SourceFormat string
    The data format. Please see sourceFormat under ExternalDataConfiguration in BigQuery's public API documentation for supported formats. To use "GOOGLE_SHEETS" the scopes must include "https://www.googleapis.com/auth/drive.readonly".
    autodetect Boolean
    Let BigQuery try to autodetect the schema and format of the table.
    sourceUris List<String>
    A list of the fully-qualified URIs that point to your data in Google Cloud.
    avroOptions TableExternalDataConfigurationAvroOptions
    Additional options if source_format is set to "AVRO". Structure is documented below.
    compression String
    The compression type of the data source. Valid values are "NONE" or "GZIP".
    connectionId String

    The connection specifying the credentials to be used to read external storage, such as Azure Blob, Cloud Storage, or S3. The connection_id can have the form {{project}}.{{location}}.{{connection_id}} or projects/{{project}}/locations/{{location}}/connections/{{connection_id}}.

    ~>NOTE: If you set external_data_configuration.connection_id, the table schema must be specified using the top-level schema field documented above.

    csvOptions TableExternalDataConfigurationCsvOptions
    Additional properties to set if source_format is set to "CSV". Structure is documented below.
    fileSetSpecType String
    Specifies how source URIs are interpreted for constructing the file set to load. By default source URIs are expanded against the underlying storage. Other options include specifying manifest files. Only applicable to object storage systems. Docs
    googleSheetsOptions TableExternalDataConfigurationGoogleSheetsOptions
    Additional options if source_format is set to "GOOGLE_SHEETS". Structure is documented below.
    hivePartitioningOptions TableExternalDataConfigurationHivePartitioningOptions
    When set, configures hive partitioning support. Not all storage formats support hive partitioning -- requesting hive partitioning on an unsupported format will lead to an error, as will providing an invalid specification. Structure is documented below.
    ignoreUnknownValues Boolean
    Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false.
    jsonOptions TableExternalDataConfigurationJsonOptions
    Additional properties to set if source_format is set to "JSON". Structure is documented below.
    maxBadRecords Integer
    The maximum number of bad records that BigQuery can ignore when reading data.
    metadataCacheMode String
    Metadata Cache Mode for the table. Set this to enable caching of metadata from external data source. Valid values are AUTOMATIC and MANUAL.
    objectMetadata String
    Object Metadata is used to create Object Tables. Object Tables contain a listing of objects (with their metadata) found at the sourceUris. If object_metadata is set, source_format should be omitted.
    parquetOptions TableExternalDataConfigurationParquetOptions
    Additional properties to set if source_format is set to "PARQUET". Structure is documented below.
    referenceFileSchemaUri String
    When creating an external table, the user can provide a reference file with the table schema. This is enabled for the following formats: AVRO, PARQUET, ORC.
    schema String

    A JSON schema for the external table. Schema is required for CSV and JSON formats if autodetect is not on. Schema is disallowed for Google Cloud Bigtable, Cloud Datastore backups, Avro, Iceberg, ORC and Parquet formats. ~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. Furthermore, drift for this field cannot be detected because BigQuery only uses this schema to compute the effective schema for the table; therefore any changes on the configured value will force the table to be recreated. This schema is effectively only applied when creating a table from an external datasource; after creation the computed schema will be stored in google_bigquery_table.schema

    ~>NOTE: If you set external_data_configuration.connection_id, the table schema must be specified using the top-level schema field documented above.

    sourceFormat String
    The data format. Please see sourceFormat under ExternalDataConfiguration in BigQuery's public API documentation for supported formats. To use "GOOGLE_SHEETS" the scopes must include "https://www.googleapis.com/auth/drive.readonly".
    autodetect boolean
    Let BigQuery try to autodetect the schema and format of the table.
    sourceUris string[]
    A list of the fully-qualified URIs that point to your data in Google Cloud.
    avroOptions TableExternalDataConfigurationAvroOptions
    Additional options if source_format is set to "AVRO". Structure is documented below.
    compression string
    The compression type of the data source. Valid values are "NONE" or "GZIP".
    connectionId string

    The connection specifying the credentials to be used to read external storage, such as Azure Blob, Cloud Storage, or S3. The connection_id can have the form {{project}}.{{location}}.{{connection_id}} or projects/{{project}}/locations/{{location}}/connections/{{connection_id}}.

    ~>NOTE: If you set external_data_configuration.connection_id, the table schema must be specified using the top-level schema field documented above.

    csvOptions TableExternalDataConfigurationCsvOptions
    Additional properties to set if source_format is set to "CSV". Structure is documented below.
    fileSetSpecType string
    Specifies how source URIs are interpreted for constructing the file set to load. By default source URIs are expanded against the underlying storage. Other options include specifying manifest files. Only applicable to object storage systems. Docs
    googleSheetsOptions TableExternalDataConfigurationGoogleSheetsOptions
    Additional options if source_format is set to "GOOGLE_SHEETS". Structure is documented below.
    hivePartitioningOptions TableExternalDataConfigurationHivePartitioningOptions
    When set, configures hive partitioning support. Not all storage formats support hive partitioning -- requesting hive partitioning on an unsupported format will lead to an error, as will providing an invalid specification. Structure is documented below.
    ignoreUnknownValues boolean
    Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false.
    jsonOptions TableExternalDataConfigurationJsonOptions
    Additional properties to set if source_format is set to "JSON". Structure is documented below.
    maxBadRecords number
    The maximum number of bad records that BigQuery can ignore when reading data.
    metadataCacheMode string
    Metadata Cache Mode for the table. Set this to enable caching of metadata from external data source. Valid values are AUTOMATIC and MANUAL.
    objectMetadata string
    Object Metadata is used to create Object Tables. Object Tables contain a listing of objects (with their metadata) found at the sourceUris. If object_metadata is set, source_format should be omitted.
    parquetOptions TableExternalDataConfigurationParquetOptions
    Additional properties to set if source_format is set to "PARQUET". Structure is documented below.
    referenceFileSchemaUri string
    When creating an external table, the user can provide a reference file with the table schema. This is enabled for the following formats: AVRO, PARQUET, ORC.
    schema string

    A JSON schema for the external table. Schema is required for CSV and JSON formats if autodetect is not on. Schema is disallowed for Google Cloud Bigtable, Cloud Datastore backups, Avro, Iceberg, ORC and Parquet formats. ~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. Furthermore, drift for this field cannot be detected because BigQuery only uses this schema to compute the effective schema for the table; therefore any changes on the configured value will force the table to be recreated. This schema is effectively only applied when creating a table from an external datasource; after creation the computed schema will be stored in google_bigquery_table.schema

    ~>NOTE: If you set external_data_configuration.connection_id, the table schema must be specified using the top-level schema field documented above.

    sourceFormat string
    The data format. Please see sourceFormat under ExternalDataConfiguration in BigQuery's public API documentation for supported formats. To use "GOOGLE_SHEETS" the scopes must include "https://www.googleapis.com/auth/drive.readonly".
    autodetect bool
    Let BigQuery try to autodetect the schema and format of the table.
    source_uris Sequence[str]
    A list of the fully-qualified URIs that point to your data in Google Cloud.
    avro_options TableExternalDataConfigurationAvroOptions
    Additional options if source_format is set to "AVRO". Structure is documented below.
    compression str
    The compression type of the data source. Valid values are "NONE" or "GZIP".
    connection_id str

    The connection specifying the credentials to be used to read external storage, such as Azure Blob, Cloud Storage, or S3. The connection_id can have the form {{project}}.{{location}}.{{connection_id}} or projects/{{project}}/locations/{{location}}/connections/{{connection_id}}.

    ~>NOTE: If you set external_data_configuration.connection_id, the table schema must be specified using the top-level schema field documented above.

    csv_options TableExternalDataConfigurationCsvOptions
    Additional properties to set if source_format is set to "CSV". Structure is documented below.
    file_set_spec_type str
    Specifies how source URIs are interpreted for constructing the file set to load. By default source URIs are expanded against the underlying storage. Other options include specifying manifest files. Only applicable to object storage systems. Docs
    google_sheets_options TableExternalDataConfigurationGoogleSheetsOptions
    Additional options if source_format is set to "GOOGLE_SHEETS". Structure is documented below.
    hive_partitioning_options TableExternalDataConfigurationHivePartitioningOptions
    When set, configures hive partitioning support. Not all storage formats support hive partitioning -- requesting hive partitioning on an unsupported format will lead to an error, as will providing an invalid specification. Structure is documented below.
    ignore_unknown_values bool
    Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false.
    json_options TableExternalDataConfigurationJsonOptions
    Additional properties to set if source_format is set to "JSON". Structure is documented below.
    max_bad_records int
    The maximum number of bad records that BigQuery can ignore when reading data.
    metadata_cache_mode str
    Metadata Cache Mode for the table. Set this to enable caching of metadata from external data source. Valid values are AUTOMATIC and MANUAL.
    object_metadata str
    Object Metadata is used to create Object Tables. Object Tables contain a listing of objects (with their metadata) found at the sourceUris. If object_metadata is set, source_format should be omitted.
    parquet_options TableExternalDataConfigurationParquetOptions
    Additional properties to set if source_format is set to "PARQUET". Structure is documented below.
    reference_file_schema_uri str
    When creating an external table, the user can provide a reference file with the table schema. This is enabled for the following formats: AVRO, PARQUET, ORC.
    schema str

    A JSON schema for the external table. Schema is required for CSV and JSON formats if autodetect is not on. Schema is disallowed for Google Cloud Bigtable, Cloud Datastore backups, Avro, Iceberg, ORC and Parquet formats. ~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. Furthermore, drift for this field cannot be detected because BigQuery only uses this schema to compute the effective schema for the table; therefore any changes on the configured value will force the table to be recreated. This schema is effectively only applied when creating a table from an external datasource; after creation the computed schema will be stored in google_bigquery_table.schema

    ~>NOTE: If you set external_data_configuration.connection_id, the table schema must be specified using the top-level schema field documented above.

    source_format str
    The data format. Please see sourceFormat under ExternalDataConfiguration in BigQuery's public API documentation for supported formats. To use "GOOGLE_SHEETS" the scopes must include "https://www.googleapis.com/auth/drive.readonly".
    autodetect Boolean
    Let BigQuery try to autodetect the schema and format of the table.
    sourceUris List<String>
    A list of the fully-qualified URIs that point to your data in Google Cloud.
    avroOptions Property Map
    Additional options if source_format is set to "AVRO". Structure is documented below.
    compression String
    The compression type of the data source. Valid values are "NONE" or "GZIP".
    connectionId String

    The connection specifying the credentials to be used to read external storage, such as Azure Blob, Cloud Storage, or S3. The connection_id can have the form {{project}}.{{location}}.{{connection_id}} or projects/{{project}}/locations/{{location}}/connections/{{connection_id}}.

    ~>NOTE: If you set external_data_configuration.connection_id, the table schema must be specified using the top-level schema field documented above.

    csvOptions Property Map
    Additional properties to set if source_format is set to "CSV". Structure is documented below.
    fileSetSpecType String
    Specifies how source URIs are interpreted for constructing the file set to load. By default source URIs are expanded against the underlying storage. Other options include specifying manifest files. Only applicable to object storage systems. Docs
    googleSheetsOptions Property Map
    Additional options if source_format is set to "GOOGLE_SHEETS". Structure is documented below.
    hivePartitioningOptions Property Map
    When set, configures hive partitioning support. Not all storage formats support hive partitioning -- requesting hive partitioning on an unsupported format will lead to an error, as will providing an invalid specification. Structure is documented below.
    ignoreUnknownValues Boolean
    Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false.
    jsonOptions Property Map
    Additional properties to set if source_format is set to "JSON". Structure is documented below.
    maxBadRecords Number
    The maximum number of bad records that BigQuery can ignore when reading data.
    metadataCacheMode String
    Metadata Cache Mode for the table. Set this to enable caching of metadata from external data source. Valid values are AUTOMATIC and MANUAL.
    objectMetadata String
    Object Metadata is used to create Object Tables. Object Tables contain a listing of objects (with their metadata) found at the sourceUris. If object_metadata is set, source_format should be omitted.
    parquetOptions Property Map
    Additional properties to set if source_format is set to "PARQUET". Structure is documented below.
    referenceFileSchemaUri String
    When creating an external table, the user can provide a reference file with the table schema. This is enabled for the following formats: AVRO, PARQUET, ORC.
    schema String

    A JSON schema for the external table. Schema is required for CSV and JSON formats if autodetect is not on. Schema is disallowed for Google Cloud Bigtable, Cloud Datastore backups, Avro, Iceberg, ORC and Parquet formats. ~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. Furthermore, drift for this field cannot be detected because BigQuery only uses this schema to compute the effective schema for the table; therefore any changes on the configured value will force the table to be recreated. This schema is effectively only applied when creating a table from an external datasource; after creation the computed schema will be stored in google_bigquery_table.schema

    ~>NOTE: If you set external_data_configuration.connection_id, the table schema must be specified using the top-level schema field documented above.

    sourceFormat String
    The data format. Please see sourceFormat under ExternalDataConfiguration in BigQuery's public API documentation for supported formats. To use "GOOGLE_SHEETS" the scopes must include "https://www.googleapis.com/auth/drive.readonly".

    TableExternalDataConfigurationAvroOptions, TableExternalDataConfigurationAvroOptionsArgs

    UseAvroLogicalTypes bool
    If set to true, logical types are interpreted as the corresponding BigQuery data type (for example, TIMESTAMP) instead of the raw type (for example, INTEGER).
    UseAvroLogicalTypes bool
    If set to true, logical types are interpreted as the corresponding BigQuery data type (for example, TIMESTAMP) instead of the raw type (for example, INTEGER).
    useAvroLogicalTypes Boolean
    If set to true, logical types are interpreted as the corresponding BigQuery data type (for example, TIMESTAMP) instead of the raw type (for example, INTEGER).
    useAvroLogicalTypes boolean
    If set to true, logical types are interpreted as the corresponding BigQuery data type (for example, TIMESTAMP) instead of the raw type (for example, INTEGER).
    use_avro_logical_types bool
    If set to true, logical types are interpreted as the corresponding BigQuery data type (for example, TIMESTAMP) instead of the raw type (for example, INTEGER).
    useAvroLogicalTypes Boolean
    If set to true, logical types are interpreted as the corresponding BigQuery data type (for example, TIMESTAMP) instead of the raw type (for example, INTEGER).

    TableExternalDataConfigurationCsvOptions, TableExternalDataConfigurationCsvOptionsArgs

    Quote string
    The value that is used to quote data sections in a CSV file. If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set the allow_quoted_newlines property to true. The API-side default is ", specified in the provider escaped as \". Due to limitations with default values, this value is required to be explicitly set.
    AllowJaggedRows bool
    Indicates if BigQuery should accept rows that are missing trailing optional columns.
    AllowQuotedNewlines bool
    Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false.
    Encoding string
    The character encoding of the data. The supported values are UTF-8 or ISO-8859-1.
    FieldDelimiter string
    The separator for fields in a CSV file.
    SkipLeadingRows int
    The number of rows at the top of a CSV file that BigQuery will skip when reading the data.
    Quote string
    The value that is used to quote data sections in a CSV file. If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set the allow_quoted_newlines property to true. The API-side default is ", specified in the provider escaped as \". Due to limitations with default values, this value is required to be explicitly set.
    AllowJaggedRows bool
    Indicates if BigQuery should accept rows that are missing trailing optional columns.
    AllowQuotedNewlines bool
    Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false.
    Encoding string
    The character encoding of the data. The supported values are UTF-8 or ISO-8859-1.
    FieldDelimiter string
    The separator for fields in a CSV file.
    SkipLeadingRows int
    The number of rows at the top of a CSV file that BigQuery will skip when reading the data.
    quote String
    The value that is used to quote data sections in a CSV file. If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set the allow_quoted_newlines property to true. The API-side default is ", specified in the provider escaped as \". Due to limitations with default values, this value is required to be explicitly set.
    allowJaggedRows Boolean
    Indicates if BigQuery should accept rows that are missing trailing optional columns.
    allowQuotedNewlines Boolean
    Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false.
    encoding String
    The character encoding of the data. The supported values are UTF-8 or ISO-8859-1.
    fieldDelimiter String
    The separator for fields in a CSV file.
    skipLeadingRows Integer
    The number of rows at the top of a CSV file that BigQuery will skip when reading the data.
    quote string
    The value that is used to quote data sections in a CSV file. If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set the allow_quoted_newlines property to true. The API-side default is ", specified in the provider escaped as \". Due to limitations with default values, this value is required to be explicitly set.
    allowJaggedRows boolean
    Indicates if BigQuery should accept rows that are missing trailing optional columns.
    allowQuotedNewlines boolean
    Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false.
    encoding string
    The character encoding of the data. The supported values are UTF-8 or ISO-8859-1.
    fieldDelimiter string
    The separator for fields in a CSV file.
    skipLeadingRows number
    The number of rows at the top of a CSV file that BigQuery will skip when reading the data.
    quote str
    The value that is used to quote data sections in a CSV file. If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set the allow_quoted_newlines property to true. The API-side default is ", specified in the provider escaped as \". Due to limitations with default values, this value is required to be explicitly set.
    allow_jagged_rows bool
    Indicates if BigQuery should accept rows that are missing trailing optional columns.
    allow_quoted_newlines bool
    Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false.
    encoding str
    The character encoding of the data. The supported values are UTF-8 or ISO-8859-1.
    field_delimiter str
    The separator for fields in a CSV file.
    skip_leading_rows int
    The number of rows at the top of a CSV file that BigQuery will skip when reading the data.
    quote String
    The value that is used to quote data sections in a CSV file. If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set the allow_quoted_newlines property to true. The API-side default is ", specified in the provider escaped as \". Due to limitations with default values, this value is required to be explicitly set.
    allowJaggedRows Boolean
    Indicates if BigQuery should accept rows that are missing trailing optional columns.
    allowQuotedNewlines Boolean
    Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false.
    encoding String
    The character encoding of the data. The supported values are UTF-8 or ISO-8859-1.
    fieldDelimiter String
    The separator for fields in a CSV file.
    skipLeadingRows Number
    The number of rows at the top of a CSV file that BigQuery will skip when reading the data.

    TableExternalDataConfigurationGoogleSheetsOptions, TableExternalDataConfigurationGoogleSheetsOptionsArgs

    Range string
    Range of a sheet to query from. Only used when non-empty. At least one of range or skip_leading_rows must be set. Typical format: "sheet_name!top_left_cell_id:bottom_right_cell_id" For example: "sheet1!A1:B20"
    SkipLeadingRows int
    The number of rows at the top of the sheet that BigQuery will skip when reading the data. At least one of range or skip_leading_rows must be set.
    Range string
    Range of a sheet to query from. Only used when non-empty. At least one of range or skip_leading_rows must be set. Typical format: "sheet_name!top_left_cell_id:bottom_right_cell_id" For example: "sheet1!A1:B20"
    SkipLeadingRows int
    The number of rows at the top of the sheet that BigQuery will skip when reading the data. At least one of range or skip_leading_rows must be set.
    range String
    Range of a sheet to query from. Only used when non-empty. At least one of range or skip_leading_rows must be set. Typical format: "sheet_name!top_left_cell_id:bottom_right_cell_id" For example: "sheet1!A1:B20"
    skipLeadingRows Integer
    The number of rows at the top of the sheet that BigQuery will skip when reading the data. At least one of range or skip_leading_rows must be set.
    range string
    Range of a sheet to query from. Only used when non-empty. At least one of range or skip_leading_rows must be set. Typical format: "sheet_name!top_left_cell_id:bottom_right_cell_id" For example: "sheet1!A1:B20"
    skipLeadingRows number
    The number of rows at the top of the sheet that BigQuery will skip when reading the data. At least one of range or skip_leading_rows must be set.
    range str
    Range of a sheet to query from. Only used when non-empty. At least one of range or skip_leading_rows must be set. Typical format: "sheet_name!top_left_cell_id:bottom_right_cell_id" For example: "sheet1!A1:B20"
    skip_leading_rows int
    The number of rows at the top of the sheet that BigQuery will skip when reading the data. At least one of range or skip_leading_rows must be set.
    range String
    Range of a sheet to query from. Only used when non-empty. At least one of range or skip_leading_rows must be set. Typical format: "sheet_name!top_left_cell_id:bottom_right_cell_id" For example: "sheet1!A1:B20"
    skipLeadingRows Number
    The number of rows at the top of the sheet that BigQuery will skip when reading the data. At least one of range or skip_leading_rows must be set.

    TableExternalDataConfigurationHivePartitioningOptions, TableExternalDataConfigurationHivePartitioningOptionsArgs

    Mode string
    When set, what mode of hive partitioning to use when reading data. The following modes are supported.

    • AUTO: automatically infer partition key name(s) and type(s).
    • STRINGS: automatically infer partition key name(s). All types are interpreted as strings. Not all storage formats support hive partitioning. Requesting hive partitioning on an unsupported format will lead to an error. Currently supported formats are: JSON, CSV, ORC, Avro and Parquet.
    • CUSTOM: when set to CUSTOM, you must encode the partition key schema within the source_uri_prefix by setting source_uri_prefix to gs://bucket/path_to_table/{key1:TYPE1}/{key2:TYPE2}/{key3:TYPE3}.
    RequirePartitionFilter bool
    If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified.
    SourceUriPrefix string
    When hive partition detection is requested, a common prefix for all source uris must be required. The prefix must end immediately before the partition key encoding begins. For example, consider files following this data layout. gs://bucket/path_to_table/dt=2019-06-01/country=USA/id=7/file.avro gs://bucket/path_to_table/dt=2019-05-31/country=CA/id=3/file.avro When hive partitioning is requested with either AUTO or STRINGS detection, the common prefix can be either of gs://bucket/path_to_table or gs://bucket/path_to_table/. Note that when mode is set to CUSTOM, you must encode the partition key schema within the source_uri_prefix by setting source_uri_prefix to gs://bucket/path_to_table/{key1:TYPE1}/{key2:TYPE2}/{key3:TYPE3}.
    Mode string
    When set, what mode of hive partitioning to use when reading data. The following modes are supported.

    • AUTO: automatically infer partition key name(s) and type(s).
    • STRINGS: automatically infer partition key name(s). All types are interpreted as strings. Not all storage formats support hive partitioning. Requesting hive partitioning on an unsupported format will lead to an error. Currently supported formats are: JSON, CSV, ORC, Avro and Parquet.
    • CUSTOM: when set to CUSTOM, you must encode the partition key schema within the source_uri_prefix by setting source_uri_prefix to gs://bucket/path_to_table/{key1:TYPE1}/{key2:TYPE2}/{key3:TYPE3}.
    RequirePartitionFilter bool
    If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified.
    SourceUriPrefix string
    When hive partition detection is requested, a common prefix for all source uris must be required. The prefix must end immediately before the partition key encoding begins. For example, consider files following this data layout. gs://bucket/path_to_table/dt=2019-06-01/country=USA/id=7/file.avro gs://bucket/path_to_table/dt=2019-05-31/country=CA/id=3/file.avro When hive partitioning is requested with either AUTO or STRINGS detection, the common prefix can be either of gs://bucket/path_to_table or gs://bucket/path_to_table/. Note that when mode is set to CUSTOM, you must encode the partition key schema within the source_uri_prefix by setting source_uri_prefix to gs://bucket/path_to_table/{key1:TYPE1}/{key2:TYPE2}/{key3:TYPE3}.
    mode String
    When set, what mode of hive partitioning to use when reading data. The following modes are supported.

    • AUTO: automatically infer partition key name(s) and type(s).
    • STRINGS: automatically infer partition key name(s). All types are interpreted as strings. Not all storage formats support hive partitioning. Requesting hive partitioning on an unsupported format will lead to an error. Currently supported formats are: JSON, CSV, ORC, Avro and Parquet.
    • CUSTOM: when set to CUSTOM, you must encode the partition key schema within the source_uri_prefix by setting source_uri_prefix to gs://bucket/path_to_table/{key1:TYPE1}/{key2:TYPE2}/{key3:TYPE3}.
    requirePartitionFilter Boolean
    If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified.
    sourceUriPrefix String
    When hive partition detection is requested, a common prefix for all source uris must be required. The prefix must end immediately before the partition key encoding begins. For example, consider files following this data layout. gs://bucket/path_to_table/dt=2019-06-01/country=USA/id=7/file.avro gs://bucket/path_to_table/dt=2019-05-31/country=CA/id=3/file.avro When hive partitioning is requested with either AUTO or STRINGS detection, the common prefix can be either of gs://bucket/path_to_table or gs://bucket/path_to_table/. Note that when mode is set to CUSTOM, you must encode the partition key schema within the source_uri_prefix by setting source_uri_prefix to gs://bucket/path_to_table/{key1:TYPE1}/{key2:TYPE2}/{key3:TYPE3}.
    mode string
    When set, what mode of hive partitioning to use when reading data. The following modes are supported.

    • AUTO: automatically infer partition key name(s) and type(s).
    • STRINGS: automatically infer partition key name(s). All types are interpreted as strings. Not all storage formats support hive partitioning. Requesting hive partitioning on an unsupported format will lead to an error. Currently supported formats are: JSON, CSV, ORC, Avro and Parquet.
    • CUSTOM: when set to CUSTOM, you must encode the partition key schema within the source_uri_prefix by setting source_uri_prefix to gs://bucket/path_to_table/{key1:TYPE1}/{key2:TYPE2}/{key3:TYPE3}.
    requirePartitionFilter boolean
    If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified.
    sourceUriPrefix string
    When hive partition detection is requested, a common prefix for all source uris must be required. The prefix must end immediately before the partition key encoding begins. For example, consider files following this data layout. gs://bucket/path_to_table/dt=2019-06-01/country=USA/id=7/file.avro gs://bucket/path_to_table/dt=2019-05-31/country=CA/id=3/file.avro When hive partitioning is requested with either AUTO or STRINGS detection, the common prefix can be either of gs://bucket/path_to_table or gs://bucket/path_to_table/. Note that when mode is set to CUSTOM, you must encode the partition key schema within the source_uri_prefix by setting source_uri_prefix to gs://bucket/path_to_table/{key1:TYPE1}/{key2:TYPE2}/{key3:TYPE3}.
    mode str
    When set, what mode of hive partitioning to use when reading data. The following modes are supported.

    • AUTO: automatically infer partition key name(s) and type(s).
    • STRINGS: automatically infer partition key name(s). All types are interpreted as strings. Not all storage formats support hive partitioning. Requesting hive partitioning on an unsupported format will lead to an error. Currently supported formats are: JSON, CSV, ORC, Avro and Parquet.
    • CUSTOM: when set to CUSTOM, you must encode the partition key schema within the source_uri_prefix by setting source_uri_prefix to gs://bucket/path_to_table/{key1:TYPE1}/{key2:TYPE2}/{key3:TYPE3}.
    require_partition_filter bool
    If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified.
    source_uri_prefix str
    When hive partition detection is requested, a common prefix for all source uris must be required. The prefix must end immediately before the partition key encoding begins. For example, consider files following this data layout. gs://bucket/path_to_table/dt=2019-06-01/country=USA/id=7/file.avro gs://bucket/path_to_table/dt=2019-05-31/country=CA/id=3/file.avro When hive partitioning is requested with either AUTO or STRINGS detection, the common prefix can be either of gs://bucket/path_to_table or gs://bucket/path_to_table/. Note that when mode is set to CUSTOM, you must encode the partition key schema within the source_uri_prefix by setting source_uri_prefix to gs://bucket/path_to_table/{key1:TYPE1}/{key2:TYPE2}/{key3:TYPE3}.
    mode String
    When set, what mode of hive partitioning to use when reading data. The following modes are supported.

    • AUTO: automatically infer partition key name(s) and type(s).
    • STRINGS: automatically infer partition key name(s). All types are interpreted as strings. Not all storage formats support hive partitioning. Requesting hive partitioning on an unsupported format will lead to an error. Currently supported formats are: JSON, CSV, ORC, Avro and Parquet.
    • CUSTOM: when set to CUSTOM, you must encode the partition key schema within the source_uri_prefix by setting source_uri_prefix to gs://bucket/path_to_table/{key1:TYPE1}/{key2:TYPE2}/{key3:TYPE3}.
    requirePartitionFilter Boolean
    If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified.
    sourceUriPrefix String
    When hive partition detection is requested, a common prefix for all source uris must be required. The prefix must end immediately before the partition key encoding begins. For example, consider files following this data layout. gs://bucket/path_to_table/dt=2019-06-01/country=USA/id=7/file.avro gs://bucket/path_to_table/dt=2019-05-31/country=CA/id=3/file.avro When hive partitioning is requested with either AUTO or STRINGS detection, the common prefix can be either of gs://bucket/path_to_table or gs://bucket/path_to_table/. Note that when mode is set to CUSTOM, you must encode the partition key schema within the source_uri_prefix by setting source_uri_prefix to gs://bucket/path_to_table/{key1:TYPE1}/{key2:TYPE2}/{key3:TYPE3}.

    TableExternalDataConfigurationJsonOptions, TableExternalDataConfigurationJsonOptionsArgs

    Encoding string
    The character encoding of the data. The supported values are UTF-8, UTF-16BE, UTF-16LE, UTF-32BE, and UTF-32LE. The default value is UTF-8.
    Encoding string
    The character encoding of the data. The supported values are UTF-8, UTF-16BE, UTF-16LE, UTF-32BE, and UTF-32LE. The default value is UTF-8.
    encoding String
    The character encoding of the data. The supported values are UTF-8, UTF-16BE, UTF-16LE, UTF-32BE, and UTF-32LE. The default value is UTF-8.
    encoding string
    The character encoding of the data. The supported values are UTF-8, UTF-16BE, UTF-16LE, UTF-32BE, and UTF-32LE. The default value is UTF-8.
    encoding str
    The character encoding of the data. The supported values are UTF-8, UTF-16BE, UTF-16LE, UTF-32BE, and UTF-32LE. The default value is UTF-8.
    encoding String
    The character encoding of the data. The supported values are UTF-8, UTF-16BE, UTF-16LE, UTF-32BE, and UTF-32LE. The default value is UTF-8.

    TableExternalDataConfigurationParquetOptions, TableExternalDataConfigurationParquetOptionsArgs

    EnableListInference bool
    Indicates whether to use schema inference specifically for Parquet LIST logical type.
    EnumAsString bool
    Indicates whether to infer Parquet ENUM logical type as STRING instead of BYTES by default.
    EnableListInference bool
    Indicates whether to use schema inference specifically for Parquet LIST logical type.
    EnumAsString bool
    Indicates whether to infer Parquet ENUM logical type as STRING instead of BYTES by default.
    enableListInference Boolean
    Indicates whether to use schema inference specifically for Parquet LIST logical type.
    enumAsString Boolean
    Indicates whether to infer Parquet ENUM logical type as STRING instead of BYTES by default.
    enableListInference boolean
    Indicates whether to use schema inference specifically for Parquet LIST logical type.
    enumAsString boolean
    Indicates whether to infer Parquet ENUM logical type as STRING instead of BYTES by default.
    enable_list_inference bool
    Indicates whether to use schema inference specifically for Parquet LIST logical type.
    enum_as_string bool
    Indicates whether to infer Parquet ENUM logical type as STRING instead of BYTES by default.
    enableListInference Boolean
    Indicates whether to use schema inference specifically for Parquet LIST logical type.
    enumAsString Boolean
    Indicates whether to infer Parquet ENUM logical type as STRING instead of BYTES by default.

    TableMaterializedView, TableMaterializedViewArgs

    Query string
    A query whose result is persisted.
    AllowNonIncrementalDefinition bool
    Allow non incremental materialized view definition. The default value is false.
    EnableRefresh bool
    Specifies whether to use BigQuery's automatic refresh for this materialized view when the base table is updated. The default value is true.
    RefreshIntervalMs int
    The maximum frequency at which this materialized view will be refreshed. The default value is 1800000
    Query string
    A query whose result is persisted.
    AllowNonIncrementalDefinition bool
    Allow non incremental materialized view definition. The default value is false.
    EnableRefresh bool
    Specifies whether to use BigQuery's automatic refresh for this materialized view when the base table is updated. The default value is true.
    RefreshIntervalMs int
    The maximum frequency at which this materialized view will be refreshed. The default value is 1800000
    query String
    A query whose result is persisted.
    allowNonIncrementalDefinition Boolean
    Allow non incremental materialized view definition. The default value is false.
    enableRefresh Boolean
    Specifies whether to use BigQuery's automatic refresh for this materialized view when the base table is updated. The default value is true.
    refreshIntervalMs Integer
    The maximum frequency at which this materialized view will be refreshed. The default value is 1800000
    query string
    A query whose result is persisted.
    allowNonIncrementalDefinition boolean
    Allow non incremental materialized view definition. The default value is false.
    enableRefresh boolean
    Specifies whether to use BigQuery's automatic refresh for this materialized view when the base table is updated. The default value is true.
    refreshIntervalMs number
    The maximum frequency at which this materialized view will be refreshed. The default value is 1800000
    query str
    A query whose result is persisted.
    allow_non_incremental_definition bool
    Allow non incremental materialized view definition. The default value is false.
    enable_refresh bool
    Specifies whether to use BigQuery's automatic refresh for this materialized view when the base table is updated. The default value is true.
    refresh_interval_ms int
    The maximum frequency at which this materialized view will be refreshed. The default value is 1800000
    query String
    A query whose result is persisted.
    allowNonIncrementalDefinition Boolean
    Allow non incremental materialized view definition. The default value is false.
    enableRefresh Boolean
    Specifies whether to use BigQuery's automatic refresh for this materialized view when the base table is updated. The default value is true.
    refreshIntervalMs Number
    The maximum frequency at which this materialized view will be refreshed. The default value is 1800000

    TableRangePartitioning, TableRangePartitioningArgs

    Field string
    The field used to determine how to create a range-based partition.
    Range TableRangePartitioningRange
    Information required to partition based on ranges. Structure is documented below.
    Field string
    The field used to determine how to create a range-based partition.
    Range TableRangePartitioningRange
    Information required to partition based on ranges. Structure is documented below.
    field String
    The field used to determine how to create a range-based partition.
    range TableRangePartitioningRange
    Information required to partition based on ranges. Structure is documented below.
    field string
    The field used to determine how to create a range-based partition.
    range TableRangePartitioningRange
    Information required to partition based on ranges. Structure is documented below.
    field str
    The field used to determine how to create a range-based partition.
    range TableRangePartitioningRange
    Information required to partition based on ranges. Structure is documented below.
    field String
    The field used to determine how to create a range-based partition.
    range Property Map
    Information required to partition based on ranges. Structure is documented below.

    TableRangePartitioningRange, TableRangePartitioningRangeArgs

    End int
    End of the range partitioning, exclusive.
    Interval int
    The width of each range within the partition.
    Start int
    Start of the range partitioning, inclusive.
    End int
    End of the range partitioning, exclusive.
    Interval int
    The width of each range within the partition.
    Start int
    Start of the range partitioning, inclusive.
    end Integer
    End of the range partitioning, exclusive.
    interval Integer
    The width of each range within the partition.
    start Integer
    Start of the range partitioning, inclusive.
    end number
    End of the range partitioning, exclusive.
    interval number
    The width of each range within the partition.
    start number
    Start of the range partitioning, inclusive.
    end int
    End of the range partitioning, exclusive.
    interval int
    The width of each range within the partition.
    start int
    Start of the range partitioning, inclusive.
    end Number
    End of the range partitioning, exclusive.
    interval Number
    The width of each range within the partition.
    start Number
    Start of the range partitioning, inclusive.

    TableTableConstraints, TableTableConstraintsArgs

    ForeignKeys List<TableTableConstraintsForeignKey>
    Present only if the table has a foreign key. The foreign key is not enforced. Structure is documented below.
    PrimaryKey TableTableConstraintsPrimaryKey
    Represents the primary key constraint on a table's columns. Present only if the table has a primary key. The primary key is not enforced. Structure is documented below.
    ForeignKeys []TableTableConstraintsForeignKey
    Present only if the table has a foreign key. The foreign key is not enforced. Structure is documented below.
    PrimaryKey TableTableConstraintsPrimaryKey
    Represents the primary key constraint on a table's columns. Present only if the table has a primary key. The primary key is not enforced. Structure is documented below.
    foreignKeys List<TableTableConstraintsForeignKey>
    Present only if the table has a foreign key. The foreign key is not enforced. Structure is documented below.
    primaryKey TableTableConstraintsPrimaryKey
    Represents the primary key constraint on a table's columns. Present only if the table has a primary key. The primary key is not enforced. Structure is documented below.
    foreignKeys TableTableConstraintsForeignKey[]
    Present only if the table has a foreign key. The foreign key is not enforced. Structure is documented below.
    primaryKey TableTableConstraintsPrimaryKey
    Represents the primary key constraint on a table's columns. Present only if the table has a primary key. The primary key is not enforced. Structure is documented below.
    foreign_keys Sequence[TableTableConstraintsForeignKey]
    Present only if the table has a foreign key. The foreign key is not enforced. Structure is documented below.
    primary_key TableTableConstraintsPrimaryKey
    Represents the primary key constraint on a table's columns. Present only if the table has a primary key. The primary key is not enforced. Structure is documented below.
    foreignKeys List<Property Map>
    Present only if the table has a foreign key. The foreign key is not enforced. Structure is documented below.
    primaryKey Property Map
    Represents the primary key constraint on a table's columns. Present only if the table has a primary key. The primary key is not enforced. Structure is documented below.

    TableTableConstraintsForeignKey, TableTableConstraintsForeignKeyArgs

    ColumnReferences TableTableConstraintsForeignKeyColumnReferences
    The pair of the foreign key column and primary key column. Structure is documented below.
    ReferencedTable TableTableConstraintsForeignKeyReferencedTable
    The table that holds the primary key and is referenced by this foreign key. Structure is documented below.
    Name string
    Set only if the foreign key constraint is named.
    ColumnReferences TableTableConstraintsForeignKeyColumnReferences
    The pair of the foreign key column and primary key column. Structure is documented below.
    ReferencedTable TableTableConstraintsForeignKeyReferencedTable
    The table that holds the primary key and is referenced by this foreign key. Structure is documented below.
    Name string
    Set only if the foreign key constraint is named.
    columnReferences TableTableConstraintsForeignKeyColumnReferences
    The pair of the foreign key column and primary key column. Structure is documented below.
    referencedTable TableTableConstraintsForeignKeyReferencedTable
    The table that holds the primary key and is referenced by this foreign key. Structure is documented below.
    name String
    Set only if the foreign key constraint is named.
    columnReferences TableTableConstraintsForeignKeyColumnReferences
    The pair of the foreign key column and primary key column. Structure is documented below.
    referencedTable TableTableConstraintsForeignKeyReferencedTable
    The table that holds the primary key and is referenced by this foreign key. Structure is documented below.
    name string
    Set only if the foreign key constraint is named.
    column_references TableTableConstraintsForeignKeyColumnReferences
    The pair of the foreign key column and primary key column. Structure is documented below.
    referenced_table TableTableConstraintsForeignKeyReferencedTable
    The table that holds the primary key and is referenced by this foreign key. Structure is documented below.
    name str
    Set only if the foreign key constraint is named.
    columnReferences Property Map
    The pair of the foreign key column and primary key column. Structure is documented below.
    referencedTable Property Map
    The table that holds the primary key and is referenced by this foreign key. Structure is documented below.
    name String
    Set only if the foreign key constraint is named.

    TableTableConstraintsForeignKeyColumnReferences, TableTableConstraintsForeignKeyColumnReferencesArgs

    ReferencedColumn string
    The column in the primary key that is referenced by the referencingColumn
    ReferencingColumn string
    The column that composes the foreign key.
    ReferencedColumn string
    The column in the primary key that is referenced by the referencingColumn
    ReferencingColumn string
    The column that composes the foreign key.
    referencedColumn String
    The column in the primary key that is referenced by the referencingColumn
    referencingColumn String
    The column that composes the foreign key.
    referencedColumn string
    The column in the primary key that is referenced by the referencingColumn
    referencingColumn string
    The column that composes the foreign key.
    referenced_column str
    The column in the primary key that is referenced by the referencingColumn
    referencing_column str
    The column that composes the foreign key.
    referencedColumn String
    The column in the primary key that is referenced by the referencingColumn
    referencingColumn String
    The column that composes the foreign key.

    TableTableConstraintsForeignKeyReferencedTable, TableTableConstraintsForeignKeyReferencedTableArgs

    DatasetId string
    The ID of the dataset containing this table.
    ProjectId string
    The ID of the project containing this table.
    TableId string
    The ID of the table. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters. Certain operations allow suffixing of the table ID with a partition decorator, such as sample_table$20190123.
    DatasetId string
    The ID of the dataset containing this table.
    ProjectId string
    The ID of the project containing this table.
    TableId string
    The ID of the table. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters. Certain operations allow suffixing of the table ID with a partition decorator, such as sample_table$20190123.
    datasetId String
    The ID of the dataset containing this table.
    projectId String
    The ID of the project containing this table.
    tableId String
    The ID of the table. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters. Certain operations allow suffixing of the table ID with a partition decorator, such as sample_table$20190123.
    datasetId string
    The ID of the dataset containing this table.
    projectId string
    The ID of the project containing this table.
    tableId string
    The ID of the table. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters. Certain operations allow suffixing of the table ID with a partition decorator, such as sample_table$20190123.
    dataset_id str
    The ID of the dataset containing this table.
    project_id str
    The ID of the project containing this table.
    table_id str
    The ID of the table. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters. Certain operations allow suffixing of the table ID with a partition decorator, such as sample_table$20190123.
    datasetId String
    The ID of the dataset containing this table.
    projectId String
    The ID of the project containing this table.
    tableId String
    The ID of the table. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters. Certain operations allow suffixing of the table ID with a partition decorator, such as sample_table$20190123.

    TableTableConstraintsPrimaryKey, TableTableConstraintsPrimaryKeyArgs

    Columns List<string>
    The columns that are composed of the primary key constraint.
    Columns []string
    The columns that are composed of the primary key constraint.
    columns List<String>
    The columns that are composed of the primary key constraint.
    columns string[]
    The columns that are composed of the primary key constraint.
    columns Sequence[str]
    The columns that are composed of the primary key constraint.
    columns List<String>
    The columns that are composed of the primary key constraint.

    TableTableReplicationInfo, TableTableReplicationInfoArgs

    SourceDatasetId string
    The ID of the source dataset.
    SourceProjectId string
    The ID of the source project.
    SourceTableId string
    The ID of the source materialized view.
    ReplicationIntervalMs int
    The interval at which the source materialized view is polled for updates. The default is 300000.
    SourceDatasetId string
    The ID of the source dataset.
    SourceProjectId string
    The ID of the source project.
    SourceTableId string
    The ID of the source materialized view.
    ReplicationIntervalMs int
    The interval at which the source materialized view is polled for updates. The default is 300000.
    sourceDatasetId String
    The ID of the source dataset.
    sourceProjectId String
    The ID of the source project.
    sourceTableId String
    The ID of the source materialized view.
    replicationIntervalMs Integer
    The interval at which the source materialized view is polled for updates. The default is 300000.
    sourceDatasetId string
    The ID of the source dataset.
    sourceProjectId string
    The ID of the source project.
    sourceTableId string
    The ID of the source materialized view.
    replicationIntervalMs number
    The interval at which the source materialized view is polled for updates. The default is 300000.
    source_dataset_id str
    The ID of the source dataset.
    source_project_id str
    The ID of the source project.
    source_table_id str
    The ID of the source materialized view.
    replication_interval_ms int
    The interval at which the source materialized view is polled for updates. The default is 300000.
    sourceDatasetId String
    The ID of the source dataset.
    sourceProjectId String
    The ID of the source project.
    sourceTableId String
    The ID of the source materialized view.
    replicationIntervalMs Number
    The interval at which the source materialized view is polled for updates. The default is 300000.

    TableTimePartitioning, TableTimePartitioningArgs

    Type string
    The supported types are DAY, HOUR, MONTH, and YEAR, which will generate one partition per day, hour, month, and year, respectively.
    ExpirationMs int
    Number of milliseconds for which to keep the storage for a partition.
    Field string
    The field used to determine how to create a time-based partition. If time-based partitioning is enabled without this value, the table is partitioned based on the load time.
    RequirePartitionFilter bool
    If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified. require_partition_filter is deprecated and will be removed in a future major release. Use the top level field with the same name instead.

    Deprecated:This field is deprecated and will be removed in a future major release; please use the top level field with the same name instead.

    Type string
    The supported types are DAY, HOUR, MONTH, and YEAR, which will generate one partition per day, hour, month, and year, respectively.
    ExpirationMs int
    Number of milliseconds for which to keep the storage for a partition.
    Field string
    The field used to determine how to create a time-based partition. If time-based partitioning is enabled without this value, the table is partitioned based on the load time.
    RequirePartitionFilter bool
    If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified. require_partition_filter is deprecated and will be removed in a future major release. Use the top level field with the same name instead.

    Deprecated: This field is deprecated and will be removed in a future major release; please use the top level field with the same name instead.

    type String
    The supported types are DAY, HOUR, MONTH, and YEAR, which will generate one partition per day, hour, month, and year, respectively.
    expirationMs Integer
    Number of milliseconds for which to keep the storage for a partition.
    field String
    The field used to determine how to create a time-based partition. If time-based partitioning is enabled without this value, the table is partitioned based on the load time.
    requirePartitionFilter Boolean
    If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified. require_partition_filter is deprecated and will be removed in a future major release. Use the top level field with the same name instead.

    Deprecated: This field is deprecated and will be removed in a future major release; please use the top level field with the same name instead.

    type string
    The supported types are DAY, HOUR, MONTH, and YEAR, which will generate one partition per day, hour, month, and year, respectively.
    expirationMs number
    Number of milliseconds for which to keep the storage for a partition.
    field string
    The field used to determine how to create a time-based partition. If time-based partitioning is enabled without this value, the table is partitioned based on the load time.
    requirePartitionFilter boolean
    If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified. require_partition_filter is deprecated and will be removed in a future major release. Use the top level field with the same name instead.

    Deprecated: This field is deprecated and will be removed in a future major release; please use the top level field with the same name instead.

    type str
    The supported types are DAY, HOUR, MONTH, and YEAR, which will generate one partition per day, hour, month, and year, respectively.
    expiration_ms int
    Number of milliseconds for which to keep the storage for a partition.
    field str
    The field used to determine how to create a time-based partition. If time-based partitioning is enabled without this value, the table is partitioned based on the load time.
    require_partition_filter bool
    If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified. require_partition_filter is deprecated and will be removed in a future major release. Use the top level field with the same name instead.

    Deprecated: This field is deprecated and will be removed in a future major release; please use the top level field with the same name instead.

    type String
    The supported types are DAY, HOUR, MONTH, and YEAR, which will generate one partition per day, hour, month, and year, respectively.
    expirationMs Number
    Number of milliseconds for which to keep the storage for a partition.
    field String
    The field used to determine how to create a time-based partition. If time-based partitioning is enabled without this value, the table is partitioned based on the load time.
    requirePartitionFilter Boolean
    If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified. require_partition_filter is deprecated and will be removed in a future major release. Use the top level field with the same name instead.

    Deprecated: This field is deprecated and will be removed in a future major release; please use the top level field with the same name instead.

    TableView, TableViewArgs

    Query string
    A query that BigQuery executes when the view is referenced.
    UseLegacySql bool
    Specifies whether to use BigQuery's legacy SQL for this view. The default value is true. If set to false, the view will use BigQuery's standard SQL.
    Query string
    A query that BigQuery executes when the view is referenced.
    UseLegacySql bool
    Specifies whether to use BigQuery's legacy SQL for this view. The default value is true. If set to false, the view will use BigQuery's standard SQL.
    query String
    A query that BigQuery executes when the view is referenced.
    useLegacySql Boolean
    Specifies whether to use BigQuery's legacy SQL for this view. The default value is true. If set to false, the view will use BigQuery's standard SQL.
    query string
    A query that BigQuery executes when the view is referenced.
    useLegacySql boolean
    Specifies whether to use BigQuery's legacy SQL for this view. The default value is true. If set to false, the view will use BigQuery's standard SQL.
    query str
    A query that BigQuery executes when the view is referenced.
    use_legacy_sql bool
    Specifies whether to use BigQuery's legacy SQL for this view. The default value is true. If set to false, the view will use BigQuery's standard SQL.
    query String
    A query that BigQuery executes when the view is referenced.
    useLegacySql Boolean
    Specifies whether to use BigQuery's legacy SQL for this view. The default value is true. If set to false, the view will use BigQuery's standard SQL.

    Import

    BigQuery tables can be imported using any of these accepted formats:

    • projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}}

    • {{project}}/{{dataset_id}}/{{table_id}}

    • {{dataset_id}}/{{table_id}}

    When using the pulumi import command, BigQuery tables can be imported using one of the formats above. For example:

    $ pulumi import gcp:bigquery/table:Table default projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}}
    
    $ pulumi import gcp:bigquery/table:Table default {{project}}/{{dataset_id}}/{{table_id}}
    
    $ pulumi import gcp:bigquery/table:Table default {{dataset_id}}/{{table_id}}
    

    Package Details

    Repository
    Google Cloud (GCP) Classic pulumi/pulumi-gcp
    License
    Apache-2.0
    Notes
    This Pulumi package is based on the google-beta Terraform Provider.
    gcp logo
    Google Cloud Classic v7.16.0 published on Wednesday, Mar 27, 2024 by Pulumi