gcp.bigquery.Table
Explore with Pulumi AI
Creates a table resource in a dataset for Google BigQuery. For more information see the official documentation and API.
Note: On newer versions of the provider, you must explicitly set
deletion_protection=false
(and run pulumi update
to write the field to state) in order to destroy an instance. It is recommended to not set this field (or set it to true) until you’re ready to destroy.
Example Usage
// Example: create a BigQuery dataset, a day-partitioned table with an
// explicit JSON schema, and an external table backed by a Google Sheet.
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() =>
{
// Dataset that contains both tables below; tables in it default to a
// 3600000 ms (1 hour) expiration.
var defaultDataset = new Gcp.BigQuery.Dataset("defaultDataset", new()
{
DatasetId = "foo",
FriendlyName = "test",
Description = "This is a test description",
Location = "EU",
DefaultTableExpirationMs = 3600000,
Labels =
{
{ "env", "default" },
},
});
// Native table partitioned by day; its two STRING columns are fixed by the
// JSON schema string below.
var defaultTable = new Gcp.BigQuery.Table("defaultTable", new()
{
DatasetId = defaultDataset.DatasetId,
TableId = "bar",
TimePartitioning = new Gcp.BigQuery.Inputs.TableTimePartitioningArgs
{
Type = "DAY",
},
Labels =
{
{ "env", "default" },
},
Schema = @"[
{
""name"": ""permalink"",
""type"": ""STRING"",
""mode"": ""NULLABLE"",
""description"": ""The Permalink""
},
{
""name"": ""state"",
""type"": ""STRING"",
""mode"": ""NULLABLE"",
""description"": ""State where the head office is located""
}
]
",
});
// External table: queries read directly from the Google Sheet; the schema
// is autodetected and the first (header) row is skipped.
var sheet = new Gcp.BigQuery.Table("sheet", new()
{
DatasetId = defaultDataset.DatasetId,
TableId = "sheet",
ExternalDataConfiguration = new Gcp.BigQuery.Inputs.TableExternalDataConfigurationArgs
{
Autodetect = true,
SourceFormat = "GOOGLE_SHEETS",
GoogleSheetsOptions = new Gcp.BigQuery.Inputs.TableExternalDataConfigurationGoogleSheetsOptionsArgs
{
SkipLeadingRows = 1,
},
SourceUris = new[]
{
"https://docs.google.com/spreadsheets/d/123456789012345",
},
},
});
});
// Example: create a BigQuery dataset, a day-partitioned table with an
// explicit JSON schema, and an external table backed by a Google Sheet.
package main
import (
"github.com/pulumi/pulumi-gcp/sdk/v6/go/gcp/bigquery"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
// Dataset that contains both tables below; tables in it default to a
// 3600000 ms (1 hour) expiration.
defaultDataset, err := bigquery.NewDataset(ctx, "defaultDataset", &bigquery.DatasetArgs{
DatasetId: pulumi.String("foo"),
FriendlyName: pulumi.String("test"),
Description: pulumi.String("This is a test description"),
Location: pulumi.String("EU"),
DefaultTableExpirationMs: pulumi.Int(3600000),
Labels: pulumi.StringMap{
"env": pulumi.String("default"),
},
})
if err != nil {
return err
}
// Native table partitioned by day; its two STRING columns are fixed by
// the JSON schema string below.
_, err = bigquery.NewTable(ctx, "defaultTable", &bigquery.TableArgs{
DatasetId: defaultDataset.DatasetId,
TableId: pulumi.String("bar"),
TimePartitioning: &bigquery.TableTimePartitioningArgs{
Type: pulumi.String("DAY"),
},
Labels: pulumi.StringMap{
"env": pulumi.String("default"),
},
Schema: pulumi.String(`[
{
"name": "permalink",
"type": "STRING",
"mode": "NULLABLE",
"description": "The Permalink"
},
{
"name": "state",
"type": "STRING",
"mode": "NULLABLE",
"description": "State where the head office is located"
}
]
`),
})
if err != nil {
return err
}
// External table: queries read directly from the Google Sheet; the schema
// is autodetected and the first (header) row is skipped.
_, err = bigquery.NewTable(ctx, "sheet", &bigquery.TableArgs{
DatasetId: defaultDataset.DatasetId,
TableId: pulumi.String("sheet"),
ExternalDataConfiguration: &bigquery.TableExternalDataConfigurationArgs{
Autodetect: pulumi.Bool(true),
SourceFormat: pulumi.String("GOOGLE_SHEETS"),
GoogleSheetsOptions: &bigquery.TableExternalDataConfigurationGoogleSheetsOptionsArgs{
SkipLeadingRows: pulumi.Int(1),
},
SourceUris: pulumi.StringArray{
pulumi.String("https://docs.google.com/spreadsheets/d/123456789012345"),
},
},
})
if err != nil {
return err
}
return nil
})
}
// Example: create a BigQuery dataset, a day-partitioned table with an
// explicit JSON schema, and an external table backed by a Google Sheet.
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.bigquery.Dataset;
import com.pulumi.gcp.bigquery.DatasetArgs;
import com.pulumi.gcp.bigquery.Table;
import com.pulumi.gcp.bigquery.TableArgs;
import com.pulumi.gcp.bigquery.inputs.TableTimePartitioningArgs;
import com.pulumi.gcp.bigquery.inputs.TableExternalDataConfigurationArgs;
import com.pulumi.gcp.bigquery.inputs.TableExternalDataConfigurationGoogleSheetsOptionsArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
// Dataset that contains both tables below; tables in it default to a
// 3600000 ms (1 hour) expiration.
var defaultDataset = new Dataset("defaultDataset", DatasetArgs.builder()
.datasetId("foo")
.friendlyName("test")
.description("This is a test description")
.location("EU")
.defaultTableExpirationMs(3600000)
.labels(Map.of("env", "default"))
.build());
// Native table partitioned by day; its two STRING columns are fixed by
// the JSON schema text block below.
var defaultTable = new Table("defaultTable", TableArgs.builder()
.datasetId(defaultDataset.datasetId())
.tableId("bar")
.timePartitioning(TableTimePartitioningArgs.builder()
.type("DAY")
.build())
.labels(Map.of("env", "default"))
.schema("""
[
{
"name": "permalink",
"type": "STRING",
"mode": "NULLABLE",
"description": "The Permalink"
},
{
"name": "state",
"type": "STRING",
"mode": "NULLABLE",
"description": "State where the head office is located"
}
]
""")
.build());
// External table: queries read directly from the Google Sheet; the schema
// is autodetected and the first (header) row is skipped.
var sheet = new Table("sheet", TableArgs.builder()
.datasetId(defaultDataset.datasetId())
.tableId("sheet")
.externalDataConfiguration(TableExternalDataConfigurationArgs.builder()
.autodetect(true)
.sourceFormat("GOOGLE_SHEETS")
.googleSheetsOptions(TableExternalDataConfigurationGoogleSheetsOptionsArgs.builder()
.skipLeadingRows(1)
.build())
.sourceUris("https://docs.google.com/spreadsheets/d/123456789012345")
.build())
.build());
}
}
import pulumi
import pulumi_gcp as gcp

# JSON schema for the partitioned table's two nullable STRING columns.
TABLE_SCHEMA = """[
{
"name": "permalink",
"type": "STRING",
"mode": "NULLABLE",
"description": "The Permalink"
},
{
"name": "state",
"type": "STRING",
"mode": "NULLABLE",
"description": "State where the head office is located"
}
]
"""

# Dataset that contains both tables below; tables in it default to a
# 3600000 ms (1 hour) expiration.
bq_dataset = gcp.bigquery.Dataset(
    "defaultDataset",
    dataset_id="foo",
    friendly_name="test",
    description="This is a test description",
    location="EU",
    default_table_expiration_ms=3600000,
    labels={"env": "default"},
)

# Native table partitioned by day; its columns are fixed by TABLE_SCHEMA.
bq_table = gcp.bigquery.Table(
    "defaultTable",
    dataset_id=bq_dataset.dataset_id,
    table_id="bar",
    time_partitioning=gcp.bigquery.TableTimePartitioningArgs(type="DAY"),
    labels={"env": "default"},
    schema=TABLE_SCHEMA,
)

# External table: queries read directly from the Google Sheet; the schema is
# autodetected and the first (header) row is skipped.
sheet_table = gcp.bigquery.Table(
    "sheet",
    dataset_id=bq_dataset.dataset_id,
    table_id="sheet",
    external_data_configuration=gcp.bigquery.TableExternalDataConfigurationArgs(
        autodetect=True,
        source_format="GOOGLE_SHEETS",
        google_sheets_options=gcp.bigquery.TableExternalDataConfigurationGoogleSheetsOptionsArgs(
            skip_leading_rows=1,
        ),
        source_uris=["https://docs.google.com/spreadsheets/d/123456789012345"],
    ),
)
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";

// JSON schema for the partitioned table's two nullable STRING columns.
const tableSchema = `[
{
"name": "permalink",
"type": "STRING",
"mode": "NULLABLE",
"description": "The Permalink"
},
{
"name": "state",
"type": "STRING",
"mode": "NULLABLE",
"description": "State where the head office is located"
}
]
`;

// Dataset that contains both tables below; tables in it default to a
// 3600000 ms (1 hour) expiration.
const defaultDataset = new gcp.bigquery.Dataset("defaultDataset", {
    datasetId: "foo",
    friendlyName: "test",
    description: "This is a test description",
    location: "EU",
    defaultTableExpirationMs: 3600000,
    labels: { env: "default" },
});

// Native table partitioned by day; its columns are fixed by tableSchema.
const defaultTable = new gcp.bigquery.Table("defaultTable", {
    datasetId: defaultDataset.datasetId,
    tableId: "bar",
    timePartitioning: { type: "DAY" },
    labels: { env: "default" },
    schema: tableSchema,
});

// External table: queries read directly from the Google Sheet; the schema is
// autodetected and the first (header) row is skipped.
const sheet = new gcp.bigquery.Table("sheet", {
    datasetId: defaultDataset.datasetId,
    tableId: "sheet",
    externalDataConfiguration: {
        autodetect: true,
        sourceFormat: "GOOGLE_SHEETS",
        googleSheetsOptions: { skipLeadingRows: 1 },
        sourceUris: ["https://docs.google.com/spreadsheets/d/123456789012345"],
    },
});
# Example: a dataset, a day-partitioned table with an explicit JSON schema,
# and an external table backed by a Google Sheet.
resources:
  # Dataset that contains both tables below.
  defaultDataset:
    type: gcp:bigquery:Dataset
    properties:
      datasetId: foo
      friendlyName: test
      description: This is a test description
      location: EU
      # Integer millisecond count (1 hour); the scientific-notation form
      # 3.6e+06 parses as a YAML float and is a doc-generation artifact.
      defaultTableExpirationMs: 3600000
      labels:
        env: default
  # Native table partitioned by day; columns fixed by the JSON schema below.
  defaultTable:
    type: gcp:bigquery:Table
    properties:
      datasetId: ${defaultDataset.datasetId}
      tableId: bar
      timePartitioning:
        type: DAY
      labels:
        env: default
      schema: |
        [
        {
        "name": "permalink",
        "type": "STRING",
        "mode": "NULLABLE",
        "description": "The Permalink"
        },
        {
        "name": "state",
        "type": "STRING",
        "mode": "NULLABLE",
        "description": "State where the head office is located"
        }
        ]
  # External table reading directly from a Google Sheet; schema autodetected,
  # first (header) row skipped.
  sheet:
    type: gcp:bigquery:Table
    properties:
      datasetId: ${defaultDataset.datasetId}
      tableId: sheet
      externalDataConfiguration:
        autodetect: true
        sourceFormat: GOOGLE_SHEETS
        googleSheetsOptions:
          skipLeadingRows: 1
        sourceUris:
          - https://docs.google.com/spreadsheets/d/123456789012345
Create Table Resource
new Table(name: string, args: TableArgs, opts?: CustomResourceOptions);
@overload
def Table(resource_name: str,
opts: Optional[ResourceOptions] = None,
clusterings: Optional[Sequence[str]] = None,
dataset_id: Optional[str] = None,
deletion_protection: Optional[bool] = None,
description: Optional[str] = None,
encryption_configuration: Optional[TableEncryptionConfigurationArgs] = None,
expiration_time: Optional[int] = None,
external_data_configuration: Optional[TableExternalDataConfigurationArgs] = None,
friendly_name: Optional[str] = None,
labels: Optional[Mapping[str, str]] = None,
materialized_view: Optional[TableMaterializedViewArgs] = None,
max_staleness: Optional[str] = None,
project: Optional[str] = None,
range_partitioning: Optional[TableRangePartitioningArgs] = None,
schema: Optional[str] = None,
table_id: Optional[str] = None,
time_partitioning: Optional[TableTimePartitioningArgs] = None,
view: Optional[TableViewArgs] = None)
@overload
def Table(resource_name: str,
args: TableArgs,
opts: Optional[ResourceOptions] = None)
func NewTable(ctx *Context, name string, args TableArgs, opts ...ResourceOption) (*Table, error)
public Table(string name, TableArgs args, CustomResourceOptions? opts = null)
type: gcp:bigquery:Table
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args TableArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args TableArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args TableArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args TableArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args TableArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
Table Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
The Table resource accepts the following input properties:
- Dataset
Id string The dataset ID to create the table in. Changing this forces a new resource to be created.
- Table
Id string A unique ID for the resource. Changing this forces a new resource to be created.
- Clusterings List<string>
Specifies column names to use for data clustering. Up to four top-level columns are allowed, and should be specified in descending priority order.
- Deletion
Protection bool Whether or not to allow the provider to destroy the instance. Unless this field is set to false in state, a
pulumi destroy or pulumi up that would delete the instance will fail.- Description string
The field description.
- Encryption
Configuration TableEncryption Configuration Specifies how the table should be encrypted. If left blank, the table will be encrypted with a Google-managed key; that process is transparent to the user. Structure is documented below.
- Expiration
Time int The time when this table expires, in milliseconds since the epoch. If not present, the table will persist indefinitely. Expired tables will be deleted and their storage reclaimed.
- External
Data TableConfiguration External Data Configuration Describes the data format, location, and other properties of a table stored outside of BigQuery. By defining these properties, the data source can then be queried as if it were a standard BigQuery table. Structure is documented below.
- Friendly
Name string A descriptive name for the table.
- Labels Dictionary<string, string>
A mapping of labels to assign to the resource.
schema
- (Optional) A JSON schema for the table.
~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. If the API returns a different value for the same schema, e.g. it switched the order of values or replaced
STRUCT
field type withRECORD
field type, we currently cannot suppress the recurring diff this causes. As a workaround, we recommend using the schema as returned by the API.~>NOTE: If you use
external_data_configuration
documented below and do not setexternal_data_configuration.connection_id
, schemas must be specified withexternal_data_configuration.schema
. Otherwise, schemas must be specified with this top-level field.- Materialized
View TableMaterialized View If specified, configures this table as a materialized view. Structure is documented below.
- Max
Staleness string The maximum staleness of data that could be returned when the table (or stale MV) is queried. Staleness encoded as a string encoding of sql IntervalValue type.
- Project string
The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- Range
Partitioning TableRange Partitioning If specified, configures range-based partitioning for this table. Structure is documented below.
- Schema string
A JSON schema for the external table. Schema is required for CSV and JSON formats if autodetect is not on. Schema is disallowed for Google Cloud Bigtable, Cloud Datastore backups, Avro, Iceberg, ORC and Parquet formats. ~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. Furthermore drift for this field cannot be detected because BigQuery only uses this schema to compute the effective schema for the table, therefore any changes on the configured value will force the table to be recreated. This schema is effectively only applied when creating a table from an external datasource, after creation the computed schema will be stored in
google_bigquery_table.schema
~>NOTE: If you set
external_data_configuration.connection_id
, the table schema must be specified using the top-levelschema
field documented above.- Time
Partitioning TableTime Partitioning If specified, configures time-based partitioning for this table. Structure is documented below.
- View
Table
View If specified, configures this table as a view. Structure is documented below.
- Dataset
Id string The dataset ID to create the table in. Changing this forces a new resource to be created.
- Table
Id string A unique ID for the resource. Changing this forces a new resource to be created.
- Clusterings []string
Specifies column names to use for data clustering. Up to four top-level columns are allowed, and should be specified in descending priority order.
- Deletion
Protection bool Whether or not to allow the provider to destroy the instance. Unless this field is set to false in state, a
pulumi destroy or pulumi up that would delete the instance will fail.- Description string
The field description.
- Encryption
Configuration TableEncryption Configuration Args Specifies how the table should be encrypted. If left blank, the table will be encrypted with a Google-managed key; that process is transparent to the user. Structure is documented below.
- Expiration
Time int The time when this table expires, in milliseconds since the epoch. If not present, the table will persist indefinitely. Expired tables will be deleted and their storage reclaimed.
- External
Data TableConfiguration External Data Configuration Args Describes the data format, location, and other properties of a table stored outside of BigQuery. By defining these properties, the data source can then be queried as if it were a standard BigQuery table. Structure is documented below.
- Friendly
Name string A descriptive name for the table.
- Labels map[string]string
A mapping of labels to assign to the resource.
schema
- (Optional) A JSON schema for the table.
~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. If the API returns a different value for the same schema, e.g. it switched the order of values or replaced
STRUCT
field type withRECORD
field type, we currently cannot suppress the recurring diff this causes. As a workaround, we recommend using the schema as returned by the API.~>NOTE: If you use
external_data_configuration
documented below and do not setexternal_data_configuration.connection_id
, schemas must be specified withexternal_data_configuration.schema
. Otherwise, schemas must be specified with this top-level field.- Materialized
View TableMaterialized View Args If specified, configures this table as a materialized view. Structure is documented below.
- Max
Staleness string The maximum staleness of data that could be returned when the table (or stale MV) is queried. Staleness encoded as a string encoding of sql IntervalValue type.
- Project string
The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- Range
Partitioning TableRange Partitioning Args If specified, configures range-based partitioning for this table. Structure is documented below.
- Schema string
A JSON schema for the external table. Schema is required for CSV and JSON formats if autodetect is not on. Schema is disallowed for Google Cloud Bigtable, Cloud Datastore backups, Avro, Iceberg, ORC and Parquet formats. ~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. Furthermore drift for this field cannot not be detected because BigQuery only uses this schema to compute the effective schema for the table, therefore any changes on the configured value will force the table to be recreated. This schema is effectively only applied when creating a table from an external datasource, after creation the computed schema will be stored in
google_bigquery_table.schema
~>NOTE: If you set
external_data_configuration.connection_id
, the table schema must be specified using the top-levelschema
field documented above.- Time
Partitioning TableTime Partitioning Args If specified, configures time-based partitioning for this table. Structure is documented below.
- View
Table
View Args If specified, configures this table as a view. Structure is documented below.
- dataset
Id String The dataset ID to create the table in. Changing this forces a new resource to be created.
- table
Id String A unique ID for the resource. Changing this forces a new resource to be created.
- clusterings List<String>
Specifies column names to use for data clustering. Up to four top-level columns are allowed, and should be specified in descending priority order.
- deletion
Protection Boolean Whether or not to allow the provider to destroy the instance. Unless this field is set to false in state, a
pulumi destroy or pulumi up that would delete the instance will fail.- description String
The field description.
- encryption
Configuration TableEncryption Configuration Specifies how the table should be encrypted. If left blank, the table will be encrypted with a Google-managed key; that process is transparent to the user. Structure is documented below.
- expiration
Time Integer The time when this table expires, in milliseconds since the epoch. If not present, the table will persist indefinitely. Expired tables will be deleted and their storage reclaimed.
- external
Data TableConfiguration External Data Configuration Describes the data format, location, and other properties of a table stored outside of BigQuery. By defining these properties, the data source can then be queried as if it were a standard BigQuery table. Structure is documented below.
- friendly
Name String A descriptive name for the table.
- labels Map<String,String>
A mapping of labels to assign to the resource.
schema
- (Optional) A JSON schema for the table.
~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. If the API returns a different value for the same schema, e.g. it switched the order of values or replaced
STRUCT
field type withRECORD
field type, we currently cannot suppress the recurring diff this causes. As a workaround, we recommend using the schema as returned by the API.~>NOTE: If you use
external_data_configuration
documented below and do not setexternal_data_configuration.connection_id
, schemas must be specified withexternal_data_configuration.schema
. Otherwise, schemas must be specified with this top-level field.- materialized
View TableMaterialized View If specified, configures this table as a materialized view. Structure is documented below.
- max
Staleness String The maximum staleness of data that could be returned when the table (or stale MV) is queried. Staleness encoded as a string encoding of sql IntervalValue type.
- project String
The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- range
Partitioning TableRange Partitioning If specified, configures range-based partitioning for this table. Structure is documented below.
- schema String
A JSON schema for the external table. Schema is required for CSV and JSON formats if autodetect is not on. Schema is disallowed for Google Cloud Bigtable, Cloud Datastore backups, Avro, Iceberg, ORC and Parquet formats. ~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. Furthermore drift for this field cannot not be detected because BigQuery only uses this schema to compute the effective schema for the table, therefore any changes on the configured value will force the table to be recreated. This schema is effectively only applied when creating a table from an external datasource, after creation the computed schema will be stored in
google_bigquery_table.schema
~>NOTE: If you set
external_data_configuration.connection_id
, the table schema must be specified using the top-levelschema
field documented above.- time
Partitioning TableTime Partitioning If specified, configures time-based partitioning for this table. Structure is documented below.
- view
Table
View If specified, configures this table as a view. Structure is documented below.
- dataset
Id string The dataset ID to create the table in. Changing this forces a new resource to be created.
- table
Id string A unique ID for the resource. Changing this forces a new resource to be created.
- clusterings string[]
Specifies column names to use for data clustering. Up to four top-level columns are allowed, and should be specified in descending priority order.
- deletion
Protection boolean Whether or not to allow the provider to destroy the instance. Unless this field is set to false in state, a
pulumi destroy or pulumi up that would delete the instance will fail.- description string
The field description.
- encryption
Configuration TableEncryption Configuration Specifies how the table should be encrypted. If left blank, the table will be encrypted with a Google-managed key; that process is transparent to the user. Structure is documented below.
- expiration
Time number The time when this table expires, in milliseconds since the epoch. If not present, the table will persist indefinitely. Expired tables will be deleted and their storage reclaimed.
- external
Data TableConfiguration External Data Configuration Describes the data format, location, and other properties of a table stored outside of BigQuery. By defining these properties, the data source can then be queried as if it were a standard BigQuery table. Structure is documented below.
- friendly
Name string A descriptive name for the table.
- labels {[key: string]: string}
A mapping of labels to assign to the resource.
schema
- (Optional) A JSON schema for the table.
~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. If the API returns a different value for the same schema, e.g. it switched the order of values or replaced
STRUCT
field type withRECORD
field type, we currently cannot suppress the recurring diff this causes. As a workaround, we recommend using the schema as returned by the API.~>NOTE: If you use
external_data_configuration
documented below and do not setexternal_data_configuration.connection_id
, schemas must be specified withexternal_data_configuration.schema
. Otherwise, schemas must be specified with this top-level field.- materialized
View TableMaterialized View If specified, configures this table as a materialized view. Structure is documented below.
- max
Staleness string The maximum staleness of data that could be returned when the table (or stale MV) is queried. Staleness encoded as a string encoding of sql IntervalValue type.
- project string
The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- range
Partitioning TableRange Partitioning If specified, configures range-based partitioning for this table. Structure is documented below.
- schema string
A JSON schema for the external table. Schema is required for CSV and JSON formats if autodetect is not on. Schema is disallowed for Google Cloud Bigtable, Cloud Datastore backups, Avro, Iceberg, ORC and Parquet formats. ~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. Furthermore drift for this field cannot not be detected because BigQuery only uses this schema to compute the effective schema for the table, therefore any changes on the configured value will force the table to be recreated. This schema is effectively only applied when creating a table from an external datasource, after creation the computed schema will be stored in
google_bigquery_table.schema
~>NOTE: If you set
external_data_configuration.connection_id
, the table schema must be specified using the top-levelschema
field documented above.- time
Partitioning TableTime Partitioning If specified, configures time-based partitioning for this table. Structure is documented below.
- view
Table
View If specified, configures this table as a view. Structure is documented below.
- dataset_
id str The dataset ID to create the table in. Changing this forces a new resource to be created.
- table_
id str A unique ID for the resource. Changing this forces a new resource to be created.
- clusterings Sequence[str]
Specifies column names to use for data clustering. Up to four top-level columns are allowed, and should be specified in descending priority order.
- deletion_
protection bool Whether or not to allow the provider to destroy the instance. Unless this field is set to false in state, a
pulumi destroy or pulumi up that would delete the instance will fail.- description str
The field description.
- encryption_
configuration TableEncryption Configuration Args Specifies how the table should be encrypted. If left blank, the table will be encrypted with a Google-managed key; that process is transparent to the user. Structure is documented below.
- expiration_
time int The time when this table expires, in milliseconds since the epoch. If not present, the table will persist indefinitely. Expired tables will be deleted and their storage reclaimed.
- external_
data_ Tableconfiguration External Data Configuration Args Describes the data format, location, and other properties of a table stored outside of BigQuery. By defining these properties, the data source can then be queried as if it were a standard BigQuery table. Structure is documented below.
- friendly_
name str A descriptive name for the table.
- labels Mapping[str, str]
A mapping of labels to assign to the resource.
schema
- (Optional) A JSON schema for the table.
~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. If the API returns a different value for the same schema, e.g. it switched the order of values or replaced
STRUCT
field type withRECORD
field type, we currently cannot suppress the recurring diff this causes. As a workaround, we recommend using the schema as returned by the API.~>NOTE: If you use
external_data_configuration
documented below and do not setexternal_data_configuration.connection_id
, schemas must be specified withexternal_data_configuration.schema
. Otherwise, schemas must be specified with this top-level field.- materialized_
view TableMaterialized View Args If specified, configures this table as a materialized view. Structure is documented below.
- max_
staleness str The maximum staleness of data that could be returned when the table (or stale MV) is queried. Staleness encoded as a string encoding of sql IntervalValue type.
- project str
The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- range_
partitioning TableRange Partitioning Args If specified, configures range-based partitioning for this table. Structure is documented below.
- schema str
A JSON schema for the external table. Schema is required for CSV and JSON formats if autodetect is not on. Schema is disallowed for Google Cloud Bigtable, Cloud Datastore backups, Avro, Iceberg, ORC and Parquet formats. ~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. Furthermore drift for this field cannot not be detected because BigQuery only uses this schema to compute the effective schema for the table, therefore any changes on the configured value will force the table to be recreated. This schema is effectively only applied when creating a table from an external datasource, after creation the computed schema will be stored in
google_bigquery_table.schema
~>NOTE: If you set
external_data_configuration.connection_id
, the table schema must be specified using the top-levelschema
field documented above.- time_
partitioning TableTime Partitioning Args If specified, configures time-based partitioning for this table. Structure is documented below.
- view
Table
View Args If specified, configures this table as a view. Structure is documented below.
- dataset
Id String The dataset ID to create the table in. Changing this forces a new resource to be created.
- table
Id String A unique ID for the resource. Changing this forces a new resource to be created.
- clusterings List<String>
Specifies column names to use for data clustering. Up to four top-level columns are allowed, and should be specified in descending priority order.
- deletion
Protection Boolean Whether or not to allow the provider to destroy the instance. Unless this field is set to false in state, a
pulumi destroy or pulumi up that would delete the instance will fail.- description String
The field description.
- encryption
Configuration Property Map Specifies how the table should be encrypted. If left blank, the table will be encrypted with a Google-managed key; that process is transparent to the user. Structure is documented below.
- expiration
Time Number The time when this table expires, in milliseconds since the epoch. If not present, the table will persist indefinitely. Expired tables will be deleted and their storage reclaimed.
- external
Data Property MapConfiguration Describes the data format, location, and other properties of a table stored outside of BigQuery. By defining these properties, the data source can then be queried as if it were a standard BigQuery table. Structure is documented below.
- friendly
Name String A descriptive name for the table.
- labels Map<String>
A mapping of labels to assign to the resource.
schema
- (Optional) A JSON schema for the table.
~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. If the API returns a different value for the same schema, e.g. it switched the order of values or replaced
STRUCT
field type withRECORD
field type, we currently cannot suppress the recurring diff this causes. As a workaround, we recommend using the schema as returned by the API.~>NOTE: If you use
external_data_configuration
documented below and do not setexternal_data_configuration.connection_id
, schemas must be specified withexternal_data_configuration.schema
. Otherwise, schemas must be specified with this top-level field.- materialized
View Property Map If specified, configures this table as a materialized view. Structure is documented below.
- max
Staleness String The maximum staleness of data that could be returned when the table (or stale MV) is queried. Staleness encoded as a string encoding of sql IntervalValue type.
- project String
The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- range
Partitioning Property Map If specified, configures range-based partitioning for this table. Structure is documented below.
- schema String
A JSON schema for the external table. Schema is required for CSV and JSON formats if autodetect is not on. Schema is disallowed for Google Cloud Bigtable, Cloud Datastore backups, Avro, Iceberg, ORC and Parquet formats. ~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. Furthermore, drift for this field cannot be detected because BigQuery only uses this schema to compute the effective schema for the table, therefore any changes on the configured value will force the table to be recreated. This schema is effectively only applied when creating a table from an external datasource, after creation the computed schema will be stored in
google_bigquery_table.schema
~>NOTE: If you set
external_data_configuration.connection_id
, the table schema must be specified using the top-levelschema
field documented above.- time
Partitioning Property Map If specified, configures time-based partitioning for this table. Structure is documented below.
- view Property Map
If specified, configures this table as a view. Structure is documented below.
Outputs
All input properties are implicitly available as output properties. Additionally, the Table resource produces the following output properties:
- Creation
Time int The time when this table was created, in milliseconds since the epoch.
- Etag string
A hash of the resource.
- Id string
The provider-assigned unique ID for this managed resource.
- Last
Modified Time int The time when this table was last modified, in milliseconds since the epoch.
- Location string
The geographic location where the table resides. This value is inherited from the dataset.
- Num
Bytes int The size of this table in bytes, excluding any data in the streaming buffer.
- Num
Long Term Bytes int The number of bytes in the table that are considered "long-term storage".
- Num
Rows int The number of rows of data in this table, excluding any data in the streaming buffer.
- Self
Link string The URI of the created resource.
- Type string
The supported types are DAY, HOUR, MONTH, and YEAR, which will generate one partition per day, hour, month, and year, respectively.
- Creation
Time int The time when this table was created, in milliseconds since the epoch.
- Etag string
A hash of the resource.
- Id string
The provider-assigned unique ID for this managed resource.
- Last
Modified Time int The time when this table was last modified, in milliseconds since the epoch.
- Location string
The geographic location where the table resides. This value is inherited from the dataset.
- Num
Bytes int The size of this table in bytes, excluding any data in the streaming buffer.
- Num
Long Term Bytes int The number of bytes in the table that are considered "long-term storage".
- Num
Rows int The number of rows of data in this table, excluding any data in the streaming buffer.
- Self
Link string The URI of the created resource.
- Type string
The supported types are DAY, HOUR, MONTH, and YEAR, which will generate one partition per day, hour, month, and year, respectively.
- creation
Time Integer The time when this table was created, in milliseconds since the epoch.
- etag String
A hash of the resource.
- id String
The provider-assigned unique ID for this managed resource.
- last
Modified Time Integer The time when this table was last modified, in milliseconds since the epoch.
- location String
The geographic location where the table resides. This value is inherited from the dataset.
- num
Bytes Integer The size of this table in bytes, excluding any data in the streaming buffer.
- num
Long Term Bytes Integer The number of bytes in the table that are considered "long-term storage".
- num
Rows Integer The number of rows of data in this table, excluding any data in the streaming buffer.
- self
Link String The URI of the created resource.
- type String
The supported types are DAY, HOUR, MONTH, and YEAR, which will generate one partition per day, hour, month, and year, respectively.
- creation
Time number The time when this table was created, in milliseconds since the epoch.
- etag string
A hash of the resource.
- id string
The provider-assigned unique ID for this managed resource.
- last
Modified Time number The time when this table was last modified, in milliseconds since the epoch.
- location string
The geographic location where the table resides. This value is inherited from the dataset.
- num
Bytes number The size of this table in bytes, excluding any data in the streaming buffer.
- num
Long Term Bytes number The number of bytes in the table that are considered "long-term storage".
- num
Rows number The number of rows of data in this table, excluding any data in the streaming buffer.
- self
Link string The URI of the created resource.
- type string
The supported types are DAY, HOUR, MONTH, and YEAR, which will generate one partition per day, hour, month, and year, respectively.
- creation_
time int The time when this table was created, in milliseconds since the epoch.
- etag str
A hash of the resource.
- id str
The provider-assigned unique ID for this managed resource.
- last_
modified_time int The time when this table was last modified, in milliseconds since the epoch.
- location str
The geographic location where the table resides. This value is inherited from the dataset.
- num_
bytes int The size of this table in bytes, excluding any data in the streaming buffer.
- num_
long_term_bytes int The number of bytes in the table that are considered "long-term storage".
- num_
rows int The number of rows of data in this table, excluding any data in the streaming buffer.
- self_
link str The URI of the created resource.
- type str
The supported types are DAY, HOUR, MONTH, and YEAR, which will generate one partition per day, hour, month, and year, respectively.
- creation
Time Number The time when this table was created, in milliseconds since the epoch.
- etag String
A hash of the resource.
- id String
The provider-assigned unique ID for this managed resource.
- last
Modified Time Number The time when this table was last modified, in milliseconds since the epoch.
- location String
The geographic location where the table resides. This value is inherited from the dataset.
- num
Bytes Number The size of this table in bytes, excluding any data in the streaming buffer.
- num
Long Term Bytes Number The number of bytes in the table that are considered "long-term storage".
- num
Rows Number The number of rows of data in this table, excluding any data in the streaming buffer.
- self
Link String The URI of the created resource.
- type String
The supported types are DAY, HOUR, MONTH, and YEAR, which will generate one partition per day, hour, month, and year, respectively.
Look up Existing Table Resource
Get an existing Table resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: TableState, opts?: CustomResourceOptions): Table
@staticmethod
def get(resource_name: str,
id: str,
opts: Optional[ResourceOptions] = None,
clusterings: Optional[Sequence[str]] = None,
creation_time: Optional[int] = None,
dataset_id: Optional[str] = None,
deletion_protection: Optional[bool] = None,
description: Optional[str] = None,
encryption_configuration: Optional[TableEncryptionConfigurationArgs] = None,
etag: Optional[str] = None,
expiration_time: Optional[int] = None,
external_data_configuration: Optional[TableExternalDataConfigurationArgs] = None,
friendly_name: Optional[str] = None,
labels: Optional[Mapping[str, str]] = None,
last_modified_time: Optional[int] = None,
location: Optional[str] = None,
materialized_view: Optional[TableMaterializedViewArgs] = None,
max_staleness: Optional[str] = None,
num_bytes: Optional[int] = None,
num_long_term_bytes: Optional[int] = None,
num_rows: Optional[int] = None,
project: Optional[str] = None,
range_partitioning: Optional[TableRangePartitioningArgs] = None,
schema: Optional[str] = None,
self_link: Optional[str] = None,
table_id: Optional[str] = None,
time_partitioning: Optional[TableTimePartitioningArgs] = None,
type: Optional[str] = None,
view: Optional[TableViewArgs] = None) -> Table
func GetTable(ctx *Context, name string, id IDInput, state *TableState, opts ...ResourceOption) (*Table, error)
public static Table Get(string name, Input<string> id, TableState? state, CustomResourceOptions? opts = null)
public static Table get(String name, Output<String> id, TableState state, CustomResourceOptions options)
Resource lookup is not supported in YAML
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- Clusterings List<string>
Specifies column names to use for data clustering. Up to four top-level columns are allowed, and should be specified in descending priority order.
- Creation
Time int The time when this table was created, in milliseconds since the epoch.
- Dataset
Id string The dataset ID to create the table in. Changing this forces a new resource to be created.
- Deletion
Protection bool Whether or not to allow the provider to destroy the instance. Unless this field is set to false in state, a
destroy
or update
that would delete the instance will fail.- Description string
The field description.
- Encryption
Configuration TableEncryption Configuration Specifies how the table should be encrypted. If left blank, the table will be encrypted with a Google-managed key; that process is transparent to the user. Structure is documented below.
- Etag string
A hash of the resource.
- Expiration
Time int The time when this table expires, in milliseconds since the epoch. If not present, the table will persist indefinitely. Expired tables will be deleted and their storage reclaimed.
- External
Data Configuration TableExternalDataConfiguration Describes the data format, location, and other properties of a table stored outside of BigQuery. By defining these properties, the data source can then be queried as if it were a standard BigQuery table. Structure is documented below.
- Friendly
Name string A descriptive name for the table.
- Labels Dictionary<string, string>
A mapping of labels to assign to the resource.
schema
- (Optional) A JSON schema for the table.
~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. If the API returns a different value for the same schema, e.g. it switched the order of values or replaced
STRUCT
field type withRECORD
field type, we currently cannot suppress the recurring diff this causes. As a workaround, we recommend using the schema as returned by the API.~>NOTE: If you use
external_data_configuration
documented below and do not setexternal_data_configuration.connection_id
, schemas must be specified withexternal_data_configuration.schema
. Otherwise, schemas must be specified with this top-level field.- Last
Modified Time int The time when this table was last modified, in milliseconds since the epoch.
- Location string
The geographic location where the table resides. This value is inherited from the dataset.
- Materialized
View TableMaterialized View If specified, configures this table as a materialized view. Structure is documented below.
- Max
Staleness string The maximum staleness of data that could be returned when the table (or stale MV) is queried. Staleness encoded as a string encoding of sql IntervalValue type.
- Num
Bytes int The size of this table in bytes, excluding any data in the streaming buffer.
- Num
Long Term Bytes int The number of bytes in the table that are considered "long-term storage".
- Num
Rows int The number of rows of data in this table, excluding any data in the streaming buffer.
- Project string
The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- Range
Partitioning TableRange Partitioning If specified, configures range-based partitioning for this table. Structure is documented below.
- Schema string
A JSON schema for the external table. Schema is required for CSV and JSON formats if autodetect is not on. Schema is disallowed for Google Cloud Bigtable, Cloud Datastore backups, Avro, Iceberg, ORC and Parquet formats. ~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. Furthermore, drift for this field cannot be detected because BigQuery only uses this schema to compute the effective schema for the table, therefore any changes on the configured value will force the table to be recreated. This schema is effectively only applied when creating a table from an external datasource, after creation the computed schema will be stored in
google_bigquery_table.schema
~>NOTE: If you set
external_data_configuration.connection_id
, the table schema must be specified using the top-levelschema
field documented above.- Self
Link string The URI of the created resource.
- Table
Id string A unique ID for the resource. Changing this forces a new resource to be created.
- Time
Partitioning TableTime Partitioning If specified, configures time-based partitioning for this table. Structure is documented below.
- Type string
The supported types are DAY, HOUR, MONTH, and YEAR, which will generate one partition per day, hour, month, and year, respectively.
- View
Table
View If specified, configures this table as a view. Structure is documented below.
- Clusterings []string
Specifies column names to use for data clustering. Up to four top-level columns are allowed, and should be specified in descending priority order.
- Creation
Time int The time when this table was created, in milliseconds since the epoch.
- Dataset
Id string The dataset ID to create the table in. Changing this forces a new resource to be created.
- Deletion
Protection bool Whether or not to allow the provider to destroy the instance. Unless this field is set to false in state, a
destroy
or update
that would delete the instance will fail.- Description string
The field description.
- Encryption
Configuration TableEncryption Configuration Args Specifies how the table should be encrypted. If left blank, the table will be encrypted with a Google-managed key; that process is transparent to the user. Structure is documented below.
- Etag string
A hash of the resource.
- Expiration
Time int The time when this table expires, in milliseconds since the epoch. If not present, the table will persist indefinitely. Expired tables will be deleted and their storage reclaimed.
- External
Data Configuration TableExternalDataConfigurationArgs Describes the data format, location, and other properties of a table stored outside of BigQuery. By defining these properties, the data source can then be queried as if it were a standard BigQuery table. Structure is documented below.
- Friendly
Name string A descriptive name for the table.
- Labels map[string]string
A mapping of labels to assign to the resource.
schema
- (Optional) A JSON schema for the table.
~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. If the API returns a different value for the same schema, e.g. it switched the order of values or replaced
STRUCT
field type withRECORD
field type, we currently cannot suppress the recurring diff this causes. As a workaround, we recommend using the schema as returned by the API.~>NOTE: If you use
external_data_configuration
documented below and do not setexternal_data_configuration.connection_id
, schemas must be specified withexternal_data_configuration.schema
. Otherwise, schemas must be specified with this top-level field.- Last
Modified Time int The time when this table was last modified, in milliseconds since the epoch.
- Location string
The geographic location where the table resides. This value is inherited from the dataset.
- Materialized
View TableMaterialized View Args If specified, configures this table as a materialized view. Structure is documented below.
- Max
Staleness string The maximum staleness of data that could be returned when the table (or stale MV) is queried. Staleness encoded as a string encoding of sql IntervalValue type.
- Num
Bytes int The size of this table in bytes, excluding any data in the streaming buffer.
- Num
Long Term Bytes int The number of bytes in the table that are considered "long-term storage".
- Num
Rows int The number of rows of data in this table, excluding any data in the streaming buffer.
- Project string
The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- Range
Partitioning TableRange Partitioning Args If specified, configures range-based partitioning for this table. Structure is documented below.
- Schema string
A JSON schema for the external table. Schema is required for CSV and JSON formats if autodetect is not on. Schema is disallowed for Google Cloud Bigtable, Cloud Datastore backups, Avro, Iceberg, ORC and Parquet formats. ~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. Furthermore, drift for this field cannot be detected because BigQuery only uses this schema to compute the effective schema for the table, therefore any changes on the configured value will force the table to be recreated. This schema is effectively only applied when creating a table from an external datasource, after creation the computed schema will be stored in
google_bigquery_table.schema
~>NOTE: If you set
external_data_configuration.connection_id
, the table schema must be specified using the top-levelschema
field documented above.- Self
Link string The URI of the created resource.
- Table
Id string A unique ID for the resource. Changing this forces a new resource to be created.
- Time
Partitioning TableTime Partitioning Args If specified, configures time-based partitioning for this table. Structure is documented below.
- Type string
The supported types are DAY, HOUR, MONTH, and YEAR, which will generate one partition per day, hour, month, and year, respectively.
- View
Table
View Args If specified, configures this table as a view. Structure is documented below.
- clusterings List<String>
Specifies column names to use for data clustering. Up to four top-level columns are allowed, and should be specified in descending priority order.
- creation
Time Integer The time when this table was created, in milliseconds since the epoch.
- dataset
Id String The dataset ID to create the table in. Changing this forces a new resource to be created.
- deletion
Protection Boolean Whether or not to allow the provider to destroy the instance. Unless this field is set to false in state, a
destroy
or update
that would delete the instance will fail.- description String
The field description.
- encryption
Configuration TableEncryption Configuration Specifies how the table should be encrypted. If left blank, the table will be encrypted with a Google-managed key; that process is transparent to the user. Structure is documented below.
- etag String
A hash of the resource.
- expiration
Time Integer The time when this table expires, in milliseconds since the epoch. If not present, the table will persist indefinitely. Expired tables will be deleted and their storage reclaimed.
- external
Data Configuration TableExternalDataConfiguration Describes the data format, location, and other properties of a table stored outside of BigQuery. By defining these properties, the data source can then be queried as if it were a standard BigQuery table. Structure is documented below.
- friendly
Name String A descriptive name for the table.
- labels Map<String,String>
A mapping of labels to assign to the resource.
schema
- (Optional) A JSON schema for the table.
~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. If the API returns a different value for the same schema, e.g. it switched the order of values or replaced
STRUCT
field type withRECORD
field type, we currently cannot suppress the recurring diff this causes. As a workaround, we recommend using the schema as returned by the API.~>NOTE: If you use
external_data_configuration
documented below and do not setexternal_data_configuration.connection_id
, schemas must be specified withexternal_data_configuration.schema
. Otherwise, schemas must be specified with this top-level field.- last
Modified Time Integer The time when this table was last modified, in milliseconds since the epoch.
- location String
The geographic location where the table resides. This value is inherited from the dataset.
- materialized
View TableMaterialized View If specified, configures this table as a materialized view. Structure is documented below.
- max
Staleness String The maximum staleness of data that could be returned when the table (or stale MV) is queried. Staleness encoded as a string encoding of sql IntervalValue type.
- num
Bytes Integer The size of this table in bytes, excluding any data in the streaming buffer.
- num
Long Term Bytes Integer The number of bytes in the table that are considered "long-term storage".
- num
Rows Integer The number of rows of data in this table, excluding any data in the streaming buffer.
- project String
The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- range
Partitioning TableRange Partitioning If specified, configures range-based partitioning for this table. Structure is documented below.
- schema String
A JSON schema for the external table. Schema is required for CSV and JSON formats if autodetect is not on. Schema is disallowed for Google Cloud Bigtable, Cloud Datastore backups, Avro, Iceberg, ORC and Parquet formats. ~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. Furthermore, drift for this field cannot be detected because BigQuery only uses this schema to compute the effective schema for the table, therefore any changes on the configured value will force the table to be recreated. This schema is effectively only applied when creating a table from an external datasource, after creation the computed schema will be stored in
google_bigquery_table.schema
~>NOTE: If you set
external_data_configuration.connection_id
, the table schema must be specified using the top-levelschema
field documented above.- self
Link String The URI of the created resource.
- table
Id String A unique ID for the resource. Changing this forces a new resource to be created.
- time
Partitioning TableTime Partitioning If specified, configures time-based partitioning for this table. Structure is documented below.
- type String
The supported types are DAY, HOUR, MONTH, and YEAR, which will generate one partition per day, hour, month, and year, respectively.
- view
Table
View If specified, configures this table as a view. Structure is documented below.
- clusterings string[]
Specifies column names to use for data clustering. Up to four top-level columns are allowed, and should be specified in descending priority order.
- creation
Time number The time when this table was created, in milliseconds since the epoch.
- dataset
Id string The dataset ID to create the table in. Changing this forces a new resource to be created.
- deletion
Protection boolean Whether or not to allow the provider to destroy the instance. Unless this field is set to false in state, a
destroy
or update
that would delete the instance will fail.- description string
The field description.
- encryption
Configuration TableEncryption Configuration Specifies how the table should be encrypted. If left blank, the table will be encrypted with a Google-managed key; that process is transparent to the user. Structure is documented below.
- etag string
A hash of the resource.
- expiration
Time number The time when this table expires, in milliseconds since the epoch. If not present, the table will persist indefinitely. Expired tables will be deleted and their storage reclaimed.
- external
Data Configuration TableExternalDataConfiguration Describes the data format, location, and other properties of a table stored outside of BigQuery. By defining these properties, the data source can then be queried as if it were a standard BigQuery table. Structure is documented below.
- friendly
Name string A descriptive name for the table.
- labels {[key: string]: string}
A mapping of labels to assign to the resource.
schema
- (Optional) A JSON schema for the table.
~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. If the API returns a different value for the same schema, e.g. it switched the order of values or replaced
STRUCT
field type withRECORD
field type, we currently cannot suppress the recurring diff this causes. As a workaround, we recommend using the schema as returned by the API.~>NOTE: If you use
external_data_configuration
documented below and do not setexternal_data_configuration.connection_id
, schemas must be specified withexternal_data_configuration.schema
. Otherwise, schemas must be specified with this top-level field.- last
Modified Time number The time when this table was last modified, in milliseconds since the epoch.
- location string
The geographic location where the table resides. This value is inherited from the dataset.
- materialized
View TableMaterialized View If specified, configures this table as a materialized view. Structure is documented below.
- max
Staleness string The maximum staleness of data that could be returned when the table (or stale MV) is queried. Staleness encoded as a string encoding of sql IntervalValue type.
- num
Bytes number The size of this table in bytes, excluding any data in the streaming buffer.
- num
Long Term Bytes number The number of bytes in the table that are considered "long-term storage".
- num
Rows number The number of rows of data in this table, excluding any data in the streaming buffer.
- project string
The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- range
Partitioning TableRange Partitioning If specified, configures range-based partitioning for this table. Structure is documented below.
- schema string
A JSON schema for the external table. Schema is required for CSV and JSON formats if autodetect is not on. Schema is disallowed for Google Cloud Bigtable, Cloud Datastore backups, Avro, Iceberg, ORC and Parquet formats. ~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. Furthermore, drift for this field cannot be detected because BigQuery only uses this schema to compute the effective schema for the table, therefore any changes on the configured value will force the table to be recreated. This schema is effectively only applied when creating a table from an external datasource, after creation the computed schema will be stored in
google_bigquery_table.schema
~>NOTE: If you set
external_data_configuration.connection_id
, the table schema must be specified using the top-levelschema
field documented above.- self
Link string The URI of the created resource.
- table
Id string A unique ID for the resource. Changing this forces a new resource to be created.
- time
Partitioning TableTime Partitioning If specified, configures time-based partitioning for this table. Structure is documented below.
- type string
The supported types are DAY, HOUR, MONTH, and YEAR, which will generate one partition per day, hour, month, and year, respectively.
- view
Table
View If specified, configures this table as a view. Structure is documented below.
- clusterings Sequence[str]
Specifies column names to use for data clustering. Up to four top-level columns are allowed, and should be specified in descending priority order.
- creation_
time int The time when this table was created, in milliseconds since the epoch.
- dataset_
id str The dataset ID to create the table in. Changing this forces a new resource to be created.
- deletion_
protection bool Whether or not to allow the provider to destroy the instance. Unless this field is set to false in state, a
destroy
or update
that would delete the instance will fail.- description str
The field description.
- encryption_
configuration TableEncryption Configuration Args Specifies how the table should be encrypted. If left blank, the table will be encrypted with a Google-managed key; that process is transparent to the user. Structure is documented below.
- etag str
A hash of the resource.
- expiration_
time int The time when this table expires, in milliseconds since the epoch. If not present, the table will persist indefinitely. Expired tables will be deleted and their storage reclaimed.
- external_
data_configuration TableExternalDataConfigurationArgs Describes the data format, location, and other properties of a table stored outside of BigQuery. By defining these properties, the data source can then be queried as if it were a standard BigQuery table. Structure is documented below.
- friendly_
name str A descriptive name for the table.
- labels Mapping[str, str]
A mapping of labels to assign to the resource.
schema
- (Optional) A JSON schema for the table.
~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. If the API returns a different value for the same schema, e.g. it switched the order of values or replaced
STRUCT
field type withRECORD
field type, we currently cannot suppress the recurring diff this causes. As a workaround, we recommend using the schema as returned by the API.~>NOTE: If you use
external_data_configuration
documented below and do not setexternal_data_configuration.connection_id
, schemas must be specified withexternal_data_configuration.schema
. Otherwise, schemas must be specified with this top-level field.
- last_modified_time int
The time when this table was last modified, in milliseconds since the epoch.
- location str
The geographic location where the table resides. This value is inherited from the dataset.
- materialized_
view TableMaterialized View Args If specified, configures this table as a materialized view. Structure is documented below.
- max_
staleness str The maximum staleness of data that could be returned when the table (or stale MV) is queried. Staleness encoded as a string encoding of sql IntervalValue type.
- num_
bytes int The size of this table in bytes, excluding any data in the streaming buffer.
- num_long_term_bytes int
The number of bytes in the table that are considered "long-term storage".
- num_
rows int The number of rows of data in this table, excluding any data in the streaming buffer.
- project str
The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- range_
partitioning TableRange Partitioning Args If specified, configures range-based partitioning for this table. Structure is documented below.
- schema str
A JSON schema for the external table. Schema is required for CSV and JSON formats if autodetect is not on. Schema is disallowed for Google Cloud Bigtable, Cloud Datastore backups, Avro, Iceberg, ORC and Parquet formats. ~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. Furthermore, drift for this field cannot be detected because BigQuery only uses this schema to compute the effective schema for the table, therefore any changes on the configured value will force the table to be recreated. This schema is effectively only applied when creating a table from an external datasource, after creation the computed schema will be stored in
google_bigquery_table.schema
~>NOTE: If you set
external_data_configuration.connection_id
, the table schema must be specified using the top-levelschema
field documented above.- self_
link str The URI of the created resource.
- table_
id str A unique ID for the resource. Changing this forces a new resource to be created.
- time_
partitioning TableTime Partitioning Args If specified, configures time-based partitioning for this table. Structure is documented below.
- type str
The supported types are DAY, HOUR, MONTH, and YEAR, which will generate one partition per day, hour, month, and year, respectively.
- view
Table
View Args If specified, configures this table as a view. Structure is documented below.
- clusterings List<String>
Specifies column names to use for data clustering. Up to four top-level columns are allowed, and should be specified in descending priority order.
- creation
Time Number The time when this table was created, in milliseconds since the epoch.
- dataset
Id String The dataset ID to create the table in. Changing this forces a new resource to be created.
- deletionProtection Boolean
Whether or not to allow the provider to destroy the instance. Unless this field is set to false in state, a pulumi destroy or pulumi update that would delete the instance will fail.
- description String
The field description.
- encryption
Configuration Property Map Specifies how the table should be encrypted. If left blank, the table will be encrypted with a Google-managed key; that process is transparent to the user. Structure is documented below.
- etag String
A hash of the resource.
- expiration
Time Number The time when this table expires, in milliseconds since the epoch. If not present, the table will persist indefinitely. Expired tables will be deleted and their storage reclaimed.
- externalDataConfiguration Property Map
Describes the data format, location, and other properties of a table stored outside of BigQuery. By defining these properties, the data source can then be queried as if it were a standard BigQuery table. Structure is documented below.
- friendly
Name String A descriptive name for the table.
- labels Map<String>
A mapping of labels to assign to the resource.
schema
- (Optional) A JSON schema for the table.
~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. If the API returns a different value for the same schema, e.g. it switched the order of values or replaced
STRUCT
field type withRECORD
field type, we currently cannot suppress the recurring diff this causes. As a workaround, we recommend using the schema as returned by the API.~>NOTE: If you use
external_data_configuration
documented below and do not setexternal_data_configuration.connection_id
, schemas must be specified withexternal_data_configuration.schema
. Otherwise, schemas must be specified with this top-level field.
- lastModifiedTime Number
The time when this table was last modified, in milliseconds since the epoch.
- location String
The geographic location where the table resides. This value is inherited from the dataset.
- materialized
View Property Map If specified, configures this table as a materialized view. Structure is documented below.
- max
Staleness String The maximum staleness of data that could be returned when the table (or stale MV) is queried. Staleness encoded as a string encoding of sql IntervalValue type.
- num
Bytes Number The size of this table in bytes, excluding any data in the streaming buffer.
- numLongTermBytes Number
The number of bytes in the table that are considered "long-term storage".
- num
Rows Number The number of rows of data in this table, excluding any data in the streaming buffer.
- project String
The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- range
Partitioning Property Map If specified, configures range-based partitioning for this table. Structure is documented below.
- schema String
A JSON schema for the external table. Schema is required for CSV and JSON formats if autodetect is not on. Schema is disallowed for Google Cloud Bigtable, Cloud Datastore backups, Avro, Iceberg, ORC and Parquet formats. ~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. Furthermore, drift for this field cannot be detected because BigQuery only uses this schema to compute the effective schema for the table, therefore any changes on the configured value will force the table to be recreated. This schema is effectively only applied when creating a table from an external datasource, after creation the computed schema will be stored in
google_bigquery_table.schema
~>NOTE: If you set
external_data_configuration.connection_id
, the table schema must be specified using the top-levelschema
field documented above.- self
Link String The URI of the created resource.
- table
Id String A unique ID for the resource. Changing this forces a new resource to be created.
- time
Partitioning Property Map If specified, configures time-based partitioning for this table. Structure is documented below.
- type String
The supported types are DAY, HOUR, MONTH, and YEAR, which will generate one partition per day, hour, month, and year, respectively.
- view Property Map
If specified, configures this table as a view. Structure is documented below.
Supporting Types
TableEncryptionConfiguration, TableEncryptionConfigurationArgs
- KmsKeyName string
The self link or full name of a key which should be used to encrypt this table. Note that the default bigquery service account will need to have encrypt/decrypt permissions on this key - you may want to see the gcp.bigquery.getDefaultServiceAccount datasource and the gcp.kms.CryptoKeyIAMBinding resource.
- KmsKeyVersion string
The self link or full name of the kms key version used to encrypt this table.
- Kms
Key stringName The self link or full name of a key which should be used to encrypt this table. Note that the default bigquery service account will need to have encrypt/decrypt permissions on this key - you may want to see the
gcp.bigquery.getDefaultServiceAccount
datasource and thegcp.kms.CryptoKeyIAMBinding
resource.- Kms
Key stringVersion The self link or full name of the kms key version used to encrypt this table.
- kms
Key StringName The self link or full name of a key which should be used to encrypt this table. Note that the default bigquery service account will need to have encrypt/decrypt permissions on this key - you may want to see the
gcp.bigquery.getDefaultServiceAccount
datasource and thegcp.kms.CryptoKeyIAMBinding
resource.- kms
Key StringVersion The self link or full name of the kms key version used to encrypt this table.
- kms
Key stringName The self link or full name of a key which should be used to encrypt this table. Note that the default bigquery service account will need to have encrypt/decrypt permissions on this key - you may want to see the
gcp.bigquery.getDefaultServiceAccount
datasource and thegcp.kms.CryptoKeyIAMBinding
resource.- kms
Key stringVersion The self link or full name of the kms key version used to encrypt this table.
- kms_key_name str
The self link or full name of a key which should be used to encrypt this table. Note that the default bigquery service account will need to have encrypt/decrypt permissions on this key - you may want to see the gcp.bigquery.getDefaultServiceAccount datasource and the gcp.kms.CryptoKeyIAMBinding resource.
- kms_key_version str
The self link or full name of the kms key version used to encrypt this table.
- kms
Key StringName The self link or full name of a key which should be used to encrypt this table. Note that the default bigquery service account will need to have encrypt/decrypt permissions on this key - you may want to see the
gcp.bigquery.getDefaultServiceAccount
datasource and thegcp.kms.CryptoKeyIAMBinding
resource.- kms
Key StringVersion The self link or full name of the kms key version used to encrypt this table.
TableExternalDataConfiguration, TableExternalDataConfigurationArgs
- Autodetect bool
Let BigQuery try to autodetect the schema and format of the table.
- Source
Uris List<string> A list of the fully-qualified URIs that point to your data in Google Cloud.
- Avro
Options TableExternal Data Configuration Avro Options Additional options if
source_format
is set to "AVRO". Structure is documented below.- Compression string
The compression type of the data source. Valid values are "NONE" or "GZIP".
- Connection
Id string The connection specifying the credentials to be used to read external storage, such as Azure Blob, Cloud Storage, or S3. The
connection_id
can have the form{{project}}.{{location}}.{{connection_id}}
orprojects/{{project}}/locations/{{location}}/connections/{{connection_id}}
.~>NOTE: If you set
external_data_configuration.connection_id
, the table schema must be specified using the top-levelschema
field documented above.- Csv
Options TableExternal Data Configuration Csv Options Additional properties to set if
source_format
is set to "CSV". Structure is documented below.- File
Set stringSpec Type Specifies how source URIs are interpreted for constructing the file set to load. By default source URIs are expanded against the underlying storage. Other options include specifying manifest files. Only applicable to object storage systems. Docs
- Google
Sheets TableOptions External Data Configuration Google Sheets Options Additional options if
source_format
is set to "GOOGLE_SHEETS". Structure is documented below.- Hive
Partitioning TableOptions External Data Configuration Hive Partitioning Options When set, configures hive partitioning support. Not all storage formats support hive partitioning -- requesting hive partitioning on an unsupported format will lead to an error, as will providing an invalid specification. Structure is documented below.
- Ignore
Unknown boolValues Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false.
- Json
Options TableExternal Data Configuration Json Options Additional properties to set if
source_format
is set to "JSON". Structure is documented below.- Max
Bad intRecords The maximum number of bad records that BigQuery can ignore when reading data.
- Metadata
Cache stringMode Metadata Cache Mode for the table. Set this to enable caching of metadata from external data source. Valid values are
AUTOMATIC
andMANUAL
.- Object
Metadata string Object Metadata is used to create Object Tables. Object Tables contain a listing of objects (with their metadata) found at the sourceUris. If
object_metadata
is set,source_format
should be omitted.- Parquet
Options TableExternal Data Configuration Parquet Options Additional properties to set if
source_format
is set to "PARQUET". Structure is documented below.- Reference
File stringSchema Uri When creating an external table, the user can provide a reference file with the table schema. This is enabled for the following formats: AVRO, PARQUET, ORC.
- Schema string
A JSON schema for the external table. Schema is required for CSV and JSON formats if autodetect is not on. Schema is disallowed for Google Cloud Bigtable, Cloud Datastore backups, Avro, Iceberg, ORC and Parquet formats. ~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. Furthermore, drift for this field cannot be detected because BigQuery only uses this schema to compute the effective schema for the table, therefore any changes on the configured value will force the table to be recreated. This schema is effectively only applied when creating a table from an external datasource, after creation the computed schema will be stored in
google_bigquery_table.schema
~>NOTE: If you set
external_data_configuration.connection_id
, the table schema must be specified using the top-levelschema
field documented above.- Source
Format string The data format. Please see sourceFormat under ExternalDataConfiguration in Bigquery's public API documentation for supported formats. To use "GOOGLE_SHEETS" the
scopes
must include "https://www.googleapis.com/auth/drive.readonly".
- Autodetect bool
Let BigQuery try to autodetect the schema and format of the table.
- Source
Uris []string A list of the fully-qualified URIs that point to your data in Google Cloud.
- Avro
Options TableExternal Data Configuration Avro Options Additional options if
source_format
is set to "AVRO". Structure is documented below.- Compression string
The compression type of the data source. Valid values are "NONE" or "GZIP".
- Connection
Id string The connection specifying the credentials to be used to read external storage, such as Azure Blob, Cloud Storage, or S3. The
connection_id
can have the form{{project}}.{{location}}.{{connection_id}}
orprojects/{{project}}/locations/{{location}}/connections/{{connection_id}}
.~>NOTE: If you set
external_data_configuration.connection_id
, the table schema must be specified using the top-levelschema
field documented above.- Csv
Options TableExternal Data Configuration Csv Options Additional properties to set if
source_format
is set to "CSV". Structure is documented below.- File
Set stringSpec Type Specifies how source URIs are interpreted for constructing the file set to load. By default source URIs are expanded against the underlying storage. Other options include specifying manifest files. Only applicable to object storage systems. Docs
- Google
Sheets TableOptions External Data Configuration Google Sheets Options Additional options if
source_format
is set to "GOOGLE_SHEETS". Structure is documented below.- Hive
Partitioning TableOptions External Data Configuration Hive Partitioning Options When set, configures hive partitioning support. Not all storage formats support hive partitioning -- requesting hive partitioning on an unsupported format will lead to an error, as will providing an invalid specification. Structure is documented below.
- Ignore
Unknown boolValues Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false.
- Json
Options TableExternal Data Configuration Json Options Additional properties to set if
source_format
is set to "JSON". Structure is documented below.- Max
Bad intRecords The maximum number of bad records that BigQuery can ignore when reading data.
- Metadata
Cache stringMode Metadata Cache Mode for the table. Set this to enable caching of metadata from external data source. Valid values are
AUTOMATIC
andMANUAL
.- Object
Metadata string Object Metadata is used to create Object Tables. Object Tables contain a listing of objects (with their metadata) found at the sourceUris. If
object_metadata
is set,source_format
should be omitted.- Parquet
Options TableExternal Data Configuration Parquet Options Additional properties to set if
source_format
is set to "PARQUET". Structure is documented below.- Reference
File stringSchema Uri When creating an external table, the user can provide a reference file with the table schema. This is enabled for the following formats: AVRO, PARQUET, ORC.
- Schema string
A JSON schema for the external table. Schema is required for CSV and JSON formats if autodetect is not on. Schema is disallowed for Google Cloud Bigtable, Cloud Datastore backups, Avro, Iceberg, ORC and Parquet formats. ~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. Furthermore, drift for this field cannot be detected because BigQuery only uses this schema to compute the effective schema for the table, therefore any changes on the configured value will force the table to be recreated. This schema is effectively only applied when creating a table from an external datasource, after creation the computed schema will be stored in
google_bigquery_table.schema
~>NOTE: If you set
external_data_configuration.connection_id
, the table schema must be specified using the top-levelschema
field documented above.- Source
Format string The data format. Please see sourceFormat under ExternalDataConfiguration in Bigquery's public API documentation for supported formats. To use "GOOGLE_SHEETS" the
scopes
must include "https://www.googleapis.com/auth/drive.readonly".
- autodetect Boolean
Let BigQuery try to autodetect the schema and format of the table.
- source
Uris List<String> A list of the fully-qualified URIs that point to your data in Google Cloud.
- avro
Options TableExternal Data Configuration Avro Options Additional options if
source_format
is set to "AVRO". Structure is documented below.- compression String
The compression type of the data source. Valid values are "NONE" or "GZIP".
- connection
Id String The connection specifying the credentials to be used to read external storage, such as Azure Blob, Cloud Storage, or S3. The
connection_id
can have the form{{project}}.{{location}}.{{connection_id}}
orprojects/{{project}}/locations/{{location}}/connections/{{connection_id}}
.~>NOTE: If you set
external_data_configuration.connection_id
, the table schema must be specified using the top-levelschema
field documented above.- csv
Options TableExternal Data Configuration Csv Options Additional properties to set if
source_format
is set to "CSV". Structure is documented below.- file
Set StringSpec Type Specifies how source URIs are interpreted for constructing the file set to load. By default source URIs are expanded against the underlying storage. Other options include specifying manifest files. Only applicable to object storage systems. Docs
- google
Sheets TableOptions External Data Configuration Google Sheets Options Additional options if
source_format
is set to "GOOGLE_SHEETS". Structure is documented below.- hive
Partitioning TableOptions External Data Configuration Hive Partitioning Options When set, configures hive partitioning support. Not all storage formats support hive partitioning -- requesting hive partitioning on an unsupported format will lead to an error, as will providing an invalid specification. Structure is documented below.
- ignore
Unknown BooleanValues Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false.
- json
Options TableExternal Data Configuration Json Options Additional properties to set if
source_format
is set to "JSON". Structure is documented below.- max
Bad IntegerRecords The maximum number of bad records that BigQuery can ignore when reading data.
- metadata
Cache StringMode Metadata Cache Mode for the table. Set this to enable caching of metadata from external data source. Valid values are
AUTOMATIC
andMANUAL
.- object
Metadata String Object Metadata is used to create Object Tables. Object Tables contain a listing of objects (with their metadata) found at the sourceUris. If
object_metadata
is set,source_format
should be omitted.- parquet
Options TableExternal Data Configuration Parquet Options Additional properties to set if
source_format
is set to "PARQUET". Structure is documented below.- reference
File StringSchema Uri When creating an external table, the user can provide a reference file with the table schema. This is enabled for the following formats: AVRO, PARQUET, ORC.
- schema String
A JSON schema for the external table. Schema is required for CSV and JSON formats if autodetect is not on. Schema is disallowed for Google Cloud Bigtable, Cloud Datastore backups, Avro, Iceberg, ORC and Parquet formats. ~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. Furthermore, drift for this field cannot be detected because BigQuery only uses this schema to compute the effective schema for the table, therefore any changes on the configured value will force the table to be recreated. This schema is effectively only applied when creating a table from an external datasource, after creation the computed schema will be stored in
google_bigquery_table.schema
~>NOTE: If you set
external_data_configuration.connection_id
, the table schema must be specified using the top-levelschema
field documented above.- source
Format String The data format. Please see sourceFormat under ExternalDataConfiguration in Bigquery's public API documentation for supported formats. To use "GOOGLE_SHEETS" the
scopes
must include "https://www.googleapis.com/auth/drive.readonly".
- autodetect boolean
Let BigQuery try to autodetect the schema and format of the table.
- source
Uris string[] A list of the fully-qualified URIs that point to your data in Google Cloud.
- avro
Options TableExternal Data Configuration Avro Options Additional options if
source_format
is set to "AVRO". Structure is documented below.- compression string
The compression type of the data source. Valid values are "NONE" or "GZIP".
- connection
Id string The connection specifying the credentials to be used to read external storage, such as Azure Blob, Cloud Storage, or S3. The
connection_id
can have the form{{project}}.{{location}}.{{connection_id}}
orprojects/{{project}}/locations/{{location}}/connections/{{connection_id}}
.~>NOTE: If you set
external_data_configuration.connection_id
, the table schema must be specified using the top-levelschema
field documented above.- csv
Options TableExternal Data Configuration Csv Options Additional properties to set if
source_format
is set to "CSV". Structure is documented below.- file
Set stringSpec Type Specifies how source URIs are interpreted for constructing the file set to load. By default source URIs are expanded against the underlying storage. Other options include specifying manifest files. Only applicable to object storage systems. Docs
- google
Sheets TableOptions External Data Configuration Google Sheets Options Additional options if
source_format
is set to "GOOGLE_SHEETS". Structure is documented below.- hive
Partitioning TableOptions External Data Configuration Hive Partitioning Options When set, configures hive partitioning support. Not all storage formats support hive partitioning -- requesting hive partitioning on an unsupported format will lead to an error, as will providing an invalid specification. Structure is documented below.
- ignore
Unknown booleanValues Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false.
- json
Options TableExternal Data Configuration Json Options Additional properties to set if
source_format
is set to "JSON". Structure is documented below.- max
Bad numberRecords The maximum number of bad records that BigQuery can ignore when reading data.
- metadata
Cache stringMode Metadata Cache Mode for the table. Set this to enable caching of metadata from external data source. Valid values are
AUTOMATIC
andMANUAL
.- object
Metadata string Object Metadata is used to create Object Tables. Object Tables contain a listing of objects (with their metadata) found at the sourceUris. If
object_metadata
is set,source_format
should be omitted.- parquet
Options TableExternal Data Configuration Parquet Options Additional properties to set if
source_format
is set to "PARQUET". Structure is documented below.- reference
File stringSchema Uri When creating an external table, the user can provide a reference file with the table schema. This is enabled for the following formats: AVRO, PARQUET, ORC.
- schema string
A JSON schema for the external table. Schema is required for CSV and JSON formats if autodetect is not on. Schema is disallowed for Google Cloud Bigtable, Cloud Datastore backups, Avro, Iceberg, ORC and Parquet formats. ~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. Furthermore, drift for this field cannot be detected because BigQuery only uses this schema to compute the effective schema for the table, therefore any changes on the configured value will force the table to be recreated. This schema is effectively only applied when creating a table from an external datasource, after creation the computed schema will be stored in
google_bigquery_table.schema
~>NOTE: If you set
external_data_configuration.connection_id
, the table schema must be specified using the top-levelschema
field documented above.- source
Format string The data format. Please see sourceFormat under ExternalDataConfiguration in Bigquery's public API documentation for supported formats. To use "GOOGLE_SHEETS" the
scopes
must include "https://www.googleapis.com/auth/drive.readonly".
- autodetect bool
Let BigQuery try to autodetect the schema and format of the table.
- source_
uris Sequence[str] A list of the fully-qualified URIs that point to your data in Google Cloud.
- avro_
options TableExternal Data Configuration Avro Options Additional options if
source_format
is set to "AVRO". Structure is documented below.- compression str
The compression type of the data source. Valid values are "NONE" or "GZIP".
- connection_
id str The connection specifying the credentials to be used to read external storage, such as Azure Blob, Cloud Storage, or S3. The
connection_id
can have the form{{project}}.{{location}}.{{connection_id}}
orprojects/{{project}}/locations/{{location}}/connections/{{connection_id}}
.~>NOTE: If you set
external_data_configuration.connection_id
, the table schema must be specified using the top-levelschema
field documented above.- csv_
options TableExternal Data Configuration Csv Options Additional properties to set if
source_format
is set to "CSV". Structure is documented below.- file_
set_ strspec_ type Specifies how source URIs are interpreted for constructing the file set to load. By default source URIs are expanded against the underlying storage. Other options include specifying manifest files. Only applicable to object storage systems. Docs
- google_
sheets_ Tableoptions External Data Configuration Google Sheets Options Additional options if
source_format
is set to "GOOGLE_SHEETS". Structure is documented below.- hive_
partitioning_ Tableoptions External Data Configuration Hive Partitioning Options When set, configures hive partitioning support. Not all storage formats support hive partitioning -- requesting hive partitioning on an unsupported format will lead to an error, as will providing an invalid specification. Structure is documented below.
- ignore_
unknown_ boolvalues Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false.
- json_
options TableExternal Data Configuration Json Options Additional properties to set if
source_format
is set to "JSON". Structure is documented below.- max_
bad_ intrecords The maximum number of bad records that BigQuery can ignore when reading data.
- metadata_
cache_ strmode Metadata Cache Mode for the table. Set this to enable caching of metadata from external data source. Valid values are
AUTOMATIC
andMANUAL
.- object_
metadata str Object Metadata is used to create Object Tables. Object Tables contain a listing of objects (with their metadata) found at the sourceUris. If
object_metadata
is set,source_format
should be omitted.- parquet_
options TableExternal Data Configuration Parquet Options Additional properties to set if
source_format
is set to "PARQUET". Structure is documented below.- reference_
file_ strschema_ uri When creating an external table, the user can provide a reference file with the table schema. This is enabled for the following formats: AVRO, PARQUET, ORC.
- schema str
A JSON schema for the external table. Schema is required for CSV and JSON formats if autodetect is not on. Schema is disallowed for Google Cloud Bigtable, Cloud Datastore backups, Avro, Iceberg, ORC and Parquet formats. ~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. Furthermore, drift for this field cannot be detected because BigQuery only uses this schema to compute the effective schema for the table, therefore any changes on the configured value will force the table to be recreated. This schema is effectively only applied when creating a table from an external datasource, after creation the computed schema will be stored in
google_bigquery_table.schema
~>NOTE: If you set
external_data_configuration.connection_id
, the table schema must be specified using the top-levelschema
field documented above.- source_
format str The data format. Please see sourceFormat under ExternalDataConfiguration in Bigquery's public API documentation for supported formats. To use "GOOGLE_SHEETS" the
scopes
must include "https://www.googleapis.com/auth/drive.readonly".
- autodetect Boolean
Let BigQuery try to autodetect the schema and format of the table.
- source
Uris List<String> A list of the fully-qualified URIs that point to your data in Google Cloud.
- avro
Options Property Map Additional options if
source_format
is set to "AVRO". Structure is documented below.- compression String
The compression type of the data source. Valid values are "NONE" or "GZIP".
- connection
Id String The connection specifying the credentials to be used to read external storage, such as Azure Blob, Cloud Storage, or S3. The
connection_id
can have the form{{project}}.{{location}}.{{connection_id}}
orprojects/{{project}}/locations/{{location}}/connections/{{connection_id}}
.~>NOTE: If you set
external_data_configuration.connection_id
, the table schema must be specified using the top-levelschema
field documented above.- csv
Options Property Map Additional properties to set if
source_format
is set to "CSV". Structure is documented below.- file
Set StringSpec Type Specifies how source URIs are interpreted for constructing the file set to load. By default source URIs are expanded against the underlying storage. Other options include specifying manifest files. Only applicable to object storage systems. Docs
- google
Sheets Property MapOptions Additional options if
source_format
is set to "GOOGLE_SHEETS". Structure is documented below.- hive
Partitioning Property MapOptions When set, configures hive partitioning support. Not all storage formats support hive partitioning -- requesting hive partitioning on an unsupported format will lead to an error, as will providing an invalid specification. Structure is documented below.
- ignore
Unknown BooleanValues Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false.
- json
Options Property Map Additional properties to set if
source_format
is set to "JSON". Structure is documented below.- max
Bad NumberRecords The maximum number of bad records that BigQuery can ignore when reading data.
- metadataCacheMode String
Metadata Cache Mode for the table. Set this to enable caching of metadata from external data source. Valid values are
AUTOMATIC
and MANUAL
.- object
Metadata String Object Metadata is used to create Object Tables. Object Tables contain a listing of objects (with their metadata) found at the sourceUris. If
object_metadata
is set,source_format
should be omitted.- parquet
Options Property Map Additional properties to set if
source_format
is set to "PARQUET". Structure is documented below.- reference
File StringSchema Uri When creating an external table, the user can provide a reference file with the table schema. This is enabled for the following formats: AVRO, PARQUET, ORC.
- schema String
A JSON schema for the external table. Schema is required for CSV and JSON formats if autodetect is not on. Schema is disallowed for Google Cloud Bigtable, Cloud Datastore backups, Avro, Iceberg, ORC and Parquet formats. ~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. Furthermore, drift for this field cannot be detected because BigQuery only uses this schema to compute the effective schema for the table, therefore any changes on the configured value will force the table to be recreated. This schema is effectively only applied when creating a table from an external datasource, after creation the computed schema will be stored in
google_bigquery_table.schema
~>NOTE: If you set
external_data_configuration.connection_id
, the table schema must be specified using the top-levelschema
field documented above.- source
Format String The data format. Please see sourceFormat under ExternalDataConfiguration in Bigquery's public API documentation for supported formats. To use "GOOGLE_SHEETS" the
scopes
must include "https://www.googleapis.com/auth/drive.readonly".
TableExternalDataConfigurationAvroOptions, TableExternalDataConfigurationAvroOptionsArgs
- Use
Avro boolLogical Types If is set to true, indicates whether to interpret logical types as the corresponding BigQuery data type (for example, TIMESTAMP), instead of using the raw type (for example, INTEGER).
- Use
Avro boolLogical Types If is set to true, indicates whether to interpret logical types as the corresponding BigQuery data type (for example, TIMESTAMP), instead of using the raw type (for example, INTEGER).
- use
Avro BooleanLogical Types If is set to true, indicates whether to interpret logical types as the corresponding BigQuery data type (for example, TIMESTAMP), instead of using the raw type (for example, INTEGER).
- use
Avro booleanLogical Types If is set to true, indicates whether to interpret logical types as the corresponding BigQuery data type (for example, TIMESTAMP), instead of using the raw type (for example, INTEGER).
- use_
avro_ boollogical_ types If is set to true, indicates whether to interpret logical types as the corresponding BigQuery data type (for example, TIMESTAMP), instead of using the raw type (for example, INTEGER).
- use
Avro BooleanLogical Types If is set to true, indicates whether to interpret logical types as the corresponding BigQuery data type (for example, TIMESTAMP), instead of using the raw type (for example, INTEGER).
TableExternalDataConfigurationCsvOptions, TableExternalDataConfigurationCsvOptionsArgs
- Quote string
The value that is used to quote data sections in a CSV file. If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set the
allow_quoted_newlines
property to true. The API-side default is"
, specified in the provider escaped as\"
. Due to limitations with default values, this value is required to be explicitly set.- Allow
Jagged boolRows Indicates if BigQuery should accept rows that are missing trailing optional columns.
- Allow
Quoted boolNewlines Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false.
- Encoding string
The character encoding of the data. The supported values are UTF-8 or ISO-8859-1.
- Field
Delimiter string The separator for fields in a CSV file.
- Skip
Leading intRows The number of rows at the top of a CSV file that BigQuery will skip when reading the data.
- Quote string
The value that is used to quote data sections in a CSV file. If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set the
allow_quoted_newlines
property to true. The API-side default is"
, specified in the provider escaped as\"
. Due to limitations with default values, this value is required to be explicitly set.- Allow
Jagged boolRows Indicates if BigQuery should accept rows that are missing trailing optional columns.
- Allow
Quoted boolNewlines Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false.
- Encoding string
The character encoding of the data. The supported values are UTF-8 or ISO-8859-1.
- Field
Delimiter string The separator for fields in a CSV file.
- Skip
Leading intRows The number of rows at the top of a CSV file that BigQuery will skip when reading the data.
- quote String
The value that is used to quote data sections in a CSV file. If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set the
allow_quoted_newlines
property to true. The API-side default is"
, specified in the provider escaped as\"
. Due to limitations with default values, this value is required to be explicitly set.- allow
Jagged BooleanRows Indicates if BigQuery should accept rows that are missing trailing optional columns.
- allow
Quoted BooleanNewlines Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false.
- encoding String
The character encoding of the data. The supported values are UTF-8 or ISO-8859-1.
- field
Delimiter String The separator for fields in a CSV file.
- skip
Leading IntegerRows The number of rows at the top of a CSV file that BigQuery will skip when reading the data.
- quote string
The value that is used to quote data sections in a CSV file. If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set the
allow_quoted_newlines
property to true. The API-side default is"
, specified in the provider escaped as\"
. Due to limitations with default values, this value is required to be explicitly set.- allow
Jagged booleanRows Indicates if BigQuery should accept rows that are missing trailing optional columns.
- allow
Quoted booleanNewlines Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false.
- encoding string
The character encoding of the data. The supported values are UTF-8 or ISO-8859-1.
- field
Delimiter string The separator for fields in a CSV file.
- skip
Leading numberRows The number of rows at the top of a CSV file that BigQuery will skip when reading the data.
- quote str
The value that is used to quote data sections in a CSV file. If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set the
allow_quoted_newlines
property to true. The API-side default is"
, specified in the provider escaped as\"
. Due to limitations with default values, this value is required to be explicitly set.- allow_
jagged_ boolrows Indicates if BigQuery should accept rows that are missing trailing optional columns.
- allow_
quoted_ boolnewlines Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false.
- encoding str
The character encoding of the data. The supported values are UTF-8 or ISO-8859-1.
- field_
delimiter str The separator for fields in a CSV file.
- skip_
leading_ introws The number of rows at the top of a CSV file that BigQuery will skip when reading the data.
- quote String
The value that is used to quote data sections in a CSV file. If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set the
allow_quoted_newlines
property to true. The API-side default is"
, specified in the provider escaped as\"
. Due to limitations with default values, this value is required to be explicitly set.- allow
Jagged BooleanRows Indicates if BigQuery should accept rows that are missing trailing optional columns.
- allow
Quoted BooleanNewlines Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false.
- encoding String
The character encoding of the data. The supported values are UTF-8 or ISO-8859-1.
- field
Delimiter String The separator for fields in a CSV file.
- skip
Leading NumberRows The number of rows at the top of a CSV file that BigQuery will skip when reading the data.
TableExternalDataConfigurationGoogleSheetsOptions, TableExternalDataConfigurationGoogleSheetsOptionsArgs
- Range string
Range of a sheet to query from. Only used when non-empty. At least one of
range
orskip_leading_rows
must be set. Typical format: "sheet_name!top_left_cell_id:bottom_right_cell_id" For example: "sheet1!A1:B20"- Skip
Leading intRows The number of rows at the top of the sheet that BigQuery will skip when reading the data. At least one of
range
orskip_leading_rows
must be set.
- Range string
Range of a sheet to query from. Only used when non-empty. At least one of
range
orskip_leading_rows
must be set. Typical format: "sheet_name!top_left_cell_id:bottom_right_cell_id" For example: "sheet1!A1:B20"- Skip
Leading intRows The number of rows at the top of the sheet that BigQuery will skip when reading the data. At least one of
range
orskip_leading_rows
must be set.
- range String
Range of a sheet to query from. Only used when non-empty. At least one of
range
orskip_leading_rows
must be set. Typical format: "sheet_name!top_left_cell_id:bottom_right_cell_id" For example: "sheet1!A1:B20"- skip
Leading IntegerRows The number of rows at the top of the sheet that BigQuery will skip when reading the data. At least one of
range
orskip_leading_rows
must be set.
- range string
Range of a sheet to query from. Only used when non-empty. At least one of
range
orskip_leading_rows
must be set. Typical format: "sheet_name!top_left_cell_id:bottom_right_cell_id" For example: "sheet1!A1:B20"- skip
Leading numberRows The number of rows at the top of the sheet that BigQuery will skip when reading the data. At least one of
range
orskip_leading_rows
must be set.
- range str
Range of a sheet to query from. Only used when non-empty. At least one of
range
orskip_leading_rows
must be set. Typical format: "sheet_name!top_left_cell_id:bottom_right_cell_id" For example: "sheet1!A1:B20"- skip_
leading_ introws The number of rows at the top of the sheet that BigQuery will skip when reading the data. At least one of
range
orskip_leading_rows
must be set.
- range String
Range of a sheet to query from. Only used when non-empty. At least one of
range
orskip_leading_rows
must be set. Typical format: "sheet_name!top_left_cell_id:bottom_right_cell_id" For example: "sheet1!A1:B20"- skip
Leading NumberRows The number of rows at the top of the sheet that BigQuery will skip when reading the data. At least one of
range
orskip_leading_rows
must be set.
TableExternalDataConfigurationHivePartitioningOptions, TableExternalDataConfigurationHivePartitioningOptionsArgs
- Mode string
When set, what mode of hive partitioning to use when reading data. The following modes are supported.
- AUTO: automatically infer partition key name(s) and type(s).
- STRINGS: automatically infer partition key name(s). All types are interpreted as strings. Not all storage formats support hive partitioning. Requesting hive partitioning on an unsupported format will lead to an error. Currently supported formats are: JSON, CSV, ORC, Avro and Parquet.
- CUSTOM: when set to
CUSTOM
, you must encode the partition key schema within thesource_uri_prefix
by settingsource_uri_prefix
togs://bucket/path_to_table/{key1:TYPE1}/{key2:TYPE2}/{key3:TYPE3}
.
- Require
Partition boolFilter If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified.
- SourceUriPrefix string
When hive partition detection is requested, a common prefix for all source uris is required. The prefix must end immediately before the partition key encoding begins. For example, consider files following this data layout.
gs://bucket/path_to_table/dt=2019-06-01/country=USA/id=7/file.avro
gs://bucket/path_to_table/dt=2019-05-31/country=CA/id=3/file.avro
When hive partitioning is requested with either AUTO or STRINGS detection, the common prefix can be either ofgs://bucket/path_to_table
orgs://bucket/path_to_table/
. Note that whenmode
is set toCUSTOM
, you must encode the partition key schema within thesource_uri_prefix
by settingsource_uri_prefix
togs://bucket/path_to_table/{key1:TYPE1}/{key2:TYPE2}/{key3:TYPE3}
.
- Mode string
When set, what mode of hive partitioning to use when reading data. The following modes are supported.
- AUTO: automatically infer partition key name(s) and type(s).
- STRINGS: automatically infer partition key name(s). All types are interpreted as strings. Not all storage formats support hive partitioning. Requesting hive partitioning on an unsupported format will lead to an error. Currently supported formats are: JSON, CSV, ORC, Avro and Parquet.
- CUSTOM: when set to
CUSTOM
, you must encode the partition key schema within thesource_uri_prefix
by settingsource_uri_prefix
togs://bucket/path_to_table/{key1:TYPE1}/{key2:TYPE2}/{key3:TYPE3}
.
- Require
Partition boolFilter If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified.
- SourceUriPrefix string
When hive partition detection is requested, a common prefix for all source uris is required. The prefix must end immediately before the partition key encoding begins. For example, consider files following this data layout.
gs://bucket/path_to_table/dt=2019-06-01/country=USA/id=7/file.avro
gs://bucket/path_to_table/dt=2019-05-31/country=CA/id=3/file.avro
When hive partitioning is requested with either AUTO or STRINGS detection, the common prefix can be either ofgs://bucket/path_to_table
orgs://bucket/path_to_table/
. Note that whenmode
is set toCUSTOM
, you must encode the partition key schema within thesource_uri_prefix
by settingsource_uri_prefix
togs://bucket/path_to_table/{key1:TYPE1}/{key2:TYPE2}/{key3:TYPE3}
.
- mode String
When set, what mode of hive partitioning to use when reading data. The following modes are supported.
- AUTO: automatically infer partition key name(s) and type(s).
- STRINGS: automatically infer partition key name(s). All types are interpreted as strings. Not all storage formats support hive partitioning. Requesting hive partitioning on an unsupported format will lead to an error. Currently supported formats are: JSON, CSV, ORC, Avro and Parquet.
- CUSTOM: when set to
CUSTOM
, you must encode the partition key schema within thesource_uri_prefix
by settingsource_uri_prefix
togs://bucket/path_to_table/{key1:TYPE1}/{key2:TYPE2}/{key3:TYPE3}
.
- require
Partition BooleanFilter If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified.
- sourceUriPrefix String
When hive partition detection is requested, a common prefix for all source uris is required. The prefix must end immediately before the partition key encoding begins. For example, consider files following this data layout.
gs://bucket/path_to_table/dt=2019-06-01/country=USA/id=7/file.avro
gs://bucket/path_to_table/dt=2019-05-31/country=CA/id=3/file.avro
When hive partitioning is requested with either AUTO or STRINGS detection, the common prefix can be either ofgs://bucket/path_to_table
orgs://bucket/path_to_table/
. Note that whenmode
is set toCUSTOM
, you must encode the partition key schema within thesource_uri_prefix
by settingsource_uri_prefix
togs://bucket/path_to_table/{key1:TYPE1}/{key2:TYPE2}/{key3:TYPE3}
.
- mode string
When set, what mode of hive partitioning to use when reading data. The following modes are supported.
- AUTO: automatically infer partition key name(s) and type(s).
- STRINGS: automatically infer partition key name(s). All types are interpreted as strings. Not all storage formats support hive partitioning. Requesting hive partitioning on an unsupported format will lead to an error. Currently supported formats are: JSON, CSV, ORC, Avro and Parquet.
- CUSTOM: when set to
CUSTOM
, you must encode the partition key schema within thesource_uri_prefix
by settingsource_uri_prefix
togs://bucket/path_to_table/{key1:TYPE1}/{key2:TYPE2}/{key3:TYPE3}
.
- require
Partition booleanFilter If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified.
- sourceUriPrefix string
When hive partition detection is requested, a common prefix for all source uris is required. The prefix must end immediately before the partition key encoding begins. For example, consider files following this data layout.
gs://bucket/path_to_table/dt=2019-06-01/country=USA/id=7/file.avro
gs://bucket/path_to_table/dt=2019-05-31/country=CA/id=3/file.avro
When hive partitioning is requested with either AUTO or STRINGS detection, the common prefix can be either ofgs://bucket/path_to_table
orgs://bucket/path_to_table/
. Note that whenmode
is set toCUSTOM
, you must encode the partition key schema within thesource_uri_prefix
by settingsource_uri_prefix
togs://bucket/path_to_table/{key1:TYPE1}/{key2:TYPE2}/{key3:TYPE3}
.
- mode str
When set, what mode of hive partitioning to use when reading data. The following modes are supported.
- AUTO: automatically infer partition key name(s) and type(s).
- STRINGS: automatically infer partition key name(s). All types are interpreted as strings. Not all storage formats support hive partitioning. Requesting hive partitioning on an unsupported format will lead to an error. Currently supported formats are: JSON, CSV, ORC, Avro and Parquet.
- CUSTOM: when set to
CUSTOM
, you must encode the partition key schema within thesource_uri_prefix
by settingsource_uri_prefix
togs://bucket/path_to_table/{key1:TYPE1}/{key2:TYPE2}/{key3:TYPE3}
.
- require_
partition_ boolfilter If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified.
- source_uri_prefix str
When hive partition detection is requested, a common prefix for all source uris is required. The prefix must end immediately before the partition key encoding begins. For example, consider files following this data layout.
gs://bucket/path_to_table/dt=2019-06-01/country=USA/id=7/file.avro
gs://bucket/path_to_table/dt=2019-05-31/country=CA/id=3/file.avro
When hive partitioning is requested with either AUTO or STRINGS detection, the common prefix can be either ofgs://bucket/path_to_table
orgs://bucket/path_to_table/
. Note that whenmode
is set toCUSTOM
, you must encode the partition key schema within thesource_uri_prefix
by settingsource_uri_prefix
togs://bucket/path_to_table/{key1:TYPE1}/{key2:TYPE2}/{key3:TYPE3}
.
- mode String
When set, what mode of hive partitioning to use when reading data. The following modes are supported.
- AUTO: automatically infer partition key name(s) and type(s).
- STRINGS: automatically infer partition key name(s). All types are interpreted as strings. Not all storage formats support hive partitioning. Requesting hive partitioning on an unsupported format will lead to an error. Currently supported formats are: JSON, CSV, ORC, Avro and Parquet.
- CUSTOM: when set to
CUSTOM
, you must encode the partition key schema within thesource_uri_prefix
by settingsource_uri_prefix
togs://bucket/path_to_table/{key1:TYPE1}/{key2:TYPE2}/{key3:TYPE3}
.
- require
Partition BooleanFilter If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified.
- sourceUriPrefix String
When hive partition detection is requested, a common prefix for all source uris is required. The prefix must end immediately before the partition key encoding begins. For example, consider files following this data layout.
gs://bucket/path_to_table/dt=2019-06-01/country=USA/id=7/file.avro
gs://bucket/path_to_table/dt=2019-05-31/country=CA/id=3/file.avro
When hive partitioning is requested with either AUTO or STRINGS detection, the common prefix can be either ofgs://bucket/path_to_table
orgs://bucket/path_to_table/
. Note that whenmode
is set toCUSTOM
, you must encode the partition key schema within thesource_uri_prefix
by settingsource_uri_prefix
togs://bucket/path_to_table/{key1:TYPE1}/{key2:TYPE2}/{key3:TYPE3}
.
TableExternalDataConfigurationJsonOptions, TableExternalDataConfigurationJsonOptionsArgs
- Encoding string
The character encoding of the data. The supported values are UTF-8, UTF-16BE, UTF-16LE, UTF-32BE, and UTF-32LE. The default value is UTF-8.
- Encoding string
The character encoding of the data. The supported values are UTF-8, UTF-16BE, UTF-16LE, UTF-32BE, and UTF-32LE. The default value is UTF-8.
- encoding String
The character encoding of the data. The supported values are UTF-8, UTF-16BE, UTF-16LE, UTF-32BE, and UTF-32LE. The default value is UTF-8.
- encoding string
The character encoding of the data. The supported values are UTF-8, UTF-16BE, UTF-16LE, UTF-32BE, and UTF-32LE. The default value is UTF-8.
- encoding str
The character encoding of the data. The supported values are UTF-8, UTF-16BE, UTF-16LE, UTF-32BE, and UTF-32LE. The default value is UTF-8.
- encoding String
The character encoding of the data. The supported values are UTF-8, UTF-16BE, UTF-16LE, UTF-32BE, and UTF-32LE. The default value is UTF-8.
TableExternalDataConfigurationParquetOptions, TableExternalDataConfigurationParquetOptionsArgs
- Enable
List boolInference Indicates whether to use schema inference specifically for Parquet LIST logical type.
- Enum
As boolString Indicates whether to infer Parquet ENUM logical type as STRING instead of BYTES by default.
- Enable
List boolInference Indicates whether to use schema inference specifically for Parquet LIST logical type.
- Enum
As boolString Indicates whether to infer Parquet ENUM logical type as STRING instead of BYTES by default.
- enable
List BooleanInference Indicates whether to use schema inference specifically for Parquet LIST logical type.
- enum
As BooleanString Indicates whether to infer Parquet ENUM logical type as STRING instead of BYTES by default.
- enable
List booleanInference Indicates whether to use schema inference specifically for Parquet LIST logical type.
- enum
As booleanString Indicates whether to infer Parquet ENUM logical type as STRING instead of BYTES by default.
- enable_
list_ boolinference Indicates whether to use schema inference specifically for Parquet LIST logical type.
- enum_
as_ boolstring Indicates whether to infer Parquet ENUM logical type as STRING instead of BYTES by default.
- enable
List BooleanInference Indicates whether to use schema inference specifically for Parquet LIST logical type.
- enum
As BooleanString Indicates whether to infer Parquet ENUM logical type as STRING instead of BYTES by default.
TableMaterializedView, TableMaterializedViewArgs
- Query string
A query whose result is persisted.
- Enable
Refresh bool Specifies whether to use BigQuery's automatic refresh for this materialized view when the base table is updated. The default value is true.
- Refresh
Interval intMs The maximum frequency at which this materialized view will be refreshed. The default value is 1800000
- Query string
A query whose result is persisted.
- Enable
Refresh bool Specifies whether to use BigQuery's automatic refresh for this materialized view when the base table is updated. The default value is true.
- Refresh
Interval intMs The maximum frequency at which this materialized view will be refreshed. The default value is 1800000
- query String
A query whose result is persisted.
- enable
Refresh Boolean Specifies whether to use BigQuery's automatic refresh for this materialized view when the base table is updated. The default value is true.
- refresh
Interval IntegerMs The maximum frequency at which this materialized view will be refreshed. The default value is 1800000
- query string
A query whose result is persisted.
- enable
Refresh boolean Specifies whether to use BigQuery's automatic refresh for this materialized view when the base table is updated. The default value is true.
- refresh
Interval numberMs The maximum frequency at which this materialized view will be refreshed. The default value is 1800000
- query str
A query whose result is persisted.
- enable_
refresh bool Specifies whether to use BigQuery's automatic refresh for this materialized view when the base table is updated. The default value is true.
- refresh_
interval_ intms The maximum frequency at which this materialized view will be refreshed. The default value is 1800000
- query String
A query whose result is persisted.
- enable
Refresh Boolean Specifies whether to use BigQuery's automatic refresh for this materialized view when the base table is updated. The default value is true.
- refresh
Interval NumberMs The maximum frequency at which this materialized view will be refreshed. The default value is 1800000
TableRangePartitioning, TableRangePartitioningArgs
- Field string
The field used to determine how to create a range-based partition.
- Range
Table
Range Partitioning Range Information required to partition based on ranges. Structure is documented below.
- Field string
The field used to determine how to create a range-based partition.
- Range
Table
Range Partitioning Range Information required to partition based on ranges. Structure is documented below.
- field String
The field used to determine how to create a range-based partition.
- range
Table
Range Partitioning Range Information required to partition based on ranges. Structure is documented below.
- field string
The field used to determine how to create a range-based partition.
- range
Table
Range Partitioning Range Information required to partition based on ranges. Structure is documented below.
- field str
The field used to determine how to create a range-based partition.
- range
Table
Range Partitioning Range Information required to partition based on ranges. Structure is documented below.
- field String
The field used to determine how to create a range-based partition.
- range Property Map
Information required to partition based on ranges. Structure is documented below.
TableRangePartitioningRange, TableRangePartitioningRangeArgs
TableTimePartitioning, TableTimePartitioningArgs
- Type string
The supported types are DAY, HOUR, MONTH, and YEAR, which will generate one partition per day, hour, month, and year, respectively.
- Expiration
Ms int Number of milliseconds for which to keep the storage for a partition.
- Field string
The field used to determine how to create a time-based partition. If time-based partitioning is enabled without this value, the table is partitioned based on the load time.
- Require
Partition boolFilter If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified.
- Type string
The supported types are DAY, HOUR, MONTH, and YEAR, which will generate one partition per day, hour, month, and year, respectively.
- Expiration
Ms int Number of milliseconds for which to keep the storage for a partition.
- Field string
The field used to determine how to create a time-based partition. If time-based partitioning is enabled without this value, the table is partitioned based on the load time.
- Require
Partition boolFilter If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified.
- type String
The supported types are DAY, HOUR, MONTH, and YEAR, which will generate one partition per day, hour, month, and year, respectively.
- expiration
Ms Integer Number of milliseconds for which to keep the storage for a partition.
- field String
The field used to determine how to create a time-based partition. If time-based partitioning is enabled without this value, the table is partitioned based on the load time.
- require
Partition BooleanFilter If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified.
- type string
The supported types are DAY, HOUR, MONTH, and YEAR, which will generate one partition per day, hour, month, and year, respectively.
- expiration
Ms number Number of milliseconds for which to keep the storage for a partition.
- field string
The field used to determine how to create a time-based partition. If time-based partitioning is enabled without this value, the table is partitioned based on the load time.
- require
Partition booleanFilter If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified.
- type str
The supported types are DAY, HOUR, MONTH, and YEAR, which will generate one partition per day, hour, month, and year, respectively.
- expiration_
ms int Number of milliseconds for which to keep the storage for a partition.
- field str
The field used to determine how to create a time-based partition. If time-based partitioning is enabled without this value, the table is partitioned based on the load time.
- require_partition_filter bool
If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified.
- type String
The supported types are DAY, HOUR, MONTH, and YEAR, which will generate one partition per day, hour, month, and year, respectively.
- expirationMs Number
Number of milliseconds for which to keep the storage for a partition.
- field String
The field used to determine how to create a time-based partition. If time-based partitioning is enabled without this value, the table is partitioned based on the load time.
- requirePartitionFilter Boolean
If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified.
TableView, TableViewArgs
- Query string
A query that BigQuery executes when the view is referenced.
- UseLegacySql bool
Specifies whether to use BigQuery's legacy SQL for this view. The default value is true. If set to false, the view will use BigQuery's standard SQL.
- Query string
A query that BigQuery executes when the view is referenced.
- UseLegacySql bool
Specifies whether to use BigQuery's legacy SQL for this view. The default value is true. If set to false, the view will use BigQuery's standard SQL.
- query String
A query that BigQuery executes when the view is referenced.
- useLegacySql Boolean
Specifies whether to use BigQuery's legacy SQL for this view. The default value is true. If set to false, the view will use BigQuery's standard SQL.
- query string
A query that BigQuery executes when the view is referenced.
- useLegacySql boolean
Specifies whether to use BigQuery's legacy SQL for this view. The default value is true. If set to false, the view will use BigQuery's standard SQL.
- query str
A query that BigQuery executes when the view is referenced.
- use_legacy_sql bool
Specifies whether to use BigQuery's legacy SQL for this view. The default value is true. If set to false, the view will use BigQuery's standard SQL.
- query String
A query that BigQuery executes when the view is referenced.
- useLegacySql Boolean
Specifies whether to use BigQuery's legacy SQL for this view. The default value is true. If set to false, the view will use BigQuery's standard SQL.
Import
BigQuery tables can be imported using any of these accepted formats:
$ pulumi import gcp:bigquery/table:Table default projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}}
$ pulumi import gcp:bigquery/table:Table default {{project}}/{{dataset_id}}/{{table_id}}
$ pulumi import gcp:bigquery/table:Table default {{dataset_id}}/{{table_id}}
Package Details
- Repository
- Google Cloud (GCP) Classic pulumi/pulumi-gcp
- License
- Apache-2.0
- Notes
This Pulumi package is based on the
google-beta
Terraform Provider.