gcp.bigquery.Job

Google Cloud Classic v7.2.1 published on Wednesday, Nov 22, 2023 by Pulumi

    Jobs are actions that BigQuery runs on your behalf to load data, export data, query data, or copy data. Once a BigQuery job is created, it cannot be changed or deleted.

    To get more information about Job, see the BigQuery Jobs API documentation.

    Example Usage

    Bigquery Job Query

    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Gcp = Pulumi.Gcp;
    
    return await Deployment.RunAsync(() => 
    {
        var bar = new Gcp.BigQuery.Dataset("bar", new()
        {
            DatasetId = "job_query_dataset",
            FriendlyName = "test",
            Description = "This is a test description",
            Location = "US",
        });
    
        var foo = new Gcp.BigQuery.Table("foo", new()
        {
            DeletionProtection = false,
            DatasetId = bar.DatasetId,
            TableId = "job_query_table",
        });
    
        var job = new Gcp.BigQuery.Job("job", new()
        {
            JobId = "job_query",
            Labels = 
            {
                { "example-label", "example-value" },
            },
            Query = new Gcp.BigQuery.Inputs.JobQueryArgs
            {
                Query = "SELECT state FROM [lookerdata:cdc.project_tycho_reports]",
                DestinationTable = new Gcp.BigQuery.Inputs.JobQueryDestinationTableArgs
                {
                    ProjectId = foo.Project,
                    DatasetId = foo.DatasetId,
                    TableId = foo.TableId,
                },
                AllowLargeResults = true,
                FlattenResults = true,
                ScriptOptions = new Gcp.BigQuery.Inputs.JobQueryScriptOptionsArgs
                {
                    KeyResultStatement = "LAST",
                },
            },
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/bigquery"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		bar, err := bigquery.NewDataset(ctx, "bar", &bigquery.DatasetArgs{
    			DatasetId:    pulumi.String("job_query_dataset"),
    			FriendlyName: pulumi.String("test"),
    			Description:  pulumi.String("This is a test description"),
    			Location:     pulumi.String("US"),
    		})
    		if err != nil {
    			return err
    		}
    		foo, err := bigquery.NewTable(ctx, "foo", &bigquery.TableArgs{
    			DeletionProtection: pulumi.Bool(false),
    			DatasetId:          bar.DatasetId,
    			TableId:            pulumi.String("job_query_table"),
    		})
    		if err != nil {
    			return err
    		}
    		_, err = bigquery.NewJob(ctx, "job", &bigquery.JobArgs{
    			JobId: pulumi.String("job_query"),
    			Labels: pulumi.StringMap{
    				"example-label": pulumi.String("example-value"),
    			},
    			Query: &bigquery.JobQueryArgs{
    				Query: pulumi.String("SELECT state FROM [lookerdata:cdc.project_tycho_reports]"),
    				DestinationTable: &bigquery.JobQueryDestinationTableArgs{
    					ProjectId: foo.Project,
    					DatasetId: foo.DatasetId,
    					TableId:   foo.TableId,
    				},
    				AllowLargeResults: pulumi.Bool(true),
    				FlattenResults:    pulumi.Bool(true),
    				ScriptOptions: &bigquery.JobQueryScriptOptionsArgs{
    					KeyResultStatement: pulumi.String("LAST"),
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.gcp.bigquery.Dataset;
    import com.pulumi.gcp.bigquery.DatasetArgs;
    import com.pulumi.gcp.bigquery.Table;
    import com.pulumi.gcp.bigquery.TableArgs;
    import com.pulumi.gcp.bigquery.Job;
    import com.pulumi.gcp.bigquery.JobArgs;
    import com.pulumi.gcp.bigquery.inputs.JobQueryArgs;
    import com.pulumi.gcp.bigquery.inputs.JobQueryDestinationTableArgs;
    import com.pulumi.gcp.bigquery.inputs.JobQueryScriptOptionsArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var bar = new Dataset("bar", DatasetArgs.builder()        
                .datasetId("job_query_dataset")
                .friendlyName("test")
                .description("This is a test description")
                .location("US")
                .build());
    
            var foo = new Table("foo", TableArgs.builder()        
                .deletionProtection(false)
                .datasetId(bar.datasetId())
                .tableId("job_query_table")
                .build());
    
            var job = new Job("job", JobArgs.builder()        
                .jobId("job_query")
                .labels(Map.of("example-label", "example-value"))
                .query(JobQueryArgs.builder()
                    .query("SELECT state FROM [lookerdata:cdc.project_tycho_reports]")
                    .destinationTable(JobQueryDestinationTableArgs.builder()
                        .projectId(foo.project())
                        .datasetId(foo.datasetId())
                        .tableId(foo.tableId())
                        .build())
                    .allowLargeResults(true)
                    .flattenResults(true)
                    .scriptOptions(JobQueryScriptOptionsArgs.builder()
                        .keyResultStatement("LAST")
                        .build())
                    .build())
                .build());
    
        }
    }
    
    import pulumi
    import pulumi_gcp as gcp
    
    bar = gcp.bigquery.Dataset("bar",
        dataset_id="job_query_dataset",
        friendly_name="test",
        description="This is a test description",
        location="US")
    foo = gcp.bigquery.Table("foo",
        deletion_protection=False,
        dataset_id=bar.dataset_id,
        table_id="job_query_table")
    job = gcp.bigquery.Job("job",
        job_id="job_query",
        labels={
            "example-label": "example-value",
        },
        query=gcp.bigquery.JobQueryArgs(
            query="SELECT state FROM [lookerdata:cdc.project_tycho_reports]",
            destination_table=gcp.bigquery.JobQueryDestinationTableArgs(
                project_id=foo.project,
                dataset_id=foo.dataset_id,
                table_id=foo.table_id,
            ),
            allow_large_results=True,
            flatten_results=True,
            script_options=gcp.bigquery.JobQueryScriptOptionsArgs(
                key_result_statement="LAST",
            ),
        ))
    
    import * as pulumi from "@pulumi/pulumi";
    import * as gcp from "@pulumi/gcp";
    
    const bar = new gcp.bigquery.Dataset("bar", {
        datasetId: "job_query_dataset",
        friendlyName: "test",
        description: "This is a test description",
        location: "US",
    });
    const foo = new gcp.bigquery.Table("foo", {
        deletionProtection: false,
        datasetId: bar.datasetId,
        tableId: "job_query_table",
    });
    const job = new gcp.bigquery.Job("job", {
        jobId: "job_query",
        labels: {
            "example-label": "example-value",
        },
        query: {
            query: "SELECT state FROM [lookerdata:cdc.project_tycho_reports]",
            destinationTable: {
                projectId: foo.project,
                datasetId: foo.datasetId,
                tableId: foo.tableId,
            },
            allowLargeResults: true,
            flattenResults: true,
            scriptOptions: {
                keyResultStatement: "LAST",
            },
        },
    });
    
    resources:
      foo:
        type: gcp:bigquery:Table
        properties:
          deletionProtection: false
          datasetId: ${bar.datasetId}
          tableId: job_query_table
      bar:
        type: gcp:bigquery:Dataset
        properties:
          datasetId: job_query_dataset
          friendlyName: test
          description: This is a test description
          location: US
      job:
        type: gcp:bigquery:Job
        properties:
          jobId: job_query
          labels:
            example-label: example-value
          query:
            query: SELECT state FROM [lookerdata:cdc.project_tycho_reports]
            destinationTable:
              projectId: ${foo.project}
              datasetId: ${foo.datasetId}
              tableId: ${foo.tableId}
            allowLargeResults: true
            flattenResults: true
            scriptOptions:
              keyResultStatement: LAST
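
    The query in this example is written in BigQuery's legacy SQL dialect (square-bracket table references), and the allowLargeResults/flattenResults flags only apply to legacy SQL. As a minimal sketch of the standard SQL equivalent (TypeScript; the project ID below is a hypothetical placeholder, not taken from the example), set useLegacySql to false and switch to backtick-quoted table references:

    import * as gcp from "@pulumi/gcp";

    // Minimal sketch: the same query expressed in standard SQL.
    // "my-project" is a hypothetical placeholder, and the legacy-SQL-only
    // flags (allowLargeResults, flattenResults) are omitted.
    const standardSqlJob = new gcp.bigquery.Job("job-standard-sql", {
        jobId: "job_query_standard_sql",
        query: {
            query: "SELECT state FROM `lookerdata.cdc.project_tycho_reports`",
            useLegacySql: false,
            destinationTable: {
                projectId: "my-project",
                datasetId: "job_query_dataset",
                tableId: "job_query_table",
            },
        },
    });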
    

    Bigquery Job Query Table Reference

    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Gcp = Pulumi.Gcp;
    
    return await Deployment.RunAsync(() => 
    {
        var bar = new Gcp.BigQuery.Dataset("bar", new()
        {
            DatasetId = "job_query_dataset",
            FriendlyName = "test",
            Description = "This is a test description",
            Location = "US",
        });
    
        var foo = new Gcp.BigQuery.Table("foo", new()
        {
            DeletionProtection = false,
            DatasetId = bar.DatasetId,
            TableId = "job_query_table",
        });
    
        var job = new Gcp.BigQuery.Job("job", new()
        {
            JobId = "job_query",
            Labels = 
            {
                { "example-label", "example-value" },
            },
            Query = new Gcp.BigQuery.Inputs.JobQueryArgs
            {
                Query = "SELECT state FROM [lookerdata:cdc.project_tycho_reports]",
                DestinationTable = new Gcp.BigQuery.Inputs.JobQueryDestinationTableArgs
                {
                    TableId = foo.Id,
                },
                DefaultDataset = new Gcp.BigQuery.Inputs.JobQueryDefaultDatasetArgs
                {
                    DatasetId = bar.Id,
                },
                AllowLargeResults = true,
                FlattenResults = true,
                ScriptOptions = new Gcp.BigQuery.Inputs.JobQueryScriptOptionsArgs
                {
                    KeyResultStatement = "LAST",
                },
            },
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/bigquery"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		bar, err := bigquery.NewDataset(ctx, "bar", &bigquery.DatasetArgs{
    			DatasetId:    pulumi.String("job_query_dataset"),
    			FriendlyName: pulumi.String("test"),
    			Description:  pulumi.String("This is a test description"),
    			Location:     pulumi.String("US"),
    		})
    		if err != nil {
    			return err
    		}
    		foo, err := bigquery.NewTable(ctx, "foo", &bigquery.TableArgs{
    			DeletionProtection: pulumi.Bool(false),
    			DatasetId:          bar.DatasetId,
    			TableId:            pulumi.String("job_query_table"),
    		})
    		if err != nil {
    			return err
    		}
    		_, err = bigquery.NewJob(ctx, "job", &bigquery.JobArgs{
    			JobId: pulumi.String("job_query"),
    			Labels: pulumi.StringMap{
    				"example-label": pulumi.String("example-value"),
    			},
    			Query: &bigquery.JobQueryArgs{
    				Query: pulumi.String("SELECT state FROM [lookerdata:cdc.project_tycho_reports]"),
    				DestinationTable: &bigquery.JobQueryDestinationTableArgs{
    					TableId: foo.ID(),
    				},
    				DefaultDataset: &bigquery.JobQueryDefaultDatasetArgs{
    					DatasetId: bar.ID(),
    				},
    				AllowLargeResults: pulumi.Bool(true),
    				FlattenResults:    pulumi.Bool(true),
    				ScriptOptions: &bigquery.JobQueryScriptOptionsArgs{
    					KeyResultStatement: pulumi.String("LAST"),
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.gcp.bigquery.Dataset;
    import com.pulumi.gcp.bigquery.DatasetArgs;
    import com.pulumi.gcp.bigquery.Table;
    import com.pulumi.gcp.bigquery.TableArgs;
    import com.pulumi.gcp.bigquery.Job;
    import com.pulumi.gcp.bigquery.JobArgs;
    import com.pulumi.gcp.bigquery.inputs.JobQueryArgs;
    import com.pulumi.gcp.bigquery.inputs.JobQueryDestinationTableArgs;
    import com.pulumi.gcp.bigquery.inputs.JobQueryDefaultDatasetArgs;
    import com.pulumi.gcp.bigquery.inputs.JobQueryScriptOptionsArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var bar = new Dataset("bar", DatasetArgs.builder()        
                .datasetId("job_query_dataset")
                .friendlyName("test")
                .description("This is a test description")
                .location("US")
                .build());
    
            var foo = new Table("foo", TableArgs.builder()        
                .deletionProtection(false)
                .datasetId(bar.datasetId())
                .tableId("job_query_table")
                .build());
    
            var job = new Job("job", JobArgs.builder()        
                .jobId("job_query")
                .labels(Map.of("example-label", "example-value"))
                .query(JobQueryArgs.builder()
                    .query("SELECT state FROM [lookerdata:cdc.project_tycho_reports]")
                    .destinationTable(JobQueryDestinationTableArgs.builder()
                        .tableId(foo.id())
                        .build())
                    .defaultDataset(JobQueryDefaultDatasetArgs.builder()
                        .datasetId(bar.id())
                        .build())
                    .allowLargeResults(true)
                    .flattenResults(true)
                    .scriptOptions(JobQueryScriptOptionsArgs.builder()
                        .keyResultStatement("LAST")
                        .build())
                    .build())
                .build());
    
        }
    }
    
    import pulumi
    import pulumi_gcp as gcp
    
    bar = gcp.bigquery.Dataset("bar",
        dataset_id="job_query_dataset",
        friendly_name="test",
        description="This is a test description",
        location="US")
    foo = gcp.bigquery.Table("foo",
        deletion_protection=False,
        dataset_id=bar.dataset_id,
        table_id="job_query_table")
    job = gcp.bigquery.Job("job",
        job_id="job_query",
        labels={
            "example-label": "example-value",
        },
        query=gcp.bigquery.JobQueryArgs(
            query="SELECT state FROM [lookerdata:cdc.project_tycho_reports]",
            destination_table=gcp.bigquery.JobQueryDestinationTableArgs(
                table_id=foo.id,
            ),
            default_dataset=gcp.bigquery.JobQueryDefaultDatasetArgs(
                dataset_id=bar.id,
            ),
            allow_large_results=True,
            flatten_results=True,
            script_options=gcp.bigquery.JobQueryScriptOptionsArgs(
                key_result_statement="LAST",
            ),
        ))
    
    import * as pulumi from "@pulumi/pulumi";
    import * as gcp from "@pulumi/gcp";
    
    const bar = new gcp.bigquery.Dataset("bar", {
        datasetId: "job_query_dataset",
        friendlyName: "test",
        description: "This is a test description",
        location: "US",
    });
    const foo = new gcp.bigquery.Table("foo", {
        deletionProtection: false,
        datasetId: bar.datasetId,
        tableId: "job_query_table",
    });
    const job = new gcp.bigquery.Job("job", {
        jobId: "job_query",
        labels: {
            "example-label": "example-value",
        },
        query: {
            query: "SELECT state FROM [lookerdata:cdc.project_tycho_reports]",
            destinationTable: {
                tableId: foo.id,
            },
            defaultDataset: {
                datasetId: bar.id,
            },
            allowLargeResults: true,
            flattenResults: true,
            scriptOptions: {
                keyResultStatement: "LAST",
            },
        },
    });
    
    resources:
      foo:
        type: gcp:bigquery:Table
        properties:
          deletionProtection: false
          datasetId: ${bar.datasetId}
          tableId: job_query_table
      bar:
        type: gcp:bigquery:Dataset
        properties:
          datasetId: job_query_dataset
          friendlyName: test
          description: This is a test description
          location: US
      job:
        type: gcp:bigquery:Job
        properties:
          jobId: job_query
          labels:
            example-label: example-value
          query:
            query: SELECT state FROM [lookerdata:cdc.project_tycho_reports]
            destinationTable:
              tableId: ${foo.id}
            defaultDataset:
              datasetId: ${bar.id}
            allowLargeResults: true
            flattenResults: true
            scriptOptions:
              keyResultStatement: LAST
    

    Bigquery Job Load

    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Gcp = Pulumi.Gcp;
    
    return await Deployment.RunAsync(() => 
    {
        var bar = new Gcp.BigQuery.Dataset("bar", new()
        {
            DatasetId = "job_load_dataset",
            FriendlyName = "test",
            Description = "This is a test description",
            Location = "US",
        });
    
        var foo = new Gcp.BigQuery.Table("foo", new()
        {
            DeletionProtection = false,
            DatasetId = bar.DatasetId,
            TableId = "job_load_table",
        });
    
        var job = new Gcp.BigQuery.Job("job", new()
        {
            JobId = "job_load",
            Labels = 
            {
                { "my_job", "load" },
            },
            Load = new Gcp.BigQuery.Inputs.JobLoadArgs
            {
                SourceUris = new[]
                {
                    "gs://cloud-samples-data/bigquery/us-states/us-states-by-date.csv",
                },
                DestinationTable = new Gcp.BigQuery.Inputs.JobLoadDestinationTableArgs
                {
                    ProjectId = foo.Project,
                    DatasetId = foo.DatasetId,
                    TableId = foo.TableId,
                },
                SkipLeadingRows = 1,
                SchemaUpdateOptions = new[]
                {
                    "ALLOW_FIELD_RELAXATION",
                    "ALLOW_FIELD_ADDITION",
                },
                WriteDisposition = "WRITE_APPEND",
                Autodetect = true,
            },
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/bigquery"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		bar, err := bigquery.NewDataset(ctx, "bar", &bigquery.DatasetArgs{
    			DatasetId:    pulumi.String("job_load_dataset"),
    			FriendlyName: pulumi.String("test"),
    			Description:  pulumi.String("This is a test description"),
    			Location:     pulumi.String("US"),
    		})
    		if err != nil {
    			return err
    		}
    		foo, err := bigquery.NewTable(ctx, "foo", &bigquery.TableArgs{
    			DeletionProtection: pulumi.Bool(false),
    			DatasetId:          bar.DatasetId,
    			TableId:            pulumi.String("job_load_table"),
    		})
    		if err != nil {
    			return err
    		}
    		_, err = bigquery.NewJob(ctx, "job", &bigquery.JobArgs{
    			JobId: pulumi.String("job_load"),
    			Labels: pulumi.StringMap{
    				"my_job": pulumi.String("load"),
    			},
    			Load: &bigquery.JobLoadArgs{
    				SourceUris: pulumi.StringArray{
    					pulumi.String("gs://cloud-samples-data/bigquery/us-states/us-states-by-date.csv"),
    				},
    				DestinationTable: &bigquery.JobLoadDestinationTableArgs{
    					ProjectId: foo.Project,
    					DatasetId: foo.DatasetId,
    					TableId:   foo.TableId,
    				},
    				SkipLeadingRows: pulumi.Int(1),
    				SchemaUpdateOptions: pulumi.StringArray{
    					pulumi.String("ALLOW_FIELD_RELAXATION"),
    					pulumi.String("ALLOW_FIELD_ADDITION"),
    				},
    				WriteDisposition: pulumi.String("WRITE_APPEND"),
    				Autodetect:       pulumi.Bool(true),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.gcp.bigquery.Dataset;
    import com.pulumi.gcp.bigquery.DatasetArgs;
    import com.pulumi.gcp.bigquery.Table;
    import com.pulumi.gcp.bigquery.TableArgs;
    import com.pulumi.gcp.bigquery.Job;
    import com.pulumi.gcp.bigquery.JobArgs;
    import com.pulumi.gcp.bigquery.inputs.JobLoadArgs;
    import com.pulumi.gcp.bigquery.inputs.JobLoadDestinationTableArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var bar = new Dataset("bar", DatasetArgs.builder()        
                .datasetId("job_load_dataset")
                .friendlyName("test")
                .description("This is a test description")
                .location("US")
                .build());
    
            var foo = new Table("foo", TableArgs.builder()        
                .deletionProtection(false)
                .datasetId(bar.datasetId())
                .tableId("job_load_table")
                .build());
    
            var job = new Job("job", JobArgs.builder()        
                .jobId("job_load")
                .labels(Map.of("my_job", "load"))
                .load(JobLoadArgs.builder()
                    .sourceUris("gs://cloud-samples-data/bigquery/us-states/us-states-by-date.csv")
                    .destinationTable(JobLoadDestinationTableArgs.builder()
                        .projectId(foo.project())
                        .datasetId(foo.datasetId())
                        .tableId(foo.tableId())
                        .build())
                    .skipLeadingRows(1)
                    .schemaUpdateOptions(                
                        "ALLOW_FIELD_RELAXATION",
                        "ALLOW_FIELD_ADDITION")
                    .writeDisposition("WRITE_APPEND")
                    .autodetect(true)
                    .build())
                .build());
    
        }
    }
    
    import pulumi
    import pulumi_gcp as gcp
    
    bar = gcp.bigquery.Dataset("bar",
        dataset_id="job_load_dataset",
        friendly_name="test",
        description="This is a test description",
        location="US")
    foo = gcp.bigquery.Table("foo",
        deletion_protection=False,
        dataset_id=bar.dataset_id,
        table_id="job_load_table")
    job = gcp.bigquery.Job("job",
        job_id="job_load",
        labels={
            "my_job": "load",
        },
        load=gcp.bigquery.JobLoadArgs(
            source_uris=["gs://cloud-samples-data/bigquery/us-states/us-states-by-date.csv"],
            destination_table=gcp.bigquery.JobLoadDestinationTableArgs(
                project_id=foo.project,
                dataset_id=foo.dataset_id,
                table_id=foo.table_id,
            ),
            skip_leading_rows=1,
            schema_update_options=[
                "ALLOW_FIELD_RELAXATION",
                "ALLOW_FIELD_ADDITION",
            ],
            write_disposition="WRITE_APPEND",
            autodetect=True,
        ))
    
    import * as pulumi from "@pulumi/pulumi";
    import * as gcp from "@pulumi/gcp";
    
    const bar = new gcp.bigquery.Dataset("bar", {
        datasetId: "job_load_dataset",
        friendlyName: "test",
        description: "This is a test description",
        location: "US",
    });
    const foo = new gcp.bigquery.Table("foo", {
        deletionProtection: false,
        datasetId: bar.datasetId,
        tableId: "job_load_table",
    });
    const job = new gcp.bigquery.Job("job", {
        jobId: "job_load",
        labels: {
            my_job: "load",
        },
        load: {
            sourceUris: ["gs://cloud-samples-data/bigquery/us-states/us-states-by-date.csv"],
            destinationTable: {
                projectId: foo.project,
                datasetId: foo.datasetId,
                tableId: foo.tableId,
            },
            skipLeadingRows: 1,
            schemaUpdateOptions: [
                "ALLOW_FIELD_RELAXATION",
                "ALLOW_FIELD_ADDITION",
            ],
            writeDisposition: "WRITE_APPEND",
            autodetect: true,
        },
    });
    
    resources:
      foo:
        type: gcp:bigquery:Table
        properties:
          deletionProtection: false
          datasetId: ${bar.datasetId}
          tableId: job_load_table
      bar:
        type: gcp:bigquery:Dataset
        properties:
          datasetId: job_load_dataset
          friendlyName: test
          description: This is a test description
          location: US
      job:
        type: gcp:bigquery:Job
        properties:
          jobId: job_load
          labels:
            my_job: load
          load:
            sourceUris:
              - gs://cloud-samples-data/bigquery/us-states/us-states-by-date.csv
            destinationTable:
              projectId: ${foo.project}
              datasetId: ${foo.datasetId}
              tableId: ${foo.tableId}
            skipLeadingRows: 1
            schemaUpdateOptions:
              - ALLOW_FIELD_RELAXATION
              - ALLOW_FIELD_ADDITION
            writeDisposition: WRITE_APPEND
            autodetect: true
    

    Bigquery Job Load Parquet

    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Gcp = Pulumi.Gcp;
    
    return await Deployment.RunAsync(() => 
    {
        var testBucket = new Gcp.Storage.Bucket("testBucket", new()
        {
            Location = "US",
            UniformBucketLevelAccess = true,
        });
    
        var testBucketObject = new Gcp.Storage.BucketObject("testBucketObject", new()
        {
            Source = new FileAsset("./test-fixtures/test.parquet.gzip"),
            Bucket = testBucket.Name,
        });
    
        var testDataset = new Gcp.BigQuery.Dataset("testDataset", new()
        {
            DatasetId = "job_load_dataset",
            FriendlyName = "test",
            Description = "This is a test description",
            Location = "US",
        });
    
        var testTable = new Gcp.BigQuery.Table("testTable", new()
        {
            DeletionProtection = false,
            TableId = "job_load_table",
            DatasetId = testDataset.DatasetId,
        });
    
        var job = new Gcp.BigQuery.Job("job", new()
        {
            JobId = "job_load",
            Labels = 
            {
                { "my_job", "load" },
            },
            Load = new Gcp.BigQuery.Inputs.JobLoadArgs
            {
                SourceUris = new[]
                {
                    Output.Tuple(testBucketObject.Bucket, testBucketObject.Name).Apply(values =>
                    {
                        var bucket = values.Item1;
                        var name = values.Item2;
                        return $"gs://{bucket}/{name}";
                    }),
                },
                DestinationTable = new Gcp.BigQuery.Inputs.JobLoadDestinationTableArgs
                {
                    ProjectId = testTable.Project,
                    DatasetId = testTable.DatasetId,
                    TableId = testTable.TableId,
                },
                SchemaUpdateOptions = new[]
                {
                    "ALLOW_FIELD_RELAXATION",
                    "ALLOW_FIELD_ADDITION",
                },
                WriteDisposition = "WRITE_APPEND",
                SourceFormat = "PARQUET",
                Autodetect = true,
                ParquetOptions = new Gcp.BigQuery.Inputs.JobLoadParquetOptionsArgs
                {
                    EnumAsString = true,
                    EnableListInference = true,
                },
            },
        });
    
    });
    
    package main
    
    import (
    	"fmt"
    
    	"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/bigquery"
    	"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/storage"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		testBucket, err := storage.NewBucket(ctx, "testBucket", &storage.BucketArgs{
    			Location:                 pulumi.String("US"),
    			UniformBucketLevelAccess: pulumi.Bool(true),
    		})
    		if err != nil {
    			return err
    		}
    		testBucketObject, err := storage.NewBucketObject(ctx, "testBucketObject", &storage.BucketObjectArgs{
    			Source: pulumi.NewFileAsset("./test-fixtures/test.parquet.gzip"),
    			Bucket: testBucket.Name,
    		})
    		if err != nil {
    			return err
    		}
    		testDataset, err := bigquery.NewDataset(ctx, "testDataset", &bigquery.DatasetArgs{
    			DatasetId:    pulumi.String("job_load_dataset"),
    			FriendlyName: pulumi.String("test"),
    			Description:  pulumi.String("This is a test description"),
    			Location:     pulumi.String("US"),
    		})
    		if err != nil {
    			return err
    		}
    		testTable, err := bigquery.NewTable(ctx, "testTable", &bigquery.TableArgs{
    			DeletionProtection: pulumi.Bool(false),
    			TableId:            pulumi.String("job_load_table"),
    			DatasetId:          testDataset.DatasetId,
    		})
    		if err != nil {
    			return err
    		}
    		_, err = bigquery.NewJob(ctx, "job", &bigquery.JobArgs{
    			JobId: pulumi.String("job_load"),
    			Labels: pulumi.StringMap{
    				"my_job": pulumi.String("load"),
    			},
    			Load: &bigquery.JobLoadArgs{
    				SourceUris: pulumi.StringArray{
    					pulumi.All(testBucketObject.Bucket, testBucketObject.Name).ApplyT(func(_args []interface{}) (string, error) {
    						bucket := _args[0].(string)
    						name := _args[1].(string)
    						return fmt.Sprintf("gs://%v/%v", bucket, name), nil
    					}).(pulumi.StringOutput),
    				},
    				DestinationTable: &bigquery.JobLoadDestinationTableArgs{
    					ProjectId: testTable.Project,
    					DatasetId: testTable.DatasetId,
    					TableId:   testTable.TableId,
    				},
    				SchemaUpdateOptions: pulumi.StringArray{
    					pulumi.String("ALLOW_FIELD_RELAXATION"),
    					pulumi.String("ALLOW_FIELD_ADDITION"),
    				},
    				WriteDisposition: pulumi.String("WRITE_APPEND"),
    				SourceFormat:     pulumi.String("PARQUET"),
    				Autodetect:       pulumi.Bool(true),
    				ParquetOptions: &bigquery.JobLoadParquetOptionsArgs{
    					EnumAsString:        pulumi.Bool(true),
    					EnableListInference: pulumi.Bool(true),
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.gcp.storage.Bucket;
    import com.pulumi.gcp.storage.BucketArgs;
    import com.pulumi.gcp.storage.BucketObject;
    import com.pulumi.gcp.storage.BucketObjectArgs;
    import com.pulumi.gcp.bigquery.Dataset;
    import com.pulumi.gcp.bigquery.DatasetArgs;
    import com.pulumi.gcp.bigquery.Table;
    import com.pulumi.gcp.bigquery.TableArgs;
    import com.pulumi.gcp.bigquery.Job;
    import com.pulumi.gcp.bigquery.JobArgs;
    import com.pulumi.gcp.bigquery.inputs.JobLoadArgs;
    import com.pulumi.gcp.bigquery.inputs.JobLoadDestinationTableArgs;
    import com.pulumi.gcp.bigquery.inputs.JobLoadParquetOptionsArgs;
    import com.pulumi.asset.FileAsset;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var testBucket = new Bucket("testBucket", BucketArgs.builder()        
                .location("US")
                .uniformBucketLevelAccess(true)
                .build());
    
            var testBucketObject = new BucketObject("testBucketObject", BucketObjectArgs.builder()        
                .source(new FileAsset("./test-fixtures/test.parquet.gzip"))
                .bucket(testBucket.name())
                .build());
    
            var testDataset = new Dataset("testDataset", DatasetArgs.builder()        
                .datasetId("job_load_dataset")
                .friendlyName("test")
                .description("This is a test description")
                .location("US")
                .build());
    
            var testTable = new Table("testTable", TableArgs.builder()        
                .deletionProtection(false)
                .tableId("job_load_table")
                .datasetId(testDataset.datasetId())
                .build());
    
            var job = new Job("job", JobArgs.builder()        
                .jobId("job_load")
                .labels(Map.of("my_job", "load"))
                .load(JobLoadArgs.builder()
                    .sourceUris(Output.tuple(testBucketObject.bucket(), testBucketObject.name()).applyValue(values -> {
                        var bucket = values.t1;
                        var name = values.t2;
                        return String.format("gs://%s/%s", bucket,name);
                    }))
                    .destinationTable(JobLoadDestinationTableArgs.builder()
                        .projectId(testTable.project())
                        .datasetId(testTable.datasetId())
                        .tableId(testTable.tableId())
                        .build())
                    .schemaUpdateOptions(                
                        "ALLOW_FIELD_RELAXATION",
                        "ALLOW_FIELD_ADDITION")
                    .writeDisposition("WRITE_APPEND")
                    .sourceFormat("PARQUET")
                    .autodetect(true)
                    .parquetOptions(JobLoadParquetOptionsArgs.builder()
                        .enumAsString(true)
                        .enableListInference(true)
                        .build())
                    .build())
                .build());
    
        }
    }
    
    import pulumi
    import pulumi_gcp as gcp
    
    test_bucket = gcp.storage.Bucket("testBucket",
        location="US",
        uniform_bucket_level_access=True)
    test_bucket_object = gcp.storage.BucketObject("testBucketObject",
        source=pulumi.FileAsset("./test-fixtures/test.parquet.gzip"),
        bucket=test_bucket.name)
    test_dataset = gcp.bigquery.Dataset("testDataset",
        dataset_id="job_load_dataset",
        friendly_name="test",
        description="This is a test description",
        location="US")
    test_table = gcp.bigquery.Table("testTable",
        deletion_protection=False,
        table_id="job_load_table",
        dataset_id=test_dataset.dataset_id)
    job = gcp.bigquery.Job("job",
        job_id="job_load",
        labels={
            "my_job": "load",
        },
        load=gcp.bigquery.JobLoadArgs(
        source_uris=[pulumi.Output.all(test_bucket_object.bucket, test_bucket_object.name).apply(lambda args: f"gs://{args[0]}/{args[1]}")],
            destination_table=gcp.bigquery.JobLoadDestinationTableArgs(
                project_id=test_table.project,
                dataset_id=test_table.dataset_id,
                table_id=test_table.table_id,
            ),
            schema_update_options=[
                "ALLOW_FIELD_RELAXATION",
                "ALLOW_FIELD_ADDITION",
            ],
            write_disposition="WRITE_APPEND",
            source_format="PARQUET",
            autodetect=True,
            parquet_options=gcp.bigquery.JobLoadParquetOptionsArgs(
                enum_as_string=True,
                enable_list_inference=True,
            ),
        ))
    
    import * as pulumi from "@pulumi/pulumi";
    import * as gcp from "@pulumi/gcp";
    
    const testBucket = new gcp.storage.Bucket("testBucket", {
        location: "US",
        uniformBucketLevelAccess: true,
    });
    const testBucketObject = new gcp.storage.BucketObject("testBucketObject", {
        source: new pulumi.asset.FileAsset("./test-fixtures/test.parquet.gzip"),
        bucket: testBucket.name,
    });
    const testDataset = new gcp.bigquery.Dataset("testDataset", {
        datasetId: "job_load_dataset",
        friendlyName: "test",
        description: "This is a test description",
        location: "US",
    });
    const testTable = new gcp.bigquery.Table("testTable", {
        deletionProtection: false,
        tableId: "job_load_table",
        datasetId: testDataset.datasetId,
    });
    const job = new gcp.bigquery.Job("job", {
        jobId: "job_load",
        labels: {
            my_job: "load",
        },
        load: {
            sourceUris: [pulumi.interpolate`gs://${testBucketObject.bucket}/${testBucketObject.name}`],
            destinationTable: {
                projectId: testTable.project,
                datasetId: testTable.datasetId,
                tableId: testTable.tableId,
            },
            schemaUpdateOptions: [
                "ALLOW_FIELD_RELAXATION",
                "ALLOW_FIELD_ADDITION",
            ],
            writeDisposition: "WRITE_APPEND",
            sourceFormat: "PARQUET",
            autodetect: true,
            parquetOptions: {
                enumAsString: true,
                enableListInference: true,
            },
        },
    });
    
    resources:
      testBucket:
        type: gcp:storage:Bucket
        properties:
          location: US
          uniformBucketLevelAccess: true
      testBucketObject:
        type: gcp:storage:BucketObject
        properties:
          source:
            fn::FileAsset: ./test-fixtures/test.parquet.gzip
          bucket: ${testBucket.name}
      testDataset:
        type: gcp:bigquery:Dataset
        properties:
          datasetId: job_load_dataset
          friendlyName: test
          description: This is a test description
          location: US
      testTable:
        type: gcp:bigquery:Table
        properties:
          deletionProtection: false
          tableId: job_load_table
          datasetId: ${testDataset.datasetId}
      job:
        type: gcp:bigquery:Job
        properties:
          jobId: job_load
          labels:
            my_job: load
          load:
            sourceUris:
              - gs://${testBucketObject.bucket}/${testBucketObject.name}
            destinationTable:
              projectId: ${testTable.project}
              datasetId: ${testTable.datasetId}
              tableId: ${testTable.tableId}
            schemaUpdateOptions:
              - ALLOW_FIELD_RELAXATION
              - ALLOW_FIELD_ADDITION
            writeDisposition: WRITE_APPEND
            sourceFormat: PARQUET
            autodetect: true
            parquetOptions:
              enumAsString: true
              enableListInference: true
    

    Bigquery Job Extract

    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Gcp = Pulumi.Gcp;
    
    return await Deployment.RunAsync(() => 
    {
        var source_oneDataset = new Gcp.BigQuery.Dataset("source-oneDataset", new()
        {
            DatasetId = "job_extract_dataset",
            FriendlyName = "test",
            Description = "This is a test description",
            Location = "US",
        });
    
        var source_oneTable = new Gcp.BigQuery.Table("source-oneTable", new()
        {
            DeletionProtection = false,
            DatasetId = source_oneDataset.DatasetId,
            TableId = "job_extract_table",
            Schema = @"[
      {
        ""name"": ""name"",
        ""type"": ""STRING"",
        ""mode"": ""NULLABLE""
      },
      {
        ""name"": ""post_abbr"",
        ""type"": ""STRING"",
        ""mode"": ""NULLABLE""
      },
      {
        ""name"": ""date"",
        ""type"": ""DATE"",
        ""mode"": ""NULLABLE""
      }
    ]
    ",
        });
    
        var dest = new Gcp.Storage.Bucket("dest", new()
        {
            Location = "US",
            ForceDestroy = true,
        });
    
        var job = new Gcp.BigQuery.Job("job", new()
        {
            JobId = "job_extract",
            Extract = new Gcp.BigQuery.Inputs.JobExtractArgs
            {
                DestinationUris = new[]
                {
                    dest.Url.Apply(url => $"{url}/extract"),
                },
                SourceTable = new Gcp.BigQuery.Inputs.JobExtractSourceTableArgs
                {
                    ProjectId = source_oneTable.Project,
                    DatasetId = source_oneTable.DatasetId,
                    TableId = source_oneTable.TableId,
                },
                DestinationFormat = "NEWLINE_DELIMITED_JSON",
                Compression = "GZIP",
            },
        });
    
    });
    
    package main
    
    import (
    	"fmt"
    
    	"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/bigquery"
    	"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/storage"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		source_oneDataset, err := bigquery.NewDataset(ctx, "source-oneDataset", &bigquery.DatasetArgs{
    			DatasetId:    pulumi.String("job_extract_dataset"),
    			FriendlyName: pulumi.String("test"),
    			Description:  pulumi.String("This is a test description"),
    			Location:     pulumi.String("US"),
    		})
    		if err != nil {
    			return err
    		}
    		source_oneTable, err := bigquery.NewTable(ctx, "source-oneTable", &bigquery.TableArgs{
    			DeletionProtection: pulumi.Bool(false),
    			DatasetId:          source_oneDataset.DatasetId,
    			TableId:            pulumi.String("job_extract_table"),
    			Schema: pulumi.String(`[
      {
        "name": "name",
        "type": "STRING",
        "mode": "NULLABLE"
      },
      {
        "name": "post_abbr",
        "type": "STRING",
        "mode": "NULLABLE"
      },
      {
        "name": "date",
        "type": "DATE",
        "mode": "NULLABLE"
      }
    ]
    `),
    		})
    		if err != nil {
    			return err
    		}
    		dest, err := storage.NewBucket(ctx, "dest", &storage.BucketArgs{
    			Location:     pulumi.String("US"),
    			ForceDestroy: pulumi.Bool(true),
    		})
    		if err != nil {
    			return err
    		}
    		_, err = bigquery.NewJob(ctx, "job", &bigquery.JobArgs{
    			JobId: pulumi.String("job_extract"),
    			Extract: &bigquery.JobExtractArgs{
    				DestinationUris: pulumi.StringArray{
    					dest.Url.ApplyT(func(url string) (string, error) {
    						return fmt.Sprintf("%v/extract", url), nil
    					}).(pulumi.StringOutput),
    				},
    				SourceTable: &bigquery.JobExtractSourceTableArgs{
    					ProjectId: source_oneTable.Project,
    					DatasetId: source_oneTable.DatasetId,
    					TableId:   source_oneTable.TableId,
    				},
    				DestinationFormat: pulumi.String("NEWLINE_DELIMITED_JSON"),
    				Compression:       pulumi.String("GZIP"),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.gcp.bigquery.Dataset;
    import com.pulumi.gcp.bigquery.DatasetArgs;
    import com.pulumi.gcp.bigquery.Table;
    import com.pulumi.gcp.bigquery.TableArgs;
    import com.pulumi.gcp.storage.Bucket;
    import com.pulumi.gcp.storage.BucketArgs;
    import com.pulumi.gcp.bigquery.Job;
    import com.pulumi.gcp.bigquery.JobArgs;
    import com.pulumi.gcp.bigquery.inputs.JobExtractArgs;
    import com.pulumi.gcp.bigquery.inputs.JobExtractSourceTableArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var source_oneDataset = new Dataset("source-oneDataset", DatasetArgs.builder()        
                .datasetId("job_extract_dataset")
                .friendlyName("test")
                .description("This is a test description")
                .location("US")
                .build());
    
            var source_oneTable = new Table("source-oneTable", TableArgs.builder()        
                .deletionProtection(false)
                .datasetId(source_oneDataset.datasetId())
                .tableId("job_extract_table")
                .schema("""
    [
      {
        "name": "name",
        "type": "STRING",
        "mode": "NULLABLE"
      },
      {
        "name": "post_abbr",
        "type": "STRING",
        "mode": "NULLABLE"
      },
      {
        "name": "date",
        "type": "DATE",
        "mode": "NULLABLE"
      }
    ]
                """)
                .build());
    
            var dest = new Bucket("dest", BucketArgs.builder()        
                .location("US")
                .forceDestroy(true)
                .build());
    
            var job = new Job("job", JobArgs.builder()        
                .jobId("job_extract")
                .extract(JobExtractArgs.builder()
                    .destinationUris(dest.url().applyValue(url -> String.format("%s/extract", url)))
                    .sourceTable(JobExtractSourceTableArgs.builder()
                        .projectId(source_oneTable.project())
                        .datasetId(source_oneTable.datasetId())
                        .tableId(source_oneTable.tableId())
                        .build())
                    .destinationFormat("NEWLINE_DELIMITED_JSON")
                    .compression("GZIP")
                    .build())
                .build());
    
        }
    }
    
    import pulumi
    import pulumi_gcp as gcp
    
    source_one_dataset = gcp.bigquery.Dataset("source-oneDataset",
        dataset_id="job_extract_dataset",
        friendly_name="test",
        description="This is a test description",
        location="US")
    source_one_table = gcp.bigquery.Table("source-oneTable",
        deletion_protection=False,
        dataset_id=source_one_dataset.dataset_id,
        table_id="job_extract_table",
        schema="""[
      {
        "name": "name",
        "type": "STRING",
        "mode": "NULLABLE"
      },
      {
        "name": "post_abbr",
        "type": "STRING",
        "mode": "NULLABLE"
      },
      {
        "name": "date",
        "type": "DATE",
        "mode": "NULLABLE"
      }
    ]
    """)
    dest = gcp.storage.Bucket("dest",
        location="US",
        force_destroy=True)
    job = gcp.bigquery.Job("job",
        job_id="job_extract",
        extract=gcp.bigquery.JobExtractArgs(
            destination_uris=[dest.url.apply(lambda url: f"{url}/extract")],
            source_table=gcp.bigquery.JobExtractSourceTableArgs(
                project_id=source_one_table.project,
                dataset_id=source_one_table.dataset_id,
                table_id=source_one_table.table_id,
            ),
            destination_format="NEWLINE_DELIMITED_JSON",
            compression="GZIP",
        ))
    
    import * as pulumi from "@pulumi/pulumi";
    import * as gcp from "@pulumi/gcp";
    
    const source_oneDataset = new gcp.bigquery.Dataset("source-oneDataset", {
        datasetId: "job_extract_dataset",
        friendlyName: "test",
        description: "This is a test description",
        location: "US",
    });
    const source_oneTable = new gcp.bigquery.Table("source-oneTable", {
        deletionProtection: false,
        datasetId: source_oneDataset.datasetId,
        tableId: "job_extract_table",
        schema: `[
      {
        "name": "name",
        "type": "STRING",
        "mode": "NULLABLE"
      },
      {
        "name": "post_abbr",
        "type": "STRING",
        "mode": "NULLABLE"
      },
      {
        "name": "date",
        "type": "DATE",
        "mode": "NULLABLE"
      }
    ]
    `,
    });
    const dest = new gcp.storage.Bucket("dest", {
        location: "US",
        forceDestroy: true,
    });
    const job = new gcp.bigquery.Job("job", {
        jobId: "job_extract",
        extract: {
            destinationUris: [pulumi.interpolate`${dest.url}/extract`],
            sourceTable: {
                projectId: source_oneTable.project,
                datasetId: source_oneTable.datasetId,
                tableId: source_oneTable.tableId,
            },
            destinationFormat: "NEWLINE_DELIMITED_JSON",
            compression: "GZIP",
        },
    });
    
    resources:
      source-oneTable:
        type: gcp:bigquery:Table
        properties:
          deletionProtection: false
          datasetId: ${["source-oneDataset"].datasetId}
          tableId: job_extract_table
          schema: |
            [
              {
                "name": "name",
                "type": "STRING",
                "mode": "NULLABLE"
              },
              {
                "name": "post_abbr",
                "type": "STRING",
                "mode": "NULLABLE"
              },
              {
                "name": "date",
                "type": "DATE",
                "mode": "NULLABLE"
              }
            ]        
      source-oneDataset:
        type: gcp:bigquery:Dataset
        properties:
          datasetId: job_extract_dataset
          friendlyName: test
          description: This is a test description
          location: US
      dest:
        type: gcp:storage:Bucket
        properties:
          location: US
          forceDestroy: true
      job:
        type: gcp:bigquery:Job
        properties:
          jobId: job_extract
          extract:
            destinationUris:
              - ${dest.url}/extract
            sourceTable:
              projectId: ${["source-oneTable"].project}
              datasetId: ${["source-oneTable"].datasetId}
              tableId: ${["source-oneTable"].tableId}
            destinationFormat: NEWLINE_DELIMITED_JSON
            compression: GZIP
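
    The examples above cover query, load, and extract jobs. The resource also supports copy jobs via the copy block described under Inputs below; the following TypeScript sketch is illustrative only, with hypothetical project, dataset, and table names rather than resources from the examples above:

    import * as gcp from "@pulumi/gcp";

    // Minimal sketch of a copy job. All project/dataset/table names are
    // hypothetical placeholders; in practice you would reference existing
    // gcp.bigquery.Table resources as in the examples above.
    const copyJob = new gcp.bigquery.Job("job-copy", {
        jobId: "job_copy",
        copy: {
            sourceTables: [{
                projectId: "my-project",
                datasetId: "source_dataset",
                tableId: "source_table",
            }],
            destinationTable: {
                projectId: "my-project",
                datasetId: "dest_dataset",
                tableId: "dest_table",
            },
            writeDisposition: "WRITE_TRUNCATE",
        },
    });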
    

    Create Job Resource

    new Job(name: string, args: JobArgs, opts?: CustomResourceOptions);
    @overload
    def Job(resource_name: str,
            opts: Optional[ResourceOptions] = None,
            copy: Optional[JobCopyArgs] = None,
            extract: Optional[JobExtractArgs] = None,
            job_id: Optional[str] = None,
            job_timeout_ms: Optional[str] = None,
            labels: Optional[Mapping[str, str]] = None,
            load: Optional[JobLoadArgs] = None,
            location: Optional[str] = None,
            project: Optional[str] = None,
            query: Optional[JobQueryArgs] = None)
    @overload
    def Job(resource_name: str,
            args: JobArgs,
            opts: Optional[ResourceOptions] = None)
    func NewJob(ctx *Context, name string, args JobArgs, opts ...ResourceOption) (*Job, error)
    public Job(string name, JobArgs args, CustomResourceOptions? opts = null)
    public Job(String name, JobArgs args)
    public Job(String name, JobArgs args, CustomResourceOptions options)
    
    type: gcp:bigquery:Job
    properties: # The arguments to resource properties.
    options: # Bag of options to control resource's behavior.
    
    
    name string
    The unique name of the resource.
    args JobArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    resource_name str
    The unique name of the resource.
    args JobArgs
    The arguments to resource properties.
    opts ResourceOptions
    Bag of options to control resource's behavior.
    ctx Context
    Context object for the current deployment.
    name string
    The unique name of the resource.
    args JobArgs
    The arguments to resource properties.
    opts ResourceOption
    Bag of options to control resource's behavior.
    name string
    The unique name of the resource.
    args JobArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    name String
    The unique name of the resource.
    args JobArgs
    The arguments to resource properties.
    options CustomResourceOptions
    Bag of options to control resource's behavior.
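
    As a brief illustration of the opts/options parameter described above, this TypeScript sketch passes a bag of custom resource options alongside the arguments (the option values are illustrative, not required):

    import * as gcp from "@pulumi/gcp";

    const dataset = new gcp.bigquery.Dataset("example-dataset", {
        datasetId: "example_dataset",
        location: "US",
    });

    // The third constructor argument is the options bag (CustomResourceOptions).
    const job = new gcp.bigquery.Job("example-job", {
        jobId: "job_with_options",
        query: {
            query: "SELECT 1",
        },
    }, {
        dependsOn: [dataset], // wait for the dataset even without a direct reference
        protect: false,       // set to true to guard the resource against deletion
    });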

    Job Resource Properties

    To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

    Inputs

    The Job resource accepts the following input properties:

    JobId string

    The ID of the job. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024 characters.

    Copy JobCopy

    Copies a table. Structure is documented below.

    Extract JobExtract

    Configures an extract job. Structure is documented below.

    JobTimeoutMs string

    Job timeout in milliseconds. If this time limit is exceeded, BigQuery may attempt to terminate the job.

    Labels Dictionary<string, string>

    The labels associated with this job. You can use these to organize and group your jobs.

    Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.

    Load JobLoad

    Configures a load job. Structure is documented below.

    Location string

    The geographic location of the job. The default value is US.

    Project string

    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.

    Query JobQuery

    SQL query text to execute. The useLegacySql field can be used to indicate whether the query uses legacy SQL or standard SQL. NOTE: queries containing DML language (DELETE, UPDATE, MERGE, INSERT) must specify create_disposition = "" and write_disposition = "".

    JobId string

    The ID of the job. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024 characters.

    Copy JobCopyArgs

    Copies a table. Structure is documented below.

    Extract JobExtractArgs

    Configures an extract job. Structure is documented below.

    JobTimeoutMs string

    Job timeout in milliseconds. If this time limit is exceeded, BigQuery may attempt to terminate the job.

    Labels map[string]string

    The labels associated with this job. You can use these to organize and group your jobs.

    Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.

    Load JobLoadArgs

    Configures a load job. Structure is documented below.

    Location string

    The geographic location of the job. The default value is US.

    Project string

    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.

    Query JobQueryArgs

    SQL query text to execute. The useLegacySql field can be used to indicate whether the query uses legacy SQL or standard SQL. NOTE: queries containing DML language (DELETE, UPDATE, MERGE, INSERT) must specify create_disposition = "" and write_disposition = "".

    jobId String

    The ID of the job. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024 characters.

    copy JobCopy

    Copies a table. Structure is documented below.

    extract JobExtract

    Configures an extract job. Structure is documented below.

    jobTimeoutMs String

    Job timeout in milliseconds. If this time limit is exceeded, BigQuery may attempt to terminate the job.

    labels Map<String,String>

    The labels associated with this job. You can use these to organize and group your jobs.

    Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.

    load JobLoad

    Configures a load job. Structure is documented below.

    location String

    The geographic location of the job. The default value is US.

    project String

    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.

    query JobQuery

    SQL query text to execute. The useLegacySql field can be used to indicate whether the query uses legacy SQL or standard SQL. NOTE: queries containing DML language (DELETE, UPDATE, MERGE, INSERT) must specify create_disposition = "" and write_disposition = "".

    jobId string

    The ID of the job. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024 characters.

    copy JobCopy

    Copies a table. Structure is documented below.

    extract JobExtract

    Configures an extract job. Structure is documented below.

    jobTimeoutMs string

    Job timeout in milliseconds. If this time limit is exceeded, BigQuery may attempt to terminate the job.

    labels {[key: string]: string}

    The labels associated with this job. You can use these to organize and group your jobs.

    Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.

    load JobLoad

    Configures a load job. Structure is documented below.

    location string

    The geographic location of the job. The default value is US.

    project string

    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.

    query JobQuery

    SQL query text to execute. The useLegacySql field can be used to indicate whether the query uses legacy SQL or standard SQL. NOTE: queries containing DML language (DELETE, UPDATE, MERGE, INSERT) must specify create_disposition = "" and write_disposition = "".

    job_id str

    The ID of the job. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024 characters.

    copy JobCopyArgs

    Copies a table. Structure is documented below.

    extract JobExtractArgs

    Configures an extract job. Structure is documented below.

    job_timeout_ms str

    Job timeout in milliseconds. If this time limit is exceeded, BigQuery may attempt to terminate the job.

    labels Mapping[str, str]

    The labels associated with this job. You can use these to organize and group your jobs.

    Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.

    load JobLoadArgs

    Configures a load job. Structure is documented below.

    location str

    The geographic location of the job. The default value is US.

    project str

    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.

    query JobQueryArgs

    SQL query text to execute. The useLegacySql field can be used to indicate whether the query uses legacy SQL or standard SQL. NOTE: queries containing DML language (DELETE, UPDATE, MERGE, INSERT) must specify create_disposition = "" and write_disposition = "".

    jobId String

    The ID of the job. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024 characters.

    copy Property Map

    Copies a table. Structure is documented below.

    extract Property Map

    Configures an extract job. Structure is documented below.

    jobTimeoutMs String

    Job timeout in milliseconds. If this time limit is exceeded, BigQuery may attempt to terminate the job.

    labels Map<String>

    The labels associated with this job. You can use these to organize and group your jobs.

    Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.

    load Property Map

    Configures a load job. Structure is documented below.

    location String

    The geographic location of the job. The default value is US.

    project String

    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.

    query Property Map

    SQL query text to execute. The useLegacySql field can be used to indicate whether the query uses legacy SQL or standard SQL. NOTE: queries containing DML language (DELETE, UPDATE, MERGE, INSERT) must specify create_disposition = "" and write_disposition = "".
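
    As a rough illustration of how these inputs fit together, the following Python sketch configures a load job (a job sets exactly one of copy, extract, load, or query). The project, bucket, dataset, and table names are placeholders rather than resources defined on this page:

    import pulumi
    import pulumi_gcp as gcp

    load_job = gcp.bigquery.Job("load-job",
        job_id="job_load_sketch",
        location="US",                 # defaults to US when omitted
        job_timeout_ms="600000",       # a string, per the schema above
        labels={                       # non-authoritative; see effective_labels
            "team": "analytics",
        },
        load=gcp.bigquery.JobLoadArgs(
            source_uris=["gs://my-bucket/data/*.csv"],   # placeholder bucket
            destination_table=gcp.bigquery.JobLoadDestinationTableArgs(
                project_id="my-project",
                dataset_id="example_dataset",
                table_id="load_target",
            ),
            source_format="CSV",
            autodetect=True,           # infer the schema from the CSV files
            skip_leading_rows=1,       # skip the header row
        ),
    )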

    Outputs

    All input properties are implicitly available as output properties. Additionally, the Job resource produces the following output properties:

    EffectiveLabels Dictionary<string, string>

    (Output) All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.

    Id string

    The provider-assigned unique ID for this managed resource.

    JobType string

    (Output) The type of the job.

    PulumiLabels Dictionary<string, string>

    (Output) The combination of labels configured directly on the resource and default labels configured on the provider.

    Statuses List<JobStatus>

    The status of this job. Examine this value when polling an asynchronous job to see if the job is complete. Structure is documented below.

    UserEmail string

    Email address of the user who ran the job.

    EffectiveLabels map[string]string

    (Output) All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.

    Id string

    The provider-assigned unique ID for this managed resource.

    JobType string

    (Output) The type of the job.

    PulumiLabels map[string]string

    (Output) The combination of labels configured directly on the resource and default labels configured on the provider.

    Statuses []JobStatus

    The status of this job. Examine this value when polling an asynchronous job to see if the job is complete. Structure is documented below.

    UserEmail string

    Email address of the user who ran the job.

    effectiveLabels Map<String,String>

    (Output) All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.

    id String

    The provider-assigned unique ID for this managed resource.

    jobType String

    (Output) The type of the job.

    pulumiLabels Map<String,String>

    (Output) The combination of labels configured directly on the resource and default labels configured on the provider.

    statuses List<JobStatus>

    The status of this job. Examine this value when polling an asynchronous job to see if the job is complete. Structure is documented below.

    userEmail String

    Email address of the user who ran the job.

    effectiveLabels {[key: string]: string}

    (Output) All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.

    id string

    The provider-assigned unique ID for this managed resource.

    jobType string

    (Output) The type of the job.

    pulumiLabels {[key: string]: string}

    (Output) The combination of labels configured directly on the resource and default labels configured on the provider.

    statuses JobStatus[]

    The status of this job. Examine this value when polling an asynchronous job to see if the job is complete. Structure is documented below.

    userEmail string

    Email address of the user who ran the job.

    effective_labels Mapping[str, str]

    (Output) All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.

    id str

    The provider-assigned unique ID for this managed resource.

    job_type str

    (Output) The type of the job.

    pulumi_labels Mapping[str, str]

    (Output) The combination of labels configured directly on the resource and default labels configured on the provider.

    statuses Sequence[JobStatus]

    The status of this job. Examine this value when polling an asynchronous job to see if the job is complete. Structure is documented below.

    user_email str

    Email address of the user who ran the job.

    effectiveLabels Map<String>

    (Output) All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.

    id String

    The provider-assigned unique ID for this managed resource.

    jobType String

    (Output) The type of the job.

    pulumiLabels Map<String>

    (Output) The combination of labels configured directly on the resource and default labels configured on the provider.

    statuses List<Property Map>

    The status of this job. Examine this value when polling an asynchronous job to see if the job is complete. Structure is documented below.

    userEmail String

    Email address of the user who ran the job.
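
    For example, in Python the extra output properties can be read off the resource and exported; this assumes a job resource such as the load_job sketch above, and that each JobStatus entry exposes a state field (structure documented below):

    pulumi.export("job_type", load_job.job_type)
    pulumi.export("run_by", load_job.user_email)
    # statuses is a list; apply() unwraps it so the first entry's state can be read.
    pulumi.export("job_state", load_job.statuses.apply(lambda s: s[0].state))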

    Look up Existing Job Resource

    Get an existing Job resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

    public static get(name: string, id: Input<ID>, state?: JobState, opts?: CustomResourceOptions): Job
    @staticmethod
    def get(resource_name: str,
            id: str,
            opts: Optional[ResourceOptions] = None,
            copy: Optional[JobCopyArgs] = None,
            effective_labels: Optional[Mapping[str, str]] = None,
            extract: Optional[JobExtractArgs] = None,
            job_id: Optional[str] = None,
            job_timeout_ms: Optional[str] = None,
            job_type: Optional[str] = None,
            labels: Optional[Mapping[str, str]] = None,
            load: Optional[JobLoadArgs] = None,
            location: Optional[str] = None,
            project: Optional[str] = None,
            pulumi_labels: Optional[Mapping[str, str]] = None,
            query: Optional[JobQueryArgs] = None,
            statuses: Optional[Sequence[JobStatusArgs]] = None,
            user_email: Optional[str] = None) -> Job
    func GetJob(ctx *Context, name string, id IDInput, state *JobState, opts ...ResourceOption) (*Job, error)
    public static Job Get(string name, Input<string> id, JobState? state, CustomResourceOptions? opts = null)
    public static Job get(String name, Output<String> id, JobState state, CustomResourceOptions options)
    Resource lookup is not supported in YAML
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    resource_name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    The following state arguments are supported:
    Copy JobCopy

    Copies a table. Structure is documented below.

    EffectiveLabels Dictionary<string, string>

    (Output) All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.

    Extract JobExtract

    Configures an extract job. Structure is documented below.

    JobId string

    The ID of the job. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024 characters.

    JobTimeoutMs string

    Job timeout in milliseconds. If this time limit is exceeded, BigQuery may attempt to terminate the job.

    JobType string

    (Output) The type of the job.

    Labels Dictionary<string, string>

    The labels associated with this job. You can use these to organize and group your jobs.

    Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.

    Load JobLoad

    Configures a load job. Structure is documented below.

    Location string

    The geographic location of the job. The default value is US.

    Project string

    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.

    PulumiLabels Dictionary<string, string>

    (Output) The combination of labels configured directly on the resource and default labels configured on the provider.

    Query JobQuery

    SQL query text to execute. The useLegacySql field can be used to indicate whether the query uses legacy SQL or standard SQL. NOTE: queries containing DML language (DELETE, UPDATE, MERGE, INSERT) must specify create_disposition = "" and write_disposition = "".

    Statuses List<JobStatus>

    The status of this job. Examine this value when polling an asynchronous job to see if the job is complete. Structure is documented below.

    UserEmail string

    Email address of the user who ran the job.

    Copy JobCopyArgs

    Copies a table. Structure is documented below.

    EffectiveLabels map[string]string

    (Output) All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.

    Extract JobExtractArgs

    Configures an extract job. Structure is documented below.

    JobId string

    The ID of the job. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024 characters.

    JobTimeoutMs string

    Job timeout in milliseconds. If this time limit is exceeded, BigQuery may attempt to terminate the job.

    JobType string

    (Output) The type of the job.

    Labels map[string]string

    The labels associated with this job. You can use these to organize and group your jobs.

    Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.

    Load JobLoadArgs

    Configures a load job. Structure is documented below.

    Location string

    The geographic location of the job. The default value is US.

    Project string

    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.

    PulumiLabels map[string]string

    (Output) The combination of labels configured directly on the resource and default labels configured on the provider.

    Query JobQueryArgs

    SQL query text to execute. The useLegacySql field can be used to indicate whether the query uses legacy SQL or standard SQL. NOTE: queries containing DML language (DELETE, UPDATE, MERGE, INSERT) must specify create_disposition = "" and write_disposition = "".

    Statuses []JobStatusArgs

    The status of this job. Examine this value when polling an asynchronous job to see if the job is complete. Structure is documented below.

    UserEmail string

    Email address of the user who ran the job.

    copy JobCopy

    Copies a table. Structure is documented below.

    effectiveLabels Map<String,String>

    (Output) All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.

    extract JobExtract

    Configures an extract job. Structure is documented below.

    jobId String

    The ID of the job. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024 characters.

    jobTimeoutMs String

    Job timeout in milliseconds. If this time limit is exceeded, BigQuery may attempt to terminate the job.

    jobType String

    (Output) The type of the job.

    labels Map<String,String>

    The labels associated with this job. You can use these to organize and group your jobs.

    Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.

    load JobLoad

    Configures a load job. Structure is documented below.

    location String

    The geographic location of the job. The default value is US.

    project String

    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.

    pulumiLabels Map<String,String>

    (Output) The combination of labels configured directly on the resource and default labels configured on the provider.

    query JobQuery

    SQL query text to execute. The useLegacySql field can be used to indicate whether the query uses legacy SQL or standard SQL. NOTE: queries containing DML language (DELETE, UPDATE, MERGE, INSERT) must specify create_disposition = "" and write_disposition = "".

    statuses List<JobStatus>

    The status of this job. Examine this value when polling an asynchronous job to see if the job is complete. Structure is documented below.

    userEmail String

    Email address of the user who ran the job.

    copy JobCopy

    Copies a table. Structure is documented below.

    effectiveLabels {[key: string]: string}

    (Output) All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.

    extract JobExtract

    Configures an extract job. Structure is documented below.

    jobId string

    The ID of the job. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024 characters.

    jobTimeoutMs string

    Job timeout in milliseconds. If this time limit is exceeded, BigQuery may attempt to terminate the job.

    jobType string

    (Output) The type of the job.

    labels {[key: string]: string}

    The labels associated with this job. You can use these to organize and group your jobs.

    Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.

    load JobLoad

    Configures a load job. Structure is documented below.

    location string

    The geographic location of the job. The default value is US.

    project string

    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.

    pulumiLabels {[key: string]: string}

    (Output) The combination of labels configured directly on the resource and default labels configured on the provider.

    query JobQuery

    SQL query text to execute. The useLegacySql field can be used to indicate whether the query uses legacy SQL or standard SQL. NOTE: queries containing DML language (DELETE, UPDATE, MERGE, INSERT) must specify create_disposition = "" and write_disposition = "".

    statuses JobStatus[]

    The status of this job. Examine this value when polling an asynchronous job to see if the job is complete. Structure is documented below.

    userEmail string

    Email address of the user who ran the job.

    copy JobCopyArgs

    Copies a table. Structure is documented below.

    effective_labels Mapping[str, str]

    (Output) All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.

    extract JobExtractArgs

    Configures an extract job. Structure is documented below.

    job_id str

    The ID of the job. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024 characters.

    job_timeout_ms str

    Job timeout in milliseconds. If this time limit is exceeded, BigQuery may attempt to terminate the job.

    job_type str

    (Output) The type of the job.

    labels Mapping[str, str]

    The labels associated with this job. You can use these to organize and group your jobs.

    Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.

    load JobLoadArgs

    Configures a load job. Structure is documented below.

    location str

    The geographic location of the job. The default value is US.

    project str

    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.

    pulumi_labels Mapping[str, str]

    (Output) The combination of labels configured directly on the resource and default labels configured on the provider.

    query JobQueryArgs

    SQL query text to execute. The useLegacySql field can be used to indicate whether the query uses legacy SQL or standard SQL. NOTE: queries containing DML language (DELETE, UPDATE, MERGE, INSERT) must specify create_disposition = "" and write_disposition = "".

    statuses Sequence[JobStatusArgs]

    The status of this job. Examine this value when polling an asynchronous job to see if the job is complete. Structure is documented below.

    user_email str

    Email address of the user who ran the job.

    copy Property Map

    Copies a table. Structure is documented below.

    effectiveLabels Map<String>

    (Output) All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.

    extract Property Map

    Configures an extract job. Structure is documented below.

    jobId String

    The ID of the job. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024 characters.

    jobTimeoutMs String

    Job timeout in milliseconds. If this time limit is exceeded, BigQuery may attempt to terminate the job.

    jobType String

    (Output) The type of the job.

    labels Map<String>

    The labels associated with this job. You can use these to organize and group your jobs.

    Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.

    load Property Map

    Configures a load job. Structure is documented below.

    location String

    The geographic location of the job. The default value is US.

    project String

    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.

    pulumiLabels Map<String>

    (Output) The combination of labels configured directly on the resource and default labels configured on the provider.

    query Property Map

    SQL query text to execute. The useLegacySql field can be used to indicate whether the query uses legacy SQL or standard SQL. NOTE: queries containing DML language (DELETE, UPDATE, MERGE, INSERT) must specify create_disposition = "" and write_disposition = "".

    statuses List<Property Map>

    The status of this job. Examine this value when polling an asynchronous job to see if the job is complete. Structure is documented below.

    userEmail String

    Email address of the user who ran the job.
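
    A hedged Python sketch of looking up an existing job; the ID format shown here is an assumption, so prefer the id output of the original resource or the value reported by pulumi import:

    existing = gcp.bigquery.Job.get("existing-job",
        "projects/my-project/jobs/job_load_sketch/location/US")  # placeholder ID
    pulumi.export("existing_job_type", existing.job_type)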

    Supporting Types

    JobCopy, JobCopyArgs

    SourceTables List<JobCopySourceTable>

    Source tables to copy. Structure is documented below.

    CreateDisposition string

    Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is CREATE_IF_NEEDED. Possible values are: CREATE_IF_NEEDED, CREATE_NEVER.

    DestinationEncryptionConfiguration JobCopyDestinationEncryptionConfiguration

    Custom encryption configuration (e.g., Cloud KMS keys) Structure is documented below.

    DestinationTable JobCopyDestinationTable

    The destination table. Structure is documented below.

    WriteDisposition string

    Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is WRITE_EMPTY. Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY.

    SourceTables []JobCopySourceTable

    Source tables to copy. Structure is documented below.

    CreateDisposition string

    Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is CREATE_IF_NEEDED. Possible values are: CREATE_IF_NEEDED, CREATE_NEVER.

    DestinationEncryptionConfiguration JobCopyDestinationEncryptionConfiguration

    Custom encryption configuration (e.g., Cloud KMS keys) Structure is documented below.

    DestinationTable JobCopyDestinationTable

    The destination table. Structure is documented below.

    WriteDisposition string

    Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is WRITE_EMPTY. Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY.

    sourceTables List<JobCopySourceTable>

    Source tables to copy. Structure is documented below.

    createDisposition String

    Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is CREATE_IF_NEEDED. Possible values are: CREATE_IF_NEEDED, CREATE_NEVER.

    destinationEncryptionConfiguration JobCopyDestinationEncryptionConfiguration

    Custom encryption configuration (e.g., Cloud KMS keys) Structure is documented below.

    destinationTable JobCopyDestinationTable

    The destination table. Structure is documented below.

    writeDisposition String

    Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is WRITE_EMPTY. Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY.

    sourceTables JobCopySourceTable[]

    Source tables to copy. Structure is documented below.

    createDisposition string

    Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is CREATE_IF_NEEDED. Possible values are: CREATE_IF_NEEDED, CREATE_NEVER.

    destinationEncryptionConfiguration JobCopyDestinationEncryptionConfiguration

    Custom encryption configuration (e.g., Cloud KMS keys) Structure is documented below.

    destinationTable JobCopyDestinationTable

    The destination table. Structure is documented below.

    writeDisposition string

    Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is WRITE_EMPTY. Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY.

    source_tables Sequence[JobCopySourceTable]

    Source tables to copy. Structure is documented below.

    create_disposition str

    Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is CREATE_IF_NEEDED. Possible values are: CREATE_IF_NEEDED, CREATE_NEVER.

    destination_encryption_configuration JobCopyDestinationEncryptionConfiguration

    Custom encryption configuration (e.g., Cloud KMS keys) Structure is documented below.

    destination_table JobCopyDestinationTable

    The destination table. Structure is documented below.

    write_disposition str

    Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is WRITE_EMPTY. Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY.

    sourceTables List<Property Map>

    Source tables to copy. Structure is documented below.

    createDisposition String

    Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is CREATE_IF_NEEDED. Possible values are: CREATE_IF_NEEDED, CREATE_NEVER.

    destinationEncryptionConfiguration Property Map

    Custom encryption configuration (e.g., Cloud KMS keys) Structure is documented below.

    destinationTable Property Map

    The destination table. Structure is documented below.

    writeDisposition String

    Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is WRITE_EMPTY. Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY.
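
    As a sketch of the disposition settings, the following Python JobCopyArgs overwrites an existing destination table instead of failing with the default WRITE_EMPTY behavior; the table names are placeholders:

    copy_config = gcp.bigquery.JobCopyArgs(
        source_tables=[gcp.bigquery.JobCopySourceTableArgs(
            project_id="my-project",
            dataset_id="example_dataset",
            table_id="source_table",
        )],
        destination_table=gcp.bigquery.JobCopyDestinationTableArgs(
            project_id="my-project",
            dataset_id="example_dataset",
            table_id="dest_table",
        ),
        create_disposition="CREATE_IF_NEEDED",   # the default
        write_disposition="WRITE_TRUNCATE",      # replace any existing data
    )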

    JobCopyDestinationEncryptionConfiguration, JobCopyDestinationEncryptionConfigurationArgs

    KmsKeyName string

    Describes the Cloud KMS encryption key that will be used to protect the destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.

    KmsKeyVersion string

    (Output) Describes the Cloud KMS encryption key version used to protect the destination BigQuery table.

    KmsKeyName string

    Describes the Cloud KMS encryption key that will be used to protect the destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.

    KmsKeyVersion string

    (Output) Describes the Cloud KMS encryption key version used to protect the destination BigQuery table.

    kmsKeyName String

    Describes the Cloud KMS encryption key that will be used to protect the destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.

    kmsKeyVersion String

    (Output) Describes the Cloud KMS encryption key version used to protect the destination BigQuery table.

    kmsKeyName string

    Describes the Cloud KMS encryption key that will be used to protect the destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.

    kmsKeyVersion string

    (Output) Describes the Cloud KMS encryption key version used to protect the destination BigQuery table.

    kms_key_name str

    Describes the Cloud KMS encryption key that will be used to protect the destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.

    kms_key_version str

    (Output) Describes the Cloud KMS encryption key version used to protect the destination BigQuery table.

    kmsKeyName String

    Describes the Cloud KMS encryption key that will be used to protect the destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.

    kmsKeyVersion String

    (Output) Describes the Cloud KMS encryption key version used to protect the destination BigQuery table.
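
    For example, a copy job's destination can be protected with a customer-managed key by setting kms_key_name (Python sketch; the key path is a placeholder, and kms_key_version is output-only):

    encryption = gcp.bigquery.JobCopyDestinationEncryptionConfigurationArgs(
        kms_key_name="projects/my-project/locations/us/keyRings/my-ring/cryptoKeys/my-key",
    )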

    JobCopyDestinationTable, JobCopyDestinationTableArgs

    TableId string

    The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or in the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.

    DatasetId string

    The ID of the dataset containing this table.

    ProjectId string

    The ID of the project containing this table.

    TableId string

    The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or in the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.

    DatasetId string

    The ID of the dataset containing this table.

    ProjectId string

    The ID of the project containing this table.

    tableId String

    The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or in the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.

    datasetId String

    The ID of the dataset containing this table.

    projectId String

    The ID of the project containing this table.

    tableId string

    The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or in the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.

    datasetId string

    The ID of the dataset containing this table.

    projectId string

    The ID of the project containing this table.

    table_id str

    The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or in the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.

    dataset_id str

    The ID of the dataset containing this table.

    project_id str

    The ID of the project containing this table.

    tableId String

    The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or in the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.

    datasetId String

    The ID of the dataset containing this table.

    projectId String

    The ID of the project containing this table.
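
    The two documented ways of referring to the same destination table, sketched in Python with placeholder names:

    by_parts = gcp.bigquery.JobCopyDestinationTableArgs(
        project_id="my-project",
        dataset_id="example_dataset",
        table_id="dest_table",
    )
    by_path = gcp.bigquery.JobCopyDestinationTableArgs(
        table_id="projects/my-project/datasets/example_dataset/tables/dest_table",
    )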

    JobCopySourceTable, JobCopySourceTableArgs

    TableId string

    The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or in the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.

    DatasetId string

    The ID of the dataset containing this table.

    ProjectId string

    The ID of the project containing this table.

    TableId string

    The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or in the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.

    DatasetId string

    The ID of the dataset containing this table.

    ProjectId string

    The ID of the project containing this table.

    tableId String

    The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or in the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.

    datasetId String

    The ID of the dataset containing this table.

    projectId String

    The ID of the project containing this table.

    tableId string

    The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or in the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.

    datasetId string

    The ID of the dataset containing this table.

    projectId string

    The ID of the project containing this table.

    table_id str

    The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or in the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.

    dataset_id str

    The ID of the dataset containing this table.

    project_id str

    The ID of the project containing this table.

    tableId String

    The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or in the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.

    datasetId String

    The ID of the dataset containing this table.

    projectId String

    The ID of the project containing this table.

    JobExtract, JobExtractArgs

    DestinationUris List<string>

    A list of fully-qualified Google Cloud Storage URIs where the extracted table should be written.

    Compression string

    The compression type to use for exported files. Possible values include GZIP, DEFLATE, SNAPPY, and NONE. The default value is NONE. DEFLATE and SNAPPY are only supported for Avro.

    DestinationFormat string

    The exported file format. Possible values include CSV, NEWLINE_DELIMITED_JSON and AVRO for tables and SAVED_MODEL for models. The default value for tables is CSV. Tables with nested or repeated fields cannot be exported as CSV. The default value for models is SAVED_MODEL.

    FieldDelimiter string

    When extracting data in CSV format, this defines the delimiter to use between fields in the exported data. Default is ','

    PrintHeader bool

    Whether to print out a header row in the results. Default is true.

    SourceModel JobExtractSourceModel

    A reference to the model being exported. Structure is documented below.

    SourceTable JobExtractSourceTable

    A reference to the table being exported. Structure is documented below.

    UseAvroLogicalTypes bool

    Whether to use logical types when extracting to AVRO format.

    DestinationUris []string

    A list of fully-qualified Google Cloud Storage URIs where the extracted table should be written.

    Compression string

    The compression type to use for exported files. Possible values include GZIP, DEFLATE, SNAPPY, and NONE. The default value is NONE. DEFLATE and SNAPPY are only supported for Avro.

    DestinationFormat string

    The exported file format. Possible values include CSV, NEWLINE_DELIMITED_JSON and AVRO for tables and SAVED_MODEL for models. The default value for tables is CSV. Tables with nested or repeated fields cannot be exported as CSV. The default value for models is SAVED_MODEL.

    FieldDelimiter string

    When extracting data in CSV format, this defines the delimiter to use between fields in the exported data. Default is ','

    PrintHeader bool

    Whether to print out a header row in the results. Default is true.

    SourceModel JobExtractSourceModel

    A reference to the model being exported. Structure is documented below.

    SourceTable JobExtractSourceTable

    A reference to the table being exported. Structure is documented below.

    UseAvroLogicalTypes bool

    Whether to use logical types when extracting to AVRO format.

    destinationUris List<String>

    A list of fully-qualified Google Cloud Storage URIs where the extracted table should be written.

    compression String

    The compression type to use for exported files. Possible values include GZIP, DEFLATE, SNAPPY, and NONE. The default value is NONE. DEFLATE and SNAPPY are only supported for Avro.

    destinationFormat String

    The exported file format. Possible values include CSV, NEWLINE_DELIMITED_JSON and AVRO for tables and SAVED_MODEL for models. The default value for tables is CSV. Tables with nested or repeated fields cannot be exported as CSV. The default value for models is SAVED_MODEL.

    fieldDelimiter String

    When extracting data in CSV format, this defines the delimiter to use between fields in the exported data. Default is ','

    printHeader Boolean

    Whether to print out a header row in the results. Default is true.

    sourceModel JobExtractSourceModel

    A reference to the model being exported. Structure is documented below.

    sourceTable JobExtractSourceTable

    A reference to the table being exported. Structure is documented below.

    useAvroLogicalTypes Boolean

    Whether to use logical types when extracting to AVRO format.

    destinationUris string[]

    A list of fully-qualified Google Cloud Storage URIs where the extracted table should be written.

    compression string

    The compression type to use for exported files. Possible values include GZIP, DEFLATE, SNAPPY, and NONE. The default value is NONE. DEFLATE and SNAPPY are only supported for Avro.

    destinationFormat string

    The exported file format. Possible values include CSV, NEWLINE_DELIMITED_JSON and AVRO for tables and SAVED_MODEL for models. The default value for tables is CSV. Tables with nested or repeated fields cannot be exported as CSV. The default value for models is SAVED_MODEL.

    fieldDelimiter string

    When extracting data in CSV format, this defines the delimiter to use between fields in the exported data. Default is ','

    printHeader boolean

    Whether to print out a header row in the results. Default is true.

    sourceModel JobExtractSourceModel

    A reference to the model being exported. Structure is documented below.

    sourceTable JobExtractSourceTable

    A reference to the table being exported. Structure is documented below.

    useAvroLogicalTypes boolean

    Whether to use logical types when extracting to AVRO format.

    destination_uris Sequence[str]

    A list of fully-qualified Google Cloud Storage URIs where the extracted table should be written.

    compression str

    The compression type to use for exported files. Possible values include GZIP, DEFLATE, SNAPPY, and NONE. The default value is NONE. DEFLATE and SNAPPY are only supported for Avro.

    destination_format str

    The exported file format. Possible values include CSV, NEWLINE_DELIMITED_JSON and AVRO for tables and SAVED_MODEL for models. The default value for tables is CSV. Tables with nested or repeated fields cannot be exported as CSV. The default value for models is SAVED_MODEL.

    field_delimiter str

    When extracting data in CSV format, this defines the delimiter to use between fields in the exported data. Default is ','

    print_header bool

    Whether to print out a header row in the results. Default is true.

    source_model JobExtractSourceModel

    A reference to the model being exported. Structure is documented below.

    source_table JobExtractSourceTable

    A reference to the table being exported. Structure is documented below.

    use_avro_logical_types bool

    Whether to use logical types when extracting to AVRO format.

    destinationUris List<String>

    A list of fully-qualified Google Cloud Storage URIs where the extracted table should be written.

    compression String

    The compression type to use for exported files. Possible values include GZIP, DEFLATE, SNAPPY, and NONE. The default value is NONE. DEFLATE and SNAPPY are only supported for Avro.

    destinationFormat String

    The exported file format. Possible values include CSV, NEWLINE_DELIMITED_JSON and AVRO for tables and SAVED_MODEL for models. The default value for tables is CSV. Tables with nested or repeated fields cannot be exported as CSV. The default value for models is SAVED_MODEL.

    fieldDelimiter String

    When extracting data in CSV format, this defines the delimiter to use between fields in the exported data. Default is ','

    printHeader Boolean

    Whether to print out a header row in the results. Default is true.

    sourceModel Property Map

    A reference to the model being exported. Structure is documented below.

    sourceTable Property Map

    A reference to the table being exported. Structure is documented below.

    useAvroLogicalTypes Boolean

    Whether to use logical types when extracting to AVRO format.
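
    A minimal Python sketch of an extract job that exports a table to Cloud Storage as gzip-compressed newline-delimited JSON; the bucket and table names are placeholders:

    extract_job = gcp.bigquery.Job("extract-job",
        job_id="job_extract_sketch",
        extract=gcp.bigquery.JobExtractArgs(
            source_table=gcp.bigquery.JobExtractSourceTableArgs(
                project_id="my-project",
                dataset_id="example_dataset",
                table_id="source_table",
            ),
            destination_uris=["gs://my-bucket/exports/source_table-*.json.gz"],
            destination_format="NEWLINE_DELIMITED_JSON",
            compression="GZIP",
        ),
    )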

    JobExtractSourceModel, JobExtractSourceModelArgs

    DatasetId string

    The ID of the dataset containing this model.

    ModelId string

    The ID of the model.


    ProjectId string

    The ID of the project containing this model.

    DatasetId string

    The ID of the dataset containing this model.

    ModelId string

    The ID of the model.


    ProjectId string

    The ID of the project containing this model.

    datasetId String

    The ID of the dataset containing this model.

    modelId String

    The ID of the model.


    projectId String

    The ID of the project containing this model.

    datasetId string

    The ID of the dataset containing this model.

    modelId string

    The ID of the model.


    projectId string

    The ID of the project containing this model.

    dataset_id str

    The ID of the dataset containing this model.

    model_id str

    The ID of the model.


    project_id str

    The ID of the project containing this model.

    datasetId String

    The ID of the dataset containing this model.

    modelId String

    The ID of the model.


    projectId String

    The ID of the project containing this model.
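
    To export a BigQuery ML model rather than a table, set source_model instead of source_table; a hedged Python sketch with placeholder names (SAVED_MODEL is the documented default format for models):

    model_extract = gcp.bigquery.JobExtractArgs(
        source_model=gcp.bigquery.JobExtractSourceModelArgs(
            project_id="my-project",
            dataset_id="example_dataset",
            model_id="my_model",
        ),
        destination_uris=["gs://my-bucket/models/my_model/"],
    )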

    JobExtractSourceTable, JobExtractSourceTableArgs

    TableId string

    The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or in the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.

    DatasetId string

    The ID of the dataset containing this table.

    ProjectId string

    The ID of the project containing this table.

    TableId string

    The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or in the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.

    DatasetId string

    The ID of the dataset containing this table.

    ProjectId string

    The ID of the project containing this table.

    tableId String

    The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or in the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.

    datasetId String

    The ID of the dataset containing this table.

    projectId String

    The ID of the project containing this table.

    tableId string

    The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or in the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.

    datasetId string

    The ID of the dataset containing this table.

    projectId string

    The ID of the project containing this table.

    table_id str

    The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or in the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.

    dataset_id str

    The ID of the dataset containing this table.

    project_id str

    The ID of the project containing this table.

    tableId String

    The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or in the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.

    datasetId String

    The ID of the dataset containing this table.

    projectId String

    The ID of the project containing this table.

    JobLoad, JobLoadArgs

    DestinationTable JobLoadDestinationTable

    The destination table to load the data into. Structure is documented below.

    SourceUris List<string>

    The fully-qualified URIs that point to your data in Google Cloud. For Google Cloud Storage URIs: Each URI can contain one '*' wildcard character and it must come after the 'bucket' name. Size limits related to load jobs apply to external data sources. For Google Cloud Bigtable URIs: Exactly one URI can be specified and it has to be a fully specified and valid HTTPS URL for a Google Cloud Bigtable table. For Google Cloud Datastore backups: Exactly one URI can be specified. Also, the '*' wildcard character is not allowed.

    AllowJaggedRows bool

    Accept rows that are missing trailing optional columns. The missing values are treated as nulls. If false, records with missing trailing columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. Only applicable to CSV, ignored for other formats.

    AllowQuotedNewlines bool

    Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false.

    Autodetect bool

    Indicates if we should automatically infer the options and schema for CSV and JSON sources.

    CreateDisposition string

    Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is CREATE_IF_NEEDED. Possible values are: CREATE_IF_NEEDED, CREATE_NEVER.

    DestinationEncryptionConfiguration JobLoadDestinationEncryptionConfiguration

    Custom encryption configuration (e.g., Cloud KMS keys) Structure is documented below.

    Encoding string

    The character encoding of the data. The supported values are UTF-8 or ISO-8859-1. The default value is UTF-8. BigQuery decodes the data after the raw, binary data has been split using the values of the quote and fieldDelimiter properties.

    FieldDelimiter string

    The separator for fields in a CSV file. The separator can be any ISO-8859-1 single-byte character. To use a character in the range 128-255, you must encode the character as UTF8. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. BigQuery also supports the escape sequence "\t" to specify a tab separator. The default value is a comma (',').

    IgnoreUnknownValues bool

    Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value: CSV: Trailing columns JSON: Named values that don't match any column names

    JsonExtension string

    If sourceFormat is set to newline-delimited JSON, indicates whether it should be processed as a JSON variant such as GeoJSON. For a sourceFormat other than JSON, omit this field. If the sourceFormat is newline-delimited JSON: - for newline-delimited GeoJSON: set to GEOJSON.

    MaxBadRecords int

    The maximum number of bad records that BigQuery can ignore when running the job. If the number of bad records exceeds this value, an invalid error is returned in the job result. The default value is 0, which requires that all records are valid.

    NullMarker string

    Specifies a string that represents a null value in a CSV file. The default value is the empty string. If you set this property to a custom value, BigQuery throws an error if an empty string is present for all data types except for STRING and BYTE. For STRING and BYTE columns, BigQuery interprets the empty string as an empty value.

    ParquetOptions JobLoadParquetOptions

    Parquet Options for load and make external tables. Structure is documented below.

    ProjectionFields List<string>

    If sourceFormat is set to "DATASTORE_BACKUP", indicates which entity properties to load into BigQuery from a Cloud Datastore backup. Property names are case sensitive and must be top-level properties. If no properties are specified, BigQuery loads all properties. If any named property isn't found in the Cloud Datastore backup, an invalid error is returned in the job result.

    Quote string

    The value that is used to quote data sections in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. The default value is a double-quote ('"'). If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set the allowQuotedNewlines property to true.

    SchemaUpdateOptions List<string>

    Allows the schema of the destination table to be updated as a side effect of the load job if a schema is autodetected or supplied in the job configuration. Schema update options are supported in two cases: when writeDisposition is WRITE_APPEND; when writeDisposition is WRITE_TRUNCATE and the destination table is a partition of a table, specified by partition decorators. For normal tables, WRITE_TRUNCATE will always overwrite the schema. One or more of the following values are specified: ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema. ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original schema to nullable.

    SkipLeadingRows int

    The number of rows at the top of a CSV file that BigQuery will skip when loading the data. The default value is 0. This property is useful if you have header rows in the file that should be skipped. When autodetect is on, the behavior is the following: skipLeadingRows unspecified - Autodetect tries to detect headers in the first row. If they are not detected, the row is read as data. Otherwise data is read starting from the second row. skipLeadingRows is 0 - Instructs autodetect that there are no headers and data should be read starting from the first row. skipLeadingRows = N > 0 - Autodetect skips N-1 rows and tries to detect headers in row N. If headers are not detected, row N is just skipped. Otherwise row N is used to extract column names for the detected schema.

    SourceFormat string

    The format of the data files. For CSV files, specify "CSV". For datastore backups, specify "DATASTORE_BACKUP". For newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON". For Avro, specify "AVRO". For parquet, specify "PARQUET". For orc, specify "ORC". [Beta] For Bigtable, specify "BIGTABLE". The default value is CSV.

    TimePartitioning JobLoadTimePartitioning

    Time-based partitioning specification for the destination table. Structure is documented below.

    WriteDisposition string

    Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is WRITE_EMPTY. Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY.

    DestinationTable JobLoadDestinationTable

    The destination table to load the data into. Structure is documented below.

    SourceUris []string

    The fully-qualified URIs that point to your data in Google Cloud. For Google Cloud Storage URIs: Each URI can contain one '*' wildcard character and it must come after the 'bucket' name. Size limits related to load jobs apply to external data sources. For Google Cloud Bigtable URIs: Exactly one URI can be specified and it has to be a fully specified and valid HTTPS URL for a Google Cloud Bigtable table. For Google Cloud Datastore backups: Exactly one URI can be specified. Also, the '*' wildcard character is not allowed.

    AllowJaggedRows bool

    Accept rows that are missing trailing optional columns. The missing values are treated as nulls. If false, records with missing trailing columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. Only applicable to CSV, ignored for other formats.

    AllowQuotedNewlines bool

    Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false.

    Autodetect bool

    Indicates if we should automatically infer the options and schema for CSV and JSON sources.

    CreateDisposition string

    Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is CREATE_IF_NEEDED. Possible values are: CREATE_IF_NEEDED, CREATE_NEVER.

    DestinationEncryptionConfiguration JobLoadDestinationEncryptionConfiguration

    Custom encryption configuration (e.g., Cloud KMS keys). Structure is documented below.

    Encoding string

    The character encoding of the data. The supported values are UTF-8 or ISO-8859-1. The default value is UTF-8. BigQuery decodes the data after the raw, binary data has been split using the values of the quote and fieldDelimiter properties.

    FieldDelimiter string

    The separator for fields in a CSV file. The separator can be any ISO-8859-1 single-byte character. To use a character in the range 128-255, you must encode the character as UTF8. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. BigQuery also supports the escape sequence "\t" to specify a tab separator. The default value is a comma (',').

    IgnoreUnknownValues bool

    Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value: CSV: Trailing columns JSON: Named values that don't match any column names

    JsonExtension string

    If sourceFormat is set to newline-delimited JSON, indicates whether it should be processed as a JSON variant such as GeoJSON. For a sourceFormat other than JSON, omit this field. If the sourceFormat is newline-delimited JSON: - for newline-delimited GeoJSON: set to GEOJSON.

    MaxBadRecords int

    The maximum number of bad records that BigQuery can ignore when running the job. If the number of bad records exceeds this value, an invalid error is returned in the job result. The default value is 0, which requires that all records are valid.

    NullMarker string

    Specifies a string that represents a null value in a CSV file. The default value is the empty string. If you set this property to a custom value, BigQuery throws an error if an empty string is present for all data types except for STRING and BYTE. For STRING and BYTE columns, BigQuery interprets the empty string as an empty value.

    ParquetOptions JobLoadParquetOptions

    Parquet options for load jobs and external table creation. Structure is documented below.

    ProjectionFields []string

    If sourceFormat is set to "DATASTORE_BACKUP", indicates which entity properties to load into BigQuery from a Cloud Datastore backup. Property names are case sensitive and must be top-level properties. If no properties are specified, BigQuery loads all properties. If any named property isn't found in the Cloud Datastore backup, an invalid error is returned in the job result.

    Quote string

    The value that is used to quote data sections in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. The default value is a double-quote ('"'). If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set the allowQuotedNewlines property to true.

    SchemaUpdateOptions []string

    Allows the schema of the destination table to be updated as a side effect of the load job if a schema is autodetected or supplied in the job configuration. Schema update options are supported in two cases: when writeDisposition is WRITE_APPEND; when writeDisposition is WRITE_TRUNCATE and the destination table is a partition of a table, specified by partition decorators. For normal tables, WRITE_TRUNCATE will always overwrite the schema. One or more of the following values are specified: ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema. ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original schema to nullable.

    SkipLeadingRows int

    The number of rows at the top of a CSV file that BigQuery will skip when loading the data. The default value is 0. This property is useful if you have header rows in the file that should be skipped. When autodetect is on, the behavior is the following: skipLeadingRows unspecified - Autodetect tries to detect headers in the first row. If they are not detected, the row is read as data. Otherwise data is read starting from the second row. skipLeadingRows is 0 - Instructs autodetect that there are no headers and data should be read starting from the first row. skipLeadingRows = N > 0 - Autodetect skips N-1 rows and tries to detect headers in row N. If headers are not detected, row N is just skipped. Otherwise row N is used to extract column names for the detected schema.

    SourceFormat string

    The format of the data files. For CSV files, specify "CSV". For datastore backups, specify "DATASTORE_BACKUP". For newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON". For Avro, specify "AVRO". For parquet, specify "PARQUET". For orc, specify "ORC". [Beta] For Bigtable, specify "BIGTABLE". The default value is CSV.

    TimePartitioning JobLoadTimePartitioning

    Time-based partitioning specification for the destination table. Structure is documented below.

    WriteDisposition string

    Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is WRITE_EMPTY. Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY.

    destinationTable JobLoadDestinationTable

    The destination table to load the data into. Structure is documented below.

    sourceUris List<String>

    The fully-qualified URIs that point to your data in Google Cloud. For Google Cloud Storage URIs: Each URI can contain one '*' wildcard character and it must come after the 'bucket' name. Size limits related to load jobs apply to external data sources. For Google Cloud Bigtable URIs: Exactly one URI can be specified and it has to be a fully specified and valid HTTPS URL for a Google Cloud Bigtable table. For Google Cloud Datastore backups: Exactly one URI can be specified. Also, the '*' wildcard character is not allowed.

    allowJaggedRows Boolean

    Accept rows that are missing trailing optional columns. The missing values are treated as nulls. If false, records with missing trailing columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. Only applicable to CSV, ignored for other formats.

    allowQuotedNewlines Boolean

    Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false.

    autodetect Boolean

    Indicates if we should automatically infer the options and schema for CSV and JSON sources.

    createDisposition String

    Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is CREATE_IF_NEEDED. Possible values are: CREATE_IF_NEEDED, CREATE_NEVER.

    destinationEncryptionConfiguration JobLoadDestinationEncryptionConfiguration

    Custom encryption configuration (e.g., Cloud KMS keys). Structure is documented below.

    encoding String

    The character encoding of the data. The supported values are UTF-8 or ISO-8859-1. The default value is UTF-8. BigQuery decodes the data after the raw, binary data has been split using the values of the quote and fieldDelimiter properties.

    fieldDelimiter String

    The separator for fields in a CSV file. The separator can be any ISO-8859-1 single-byte character. To use a character in the range 128-255, you must encode the character as UTF8. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. BigQuery also supports the escape sequence "\t" to specify a tab separator. The default value is a comma (',').

    ignoreUnknownValues Boolean

    Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value: CSV: Trailing columns JSON: Named values that don't match any column names

    jsonExtension String

    If sourceFormat is set to newline-delimited JSON, indicates whether it should be processed as a JSON variant such as GeoJSON. For a sourceFormat other than JSON, omit this field. If the sourceFormat is newline-delimited JSON: - for newline-delimited GeoJSON: set to GEOJSON.

    maxBadRecords Integer

    The maximum number of bad records that BigQuery can ignore when running the job. If the number of bad records exceeds this value, an invalid error is returned in the job result. The default value is 0, which requires that all records are valid.

    nullMarker String

    Specifies a string that represents a null value in a CSV file. The default value is the empty string. If you set this property to a custom value, BigQuery throws an error if an empty string is present for all data types except for STRING and BYTE. For STRING and BYTE columns, BigQuery interprets the empty string as an empty value.

    parquetOptions JobLoadParquetOptions

    Parquet options for load jobs and external table creation. Structure is documented below.

    projectionFields List<String>

    If sourceFormat is set to "DATASTORE_BACKUP", indicates which entity properties to load into BigQuery from a Cloud Datastore backup. Property names are case sensitive and must be top-level properties. If no properties are specified, BigQuery loads all properties. If any named property isn't found in the Cloud Datastore backup, an invalid error is returned in the job result.

    quote String

    The value that is used to quote data sections in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. The default value is a double-quote ('"'). If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set the allowQuotedNewlines property to true.

    schemaUpdateOptions List<String>

    Allows the schema of the destination table to be updated as a side effect of the load job if a schema is autodetected or supplied in the job configuration. Schema update options are supported in two cases: when writeDisposition is WRITE_APPEND; when writeDisposition is WRITE_TRUNCATE and the destination table is a partition of a table, specified by partition decorators. For normal tables, WRITE_TRUNCATE will always overwrite the schema. One or more of the following values are specified: ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema. ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original schema to nullable.

    skipLeadingRows Integer

    The number of rows at the top of a CSV file that BigQuery will skip when loading the data. The default value is 0. This property is useful if you have header rows in the file that should be skipped. When autodetect is on, the behavior is the following: skipLeadingRows unspecified - Autodetect tries to detect headers in the first row. If they are not detected, the row is read as data. Otherwise data is read starting from the second row. skipLeadingRows is 0 - Instructs autodetect that there are no headers and data should be read starting from the first row. skipLeadingRows = N > 0 - Autodetect skips N-1 rows and tries to detect headers in row N. If headers are not detected, row N is just skipped. Otherwise row N is used to extract column names for the detected schema.

    sourceFormat String

    The format of the data files. For CSV files, specify "CSV". For datastore backups, specify "DATASTORE_BACKUP". For newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON". For Avro, specify "AVRO". For parquet, specify "PARQUET". For orc, specify "ORC". [Beta] For Bigtable, specify "BIGTABLE". The default value is CSV.

    timePartitioning JobLoadTimePartitioning

    Time-based partitioning specification for the destination table. Structure is documented below.

    writeDisposition String

    Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is WRITE_EMPTY. Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY.

    destinationTable JobLoadDestinationTable

    The destination table to load the data into. Structure is documented below.

    sourceUris string[]

    The fully-qualified URIs that point to your data in Google Cloud. For Google Cloud Storage URIs: Each URI can contain one '*' wildcard character and it must come after the 'bucket' name. Size limits related to load jobs apply to external data sources. For Google Cloud Bigtable URIs: Exactly one URI can be specified and it has to be a fully specified and valid HTTPS URL for a Google Cloud Bigtable table. For Google Cloud Datastore backups: Exactly one URI can be specified. Also, the '*' wildcard character is not allowed.

    allowJaggedRows boolean

    Accept rows that are missing trailing optional columns. The missing values are treated as nulls. If false, records with missing trailing columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. Only applicable to CSV, ignored for other formats.

    allowQuotedNewlines boolean

    Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false.

    autodetect boolean

    Indicates if we should automatically infer the options and schema for CSV and JSON sources.

    createDisposition string

    Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is CREATE_IF_NEEDED. Possible values are: CREATE_IF_NEEDED, CREATE_NEVER.

    destinationEncryptionConfiguration JobLoadDestinationEncryptionConfiguration

    Custom encryption configuration (e.g., Cloud KMS keys). Structure is documented below.

    encoding string

    The character encoding of the data. The supported values are UTF-8 or ISO-8859-1. The default value is UTF-8. BigQuery decodes the data after the raw, binary data has been split using the values of the quote and fieldDelimiter properties.

    fieldDelimiter string

    The separator for fields in a CSV file. The separator can be any ISO-8859-1 single-byte character. To use a character in the range 128-255, you must encode the character as UTF8. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. BigQuery also supports the escape sequence "\t" to specify a tab separator. The default value is a comma (',').

    ignoreUnknownValues boolean

    Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value: CSV: Trailing columns JSON: Named values that don't match any column names

    jsonExtension string

    If sourceFormat is set to newline-delimited JSON, indicates whether it should be processed as a JSON variant such as GeoJSON. For a sourceFormat other than JSON, omit this field. If the sourceFormat is newline-delimited JSON: - for newline-delimited GeoJSON: set to GEOJSON.

    maxBadRecords number

    The maximum number of bad records that BigQuery can ignore when running the job. If the number of bad records exceeds this value, an invalid error is returned in the job result. The default value is 0, which requires that all records are valid.

    nullMarker string

    Specifies a string that represents a null value in a CSV file. The default value is the empty string. If you set this property to a custom value, BigQuery throws an error if an empty string is present for all data types except for STRING and BYTE. For STRING and BYTE columns, BigQuery interprets the empty string as an empty value.

    parquetOptions JobLoadParquetOptions

    Parquet options for load jobs and external table creation. Structure is documented below.

    projectionFields string[]

    If sourceFormat is set to "DATASTORE_BACKUP", indicates which entity properties to load into BigQuery from a Cloud Datastore backup. Property names are case sensitive and must be top-level properties. If no properties are specified, BigQuery loads all properties. If any named property isn't found in the Cloud Datastore backup, an invalid error is returned in the job result.

    quote string

    The value that is used to quote data sections in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. The default value is a double-quote ('"'). If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set the allowQuotedNewlines property to true.

    schemaUpdateOptions string[]

    Allows the schema of the destination table to be updated as a side effect of the load job if a schema is autodetected or supplied in the job configuration. Schema update options are supported in two cases: when writeDisposition is WRITE_APPEND; when writeDisposition is WRITE_TRUNCATE and the destination table is a partition of a table, specified by partition decorators. For normal tables, WRITE_TRUNCATE will always overwrite the schema. One or more of the following values are specified: ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema. ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original schema to nullable.

    skipLeadingRows number

    The number of rows at the top of a CSV file that BigQuery will skip when loading the data. The default value is 0. This property is useful if you have header rows in the file that should be skipped. When autodetect is on, the behavior is the following: skipLeadingRows unspecified - Autodetect tries to detect headers in the first row. If they are not detected, the row is read as data. Otherwise data is read starting from the second row. skipLeadingRows is 0 - Instructs autodetect that there are no headers and data should be read starting from the first row. skipLeadingRows = N > 0 - Autodetect skips N-1 rows and tries to detect headers in row N. If headers are not detected, row N is just skipped. Otherwise row N is used to extract column names for the detected schema.

    sourceFormat string

    The format of the data files. For CSV files, specify "CSV". For datastore backups, specify "DATASTORE_BACKUP". For newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON". For Avro, specify "AVRO". For parquet, specify "PARQUET". For orc, specify "ORC". [Beta] For Bigtable, specify "BIGTABLE". The default value is CSV.

    timePartitioning JobLoadTimePartitioning

    Time-based partitioning specification for the destination table. Structure is documented below.

    writeDisposition string

    Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is WRITE_EMPTY. Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY.

    destination_table JobLoadDestinationTable

    The destination table to load the data into. Structure is documented below.

    source_uris Sequence[str]

    The fully-qualified URIs that point to your data in Google Cloud. For Google Cloud Storage URIs: Each URI can contain one '*' wildcard character and it must come after the 'bucket' name. Size limits related to load jobs apply to external data sources. For Google Cloud Bigtable URIs: Exactly one URI can be specified and it has to be a fully specified and valid HTTPS URL for a Google Cloud Bigtable table. For Google Cloud Datastore backups: Exactly one URI can be specified. Also, the '*' wildcard character is not allowed.

    allow_jagged_rows bool

    Accept rows that are missing trailing optional columns. The missing values are treated as nulls. If false, records with missing trailing columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. Only applicable to CSV, ignored for other formats.

    allow_quoted_newlines bool

    Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false.

    autodetect bool

    Indicates if we should automatically infer the options and schema for CSV and JSON sources.

    create_disposition str

    Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is CREATE_IF_NEEDED. Possible values are: CREATE_IF_NEEDED, CREATE_NEVER.

    destination_encryption_configuration JobLoadDestinationEncryptionConfiguration

    Custom encryption configuration (e.g., Cloud KMS keys). Structure is documented below.

    encoding str

    The character encoding of the data. The supported values are UTF-8 or ISO-8859-1. The default value is UTF-8. BigQuery decodes the data after the raw, binary data has been split using the values of the quote and fieldDelimiter properties.

    field_delimiter str

    The separator for fields in a CSV file. The separator can be any ISO-8859-1 single-byte character. To use a character in the range 128-255, you must encode the character as UTF8. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. BigQuery also supports the escape sequence "\t" to specify a tab separator. The default value is a comma (',').

    ignore_unknown_values bool

    Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value: CSV: Trailing columns JSON: Named values that don't match any column names

    json_extension str

    If sourceFormat is set to newline-delimited JSON, indicates whether it should be processed as a JSON variant such as GeoJSON. For a sourceFormat other than JSON, omit this field. If the sourceFormat is newline-delimited JSON: - for newline-delimited GeoJSON: set to GEOJSON.

    max_bad_records int

    The maximum number of bad records that BigQuery can ignore when running the job. If the number of bad records exceeds this value, an invalid error is returned in the job result. The default value is 0, which requires that all records are valid.

    null_marker str

    Specifies a string that represents a null value in a CSV file. The default value is the empty string. If you set this property to a custom value, BigQuery throws an error if an empty string is present for all data types except for STRING and BYTE. For STRING and BYTE columns, BigQuery interprets the empty string as an empty value.

    parquet_options JobLoadParquetOptions

    Parquet options for load jobs and external table creation. Structure is documented below.

    projection_fields Sequence[str]

    If sourceFormat is set to "DATASTORE_BACKUP", indicates which entity properties to load into BigQuery from a Cloud Datastore backup. Property names are case sensitive and must be top-level properties. If no properties are specified, BigQuery loads all properties. If any named property isn't found in the Cloud Datastore backup, an invalid error is returned in the job result.

    quote str

    The value that is used to quote data sections in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. The default value is a double-quote ('"'). If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set the allowQuotedNewlines property to true.

    schema_update_options Sequence[str]

    Allows the schema of the destination table to be updated as a side effect of the load job if a schema is autodetected or supplied in the job configuration. Schema update options are supported in two cases: when writeDisposition is WRITE_APPEND; when writeDisposition is WRITE_TRUNCATE and the destination table is a partition of a table, specified by partition decorators. For normal tables, WRITE_TRUNCATE will always overwrite the schema. One or more of the following values are specified: ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema. ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original schema to nullable.

    skip_leading_rows int

    The number of rows at the top of a CSV file that BigQuery will skip when loading the data. The default value is 0. This property is useful if you have header rows in the file that should be skipped. When autodetect is on, the behavior is the following: skipLeadingRows unspecified - Autodetect tries to detect headers in the first row. If they are not detected, the row is read as data. Otherwise data is read starting from the second row. skipLeadingRows is 0 - Instructs autodetect that there are no headers and data should be read starting from the first row. skipLeadingRows = N > 0 - Autodetect skips N-1 rows and tries to detect headers in row N. If headers are not detected, row N is just skipped. Otherwise row N is used to extract column names for the detected schema.

    source_format str

    The format of the data files. For CSV files, specify "CSV". For datastore backups, specify "DATASTORE_BACKUP". For newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON". For Avro, specify "AVRO". For parquet, specify "PARQUET". For orc, specify "ORC". [Beta] For Bigtable, specify "BIGTABLE". The default value is CSV.

    time_partitioning JobLoadTimePartitioning

    Time-based partitioning specification for the destination table. Structure is documented below.

    write_disposition str

    Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is WRITE_EMPTY. Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY.

    destinationTable Property Map

    The destination table to load the data into. Structure is documented below.

    sourceUris List<String>

    The fully-qualified URIs that point to your data in Google Cloud. For Google Cloud Storage URIs: Each URI can contain one '*' wildcard character and it must come after the 'bucket' name. Size limits related to load jobs apply to external data sources. For Google Cloud Bigtable URIs: Exactly one URI can be specified and it has to be a fully specified and valid HTTPS URL for a Google Cloud Bigtable table. For Google Cloud Datastore backups: Exactly one URI can be specified. Also, the '*' wildcard character is not allowed.

    allowJaggedRows Boolean

    Accept rows that are missing trailing optional columns. The missing values are treated as nulls. If false, records with missing trailing columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. Only applicable to CSV, ignored for other formats.

    allowQuotedNewlines Boolean

    Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false.

    autodetect Boolean

    Indicates if we should automatically infer the options and schema for CSV and JSON sources.

    createDisposition String

    Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is CREATE_IF_NEEDED. Possible values are: CREATE_IF_NEEDED, CREATE_NEVER.

    destinationEncryptionConfiguration Property Map

    Custom encryption configuration (e.g., Cloud KMS keys). Structure is documented below.

    encoding String

    The character encoding of the data. The supported values are UTF-8 or ISO-8859-1. The default value is UTF-8. BigQuery decodes the data after the raw, binary data has been split using the values of the quote and fieldDelimiter properties.

    fieldDelimiter String

    The separator for fields in a CSV file. The separator can be any ISO-8859-1 single-byte character. To use a character in the range 128-255, you must encode the character as UTF8. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. BigQuery also supports the escape sequence "\t" to specify a tab separator. The default value is a comma (',').

    ignoreUnknownValues Boolean

    Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value: CSV: Trailing columns JSON: Named values that don't match any column names

    jsonExtension String

    If sourceFormat is set to newline-delimited JSON, indicates whether it should be processed as a JSON variant such as GeoJSON. For a sourceFormat other than JSON, omit this field. If the sourceFormat is newline-delimited JSON: - for newline-delimited GeoJSON: set to GEOJSON.

    maxBadRecords Number

    The maximum number of bad records that BigQuery can ignore when running the job. If the number of bad records exceeds this value, an invalid error is returned in the job result. The default value is 0, which requires that all records are valid.

    nullMarker String

    Specifies a string that represents a null value in a CSV file. The default value is the empty string. If you set this property to a custom value, BigQuery throws an error if an empty string is present for all data types except for STRING and BYTE. For STRING and BYTE columns, BigQuery interprets the empty string as an empty value.

    parquetOptions Property Map

    Parquet options for load jobs and external table creation. Structure is documented below.

    projectionFields List<String>

    If sourceFormat is set to "DATASTORE_BACKUP", indicates which entity properties to load into BigQuery from a Cloud Datastore backup. Property names are case sensitive and must be top-level properties. If no properties are specified, BigQuery loads all properties. If any named property isn't found in the Cloud Datastore backup, an invalid error is returned in the job result.

    quote String

    The value that is used to quote data sections in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. The default value is a double-quote ('"'). If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set the allowQuotedNewlines property to true.

    schemaUpdateOptions List<String>

    Allows the schema of the destination table to be updated as a side effect of the load job if a schema is autodetected or supplied in the job configuration. Schema update options are supported in two cases: when writeDisposition is WRITE_APPEND; when writeDisposition is WRITE_TRUNCATE and the destination table is a partition of a table, specified by partition decorators. For normal tables, WRITE_TRUNCATE will always overwrite the schema. One or more of the following values are specified: ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema. ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original schema to nullable.

    skipLeadingRows Number

    The number of rows at the top of a CSV file that BigQuery will skip when loading the data. The default value is 0. This property is useful if you have header rows in the file that should be skipped. When autodetect is on, the behavior is the following: skipLeadingRows unspecified - Autodetect tries to detect headers in the first row. If they are not detected, the row is read as data. Otherwise data is read starting from the second row. skipLeadingRows is 0 - Instructs autodetect that there are no headers and data should be read starting from the first row. skipLeadingRows = N > 0 - Autodetect skips N-1 rows and tries to detect headers in row N. If headers are not detected, row N is just skipped. Otherwise row N is used to extract column names for the detected schema.

    sourceFormat String

    The format of the data files. For CSV files, specify "CSV". For datastore backups, specify "DATASTORE_BACKUP". For newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON". For Avro, specify "AVRO". For parquet, specify "PARQUET". For orc, specify "ORC". [Beta] For Bigtable, specify "BIGTABLE". The default value is CSV.

    timePartitioning Property Map

    Time-based partitioning specification for the destination table. Structure is documented below.

    writeDisposition String

    Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is WRITE_EMPTY. Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY.
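
    The listing above maps one-to-one onto the load block of the Job resource. As a rough illustration, the following Python sketch wires a handful of these properties together for a CSV load; the bucket, project, dataset, and table names are placeholders rather than values taken from this page.

    import pulumi_gcp as gcp

    # Minimal CSV load sketch; all resource names below are placeholders.
    load_job = gcp.bigquery.Job("load-job",
        job_id="job_load_csv",
        load=gcp.bigquery.JobLoadArgs(
            # One '*' wildcard is allowed, and it must come after the bucket name.
            source_uris=["gs://example-bucket/data/*.csv"],
            destination_table=gcp.bigquery.JobLoadDestinationTableArgs(
                project_id="example-project",
                dataset_id="example_dataset",
                table_id="example_table",
            ),
            source_format="CSV",               # the default, shown for clarity
            skip_leading_rows=1,               # skip the header row
            autodetect=True,                   # infer the schema from the CSV
            write_disposition="WRITE_APPEND",  # append to the table if it exists
        ))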

    JobLoadDestinationEncryptionConfiguration, JobLoadDestinationEncryptionConfigurationArgs

    KmsKeyName string

    Describes the Cloud KMS encryption key that will be used to protect the destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.

    KmsKeyVersion string

    (Output) Describes the Cloud KMS encryption key version used to protect the destination BigQuery table.

    KmsKeyName string

    Describes the Cloud KMS encryption key that will be used to protect the destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.

    KmsKeyVersion string

    (Output) Describes the Cloud KMS encryption key version used to protect the destination BigQuery table.

    kmsKeyName String

    Describes the Cloud KMS encryption key that will be used to protect the destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.

    kmsKeyVersion String

    (Output) Describes the Cloud KMS encryption key version used to protect the destination BigQuery table.

    kmsKeyName string

    Describes the Cloud KMS encryption key that will be used to protect the destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.

    kmsKeyVersion string

    (Output) Describes the Cloud KMS encryption key version used to protect the destination BigQuery table.

    kms_key_name str

    Describes the Cloud KMS encryption key that will be used to protect the destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.

    kms_key_version str

    (Output) Describes the Cloud KMS encryption key version used to protect the destination BigQuery table.

    kmsKeyName String

    Describes the Cloud KMS encryption key that will be used to protect the destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.

    kmsKeyVersion String

    (Output) Describes the Cloud KMS encryption key version used to protect the destination BigQuery table.
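
    To protect the load destination with a customer-managed key, the encryption configuration above can be attached to the load block. The sketch below assumes a pre-existing Cloud KMS key (the key name is a placeholder) and that the BigQuery service account already has access to it.

    import pulumi_gcp as gcp

    # Sketch only: the KMS key name is a placeholder, and the BigQuery service
    # account must already be granted encrypt/decrypt access to that key.
    encrypted_load = gcp.bigquery.Job("encrypted-load",
        job_id="job_load_encrypted",
        load=gcp.bigquery.JobLoadArgs(
            source_uris=["gs://example-bucket/data/*.csv"],
            destination_table=gcp.bigquery.JobLoadDestinationTableArgs(
                project_id="example-project",
                dataset_id="example_dataset",
                table_id="encrypted_table",
            ),
            destination_encryption_configuration=gcp.bigquery.JobLoadDestinationEncryptionConfigurationArgs(
                kms_key_name="projects/example-project/locations/us/keyRings/example-ring/cryptoKeys/example-key",
            ),
            source_format="CSV",
            skip_leading_rows=1,
        ))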

    JobLoadDestinationTable, JobLoadDestinationTableArgs

    TableId string

    The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.

    DatasetId string

    The ID of the dataset containing this table.

    ProjectId string

    The ID of the project containing this table.

    TableId string

    The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.

    DatasetId string

    The ID of the dataset containing this table.

    ProjectId string

    The ID of the project containing this table.

    tableId String

    The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.

    datasetId String

    The ID of the dataset containing this table.

    projectId String

    The ID of the project containing this table.

    tableId string

    The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.

    datasetId string

    The ID of the dataset containing this table.

    projectId string

    The ID of the project containing this table.

    table_id str

    The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.

    dataset_id str

    The ID of the dataset containing this table.

    project_id str

    The ID of the project containing this table.

    tableId String

    The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.

    datasetId String

    The ID of the dataset containing this table.

    projectId String

    The ID of the project containing this table.
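
    As noted above, the destination table can be referenced either by its bare table ID alongside dataset_id and project_id, or by a single fully-qualified resource path. Both forms in this sketch point at the same hypothetical table.

    import pulumi_gcp as gcp

    # Split form: bare table ID plus explicit project and dataset.
    split_form = gcp.bigquery.JobLoadDestinationTableArgs(
        project_id="example-project",
        dataset_id="example_dataset",
        table_id="example_table",
    )

    # Path form: a single fully-qualified resource name.
    path_form = gcp.bigquery.JobLoadDestinationTableArgs(
        table_id="projects/example-project/datasets/example_dataset/tables/example_table",
    )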

    JobLoadParquetOptions, JobLoadParquetOptionsArgs

    EnableListInference bool

    If sourceFormat is set to PARQUET, indicates whether to use schema inference specifically for Parquet LIST logical type.

    EnumAsString bool

    If sourceFormat is set to PARQUET, indicates whether to infer Parquet ENUM logical type as STRING instead of BYTES by default.

    EnableListInference bool

    If sourceFormat is set to PARQUET, indicates whether to use schema inference specifically for Parquet LIST logical type.

    EnumAsString bool

    If sourceFormat is set to PARQUET, indicates whether to infer Parquet ENUM logical type as STRING instead of BYTES by default.

    enableListInference Boolean

    If sourceFormat is set to PARQUET, indicates whether to use schema inference specifically for Parquet LIST logical type.

    enumAsString Boolean

    If sourceFormat is set to PARQUET, indicates whether to infer Parquet ENUM logical type as STRING instead of BYTES by default.

    enableListInference boolean

    If sourceFormat is set to PARQUET, indicates whether to use schema inference specifically for Parquet LIST logical type.

    enumAsString boolean

    If sourceFormat is set to PARQUET, indicates whether to infer Parquet ENUM logical type as STRING instead of BYTES by default.

    enable_list_inference bool

    If sourceFormat is set to PARQUET, indicates whether to use schema inference specifically for Parquet LIST logical type.

    enum_as_string bool

    If sourceFormat is set to PARQUET, indicates whether to infer Parquet ENUM logical type as STRING instead of BYTES by default.

    enableListInference Boolean

    If sourceFormat is set to PARQUET, indicates whether to use schema inference specifically for Parquet LIST logical type.

    enumAsString Boolean

    If sourceFormat is set to PARQUET, indicates whether to infer Parquet ENUM logical type as STRING instead of BYTES by default.
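
    These options only take effect when sourceFormat is PARQUET. As a rough sketch, a Parquet load might combine both flags as follows; the storage path and table names are placeholders.

    import pulumi_gcp as gcp

    # Parquet load sketch; parquet_options is ignored for non-PARQUET formats.
    parquet_load = gcp.bigquery.Job("parquet-load",
        job_id="job_load_parquet",
        load=gcp.bigquery.JobLoadArgs(
            source_uris=["gs://example-bucket/data/*.parquet"],
            destination_table=gcp.bigquery.JobLoadDestinationTableArgs(
                project_id="example-project",
                dataset_id="example_dataset",
                table_id="parquet_table",
            ),
            source_format="PARQUET",
            parquet_options=gcp.bigquery.JobLoadParquetOptionsArgs(
                enum_as_string=True,         # load Parquet ENUM as STRING instead of BYTES
                enable_list_inference=True,  # use schema inference for the LIST logical type
            ),
        ))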

    JobLoadTimePartitioning, JobLoadTimePartitioningArgs

    Type string

    The only type supported is DAY, which will generate one partition per day. Providing an empty string used to cause an error, but in OnePlatform the field will be treated as unset.

    ExpirationMs string

    Number of milliseconds for which to keep the storage for a partition. A wrapper is used here because 0 is an invalid value.

    Field string

    If not set, the table is partitioned by pseudo column '_PARTITIONTIME'; if set, the table is partitioned by this field. The field must be a top-level TIMESTAMP or DATE field. Its mode must be NULLABLE or REQUIRED. A wrapper is used here because an empty string is an invalid value.

    Type string

    The only type supported is DAY, which will generate one partition per day. Providing an empty string used to cause an error, but in OnePlatform the field will be treated as unset.

    ExpirationMs string

    Number of milliseconds for which to keep the storage for a partition. A wrapper is used here because 0 is an invalid value.

    Field string

    If not set, the table is partitioned by pseudo column '_PARTITIONTIME'; if set, the table is partitioned by this field. The field must be a top-level TIMESTAMP or DATE field. Its mode must be NULLABLE or REQUIRED. A wrapper is used here because an empty string is an invalid value.

    type String

    The only type supported is DAY, which will generate one partition per day. Providing an empty string used to cause an error, but in OnePlatform the field will be treated as unset.

    expirationMs String

    Number of milliseconds for which to keep the storage for a partition. A wrapper is used here because 0 is an invalid value.

    field String

    If not set, the table is partitioned by pseudo column '_PARTITIONTIME'; if set, the table is partitioned by this field. The field must be a top-level TIMESTAMP or DATE field. Its mode must be NULLABLE or REQUIRED. A wrapper is used here because an empty string is an invalid value.

    type string

    The only type supported is DAY, which will generate one partition per day. Providing an empty string used to cause an error, but in OnePlatform the field will be treated as unset.

    expirationMs string

    Number of milliseconds for which to keep the storage for a partition. A wrapper is used here because 0 is an invalid value.

    field string

    If not set, the table is partitioned by pseudo column '_PARTITIONTIME'; if set, the table is partitioned by this field. The field must be a top-level TIMESTAMP or DATE field. Its mode must be NULLABLE or REQUIRED. A wrapper is used here because an empty string is an invalid value.

    type str

    The only type supported is DAY, which will generate one partition per day. Providing an empty string used to cause an error, but in OnePlatform the field will be treated as unset.

    expiration_ms str

    Number of milliseconds for which to keep the storage for a partition. A wrapper is used here because 0 is an invalid value.

    field str

    If not set, the table is partitioned by pseudo column '_PARTITIONTIME'; if set, the table is partitioned by this field. The field must be a top-level TIMESTAMP or DATE field. Its mode must be NULLABLE or REQUIRED. A wrapper is used here because an empty string is an invalid value.

    type String

    The only type supported is DAY, which will generate one partition per day. Providing an empty string used to cause an error, but in OnePlatform the field will be treated as unset.

    expirationMs String

    Number of milliseconds for which to keep the storage for a partition. A wrapper is used here because 0 is an invalid value.

    field String

    If not set, the table is partitioned by pseudo column '_PARTITIONTIME'; if set, the table is partitioned by this field. The field must be a top-level TIMESTAMP or DATE field. Its mode must be NULLABLE or REQUIRED. A wrapper is used here because an empty string is an invalid value.
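
    Putting the partitioning options together, a load into a day-partitioned destination might look like the sketch below; the partition field and expiration are illustrative placeholders.

    import pulumi_gcp as gcp

    # Day-partitioned load sketch; the field name and expiration are placeholders.
    partitioned_load = gcp.bigquery.Job("partitioned-load",
        job_id="job_load_partitioned",
        load=gcp.bigquery.JobLoadArgs(
            source_uris=["gs://example-bucket/data/*.json"],
            destination_table=gcp.bigquery.JobLoadDestinationTableArgs(
                project_id="example-project",
                dataset_id="example_dataset",
                table_id="partitioned_table",
            ),
            source_format="NEWLINE_DELIMITED_JSON",
            time_partitioning=gcp.bigquery.JobLoadTimePartitioningArgs(
                type="DAY",                  # DAY is the only supported type
                field="event_timestamp",     # placeholder top-level TIMESTAMP column
                expiration_ms="7776000000",  # keep each partition for 90 days
            ),
        ))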

    JobQuery, JobQueryArgs

    Query string

    SQL query text to execute. The useLegacySql field can be used to indicate whether the query uses legacy SQL or standard SQL. NOTE: queries containing DML language (DELETE, UPDATE, MERGE, INSERT) must specify create_disposition = "" and write_disposition = "".

    AllowLargeResults bool

    If true and query uses legacy SQL dialect, allows the query to produce arbitrarily large result tables at a slight cost in performance. Requires destinationTable to be set. For standard SQL queries, this flag is ignored and large results are always allowed. However, you must still set destinationTable when result size exceeds the allowed maximum response size.

    CreateDisposition string

    Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is CREATE_IF_NEEDED. Possible values are: CREATE_IF_NEEDED, CREATE_NEVER.

    DefaultDataset JobQueryDefaultDataset

    Specifies the default dataset to use for unqualified table names in the query. Note that this does not alter behavior of unqualified dataset names. Structure is documented below.

    DestinationEncryptionConfiguration JobQueryDestinationEncryptionConfiguration

    Custom encryption configuration (e.g., Cloud KMS keys). Structure is documented below.

    DestinationTable JobQueryDestinationTable

    Describes the table where the query results should be stored. This property must be set for large results that exceed the maximum response size. For queries that produce anonymous (cached) results, this field will be populated by BigQuery. Structure is documented below.

    FlattenResults bool

    If true and query uses legacy SQL dialect, flattens all nested and repeated fields in the query results. allowLargeResults must be true if this is set to false. For standard SQL queries, this flag is ignored and results are never flattened.

    MaximumBillingTier int

    Limits the billing tier for this job. Queries that have resource usage beyond this tier will fail (without incurring a charge). If unspecified, this will be set to your project default.

    MaximumBytesBilled string

    Limits the bytes billed for this job. Queries that will have bytes billed beyond this limit will fail (without incurring a charge). If unspecified, this will be set to your project default.

    ParameterMode string

    Standard SQL only. Set to POSITIONAL to use positional (?) query parameters or to NAMED to use named (@myparam) query parameters in this query.

    Priority string

    Specifies a priority for the query. Default value is INTERACTIVE. Possible values are: INTERACTIVE, BATCH.

    SchemaUpdateOptions List<string>

    Allows the schema of the destination table to be updated as a side effect of the query job. Schema update options are supported in two cases: when writeDisposition is WRITE_APPEND; when writeDisposition is WRITE_TRUNCATE and the destination table is a partition of a table, specified by partition decorators. For normal tables, WRITE_TRUNCATE will always overwrite the schema. One or more of the following values are specified: ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema. ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original schema to nullable.

    ScriptOptions JobQueryScriptOptions

    Options controlling the execution of scripts. Structure is documented below.

    UseLegacySql bool

    Specifies whether to use BigQuery's legacy SQL dialect for this query. The default value is true. If set to false, the query will use BigQuery's standard SQL.

    UseQueryCache bool

    Whether to look for the result in the query cache. The query cache is a best-effort cache that will be flushed whenever tables in the query are modified. Moreover, the query cache is only available when a query does not have a destination table specified. The default value is true.

    UserDefinedFunctionResources List<JobQueryUserDefinedFunctionResource>

    Describes user-defined function resources used in the query. Structure is documented below.

    WriteDisposition string

    Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is WRITE_EMPTY. Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY.

    Query string

    SQL query text to execute. The useLegacySql field can be used to indicate whether the query uses legacy SQL or standard SQL. NOTE: queries containing DML language (DELETE, UPDATE, MERGE, INSERT) must specify create_disposition = "" and write_disposition = "".

    AllowLargeResults bool

    If true and query uses legacy SQL dialect, allows the query to produce arbitrarily large result tables at a slight cost in performance. Requires destinationTable to be set. For standard SQL queries, this flag is ignored and large results are always allowed. However, you must still set destinationTable when result size exceeds the allowed maximum response size.

    CreateDisposition string

    Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is CREATE_IF_NEEDED. Possible values are: CREATE_IF_NEEDED, CREATE_NEVER.

    DefaultDataset JobQueryDefaultDataset

    Specifies the default dataset to use for unqualified table names in the query. Note that this does not alter behavior of unqualified dataset names. Structure is documented below.

    DestinationEncryptionConfiguration JobQueryDestinationEncryptionConfiguration

    Custom encryption configuration (e.g., Cloud KMS keys). Structure is documented below.

    DestinationTable JobQueryDestinationTable

    Describes the table where the query results should be stored. This property must be set for large results that exceed the maximum response size. For queries that produce anonymous (cached) results, this field will be populated by BigQuery. Structure is documented below.

    FlattenResults bool

    If true and query uses legacy SQL dialect, flattens all nested and repeated fields in the query results. allowLargeResults must be true if this is set to false. For standard SQL queries, this flag is ignored and results are never flattened.

    MaximumBillingTier int

    Limits the billing tier for this job. Queries that have resource usage beyond this tier will fail (without incurring a charge). If unspecified, this will be set to your project default.

    MaximumBytesBilled string

    Limits the bytes billed for this job. Queries that will have bytes billed beyond this limit will fail (without incurring a charge). If unspecified, this will be set to your project default.

    ParameterMode string

    Standard SQL only. Set to POSITIONAL to use positional (?) query parameters or to NAMED to use named (@myparam) query parameters in this query.

    Priority string

    Specifies a priority for the query. Default value is INTERACTIVE. Possible values are: INTERACTIVE, BATCH.

    SchemaUpdateOptions []string

    Allows the schema of the destination table to be updated as a side effect of the query job. Schema update options are supported in two cases: when writeDisposition is WRITE_APPEND; when writeDisposition is WRITE_TRUNCATE and the destination table is a partition of a table, specified by partition decorators. For normal tables, WRITE_TRUNCATE will always overwrite the schema. One or more of the following values are specified: ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema. ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original schema to nullable.

    ScriptOptions JobQueryScriptOptions

    Options controlling the execution of scripts. Structure is documented below.

    UseLegacySql bool

    Specifies whether to use BigQuery's legacy SQL dialect for this query. The default value is true. If set to false, the query will use BigQuery's standard SQL.

    UseQueryCache bool

    Whether to look for the result in the query cache. The query cache is a best-effort cache that will be flushed whenever tables in the query are modified. Moreover, the query cache is only available when a query does not have a destination table specified. The default value is true.

    UserDefinedFunctionResources []JobQueryUserDefinedFunctionResource

    Describes user-defined function resources used in the query. Structure is documented below.

    WriteDisposition string

    Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is WRITE_EMPTY. Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY.

    query String

    SQL query text to execute. The useLegacySql field can be used to indicate whether the query uses legacy SQL or standard SQL. NOTE: queries containing DML language (DELETE, UPDATE, MERGE, INSERT) must specify create_disposition = "" and write_disposition = "".

    allowLargeResults Boolean

    If true and query uses legacy SQL dialect, allows the query to produce arbitrarily large result tables at a slight cost in performance. Requires destinationTable to be set. For standard SQL queries, this flag is ignored and large results are always allowed. However, you must still set destinationTable when result size exceeds the allowed maximum response size.

    createDisposition String

    Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is CREATE_IF_NEEDED. Possible values are: CREATE_IF_NEEDED, CREATE_NEVER.

    defaultDataset JobQueryDefaultDataset

    Specifies the default dataset to use for unqualified table names in the query. Note that this does not alter behavior of unqualified dataset names. Structure is documented below.

    destinationEncryptionConfiguration JobQueryDestinationEncryptionConfiguration

    Custom encryption configuration (e.g., Cloud KMS keys). Structure is documented below.

    destinationTable JobQueryDestinationTable

    Describes the table where the query results should be stored. This property must be set for large results that exceed the maximum response size. For queries that produce anonymous (cached) results, this field will be populated by BigQuery. Structure is documented below.

    flattenResults Boolean

    If true and query uses legacy SQL dialect, flattens all nested and repeated fields in the query results. allowLargeResults must be true if this is set to false. For standard SQL queries, this flag is ignored and results are never flattened.

    maximumBillingTier Integer

    Limits the billing tier for this job. Queries that have resource usage beyond this tier will fail (without incurring a charge). If unspecified, this will be set to your project default.

    maximumBytesBilled String

    Limits the bytes billed for this job. Queries that will have bytes billed beyond this limit will fail (without incurring a charge). If unspecified, this will be set to your project default.

    parameterMode String

    Standard SQL only. Set to POSITIONAL to use positional (?) query parameters or to NAMED to use named (@myparam) query parameters in this query.

    priority String

    Specifies a priority for the query. Default value is INTERACTIVE. Possible values are: INTERACTIVE, BATCH.

    schemaUpdateOptions List<String>

    Allows the schema of the destination table to be updated as a side effect of the query job. Schema update options are supported in two cases: when writeDisposition is WRITE_APPEND; when writeDisposition is WRITE_TRUNCATE and the destination table is a partition of a table, specified by partition decorators. For normal tables, WRITE_TRUNCATE will always overwrite the schema. One or more of the following values are specified: ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema. ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original schema to nullable.

    scriptOptions JobQueryScriptOptions

    Options controlling the execution of scripts. Structure is documented below.

    useLegacySql Boolean

    Specifies whether to use BigQuery's legacy SQL dialect for this query. The default value is true. If set to false, the query will use BigQuery's standard SQL.

    useQueryCache Boolean

    Whether to look for the result in the query cache. The query cache is a best-effort cache that will be flushed whenever tables in the query are modified. Moreover, the query cache is only available when a query does not have a destination table specified. The default value is true.

    userDefinedFunctionResources List<JobQueryUserDefinedFunctionResource>

    Describes user-defined function resources used in the query. Structure is documented below.

    writeDisposition String

    Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is WRITE_EMPTY. Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY.

    query string

    SQL query text to execute. The useLegacySql field can be used to indicate whether the query uses legacy SQL or standard SQL. NOTE: queries containing DML language (DELETE, UPDATE, MERGE, INSERT) must specify create_disposition = "" and write_disposition = "".

    allowLargeResults boolean

    If true and query uses legacy SQL dialect, allows the query to produce arbitrarily large result tables at a slight cost in performance. Requires destinationTable to be set. For standard SQL queries, this flag is ignored and large results are always allowed. However, you must still set destinationTable when result size exceeds the allowed maximum response size.

    createDisposition string

    Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is CREATE_IF_NEEDED. Possible values are: CREATE_IF_NEEDED, CREATE_NEVER.

    defaultDataset JobQueryDefaultDataset

    Specifies the default dataset to use for unqualified table names in the query. Note that this does not alter behavior of unqualified dataset names. Structure is documented below.

    destinationEncryptionConfiguration JobQueryDestinationEncryptionConfiguration

    Custom encryption configuration (e.g., Cloud KMS keys). Structure is documented below.

    destinationTable JobQueryDestinationTable

    Describes the table where the query results should be stored. This property must be set for large results that exceed the maximum response size. For queries that produce anonymous (cached) results, this field will be populated by BigQuery. Structure is documented below.

    flattenResults boolean

    If true and query uses legacy SQL dialect, flattens all nested and repeated fields in the query results. allowLargeResults must be true if this is set to false. For standard SQL queries, this flag is ignored and results are never flattened.

    maximumBillingTier number

    Limits the billing tier for this job. Queries that have resource usage beyond this tier will fail (without incurring a charge). If unspecified, this will be set to your project default.

    maximumBytesBilled string

    Limits the bytes billed for this job. Queries that will have bytes billed beyond this limit will fail (without incurring a charge). If unspecified, this will be set to your project default.

    parameterMode string

    Standard SQL only. Set to POSITIONAL to use positional (?) query parameters or to NAMED to use named (@myparam) query parameters in this query.

    priority string

    Specifies a priority for the query. Default value is INTERACTIVE. Possible values are: INTERACTIVE, BATCH.

    schemaUpdateOptions string[]

    Allows the schema of the destination table to be updated as a side effect of the query job. Schema update options are supported in two cases: when writeDisposition is WRITE_APPEND; when writeDisposition is WRITE_TRUNCATE and the destination table is a partition of a table, specified by partition decorators. For normal tables, WRITE_TRUNCATE will always overwrite the schema. One or more of the following values are specified: ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema. ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original schema to nullable.

    scriptOptions JobQueryScriptOptions

    Options controlling the execution of scripts. Structure is documented below.

    useLegacySql boolean

    Specifies whether to use BigQuery's legacy SQL dialect for this query. The default value is true. If set to false, the query will use BigQuery's standard SQL.

    useQueryCache boolean

    Whether to look for the result in the query cache. The query cache is a best-effort cache that will be flushed whenever tables in the query are modified. Moreover, the query cache is only available when a query does not have a destination table specified. The default value is true.

    userDefinedFunctionResources JobQueryUserDefinedFunctionResource[]

    Describes user-defined function resources used in the query. Structure is documented below.

    writeDisposition string

    Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is WRITE_EMPTY. Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY.

    query str

    SQL query text to execute. The useLegacySql field can be used to indicate whether the query uses legacy SQL or standard SQL. NOTE: queries containing DML language (DELETE, UPDATE, MERGE, INSERT) must specify create_disposition = "" and write_disposition = "".

    allow_large_results bool

    If true and query uses legacy SQL dialect, allows the query to produce arbitrarily large result tables at a slight cost in performance. Requires destinationTable to be set. For standard SQL queries, this flag is ignored and large results are always allowed. However, you must still set destinationTable when result size exceeds the allowed maximum response size.

    create_disposition str

    Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is CREATE_IF_NEEDED. Possible values are: CREATE_IF_NEEDED, CREATE_NEVER.

    default_dataset JobQueryDefaultDataset

    Specifies the default dataset to use for unqualified table names in the query. Note that this does not alter behavior of unqualified dataset names. Structure is documented below.

    destination_encryption_configuration JobQueryDestinationEncryptionConfiguration

    Custom encryption configuration (e.g., Cloud KMS keys). Structure is documented below.

    destination_table JobQueryDestinationTable

    Describes the table where the query results should be stored. This property must be set for large results that exceed the maximum response size. For queries that produce anonymous (cached) results, this field will be populated by BigQuery. Structure is documented below.

    flatten_results bool

    If true and query uses legacy SQL dialect, flattens all nested and repeated fields in the query results. allowLargeResults must be true if this is set to false. For standard SQL queries, this flag is ignored and results are never flattened.

    maximum_billing_tier int

    Limits the billing tier for this job. Queries that have resource usage beyond this tier will fail (without incurring a charge). If unspecified, this will be set to your project default.

    maximum_bytes_billed str

    Limits the bytes billed for this job. Queries that will have bytes billed beyond this limit will fail (without incurring a charge). If unspecified, this will be set to your project default.

    parameter_mode str

    Standard SQL only. Set to POSITIONAL to use positional (?) query parameters or to NAMED to use named (@myparam) query parameters in this query.

    priority str

    Specifies a priority for the query. Default value is INTERACTIVE. Possible values are: INTERACTIVE, BATCH.

    schema_update_options Sequence[str]

    Allows the schema of the destination table to be updated as a side effect of the query job. Schema update options are supported in two cases: when writeDisposition is WRITE_APPEND; when writeDisposition is WRITE_TRUNCATE and the destination table is a partition of a table, specified by partition decorators. For normal tables, WRITE_TRUNCATE will always overwrite the schema. One or more of the following values are specified: ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema. ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original schema to nullable.

    script_options JobQueryScriptOptions

    Options controlling the execution of scripts. Structure is documented below.

    use_legacy_sql bool

    Specifies whether to use BigQuery's legacy SQL dialect for this query. The default value is true. If set to false, the query will use BigQuery's standard SQL.

    use_query_cache bool

    Whether to look for the result in the query cache. The query cache is a best-effort cache that will be flushed whenever tables in the query are modified. Moreover, the query cache is only available when a query does not have a destination table specified. The default value is true.

    user_defined_function_resources Sequence[JobQueryUserDefinedFunctionResource]

    Describes user-defined function resources used in the query. Structure is documented below.

    write_disposition str

    Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is WRITE_EMPTY. Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY.

    query String

    SQL query text to execute. The useLegacySql field can be used to indicate whether the query uses legacy SQL or standard SQL. NOTE: queries containing DML language (DELETE, UPDATE, MERGE, INSERT) must specify create_disposition = "" and write_disposition = "".

    allowLargeResults Boolean

    If true and query uses legacy SQL dialect, allows the query to produce arbitrarily large result tables at a slight cost in performance. Requires destinationTable to be set. For standard SQL queries, this flag is ignored and large results are always allowed. However, you must still set destinationTable when result size exceeds the allowed maximum response size.

    createDisposition String

    Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is CREATE_IF_NEEDED. Possible values are: CREATE_IF_NEEDED, CREATE_NEVER.

    defaultDataset Property Map

    Specifies the default dataset to use for unqualified table names in the query. Note that this does not alter behavior of unqualified dataset names. Structure is documented below.

    destinationEncryptionConfiguration Property Map

    Custom encryption configuration (e.g., Cloud KMS keys). Structure is documented below.

    destinationTable Property Map

    Describes the table where the query results should be stored. This property must be set for large results that exceed the maximum response size. For queries that produce anonymous (cached) results, this field will be populated by BigQuery. Structure is documented below.

    flattenResults Boolean

    If true and query uses legacy SQL dialect, flattens all nested and repeated fields in the query results. allowLargeResults must be true if this is set to false. For standard SQL queries, this flag is ignored and results are never flattened.

    maximumBillingTier Number

    Limits the billing tier for this job. Queries that have resource usage beyond this tier will fail (without incurring a charge). If unspecified, this will be set to your project default.

    maximumBytesBilled String

    Limits the bytes billed for this job. Queries that will have bytes billed beyond this limit will fail (without incurring a charge). If unspecified, this will be set to your project default.

    parameterMode String

    Standard SQL only. Set to POSITIONAL to use positional (?) query parameters or to NAMED to use named (@myparam) query parameters in this query.

    priority String

    Specifies a priority for the query. Default value is INTERACTIVE. Possible values are: INTERACTIVE, BATCH.

    schemaUpdateOptions List<String>

    Allows the schema of the destination table to be updated as a side effect of the query job. Schema update options are supported in two cases: when writeDisposition is WRITE_APPEND; when writeDisposition is WRITE_TRUNCATE and the destination table is a partition of a table, specified by partition decorators. For normal tables, WRITE_TRUNCATE will always overwrite the schema. One or more of the following values are specified: ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema. ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original schema to nullable.

    scriptOptions Property Map

    Options controlling the execution of scripts. Structure is documented below.

    useLegacySql Boolean

    Specifies whether to use BigQuery's legacy SQL dialect for this query. The default value is true. If set to false, the query will use BigQuery's standard SQL.

    useQueryCache Boolean

    Whether to look for the result in the query cache. The query cache is a best-effort cache that will be flushed whenever tables in the query are modified. Moreover, the query cache is only available when a query does not have a destination table specified. The default value is true.

    userDefinedFunctionResources List<Property Map>

    Describes user-defined function resources used in the query. Structure is documented below.

    writeDisposition String

    Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is WRITE_EMPTY. Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY.
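
    The query block above is where SQL dialect, billing caps, and write behavior come together. As a hedged sketch in Go, mirroring the shape of the example at the top of this page (the project, dataset, and table IDs and the byte limit are placeholders, not values from this page), a batch-priority standard SQL job that appends to an existing table and caps billed bytes could look like this:

    package main

    import (
    	"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/bigquery"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )

    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		// All IDs below are placeholders; in practice reference existing
    		// Dataset/Table resources as in the example at the top of this page.
    		_, err := bigquery.NewJob(ctx, "batch", &bigquery.JobArgs{
    			JobId: pulumi.String("job_query_batch"),
    			Query: &bigquery.JobQueryArgs{
    				Query:        pulumi.String("SELECT 1 AS x"),
    				UseLegacySql: pulumi.Bool(false),
    				Priority:     pulumi.String("BATCH"),
    				// Fail the job (without charge) if it would bill more than ~1 GiB.
    				MaximumBytesBilled: pulumi.String("1073741824"),
    				DestinationTable: &bigquery.JobQueryDestinationTableArgs{
    					ProjectId: pulumi.String("my-project"),
    					DatasetId: pulumi.String("job_query_dataset"),
    					TableId:   pulumi.String("job_query_table"),
    				},
    				CreateDisposition: pulumi.String("CREATE_IF_NEEDED"),
    				WriteDisposition:  pulumi.String("WRITE_APPEND"),
    				// WRITE_APPEND allows the schema update requested below.
    				SchemaUpdateOptions: pulumi.StringArray{
    					pulumi.String("ALLOW_FIELD_ADDITION"),
    				},
    			},
    		})
    		return err
    	})
    }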

    JobQueryDefaultDataset, JobQueryDefaultDatasetArgs

    DatasetId string

    The dataset. Can be specified as {{dataset_id}} if project_id is also set, or of the form projects/{{project}}/datasets/{{dataset_id}} if not.

    ProjectId string

    The ID of the project containing this table.

    DatasetId string

    The dataset. Can be specified as {{dataset_id}} if project_id is also set, or of the form projects/{{project}}/datasets/{{dataset_id}} if not.

    ProjectId string

    The ID of the project containing this table.

    datasetId String

    The dataset. Can be specified as {{dataset_id}} if project_id is also set, or of the form projects/{{project}}/datasets/{{dataset_id}} if not.

    projectId String

    The ID of the project containing this table.

    datasetId string

    The dataset. Can be specified as {{dataset_id}} if project_id is also set, or of the form projects/{{project}}/datasets/{{dataset_id}} if not.

    projectId string

    The ID of the project containing this table.

    dataset_id str

    The dataset. Can be specified as {{dataset_id}} if project_id is also set, or of the form projects/{{project}}/datasets/{{dataset_id}} if not.

    project_id str

    The ID of the project containing this table.

    datasetId String

    The dataset. Can be specified as {{dataset_id}} if project_id is also set, or of the form projects/{{project}}/datasets/{{dataset_id}} if not.

    projectId String

    The ID of the project containing this table.
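
    For example, a default dataset lets the query text reference my_table without a dataset qualifier. A minimal hedged sketch in Go (the dataset, project, and table names are placeholders):

    package main

    import (
    	"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/bigquery"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )

    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := bigquery.NewJob(ctx, "defaultDataset", &bigquery.JobArgs{
    			JobId: pulumi.String("job_query_default_dataset"),
    			Query: &bigquery.JobQueryArgs{
    				// "my_table" resolves against the default dataset configured below.
    				Query:        pulumi.String("SELECT * FROM my_table"),
    				UseLegacySql: pulumi.Bool(false),
    				DefaultDataset: &bigquery.JobQueryDefaultDatasetArgs{
    					DatasetId: pulumi.String("job_query_dataset"),
    					ProjectId: pulumi.String("my-project"),
    				},
    			},
    		})
    		return err
    	})
    }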

    JobQueryDestinationEncryptionConfiguration, JobQueryDestinationEncryptionConfigurationArgs

    KmsKeyName string

    Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.

    KmsKeyVersion string

    (Output) Describes the Cloud KMS encryption key version used to protect destination BigQuery table.

    KmsKeyName string

    Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.

    KmsKeyVersion string

    (Output) Describes the Cloud KMS encryption key version used to protect destination BigQuery table.

    kmsKeyName String

    Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.

    kmsKeyVersion String

    (Output) Describes the Cloud KMS encryption key version used to protect destination BigQuery table.

    kmsKeyName string

    Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.

    kmsKeyVersion string

    (Output) Describes the Cloud KMS encryption key version used to protect destination BigQuery table.

    kms_key_name str

    Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.

    kms_key_version str

    (Output) Describes the Cloud KMS encryption key version used to protect destination BigQuery table.

    kmsKeyName String

    Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.

    kmsKeyVersion String

    (Output) Describes the Cloud KMS encryption key version used to protect destination BigQuery table.
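
    As a hedged sketch in Go, query results can be written into a CMEK-protected destination table by pointing the job at a Cloud KMS key (the key ring, key, project, dataset, and table names are placeholders; as noted above, the BigQuery service account for the project must already have access to the key):

    package main

    import (
    	"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/bigquery"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )

    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := bigquery.NewJob(ctx, "cmek", &bigquery.JobArgs{
    			JobId: pulumi.String("job_query_cmek"),
    			Query: &bigquery.JobQueryArgs{
    				Query:        pulumi.String("SELECT 1 AS x"),
    				UseLegacySql: pulumi.Bool(false),
    				DestinationTable: &bigquery.JobQueryDestinationTableArgs{
    					ProjectId: pulumi.String("my-project"),
    					DatasetId: pulumi.String("job_query_dataset"),
    					TableId:   pulumi.String("job_query_cmek_table"),
    				},
    				// Placeholder key; the BigQuery service account needs
    				// Encrypter/Decrypter access on it.
    				DestinationEncryptionConfiguration: &bigquery.JobQueryDestinationEncryptionConfigurationArgs{
    					KmsKeyName: pulumi.String("projects/my-project/locations/us/keyRings/my-ring/cryptoKeys/my-key"),
    				},
    			},
    		})
    		return err
    	})
    }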

    JobQueryDestinationTable, JobQueryDestinationTableArgs

    TableId string

    The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.

    DatasetId string

    The ID of the dataset containing this table.

    ProjectId string

    The ID of the project containing this table.

    TableId string

    The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.

    DatasetId string

    The ID of the dataset containing this table.

    ProjectId string

    The ID of the project containing this table.

    tableId String

    The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.

    datasetId String

    The ID of the dataset containing this table.

    projectId String

    The ID of the project containing this table.

    tableId string

    The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.

    datasetId string

    The ID of the dataset containing this table.

    projectId string

    The ID of the project containing this table.

    table_id str

    The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.

    dataset_id str

    The ID of the dataset containing this table.

    project_id str

    The ID of the project containing this table.

    tableId String

    The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.

    datasetId String

    The ID of the dataset containing this table.

    projectId String

    The ID of the project containing this table.

    JobQueryScriptOptions, JobQueryScriptOptionsArgs

    KeyResultStatement string

    Determines which statement in the script represents the "key result", used to populate the schema and query results of the script job. Possible values are: LAST, FIRST_SELECT.

    StatementByteBudget string

    Limit on the number of bytes billed per statement. Exceeding this budget results in an error.

    StatementTimeoutMs string

    Timeout period for each statement in a script.

    KeyResultStatement string

    Determines which statement in the script represents the "key result", used to populate the schema and query results of the script job. Possible values are: LAST, FIRST_SELECT.

    StatementByteBudget string

    Limit on the number of bytes billed per statement. Exceeding this budget results in an error.

    StatementTimeoutMs string

    Timeout period for each statement in a script.

    keyResultStatement String

    Determines which statement in the script represents the "key result", used to populate the schema and query results of the script job. Possible values are: LAST, FIRST_SELECT.

    statementByteBudget String

    Limit on the number of bytes billed per statement. Exceeding this budget results in an error.

    statementTimeoutMs String

    Timeout period for each statement in a script.

    keyResultStatement string

    Determines which statement in the script represents the "key result", used to populate the schema and query results of the script job. Possible values are: LAST, FIRST_SELECT.

    statementByteBudget string

    Limit on the number of bytes billed per statement. Exceeding this budget results in an error.

    statementTimeoutMs string

    Timeout period for each statement in a script.

    key_result_statement str

    Determines which statement in the script represents the "key result", used to populate the schema and query results of the script job. Possible values are: LAST, FIRST_SELECT.

    statement_byte_budget str

    Limit on the number of bytes billed per statement. Exceeding this budget results in an error.

    statement_timeout_ms str

    Timeout period for each statement in a script.

    keyResultStatement String

    Determines which statement in the script represents the "key result", used to populate the schema and query results of the script job. Possible values are: LAST, FIRST_SELECT.

    statementByteBudget String

    Limit on the number of bytes billed per statement. Exceeding this budget results in an error.

    statementTimeoutMs String

    Timeout period for each statement in a script.
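
    A hedged sketch in Go of a multi-statement script that sets all three options (the timeout and byte budget are arbitrary placeholder values; both fields are strings). Because scripts, like DML, do not accept table dispositions, this sketch assumes the same workaround as the DML note above and clears create_disposition and write_disposition:

    package main

    import (
    	"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/bigquery"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )

    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := bigquery.NewJob(ctx, "script", &bigquery.JobArgs{
    			JobId: pulumi.String("job_query_script"),
    			Query: &bigquery.JobQueryArgs{
    				// A standard SQL script; the last SELECT supplies the job's
    				// schema and results via keyResultStatement = LAST.
    				Query:        pulumi.String("DECLARE x INT64 DEFAULT 42; SELECT x;"),
    				UseLegacySql: pulumi.Bool(false),
    				// Assumption: clearing the dispositions for scripts, following
    				// the DML note earlier on this page.
    				CreateDisposition: pulumi.String(""),
    				WriteDisposition:  pulumi.String(""),
    				ScriptOptions: &bigquery.JobQueryScriptOptionsArgs{
    					KeyResultStatement:  pulumi.String("LAST"),
    					StatementTimeoutMs:  pulumi.String("60000"),
    					StatementByteBudget: pulumi.String("1073741824"),
    				},
    			},
    		})
    		return err
    	})
    }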

    JobQueryUserDefinedFunctionResource, JobQueryUserDefinedFunctionResourceArgs

    InlineCode string

    An inline resource that contains code for a user-defined function (UDF). Providing an inline code resource is equivalent to providing a URI for a file containing the same code.

    ResourceUri string

    A code resource to load from a Google Cloud Storage URI (gs://bucket/path).

    InlineCode string

    An inline resource that contains code for a user-defined function (UDF). Providing an inline code resource is equivalent to providing a URI for a file containing the same code.

    ResourceUri string

    A code resource to load from a Google Cloud Storage URI (gs://bucket/path).

    inlineCode String

    An inline resource that contains code for a user-defined function (UDF). Providing an inline code resource is equivalent to providing a URI for a file containing the same code.

    resourceUri String

    A code resource to load from a Google Cloud Storage URI (gs://bucket/path).

    inlineCode string

    An inline resource that contains code for a user-defined function (UDF). Providing an inline code resource is equivalent to providing a URI for a file containing the same code.

    resourceUri string

    A code resource to load from a Google Cloud Storage URI (gs://bucket/path).

    inline_code str

    An inline resource that contains code for a user-defined function (UDF). Providing an inline code resource is equivalent to providing a URI for a file containing the same code.

    resource_uri str

    A code resource to load from a Google Cloud Storage URI (gs://bucket/path).

    inlineCode String

    An inline resource that contains code for a user-defined function (UDF). Providing an inline code resource is equivalent to providing a URI for a file containing the same code.

    resourceUri String

    A code resource to load from a Google Cloud Storage URI (gs://bucket/path).
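
    These resources are primarily a legacy SQL mechanism: each entry supplies JavaScript UDF code for the query, either inline or from a Cloud Storage URI. A hedged sketch in Go (the bucket path, the myUdf function, and the source table are placeholders, and the JobQueryUserDefinedFunctionResourceArray input type name is assumed from standard Pulumi Go SDK naming rather than shown on this page):

    package main

    import (
    	"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/bigquery"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )

    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := bigquery.NewJob(ctx, "udf", &bigquery.JobArgs{
    			JobId: pulumi.String("job_query_udf"),
    			Query: &bigquery.JobQueryArgs{
    				// Legacy SQL query calling a JavaScript UDF registered in the
    				// resource file referenced below.
    				Query:        pulumi.String("SELECT outputA FROM myUdf((SELECT * FROM [my-project:my_dataset.my_table]))"),
    				UseLegacySql: pulumi.Bool(true),
    				UserDefinedFunctionResources: bigquery.JobQueryUserDefinedFunctionResourceArray{
    					&bigquery.JobQueryUserDefinedFunctionResourceArgs{
    						// Either resourceUri or inlineCode may be provided.
    						ResourceUri: pulumi.String("gs://my-bucket/my_udf.js"),
    					},
    				},
    			},
    		})
    		return err
    	})
    }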

    JobStatus, JobStatusArgs

    ErrorResults List<JobStatusErrorResult>

    (Output) Final error result of the job. If present, indicates that the job has completed and was unsuccessful. Structure is documented below.

    Errors List<JobStatusError>

    (Output) The first errors encountered during the running of the job. The final message includes the number of errors that caused the process to stop. Errors here do not necessarily mean that the job has not completed or was unsuccessful. Structure is documented below.

    State string

    (Output) Running state of the job. Valid states include 'PENDING', 'RUNNING', and 'DONE'.

    ErrorResults []JobStatusErrorResult

    (Output) Final error result of the job. If present, indicates that the job has completed and was unsuccessful. Structure is documented below.

    Errors []JobStatusError

    (Output) The first errors encountered during the running of the job. The final message includes the number of errors that caused the process to stop. Errors here do not necessarily mean that the job has not completed or was unsuccessful. Structure is documented below.

    State string

    (Output) Running state of the job. Valid states include 'PENDING', 'RUNNING', and 'DONE'.

    errorResults List<JobStatusErrorResult>

    (Output) Final error result of the job. If present, indicates that the job has completed and was unsuccessful. Structure is documented below.

    errors List<JobStatusError>

    (Output) The first errors encountered during the running of the job. The final message includes the number of errors that caused the process to stop. Errors here do not necessarily mean that the job has not completed or was unsuccessful. Structure is documented below.

    state String

    (Output) Running state of the job. Valid states include 'PENDING', 'RUNNING', and 'DONE'.

    errorResults JobStatusErrorResult[]

    (Output) Final error result of the job. If present, indicates that the job has completed and was unsuccessful. Structure is documented below.

    errors JobStatusError[]

    (Output) The first errors encountered during the running of the job. The final message includes the number of errors that caused the process to stop. Errors here do not necessarily mean that the job has not completed or was unsuccessful. Structure is documented below.

    state string

    (Output) Running state of the job. Valid states include 'PENDING', 'RUNNING', and 'DONE'.

    error_results Sequence[JobStatusErrorResult]

    (Output) Final error result of the job. If present, indicates that the job has completed and was unsuccessful. Structure is documented below.

    errors Sequence[JobStatusError]

    (Output) The first errors encountered during the running of the job. The final message includes the number of errors that caused the process to stop. Errors here do not necessarily mean that the job has not completed or was unsuccessful. Structure is documented below.

    state str

    (Output) Running state of the job. Valid states include 'PENDING', 'RUNNING', and 'DONE'.

    errorResults List<Property Map>

    (Output) Final error result of the job. If present, indicates that the job has completed and was unsuccessful. Structure is documented below.

    errors List<Property Map>

    (Output) The first errors encountered during the running of the job. The final message includes the number of errors that caused the process to stop. Errors here do not necessarily mean that the job has not completed or was unsuccessful. Structure is documented below.

    state String

    (Output) Running state of the job. Valid states include 'PENDING', 'RUNNING', and 'DONE'.

    JobStatusError, JobStatusErrorArgs

    Location string

    Specifies where the error occurred, if present.

    Message string

    A human-readable description of the error.

    Reason string

    A short error code that summarizes the error.

    Location string

    Specifies where the error occurred, if present.

    Message string

    A human-readable description of the error.

    Reason string

    A short error code that summarizes the error.

    location String

    Specifies where the error occurred, if present.

    message String

    A human-readable description of the error.

    reason String

    A short error code that summarizes the error.

    location string

    Specifies where the error occurred, if present.

    message string

    A human-readable description of the error.

    reason string

    A short error code that summarizes the error.

    location str

    Specifies where the error occurred, if present.

    message str

    A human-readable description of the error.

    reason str

    A short error code that summarizes the error.

    location String

    Specifies where the error occurred, if present.

    message String

    A human-readable description of the error.

    reason String

    A short error code that summarizes the error.

    JobStatusErrorResult, JobStatusErrorResultArgs

    Location string

    Specifies where the error occurred, if present.

    Message string

    A human-readable description of the error.

    Reason string

    A short error code that summarizes the error.

    Location string

    Specifies where the error occurred, if present.

    Message string

    A human-readable description of the error.

    Reason string

    A short error code that summarizes the error.

    location String

    Specifies where the error occurred, if present.

    message String

    A human-readable description of the error.

    reason String

    A short error code that summarizes the error.

    location string

    Specifies where the error occurred, if present.

    message string

    A human-readable description of the error.

    reason string

    A short error code that summarizes the error.

    location str

    Specifies where the error occurred, if present.

    message str

    A human-readable description of the error.

    reason str

    A short error code that summarizes the error.

    location String

    Specifies where the error occurred, if present.

    message String

    A human-readable description of the error.

    reason String

    A short error code that summarizes the error.

    Import

    Job can be imported using any of these accepted formats:

    * projects/{{project}}/jobs/{{job_id}}/location/{{location}}
    * projects/{{project}}/jobs/{{job_id}}
    * {{project}}/{{job_id}}/{{location}}
    * {{job_id}}/{{location}}
    * {{project}}/{{job_id}}
    * {{job_id}}

    In Terraform v1.5.0 and later, an import block can be used to import Job with one of the formats above. For example:

    import {
      id = "projects/{{project}}/jobs/{{job_id}}/location/{{location}}"
      to = google_bigquery_job.default
    }

    When using the pulumi import command, Job can be imported using one of the formats above. For example:

     $ pulumi import gcp:bigquery/job:Job default projects/{{project}}/jobs/{{job_id}}/location/{{location}}

     $ pulumi import gcp:bigquery/job:Job default projects/{{project}}/jobs/{{job_id}}

     $ pulumi import gcp:bigquery/job:Job default {{project}}/{{job_id}}/{{location}}

     $ pulumi import gcp:bigquery/job:Job default {{job_id}}/{{location}}

     $ pulumi import gcp:bigquery/job:Job default {{project}}/{{job_id}}

     $ pulumi import gcp:bigquery/job:Job default {{job_id}}
    

    Package Details

    Repository
    Google Cloud (GCP) Classic pulumi/pulumi-gcp
    License
    Apache-2.0
    Notes

    This Pulumi package is based on the google-beta Terraform Provider.
