
gcp.bigquery.Job

Google Cloud Classic v7.19.0 published on Thursday, Apr 18, 2024 by Pulumi

    Jobs are actions that BigQuery runs on your behalf to load data, export data, query data, or copy data. Once a BigQuery job is created, it cannot be changed or deleted.

    To get more information about Job, see the BigQuery Jobs REST API reference and the BigQuery jobs introduction in the Google Cloud documentation.
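
    Every job pairs a jobId with a single job configuration block (query, load, copy, or extract); the examples below cover the common configurations. As a quick orientation, the following is a minimal sketch of a query job in TypeScript; the resource name, job ID, and SQL are placeholders rather than values taken from the examples:

    import * as gcp from "@pulumi/gcp";

    // A minimal query job: a jobId plus one job configuration block.
    // Without a destination table, BigQuery writes results to a temporary table.
    const sketch = new gcp.bigquery.Job("sketch", {
        jobId: "example_minimal_query_job",
        query: {
            query: "SELECT 1 AS one",
            useLegacySql: false,
        },
    });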

    Example Usage

    Bigquery Job Query

    TypeScript

    import * as pulumi from "@pulumi/pulumi";
    import * as gcp from "@pulumi/gcp";
    
    const bar = new gcp.bigquery.Dataset("bar", {
        datasetId: "job_query_dataset",
        friendlyName: "test",
        description: "This is a test description",
        location: "US",
    });
    const foo = new gcp.bigquery.Table("foo", {
        deletionProtection: false,
        datasetId: bar.datasetId,
        tableId: "job_query_table",
    });
    const job = new gcp.bigquery.Job("job", {
        jobId: "job_query",
        labels: {
            "example-label": "example-value",
        },
        query: {
            query: "SELECT state FROM [lookerdata:cdc.project_tycho_reports]",
            destinationTable: {
                projectId: foo.project,
                datasetId: foo.datasetId,
                tableId: foo.tableId,
            },
            allowLargeResults: true,
            flattenResults: true,
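            // scriptOptions apply when the query is a multi-statement script; keyResultStatement LAST returns the final statement's result.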
            scriptOptions: {
                keyResultStatement: "LAST",
            },
        },
    });
    
    Python

    import pulumi
    import pulumi_gcp as gcp
    
    bar = gcp.bigquery.Dataset("bar",
        dataset_id="job_query_dataset",
        friendly_name="test",
        description="This is a test description",
        location="US")
    foo = gcp.bigquery.Table("foo",
        deletion_protection=False,
        dataset_id=bar.dataset_id,
        table_id="job_query_table")
    job = gcp.bigquery.Job("job",
        job_id="job_query",
        labels={
            "example-label": "example-value",
        },
        query=gcp.bigquery.JobQueryArgs(
            query="SELECT state FROM [lookerdata:cdc.project_tycho_reports]",
            destination_table=gcp.bigquery.JobQueryDestinationTableArgs(
                project_id=foo.project,
                dataset_id=foo.dataset_id,
                table_id=foo.table_id,
            ),
            allow_large_results=True,
            flatten_results=True,
            script_options=gcp.bigquery.JobQueryScriptOptionsArgs(
                key_result_statement="LAST",
            ),
        ))
    
    Go

    package main
    
    import (
    	"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/bigquery"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		bar, err := bigquery.NewDataset(ctx, "bar", &bigquery.DatasetArgs{
    			DatasetId:    pulumi.String("job_query_dataset"),
    			FriendlyName: pulumi.String("test"),
    			Description:  pulumi.String("This is a test description"),
    			Location:     pulumi.String("US"),
    		})
    		if err != nil {
    			return err
    		}
    		foo, err := bigquery.NewTable(ctx, "foo", &bigquery.TableArgs{
    			DeletionProtection: pulumi.Bool(false),
    			DatasetId:          bar.DatasetId,
    			TableId:            pulumi.String("job_query_table"),
    		})
    		if err != nil {
    			return err
    		}
    		_, err = bigquery.NewJob(ctx, "job", &bigquery.JobArgs{
    			JobId: pulumi.String("job_query"),
    			Labels: pulumi.StringMap{
    				"example-label": pulumi.String("example-value"),
    			},
    			Query: &bigquery.JobQueryArgs{
    				Query: pulumi.String("SELECT state FROM [lookerdata:cdc.project_tycho_reports]"),
    				DestinationTable: &bigquery.JobQueryDestinationTableArgs{
    					ProjectId: foo.Project,
    					DatasetId: foo.DatasetId,
    					TableId:   foo.TableId,
    				},
    				AllowLargeResults: pulumi.Bool(true),
    				FlattenResults:    pulumi.Bool(true),
    				ScriptOptions: &bigquery.JobQueryScriptOptionsArgs{
    					KeyResultStatement: pulumi.String("LAST"),
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    C#

    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Gcp = Pulumi.Gcp;
    
    return await Deployment.RunAsync(() => 
    {
        var bar = new Gcp.BigQuery.Dataset("bar", new()
        {
            DatasetId = "job_query_dataset",
            FriendlyName = "test",
            Description = "This is a test description",
            Location = "US",
        });
    
        var foo = new Gcp.BigQuery.Table("foo", new()
        {
            DeletionProtection = false,
            DatasetId = bar.DatasetId,
            TableId = "job_query_table",
        });
    
        var job = new Gcp.BigQuery.Job("job", new()
        {
            JobId = "job_query",
            Labels = 
            {
                { "example-label", "example-value" },
            },
            Query = new Gcp.BigQuery.Inputs.JobQueryArgs
            {
                Query = "SELECT state FROM [lookerdata:cdc.project_tycho_reports]",
                DestinationTable = new Gcp.BigQuery.Inputs.JobQueryDestinationTableArgs
                {
                    ProjectId = foo.Project,
                    DatasetId = foo.DatasetId,
                    TableId = foo.TableId,
                },
                AllowLargeResults = true,
                FlattenResults = true,
                ScriptOptions = new Gcp.BigQuery.Inputs.JobQueryScriptOptionsArgs
                {
                    KeyResultStatement = "LAST",
                },
            },
        });
    
    });
    
    Java

    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.gcp.bigquery.Dataset;
    import com.pulumi.gcp.bigquery.DatasetArgs;
    import com.pulumi.gcp.bigquery.Table;
    import com.pulumi.gcp.bigquery.TableArgs;
    import com.pulumi.gcp.bigquery.Job;
    import com.pulumi.gcp.bigquery.JobArgs;
    import com.pulumi.gcp.bigquery.inputs.JobQueryArgs;
    import com.pulumi.gcp.bigquery.inputs.JobQueryDestinationTableArgs;
    import com.pulumi.gcp.bigquery.inputs.JobQueryScriptOptionsArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var bar = new Dataset("bar", DatasetArgs.builder()        
                .datasetId("job_query_dataset")
                .friendlyName("test")
                .description("This is a test description")
                .location("US")
                .build());
    
            var foo = new Table("foo", TableArgs.builder()        
                .deletionProtection(false)
                .datasetId(bar.datasetId())
                .tableId("job_query_table")
                .build());
    
            var job = new Job("job", JobArgs.builder()        
                .jobId("job_query")
                .labels(Map.of("example-label", "example-value"))
                .query(JobQueryArgs.builder()
                    .query("SELECT state FROM [lookerdata:cdc.project_tycho_reports]")
                    .destinationTable(JobQueryDestinationTableArgs.builder()
                        .projectId(foo.project())
                        .datasetId(foo.datasetId())
                        .tableId(foo.tableId())
                        .build())
                    .allowLargeResults(true)
                    .flattenResults(true)
                    .scriptOptions(JobQueryScriptOptionsArgs.builder()
                        .keyResultStatement("LAST")
                        .build())
                    .build())
                .build());
    
        }
    }
    
    YAML

    resources:
      foo:
        type: gcp:bigquery:Table
        properties:
          deletionProtection: false
          datasetId: ${bar.datasetId}
          tableId: job_query_table
      bar:
        type: gcp:bigquery:Dataset
        properties:
          datasetId: job_query_dataset
          friendlyName: test
          description: This is a test description
          location: US
      job:
        type: gcp:bigquery:Job
        properties:
          jobId: job_query
          labels:
            example-label: example-value
          query:
            query: SELECT state FROM [lookerdata:cdc.project_tycho_reports]
            destinationTable:
              projectId: ${foo.project}
              datasetId: ${foo.datasetId}
              tableId: ${foo.tableId}
            allowLargeResults: true
            flattenResults: true
            scriptOptions:
              keyResultStatement: LAST
    

    Bigquery Job Query Table Reference

    TypeScript

    import * as pulumi from "@pulumi/pulumi";
    import * as gcp from "@pulumi/gcp";
    
    const bar = new gcp.bigquery.Dataset("bar", {
        datasetId: "job_query_dataset",
        friendlyName: "test",
        description: "This is a test description",
        location: "US",
    });
    const foo = new gcp.bigquery.Table("foo", {
        deletionProtection: false,
        datasetId: bar.datasetId,
        tableId: "job_query_table",
    });
    const job = new gcp.bigquery.Job("job", {
        jobId: "job_query",
        labels: {
            "example-label": "example-value",
        },
        query: {
            query: "SELECT state FROM [lookerdata:cdc.project_tycho_reports]",
            destinationTable: {
                tableId: foo.id,
            },
            defaultDataset: {
                datasetId: bar.id,
            },
            allowLargeResults: true,
            flattenResults: true,
            scriptOptions: {
                keyResultStatement: "LAST",
            },
        },
    });
    
    Python

    import pulumi
    import pulumi_gcp as gcp
    
    bar = gcp.bigquery.Dataset("bar",
        dataset_id="job_query_dataset",
        friendly_name="test",
        description="This is a test description",
        location="US")
    foo = gcp.bigquery.Table("foo",
        deletion_protection=False,
        dataset_id=bar.dataset_id,
        table_id="job_query_table")
    job = gcp.bigquery.Job("job",
        job_id="job_query",
        labels={
            "example-label": "example-value",
        },
        query=gcp.bigquery.JobQueryArgs(
            query="SELECT state FROM [lookerdata:cdc.project_tycho_reports]",
            destination_table=gcp.bigquery.JobQueryDestinationTableArgs(
                table_id=foo.id,
            ),
            default_dataset=gcp.bigquery.JobQueryDefaultDatasetArgs(
                dataset_id=bar.id,
            ),
            allow_large_results=True,
            flatten_results=True,
            script_options=gcp.bigquery.JobQueryScriptOptionsArgs(
                key_result_statement="LAST",
            ),
        ))
    
    Go

    package main
    
    import (
    	"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/bigquery"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		bar, err := bigquery.NewDataset(ctx, "bar", &bigquery.DatasetArgs{
    			DatasetId:    pulumi.String("job_query_dataset"),
    			FriendlyName: pulumi.String("test"),
    			Description:  pulumi.String("This is a test description"),
    			Location:     pulumi.String("US"),
    		})
    		if err != nil {
    			return err
    		}
    		foo, err := bigquery.NewTable(ctx, "foo", &bigquery.TableArgs{
    			DeletionProtection: pulumi.Bool(false),
    			DatasetId:          bar.DatasetId,
    			TableId:            pulumi.String("job_query_table"),
    		})
    		if err != nil {
    			return err
    		}
    		_, err = bigquery.NewJob(ctx, "job", &bigquery.JobArgs{
    			JobId: pulumi.String("job_query"),
    			Labels: pulumi.StringMap{
    				"example-label": pulumi.String("example-value"),
    			},
    			Query: &bigquery.JobQueryArgs{
    				Query: pulumi.String("SELECT state FROM [lookerdata:cdc.project_tycho_reports]"),
    				DestinationTable: &bigquery.JobQueryDestinationTableArgs{
    					TableId: foo.ID(),
    				},
    				DefaultDataset: &bigquery.JobQueryDefaultDatasetArgs{
    					DatasetId: bar.ID(),
    				},
    				AllowLargeResults: pulumi.Bool(true),
    				FlattenResults:    pulumi.Bool(true),
    				ScriptOptions: &bigquery.JobQueryScriptOptionsArgs{
    					KeyResultStatement: pulumi.String("LAST"),
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    C#

    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Gcp = Pulumi.Gcp;
    
    return await Deployment.RunAsync(() => 
    {
        var bar = new Gcp.BigQuery.Dataset("bar", new()
        {
            DatasetId = "job_query_dataset",
            FriendlyName = "test",
            Description = "This is a test description",
            Location = "US",
        });
    
        var foo = new Gcp.BigQuery.Table("foo", new()
        {
            DeletionProtection = false,
            DatasetId = bar.DatasetId,
            TableId = "job_query_table",
        });
    
        var job = new Gcp.BigQuery.Job("job", new()
        {
            JobId = "job_query",
            Labels = 
            {
                { "example-label", "example-value" },
            },
            Query = new Gcp.BigQuery.Inputs.JobQueryArgs
            {
                Query = "SELECT state FROM [lookerdata:cdc.project_tycho_reports]",
                DestinationTable = new Gcp.BigQuery.Inputs.JobQueryDestinationTableArgs
                {
                    TableId = foo.Id,
                },
                DefaultDataset = new Gcp.BigQuery.Inputs.JobQueryDefaultDatasetArgs
                {
                    DatasetId = bar.Id,
                },
                AllowLargeResults = true,
                FlattenResults = true,
                ScriptOptions = new Gcp.BigQuery.Inputs.JobQueryScriptOptionsArgs
                {
                    KeyResultStatement = "LAST",
                },
            },
        });
    
    });
    
    Java

    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.gcp.bigquery.Dataset;
    import com.pulumi.gcp.bigquery.DatasetArgs;
    import com.pulumi.gcp.bigquery.Table;
    import com.pulumi.gcp.bigquery.TableArgs;
    import com.pulumi.gcp.bigquery.Job;
    import com.pulumi.gcp.bigquery.JobArgs;
    import com.pulumi.gcp.bigquery.inputs.JobQueryArgs;
    import com.pulumi.gcp.bigquery.inputs.JobQueryDestinationTableArgs;
    import com.pulumi.gcp.bigquery.inputs.JobQueryDefaultDatasetArgs;
    import com.pulumi.gcp.bigquery.inputs.JobQueryScriptOptionsArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var bar = new Dataset("bar", DatasetArgs.builder()        
                .datasetId("job_query_dataset")
                .friendlyName("test")
                .description("This is a test description")
                .location("US")
                .build());
    
            var foo = new Table("foo", TableArgs.builder()        
                .deletionProtection(false)
                .datasetId(bar.datasetId())
                .tableId("job_query_table")
                .build());
    
            var job = new Job("job", JobArgs.builder()        
                .jobId("job_query")
                .labels(Map.of("example-label", "example-value"))
                .query(JobQueryArgs.builder()
                    .query("SELECT state FROM [lookerdata:cdc.project_tycho_reports]")
                    .destinationTable(JobQueryDestinationTableArgs.builder()
                        .tableId(foo.id())
                        .build())
                    .defaultDataset(JobQueryDefaultDatasetArgs.builder()
                        .datasetId(bar.id())
                        .build())
                    .allowLargeResults(true)
                    .flattenResults(true)
                    .scriptOptions(JobQueryScriptOptionsArgs.builder()
                        .keyResultStatement("LAST")
                        .build())
                    .build())
                .build());
    
        }
    }
    
    YAML

    resources:
      foo:
        type: gcp:bigquery:Table
        properties:
          deletionProtection: false
          datasetId: ${bar.datasetId}
          tableId: job_query_table
      bar:
        type: gcp:bigquery:Dataset
        properties:
          datasetId: job_query_dataset
          friendlyName: test
          description: This is a test description
          location: US
      job:
        type: gcp:bigquery:Job
        properties:
          jobId: job_query
          labels:
            example-label: example-value
          query:
            query: SELECT state FROM [lookerdata:cdc.project_tycho_reports]
            destinationTable:
              tableId: ${foo.id}
            defaultDataset:
              datasetId: ${bar.id}
            allowLargeResults: true
            flattenResults: true
            scriptOptions:
              keyResultStatement: LAST
    

    Bigquery Job Load

    TypeScript

    import * as pulumi from "@pulumi/pulumi";
    import * as gcp from "@pulumi/gcp";
    
    const bar = new gcp.bigquery.Dataset("bar", {
        datasetId: "job_load_dataset",
        friendlyName: "test",
        description: "This is a test description",
        location: "US",
    });
    const foo = new gcp.bigquery.Table("foo", {
        deletionProtection: false,
        datasetId: bar.datasetId,
        tableId: "job_load_table",
    });
    const job = new gcp.bigquery.Job("job", {
        jobId: "job_load",
        labels: {
            my_job: "load",
        },
        load: {
            sourceUris: ["gs://cloud-samples-data/bigquery/us-states/us-states-by-date.csv"],
            destinationTable: {
                projectId: foo.project,
                datasetId: foo.datasetId,
                tableId: foo.tableId,
            },
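            // Skip the CSV header row, allow relaxed/added columns on append, and append to the existing table.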
            skipLeadingRows: 1,
            schemaUpdateOptions: [
                "ALLOW_FIELD_RELAXATION",
                "ALLOW_FIELD_ADDITION",
            ],
            writeDisposition: "WRITE_APPEND",
            autodetect: true,
        },
    });
    
    Python

    import pulumi
    import pulumi_gcp as gcp
    
    bar = gcp.bigquery.Dataset("bar",
        dataset_id="job_load_dataset",
        friendly_name="test",
        description="This is a test description",
        location="US")
    foo = gcp.bigquery.Table("foo",
        deletion_protection=False,
        dataset_id=bar.dataset_id,
        table_id="job_load_table")
    job = gcp.bigquery.Job("job",
        job_id="job_load",
        labels={
            "my_job": "load",
        },
        load=gcp.bigquery.JobLoadArgs(
            source_uris=["gs://cloud-samples-data/bigquery/us-states/us-states-by-date.csv"],
            destination_table=gcp.bigquery.JobLoadDestinationTableArgs(
                project_id=foo.project,
                dataset_id=foo.dataset_id,
                table_id=foo.table_id,
            ),
            skip_leading_rows=1,
            schema_update_options=[
                "ALLOW_FIELD_RELAXATION",
                "ALLOW_FIELD_ADDITION",
            ],
            write_disposition="WRITE_APPEND",
            autodetect=True,
        ))
    
    Go

    package main
    
    import (
    	"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/bigquery"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		bar, err := bigquery.NewDataset(ctx, "bar", &bigquery.DatasetArgs{
    			DatasetId:    pulumi.String("job_load_dataset"),
    			FriendlyName: pulumi.String("test"),
    			Description:  pulumi.String("This is a test description"),
    			Location:     pulumi.String("US"),
    		})
    		if err != nil {
    			return err
    		}
    		foo, err := bigquery.NewTable(ctx, "foo", &bigquery.TableArgs{
    			DeletionProtection: pulumi.Bool(false),
    			DatasetId:          bar.DatasetId,
    			TableId:            pulumi.String("job_load_table"),
    		})
    		if err != nil {
    			return err
    		}
    		_, err = bigquery.NewJob(ctx, "job", &bigquery.JobArgs{
    			JobId: pulumi.String("job_load"),
    			Labels: pulumi.StringMap{
    				"my_job": pulumi.String("load"),
    			},
    			Load: &bigquery.JobLoadArgs{
    				SourceUris: pulumi.StringArray{
    					pulumi.String("gs://cloud-samples-data/bigquery/us-states/us-states-by-date.csv"),
    				},
    				DestinationTable: &bigquery.JobLoadDestinationTableArgs{
    					ProjectId: foo.Project,
    					DatasetId: foo.DatasetId,
    					TableId:   foo.TableId,
    				},
    				SkipLeadingRows: pulumi.Int(1),
    				SchemaUpdateOptions: pulumi.StringArray{
    					pulumi.String("ALLOW_FIELD_RELAXATION"),
    					pulumi.String("ALLOW_FIELD_ADDITION"),
    				},
    				WriteDisposition: pulumi.String("WRITE_APPEND"),
    				Autodetect:       pulumi.Bool(true),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    C#

    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Gcp = Pulumi.Gcp;
    
    return await Deployment.RunAsync(() => 
    {
        var bar = new Gcp.BigQuery.Dataset("bar", new()
        {
            DatasetId = "job_load_dataset",
            FriendlyName = "test",
            Description = "This is a test description",
            Location = "US",
        });
    
        var foo = new Gcp.BigQuery.Table("foo", new()
        {
            DeletionProtection = false,
            DatasetId = bar.DatasetId,
            TableId = "job_load_table",
        });
    
        var job = new Gcp.BigQuery.Job("job", new()
        {
            JobId = "job_load",
            Labels = 
            {
                { "my_job", "load" },
            },
            Load = new Gcp.BigQuery.Inputs.JobLoadArgs
            {
                SourceUris = new[]
                {
                    "gs://cloud-samples-data/bigquery/us-states/us-states-by-date.csv",
                },
                DestinationTable = new Gcp.BigQuery.Inputs.JobLoadDestinationTableArgs
                {
                    ProjectId = foo.Project,
                    DatasetId = foo.DatasetId,
                    TableId = foo.TableId,
                },
                SkipLeadingRows = 1,
                SchemaUpdateOptions = new[]
                {
                    "ALLOW_FIELD_RELAXATION",
                    "ALLOW_FIELD_ADDITION",
                },
                WriteDisposition = "WRITE_APPEND",
                Autodetect = true,
            },
        });
    
    });
    
    Java

    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.gcp.bigquery.Dataset;
    import com.pulumi.gcp.bigquery.DatasetArgs;
    import com.pulumi.gcp.bigquery.Table;
    import com.pulumi.gcp.bigquery.TableArgs;
    import com.pulumi.gcp.bigquery.Job;
    import com.pulumi.gcp.bigquery.JobArgs;
    import com.pulumi.gcp.bigquery.inputs.JobLoadArgs;
    import com.pulumi.gcp.bigquery.inputs.JobLoadDestinationTableArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var bar = new Dataset("bar", DatasetArgs.builder()        
                .datasetId("job_load_dataset")
                .friendlyName("test")
                .description("This is a test description")
                .location("US")
                .build());
    
            var foo = new Table("foo", TableArgs.builder()        
                .deletionProtection(false)
                .datasetId(bar.datasetId())
                .tableId("job_load_table")
                .build());
    
            var job = new Job("job", JobArgs.builder()        
                .jobId("job_load")
                .labels(Map.of("my_job", "load"))
                .load(JobLoadArgs.builder()
                    .sourceUris("gs://cloud-samples-data/bigquery/us-states/us-states-by-date.csv")
                    .destinationTable(JobLoadDestinationTableArgs.builder()
                        .projectId(foo.project())
                        .datasetId(foo.datasetId())
                        .tableId(foo.tableId())
                        .build())
                    .skipLeadingRows(1)
                    .schemaUpdateOptions(                
                        "ALLOW_FIELD_RELAXATION",
                        "ALLOW_FIELD_ADDITION")
                    .writeDisposition("WRITE_APPEND")
                    .autodetect(true)
                    .build())
                .build());
    
        }
    }
    
    YAML

    resources:
      foo:
        type: gcp:bigquery:Table
        properties:
          deletionProtection: false
          datasetId: ${bar.datasetId}
          tableId: job_load_table
      bar:
        type: gcp:bigquery:Dataset
        properties:
          datasetId: job_load_dataset
          friendlyName: test
          description: This is a test description
          location: US
      job:
        type: gcp:bigquery:Job
        properties:
          jobId: job_load
          labels:
            my_job: load
          load:
            sourceUris:
              - gs://cloud-samples-data/bigquery/us-states/us-states-by-date.csv
            destinationTable:
              projectId: ${foo.project}
              datasetId: ${foo.datasetId}
              tableId: ${foo.tableId}
            skipLeadingRows: 1
            schemaUpdateOptions:
              - ALLOW_FIELD_RELAXATION
              - ALLOW_FIELD_ADDITION
            writeDisposition: WRITE_APPEND
            autodetect: true
    

    Bigquery Job Load Geojson

    TypeScript

    import * as pulumi from "@pulumi/pulumi";
    import * as gcp from "@pulumi/gcp";
    
    const project = "my-project-name";
    const bucket = new gcp.storage.Bucket("bucket", {
        name: `${project}-bq-geojson`,
        location: "US",
        uniformBucketLevelAccess: true,
    });
    const object = new gcp.storage.BucketObject("object", {
        name: "geojson-data.jsonl",
        bucket: bucket.name,
        content: `{"type":"Feature","properties":{"continent":"Europe","region":"Scandinavia"},"geometry":{"type":"Polygon","coordinates":[[[-30.94,53.33],[33.05,53.33],[33.05,71.86],[-30.94,71.86],[-30.94,53.33]]]}}
    {"type":"Feature","properties":{"continent":"Africa","region":"West Africa"},"geometry":{"type":"Polygon","coordinates":[[[-23.91,0],[11.95,0],[11.95,18.98],[-23.91,18.98],[-23.91,0]]]}}
    `,
    });
    const bar = new gcp.bigquery.Dataset("bar", {
        datasetId: "job_load_dataset",
        friendlyName: "test",
        description: "This is a test description",
        location: "US",
    });
    const foo = new gcp.bigquery.Table("foo", {
        deletionProtection: false,
        datasetId: bar.datasetId,
        tableId: "job_load_table",
    });
    const job = new gcp.bigquery.Job("job", {
        jobId: "job_load",
        labels: {
            my_job: "load",
        },
        load: {
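            // pulumi.interpolate assembles the gs:// source URI from the uploaded object's outputs.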
            sourceUris: [pulumi.interpolate`gs://${object.bucket}/${object.name}`],
            destinationTable: {
                projectId: foo.project,
                datasetId: foo.datasetId,
                tableId: foo.tableId,
            },
            writeDisposition: "WRITE_TRUNCATE",
            autodetect: true,
            sourceFormat: "NEWLINE_DELIMITED_JSON",
            jsonExtension: "GEOJSON",
        },
    });
    
    Python

    import pulumi
    import pulumi_gcp as gcp
    
    project = "my-project-name"
    bucket = gcp.storage.Bucket("bucket",
        name=f"{project}-bq-geojson",
        location="US",
        uniform_bucket_level_access=True)
    object = gcp.storage.BucketObject("object",
        name="geojson-data.jsonl",
        bucket=bucket.name,
        content="""{"type":"Feature","properties":{"continent":"Europe","region":"Scandinavia"},"geometry":{"type":"Polygon","coordinates":[[[-30.94,53.33],[33.05,53.33],[33.05,71.86],[-30.94,71.86],[-30.94,53.33]]]}}
    {"type":"Feature","properties":{"continent":"Africa","region":"West Africa"},"geometry":{"type":"Polygon","coordinates":[[[-23.91,0],[11.95,0],[11.95,18.98],[-23.91,18.98],[-23.91,0]]]}}
    """)
    bar = gcp.bigquery.Dataset("bar",
        dataset_id="job_load_dataset",
        friendly_name="test",
        description="This is a test description",
        location="US")
    foo = gcp.bigquery.Table("foo",
        deletion_protection=False,
        dataset_id=bar.dataset_id,
        table_id="job_load_table")
    job = gcp.bigquery.Job("job",
        job_id="job_load",
        labels={
            "my_job": "load",
        },
        load=gcp.bigquery.JobLoadArgs(
        source_uris=[pulumi.Output.all(object.bucket, object.name).apply(lambda args: f"gs://{args[0]}/{args[1]}")],
            destination_table=gcp.bigquery.JobLoadDestinationTableArgs(
                project_id=foo.project,
                dataset_id=foo.dataset_id,
                table_id=foo.table_id,
            ),
            write_disposition="WRITE_TRUNCATE",
            autodetect=True,
            source_format="NEWLINE_DELIMITED_JSON",
            json_extension="GEOJSON",
        ))
    
    Go

    package main
    
    import (
    	"fmt"
    
    	"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/bigquery"
    	"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/storage"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		project := "my-project-name"
    		bucket, err := storage.NewBucket(ctx, "bucket", &storage.BucketArgs{
    			Name:                     pulumi.String(fmt.Sprintf("%v-bq-geojson", project)),
    			Location:                 pulumi.String("US"),
    			UniformBucketLevelAccess: pulumi.Bool(true),
    		})
    		if err != nil {
    			return err
    		}
    		object, err := storage.NewBucketObject(ctx, "object", &storage.BucketObjectArgs{
    			Name:    pulumi.String("geojson-data.jsonl"),
    			Bucket:  bucket.Name,
    			Content: pulumi.String("{\"type\":\"Feature\",\"properties\":{\"continent\":\"Europe\",\"region\":\"Scandinavia\"},\"geometry\":{\"type\":\"Polygon\",\"coordinates\":[[[-30.94,53.33],[33.05,53.33],[33.05,71.86],[-30.94,71.86],[-30.94,53.33]]]}}\n{\"type\":\"Feature\",\"properties\":{\"continent\":\"Africa\",\"region\":\"West Africa\"},\"geometry\":{\"type\":\"Polygon\",\"coordinates\":[[[-23.91,0],[11.95,0],[11.95,18.98],[-23.91,18.98],[-23.91,0]]]}}\n"),
    		})
    		if err != nil {
    			return err
    		}
    		bar, err := bigquery.NewDataset(ctx, "bar", &bigquery.DatasetArgs{
    			DatasetId:    pulumi.String("job_load_dataset"),
    			FriendlyName: pulumi.String("test"),
    			Description:  pulumi.String("This is a test description"),
    			Location:     pulumi.String("US"),
    		})
    		if err != nil {
    			return err
    		}
    		foo, err := bigquery.NewTable(ctx, "foo", &bigquery.TableArgs{
    			DeletionProtection: pulumi.Bool(false),
    			DatasetId:          bar.DatasetId,
    			TableId:            pulumi.String("job_load_table"),
    		})
    		if err != nil {
    			return err
    		}
    		_, err = bigquery.NewJob(ctx, "job", &bigquery.JobArgs{
    			JobId: pulumi.String("job_load"),
    			Labels: pulumi.StringMap{
    				"my_job": pulumi.String("load"),
    			},
    			Load: &bigquery.JobLoadArgs{
    				SourceUris: pulumi.StringArray{
    					pulumi.All(object.Bucket, object.Name).ApplyT(func(_args []interface{}) (string, error) {
    						bucket := _args[0].(string)
    						name := _args[1].(string)
    						return fmt.Sprintf("gs://%v/%v", bucket, name), nil
    					}).(pulumi.StringOutput),
    				},
    				DestinationTable: &bigquery.JobLoadDestinationTableArgs{
    					ProjectId: foo.Project,
    					DatasetId: foo.DatasetId,
    					TableId:   foo.TableId,
    				},
    				WriteDisposition: pulumi.String("WRITE_TRUNCATE"),
    				Autodetect:       pulumi.Bool(true),
    				SourceFormat:     pulumi.String("NEWLINE_DELIMITED_JSON"),
    				JsonExtension:    pulumi.String("GEOJSON"),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    C#

    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Gcp = Pulumi.Gcp;
    
    return await Deployment.RunAsync(() => 
    {
        var project = "my-project-name";
    
        var bucket = new Gcp.Storage.Bucket("bucket", new()
        {
            Name = $"{project}-bq-geojson",
            Location = "US",
            UniformBucketLevelAccess = true,
        });
    
        var @object = new Gcp.Storage.BucketObject("object", new()
        {
            Name = "geojson-data.jsonl",
            Bucket = bucket.Name,
            Content = @"{""type"":""Feature"",""properties"":{""continent"":""Europe"",""region"":""Scandinavia""},""geometry"":{""type"":""Polygon"",""coordinates"":[[[-30.94,53.33],[33.05,53.33],[33.05,71.86],[-30.94,71.86],[-30.94,53.33]]]}}
    {""type"":""Feature"",""properties"":{""continent"":""Africa"",""region"":""West Africa""},""geometry"":{""type"":""Polygon"",""coordinates"":[[[-23.91,0],[11.95,0],[11.95,18.98],[-23.91,18.98],[-23.91,0]]]}}
    ",
        });
    
        var bar = new Gcp.BigQuery.Dataset("bar", new()
        {
            DatasetId = "job_load_dataset",
            FriendlyName = "test",
            Description = "This is a test description",
            Location = "US",
        });
    
        var foo = new Gcp.BigQuery.Table("foo", new()
        {
            DeletionProtection = false,
            DatasetId = bar.DatasetId,
            TableId = "job_load_table",
        });
    
        var job = new Gcp.BigQuery.Job("job", new()
        {
            JobId = "job_load",
            Labels = 
            {
                { "my_job", "load" },
            },
            Load = new Gcp.BigQuery.Inputs.JobLoadArgs
            {
                SourceUris = new[]
                {
                    Output.Tuple(@object.Bucket, @object.Name).Apply(values =>
                    {
                        var bucket = values.Item1;
                        var name = values.Item2;
                        return $"gs://{bucket}/{name}";
                    }),
                },
                DestinationTable = new Gcp.BigQuery.Inputs.JobLoadDestinationTableArgs
                {
                    ProjectId = foo.Project,
                    DatasetId = foo.DatasetId,
                    TableId = foo.TableId,
                },
                WriteDisposition = "WRITE_TRUNCATE",
                Autodetect = true,
                SourceFormat = "NEWLINE_DELIMITED_JSON",
                JsonExtension = "GEOJSON",
            },
        });
    
    });
    
    Java

    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.gcp.storage.Bucket;
    import com.pulumi.gcp.storage.BucketArgs;
    import com.pulumi.gcp.storage.BucketObject;
    import com.pulumi.gcp.storage.BucketObjectArgs;
    import com.pulumi.gcp.bigquery.Dataset;
    import com.pulumi.gcp.bigquery.DatasetArgs;
    import com.pulumi.gcp.bigquery.Table;
    import com.pulumi.gcp.bigquery.TableArgs;
    import com.pulumi.gcp.bigquery.Job;
    import com.pulumi.gcp.bigquery.JobArgs;
    import com.pulumi.gcp.bigquery.inputs.JobLoadArgs;
    import com.pulumi.gcp.bigquery.inputs.JobLoadDestinationTableArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            final var project = "my-project-name";
    
            var bucket = new Bucket("bucket", BucketArgs.builder()        
                .name(String.format("%s-bq-geojson", project))
                .location("US")
                .uniformBucketLevelAccess(true)
                .build());
    
            var object = new BucketObject("object", BucketObjectArgs.builder()        
                .name("geojson-data.jsonl")
                .bucket(bucket.name())
                .content("""
    {"type":"Feature","properties":{"continent":"Europe","region":"Scandinavia"},"geometry":{"type":"Polygon","coordinates":[[[-30.94,53.33],[33.05,53.33],[33.05,71.86],[-30.94,71.86],[-30.94,53.33]]]}}
    {"type":"Feature","properties":{"continent":"Africa","region":"West Africa"},"geometry":{"type":"Polygon","coordinates":[[[-23.91,0],[11.95,0],[11.95,18.98],[-23.91,18.98],[-23.91,0]]]}}
                """)
                .build());
    
            var bar = new Dataset("bar", DatasetArgs.builder()        
                .datasetId("job_load_dataset")
                .friendlyName("test")
                .description("This is a test description")
                .location("US")
                .build());
    
            var foo = new Table("foo", TableArgs.builder()        
                .deletionProtection(false)
                .datasetId(bar.datasetId())
                .tableId("job_load_table")
                .build());
    
            var job = new Job("job", JobArgs.builder()        
                .jobId("job_load")
                .labels(Map.of("my_job", "load"))
                .load(JobLoadArgs.builder()
                    .sourceUris(Output.tuple(object.bucket(), object.name()).applyValue(values -> {
                        var bucket = values.t1;
                        var name = values.t2;
                        return String.format("gs://%s/%s", bucket,name);
                    }))
                    .destinationTable(JobLoadDestinationTableArgs.builder()
                        .projectId(foo.project())
                        .datasetId(foo.datasetId())
                        .tableId(foo.tableId())
                        .build())
                    .writeDisposition("WRITE_TRUNCATE")
                    .autodetect(true)
                    .sourceFormat("NEWLINE_DELIMITED_JSON")
                    .jsonExtension("GEOJSON")
                    .build())
                .build());
    
        }
    }
    
    YAML

    resources:
      bucket:
        type: gcp:storage:Bucket
        properties:
          name: ${project}-bq-geojson
          location: US
          uniformBucketLevelAccess: true
      object:
        type: gcp:storage:BucketObject
        properties:
          name: geojson-data.jsonl
          bucket: ${bucket.name}
          content: |
            {"type":"Feature","properties":{"continent":"Europe","region":"Scandinavia"},"geometry":{"type":"Polygon","coordinates":[[[-30.94,53.33],[33.05,53.33],[33.05,71.86],[-30.94,71.86],[-30.94,53.33]]]}}
            {"type":"Feature","properties":{"continent":"Africa","region":"West Africa"},"geometry":{"type":"Polygon","coordinates":[[[-23.91,0],[11.95,0],[11.95,18.98],[-23.91,18.98],[-23.91,0]]]}}        
      foo:
        type: gcp:bigquery:Table
        properties:
          deletionProtection: false
          datasetId: ${bar.datasetId}
          tableId: job_load_table
      bar:
        type: gcp:bigquery:Dataset
        properties:
          datasetId: job_load_dataset
          friendlyName: test
          description: This is a test description
          location: US
      job:
        type: gcp:bigquery:Job
        properties:
          jobId: job_load
          labels:
            my_job: load
          load:
            sourceUris:
              - gs://${object.bucket}/${object.name}
            destinationTable:
              projectId: ${foo.project}
              datasetId: ${foo.datasetId}
              tableId: ${foo.tableId}
            writeDisposition: WRITE_TRUNCATE
            autodetect: true
            sourceFormat: NEWLINE_DELIMITED_JSON
            jsonExtension: GEOJSON
    variables:
      project: my-project-name
    

    Bigquery Job Load Parquet

    TypeScript

    import * as pulumi from "@pulumi/pulumi";
    import * as gcp from "@pulumi/gcp";
    
    const test = new gcp.storage.Bucket("test", {
        name: "job_load_bucket",
        location: "US",
        uniformBucketLevelAccess: true,
    });
    const testBucketObject = new gcp.storage.BucketObject("test", {
        name: "job_load_bucket_object",
        source: new pulumi.asset.FileAsset("./test-fixtures/test.parquet.gzip"),
        bucket: test.name,
    });
    const testDataset = new gcp.bigquery.Dataset("test", {
        datasetId: "job_load_dataset",
        friendlyName: "test",
        description: "This is a test description",
        location: "US",
    });
    const testTable = new gcp.bigquery.Table("test", {
        deletionProtection: false,
        tableId: "job_load_table",
        datasetId: testDataset.datasetId,
    });
    const job = new gcp.bigquery.Job("job", {
        jobId: "job_load",
        labels: {
            my_job: "load",
        },
        load: {
            sourceUris: [pulumi.interpolate`gs://${testBucketObject.bucket}/${testBucketObject.name}`],
            destinationTable: {
                projectId: testTable.project,
                datasetId: testTable.datasetId,
                tableId: testTable.tableId,
            },
            schemaUpdateOptions: [
                "ALLOW_FIELD_RELAXATION",
                "ALLOW_FIELD_ADDITION",
            ],
            writeDisposition: "WRITE_APPEND",
            sourceFormat: "PARQUET",
            autodetect: true,
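            // Parquet-specific options: read ENUM values as STRING and use schema inference for LIST logical types.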
            parquetOptions: {
                enumAsString: true,
                enableListInference: true,
            },
        },
    });
    
    Python

    import pulumi
    import pulumi_gcp as gcp
    
    test = gcp.storage.Bucket("test",
        name="job_load_bucket",
        location="US",
        uniform_bucket_level_access=True)
    test_bucket_object = gcp.storage.BucketObject("test",
        name="job_load_bucket_object",
        source=pulumi.FileAsset("./test-fixtures/test.parquet.gzip"),
        bucket=test.name)
    test_dataset = gcp.bigquery.Dataset("test",
        dataset_id="job_load_dataset",
        friendly_name="test",
        description="This is a test description",
        location="US")
    test_table = gcp.bigquery.Table("test",
        deletion_protection=False,
        table_id="job_load_table",
        dataset_id=test_dataset.dataset_id)
    job = gcp.bigquery.Job("job",
        job_id="job_load",
        labels={
            "my_job": "load",
        },
        load=gcp.bigquery.JobLoadArgs(
        source_uris=[pulumi.Output.all(test_bucket_object.bucket, test_bucket_object.name).apply(lambda args: f"gs://{args[0]}/{args[1]}")],
            destination_table=gcp.bigquery.JobLoadDestinationTableArgs(
                project_id=test_table.project,
                dataset_id=test_table.dataset_id,
                table_id=test_table.table_id,
            ),
            schema_update_options=[
                "ALLOW_FIELD_RELAXATION",
                "ALLOW_FIELD_ADDITION",
            ],
            write_disposition="WRITE_APPEND",
            source_format="PARQUET",
            autodetect=True,
            parquet_options=gcp.bigquery.JobLoadParquetOptionsArgs(
                enum_as_string=True,
                enable_list_inference=True,
            ),
        ))
    
    Go

    package main
    
    import (
    	"fmt"
    
    	"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/bigquery"
    	"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/storage"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		test, err := storage.NewBucket(ctx, "test", &storage.BucketArgs{
    			Name:                     pulumi.String("job_load_bucket"),
    			Location:                 pulumi.String("US"),
    			UniformBucketLevelAccess: pulumi.Bool(true),
    		})
    		if err != nil {
    			return err
    		}
    		testBucketObject, err := storage.NewBucketObject(ctx, "test", &storage.BucketObjectArgs{
    			Name:   pulumi.String("job_load_bucket_object"),
    			Source: pulumi.NewFileAsset("./test-fixtures/test.parquet.gzip"),
    			Bucket: test.Name,
    		})
    		if err != nil {
    			return err
    		}
    		testDataset, err := bigquery.NewDataset(ctx, "test", &bigquery.DatasetArgs{
    			DatasetId:    pulumi.String("job_load_dataset"),
    			FriendlyName: pulumi.String("test"),
    			Description:  pulumi.String("This is a test description"),
    			Location:     pulumi.String("US"),
    		})
    		if err != nil {
    			return err
    		}
    		testTable, err := bigquery.NewTable(ctx, "test", &bigquery.TableArgs{
    			DeletionProtection: pulumi.Bool(false),
    			TableId:            pulumi.String("job_load_table"),
    			DatasetId:          testDataset.DatasetId,
    		})
    		if err != nil {
    			return err
    		}
    		_, err = bigquery.NewJob(ctx, "job", &bigquery.JobArgs{
    			JobId: pulumi.String("job_load"),
    			Labels: pulumi.StringMap{
    				"my_job": pulumi.String("load"),
    			},
    			Load: &bigquery.JobLoadArgs{
    				SourceUris: pulumi.StringArray{
    					pulumi.All(testBucketObject.Bucket, testBucketObject.Name).ApplyT(func(_args []interface{}) (string, error) {
    						bucket := _args[0].(string)
    						name := _args[1].(string)
    						return fmt.Sprintf("gs://%v/%v", bucket, name), nil
    					}).(pulumi.StringOutput),
    				},
    				DestinationTable: &bigquery.JobLoadDestinationTableArgs{
    					ProjectId: testTable.Project,
    					DatasetId: testTable.DatasetId,
    					TableId:   testTable.TableId,
    				},
    				SchemaUpdateOptions: pulumi.StringArray{
    					pulumi.String("ALLOW_FIELD_RELAXATION"),
    					pulumi.String("ALLOW_FIELD_ADDITION"),
    				},
    				WriteDisposition: pulumi.String("WRITE_APPEND"),
    				SourceFormat:     pulumi.String("PARQUET"),
    				Autodetect:       pulumi.Bool(true),
    				ParquetOptions: &bigquery.JobLoadParquetOptionsArgs{
    					EnumAsString:        pulumi.Bool(true),
    					EnableListInference: pulumi.Bool(true),
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    C#

    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Gcp = Pulumi.Gcp;
    
    return await Deployment.RunAsync(() => 
    {
        var test = new Gcp.Storage.Bucket("test", new()
        {
            Name = "job_load_bucket",
            Location = "US",
            UniformBucketLevelAccess = true,
        });
    
        var testBucketObject = new Gcp.Storage.BucketObject("test", new()
        {
            Name = "job_load_bucket_object",
            Source = new FileAsset("./test-fixtures/test.parquet.gzip"),
            Bucket = test.Name,
        });
    
        var testDataset = new Gcp.BigQuery.Dataset("test", new()
        {
            DatasetId = "job_load_dataset",
            FriendlyName = "test",
            Description = "This is a test description",
            Location = "US",
        });
    
        var testTable = new Gcp.BigQuery.Table("test", new()
        {
            DeletionProtection = false,
            TableId = "job_load_table",
            DatasetId = testDataset.DatasetId,
        });
    
        var job = new Gcp.BigQuery.Job("job", new()
        {
            JobId = "job_load",
            Labels = 
            {
                { "my_job", "load" },
            },
            Load = new Gcp.BigQuery.Inputs.JobLoadArgs
            {
                SourceUris = new[]
                {
                    Output.Tuple(testBucketObject.Bucket, testBucketObject.Name).Apply(values =>
                    {
                        var bucket = values.Item1;
                        var name = values.Item2;
                        return $"gs://{bucket}/{name}";
                    }),
                },
                DestinationTable = new Gcp.BigQuery.Inputs.JobLoadDestinationTableArgs
                {
                    ProjectId = testTable.Project,
                    DatasetId = testTable.DatasetId,
                    TableId = testTable.TableId,
                },
                SchemaUpdateOptions = new[]
                {
                    "ALLOW_FIELD_RELAXATION",
                    "ALLOW_FIELD_ADDITION",
                },
                WriteDisposition = "WRITE_APPEND",
                SourceFormat = "PARQUET",
                Autodetect = true,
                ParquetOptions = new Gcp.BigQuery.Inputs.JobLoadParquetOptionsArgs
                {
                    EnumAsString = true,
                    EnableListInference = true,
                },
            },
        });
    
    });
    
    Java

    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.gcp.storage.Bucket;
    import com.pulumi.gcp.storage.BucketArgs;
    import com.pulumi.gcp.storage.BucketObject;
    import com.pulumi.gcp.storage.BucketObjectArgs;
    import com.pulumi.gcp.bigquery.Dataset;
    import com.pulumi.gcp.bigquery.DatasetArgs;
    import com.pulumi.gcp.bigquery.Table;
    import com.pulumi.gcp.bigquery.TableArgs;
    import com.pulumi.gcp.bigquery.Job;
    import com.pulumi.gcp.bigquery.JobArgs;
    import com.pulumi.gcp.bigquery.inputs.JobLoadArgs;
    import com.pulumi.gcp.bigquery.inputs.JobLoadDestinationTableArgs;
    import com.pulumi.gcp.bigquery.inputs.JobLoadParquetOptionsArgs;
    import com.pulumi.asset.FileAsset;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var test = new Bucket("test", BucketArgs.builder()        
                .name("job_load_bucket")
                .location("US")
                .uniformBucketLevelAccess(true)
                .build());
    
            var testBucketObject = new BucketObject("testBucketObject", BucketObjectArgs.builder()        
                .name("job_load_bucket_object")
                .source(new FileAsset("./test-fixtures/test.parquet.gzip"))
                .bucket(test.name())
                .build());
    
            var testDataset = new Dataset("testDataset", DatasetArgs.builder()        
                .datasetId("job_load_dataset")
                .friendlyName("test")
                .description("This is a test description")
                .location("US")
                .build());
    
            var testTable = new Table("testTable", TableArgs.builder()        
                .deletionProtection(false)
                .tableId("job_load_table")
                .datasetId(testDataset.datasetId())
                .build());
    
            var job = new Job("job", JobArgs.builder()        
                .jobId("job_load")
                .labels(Map.of("my_job", "load"))
                .load(JobLoadArgs.builder()
                    .sourceUris(Output.tuple(testBucketObject.bucket(), testBucketObject.name()).applyValue(values -> {
                        var bucket = values.t1;
                        var name = values.t2;
                        return String.format("gs://%s/%s", bucket,name);
                    }))
                    .destinationTable(JobLoadDestinationTableArgs.builder()
                        .projectId(testTable.project())
                        .datasetId(testTable.datasetId())
                        .tableId(testTable.tableId())
                        .build())
                    .schemaUpdateOptions(                
                        "ALLOW_FIELD_RELAXATION",
                        "ALLOW_FIELD_ADDITION")
                    .writeDisposition("WRITE_APPEND")
                    .sourceFormat("PARQUET")
                    .autodetect(true)
                    .parquetOptions(JobLoadParquetOptionsArgs.builder()
                        .enumAsString(true)
                        .enableListInference(true)
                        .build())
                    .build())
                .build());
    
        }
    }
    
    YAML

    resources:
      test:
        type: gcp:storage:Bucket
        properties:
          name: job_load_bucket
          location: US
          uniformBucketLevelAccess: true
      testBucketObject:
        type: gcp:storage:BucketObject
        name: test
        properties:
          name: job_load_bucket_object
          source:
            fn::FileAsset: ./test-fixtures/test.parquet.gzip
          bucket: ${test.name}
      testDataset:
        type: gcp:bigquery:Dataset
        name: test
        properties:
          datasetId: job_load_dataset
          friendlyName: test
          description: This is a test description
          location: US
      testTable:
        type: gcp:bigquery:Table
        name: test
        properties:
          deletionProtection: false
          tableId: job_load_table
          datasetId: ${testDataset.datasetId}
      job:
        type: gcp:bigquery:Job
        properties:
          jobId: job_load
          labels:
            my_job: load
          load:
            sourceUris:
              - gs://${testBucketObject.bucket}/${testBucketObject.name}
            destinationTable:
              projectId: ${testTable.project}
              datasetId: ${testTable.datasetId}
              tableId: ${testTable.tableId}
            schemaUpdateOptions:
              - ALLOW_FIELD_RELAXATION
              - ALLOW_FIELD_ADDITION
            writeDisposition: WRITE_APPEND
            sourceFormat: PARQUET
            autodetect: true
            parquetOptions:
              enumAsString: true
              enableListInference: true
    

    Bigquery Job Copy

    TypeScript

    import * as pulumi from "@pulumi/pulumi";
    import * as gcp from "@pulumi/gcp";
    
    const sourceDataset: gcp.bigquery.Dataset[] = [];
    for (const range = {value: 0}; range.value < 2; range.value++) {
        sourceDataset.push(new gcp.bigquery.Dataset(`source-${range.value}`, {
            datasetId: `job_copy_${range.value}_dataset`,
            friendlyName: "test",
            description: "This is a test description",
            location: "US",
        }));
    }
    const source: gcp.bigquery.Table[] = [];
    // Create one source table per source dataset.
    for (const range = {value: 0}; range.value < sourceDataset.length; range.value++) {
        source.push(new gcp.bigquery.Table(`source-${range.value}`, {
            deletionProtection: false,
            datasetId: sourceDataset[range.value].datasetId,
            tableId: `job_copy_${range.value}_table`,
            schema: `[
      {
        "name": "name",
        "type": "STRING",
        "mode": "NULLABLE"
      },
      {
        "name": "post_abbr",
        "type": "STRING",
        "mode": "NULLABLE"
      },
      {
        "name": "date",
        "type": "DATE",
        "mode": "NULLABLE"
      }
    ]
    `,
        }));
    }
    const destDataset = new gcp.bigquery.Dataset("dest", {
        datasetId: "job_copy_dest_dataset",
        friendlyName: "test",
        description: "This is a test description",
        location: "US",
    });
    const keyRing = new gcp.kms.KeyRing("key_ring", {
        name: "example-keyring",
        location: "global",
    });
    const cryptoKey = new gcp.kms.CryptoKey("crypto_key", {
        name: "example-key",
        keyRing: keyRing.id,
    });
    const dest = new gcp.bigquery.Table("dest", {
        deletionProtection: false,
        datasetId: destDataset.datasetId,
        tableId: "job_copy_dest_table",
        schema: `[
      {
        "name": "name",
        "type": "STRING",
        "mode": "NULLABLE"
      },
      {
        "name": "post_abbr",
        "type": "STRING",
        "mode": "NULLABLE"
      },
      {
        "name": "date",
        "type": "DATE",
        "mode": "NULLABLE"
      }
    ]
    `,
        encryptionConfiguration: {
            kmsKeyName: cryptoKey.id,
        },
    });
    const project = gcp.organizations.getProject({
        projectId: "my-project-name",
    });
    const encryptRole = new gcp.projects.IAMMember("encrypt_role", {
        project: project.then(project => project.projectId),
        role: "roles/cloudkms.cryptoKeyEncrypterDecrypter",
        member: project.then(project => `serviceAccount:bq-${project.number}@bigquery-encryption.iam.gserviceaccount.com`),
    });
    const job = new gcp.bigquery.Job("job", {
        jobId: "job_copy",
        copy: {
            sourceTables: [
                {
                    projectId: source[0].project,
                    datasetId: source[0].datasetId,
                    tableId: source[0].tableId,
                },
                {
                    projectId: source[1].project,
                    datasetId: source[1].datasetId,
                    tableId: source[1].tableId,
                },
            ],
            destinationTable: {
                projectId: dest.project,
                datasetId: dest.datasetId,
                tableId: dest.tableId,
            },
            destinationEncryptionConfiguration: {
                kmsKeyName: cryptoKey.id,
            },
        },
    });
    
    import pulumi
    import pulumi_gcp as gcp
    
    source_dataset = []
    for i in range(0, 2):
        source_dataset.append(gcp.bigquery.Dataset(f"source-{i}",
            dataset_id=f"job_copy_{i}_dataset",
            friendly_name="test",
            description="This is a test description",
            location="US"))
    source = []
    for i in range(0, len(source_dataset)):
        source.append(gcp.bigquery.Table(f"source-{i}",
            deletion_protection=False,
            dataset_id=source_dataset[i].dataset_id,
            table_id=f"job_copy_{i}_table",
            schema="""[
      {
        "name": "name",
        "type": "STRING",
        "mode": "NULLABLE"
      },
      {
        "name": "post_abbr",
        "type": "STRING",
        "mode": "NULLABLE"
      },
      {
        "name": "date",
        "type": "DATE",
        "mode": "NULLABLE"
      }
    ]
    """))
    dest_dataset = gcp.bigquery.Dataset("dest",
        dataset_id="job_copy_dest_dataset",
        friendly_name="test",
        description="This is a test description",
        location="US")
    key_ring = gcp.kms.KeyRing("key_ring",
        name="example-keyring",
        location="global")
    crypto_key = gcp.kms.CryptoKey("crypto_key",
        name="example-key",
        key_ring=key_ring.id)
    dest = gcp.bigquery.Table("dest",
        deletion_protection=False,
        dataset_id=dest_dataset.dataset_id,
        table_id="job_copy_dest_table",
        schema="""[
      {
        "name": "name",
        "type": "STRING",
        "mode": "NULLABLE"
      },
      {
        "name": "post_abbr",
        "type": "STRING",
        "mode": "NULLABLE"
      },
      {
        "name": "date",
        "type": "DATE",
        "mode": "NULLABLE"
      }
    ]
    """,
        encryption_configuration=gcp.bigquery.TableEncryptionConfigurationArgs(
            kms_key_name=crypto_key.id,
        ))
    project = gcp.organizations.get_project(project_id="my-project-name")
    encrypt_role = gcp.projects.IAMMember("encrypt_role",
        project=project.project_id,
        role="roles/cloudkms.cryptoKeyEncrypterDecrypter",
        member=f"serviceAccount:bq-{project.number}@bigquery-encryption.iam.gserviceaccount.com")
    job = gcp.bigquery.Job("job",
        job_id="job_copy",
        copy=gcp.bigquery.JobCopyArgs(
            source_tables=[
                gcp.bigquery.JobCopySourceTableArgs(
                    project_id=source[0].project,
                    dataset_id=source[0].dataset_id,
                    table_id=source[0].table_id,
                ),
                gcp.bigquery.JobCopySourceTableArgs(
                    project_id=source[1].project,
                    dataset_id=source[1].dataset_id,
                    table_id=source[1].table_id,
                ),
            ],
            destination_table=gcp.bigquery.JobCopyDestinationTableArgs(
                project_id=dest.project,
                dataset_id=dest.dataset_id,
                table_id=dest.table_id,
            ),
            destination_encryption_configuration=gcp.bigquery.JobCopyDestinationEncryptionConfigurationArgs(
                kms_key_name=crypto_key.id,
            ),
        ))
    
    package main
    
    import (
    	"fmt"
    
    	"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/bigquery"
    	"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/kms"
    	"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/organizations"
    	"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/projects"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		var sourceDataset []*bigquery.Dataset
    		for index := 0; index < 2; index++ {
    			key0 := index
    			val0 := index
    			__res, err := bigquery.NewDataset(ctx, fmt.Sprintf("source-%v", key0), &bigquery.DatasetArgs{
    				DatasetId:    pulumi.String(fmt.Sprintf("job_copy_%v_dataset", val0)),
    				FriendlyName: pulumi.String("test"),
    				Description:  pulumi.String("This is a test description"),
    				Location:     pulumi.String("US"),
    			})
    			if err != nil {
    				return err
    			}
    			sourceDataset = append(sourceDataset, __res)
    		}
    		var source []*bigquery.Table
    		for index := 0; index < len(sourceDataset); index++ {
    			key0 := index
    			val0 := index
    			__res, err := bigquery.NewTable(ctx, fmt.Sprintf("source-%v", key0), &bigquery.TableArgs{
    				DeletionProtection: pulumi.Bool(false),
    				DatasetId:          sourceDataset[val0].DatasetId,
    				TableId:            pulumi.String(fmt.Sprintf("job_copy_%v_table", val0)),
    				Schema: pulumi.String(`[
      {
        "name": "name",
        "type": "STRING",
        "mode": "NULLABLE"
      },
      {
        "name": "post_abbr",
        "type": "STRING",
        "mode": "NULLABLE"
      },
      {
        "name": "date",
        "type": "DATE",
        "mode": "NULLABLE"
      }
    ]
    `),
    			})
    			if err != nil {
    				return err
    			}
    			source = append(source, __res)
    		}
    		destDataset, err := bigquery.NewDataset(ctx, "dest", &bigquery.DatasetArgs{
    			DatasetId:    pulumi.String("job_copy_dest_dataset"),
    			FriendlyName: pulumi.String("test"),
    			Description:  pulumi.String("This is a test description"),
    			Location:     pulumi.String("US"),
    		})
    		if err != nil {
    			return err
    		}
    		keyRing, err := kms.NewKeyRing(ctx, "key_ring", &kms.KeyRingArgs{
    			Name:     pulumi.String("example-keyring"),
    			Location: pulumi.String("global"),
    		})
    		if err != nil {
    			return err
    		}
    		cryptoKey, err := kms.NewCryptoKey(ctx, "crypto_key", &kms.CryptoKeyArgs{
    			Name:    pulumi.String("example-key"),
    			KeyRing: keyRing.ID(),
    		})
    		if err != nil {
    			return err
    		}
    		dest, err := bigquery.NewTable(ctx, "dest", &bigquery.TableArgs{
    			DeletionProtection: pulumi.Bool(false),
    			DatasetId:          destDataset.DatasetId,
    			TableId:            pulumi.String("job_copy_dest_table"),
    			Schema: pulumi.String(`[
      {
        "name": "name",
        "type": "STRING",
        "mode": "NULLABLE"
      },
      {
        "name": "post_abbr",
        "type": "STRING",
        "mode": "NULLABLE"
      },
      {
        "name": "date",
        "type": "DATE",
        "mode": "NULLABLE"
      }
    ]
    `),
    			EncryptionConfiguration: &bigquery.TableEncryptionConfigurationArgs{
    				KmsKeyName: cryptoKey.ID(),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		project, err := organizations.LookupProject(ctx, &organizations.LookupProjectArgs{
    			ProjectId: pulumi.StringRef("my-project-name"),
    		}, nil)
    		if err != nil {
    			return err
    		}
    		_, err = projects.NewIAMMember(ctx, "encrypt_role", &projects.IAMMemberArgs{
    			Project: pulumi.String(project.ProjectId),
    			Role:    pulumi.String("roles/cloudkms.cryptoKeyEncrypterDecrypter"),
    			Member:  pulumi.String(fmt.Sprintf("serviceAccount:bq-%v@bigquery-encryption.iam.gserviceaccount.com", project.Number)),
    		})
    		if err != nil {
    			return err
    		}
    		_, err = bigquery.NewJob(ctx, "job", &bigquery.JobArgs{
    			JobId: pulumi.String("job_copy"),
    			Copy: &bigquery.JobCopyArgs{
    				SourceTables: bigquery.JobCopySourceTableArray{
    					&bigquery.JobCopySourceTableArgs{
    						ProjectId: source[0].Project,
    						DatasetId: source[0].DatasetId,
    						TableId:   source[0].TableId,
    					},
    					&bigquery.JobCopySourceTableArgs{
    						ProjectId: source[1].Project,
    						DatasetId: source[1].DatasetId,
    						TableId:   source[1].TableId,
    					},
    				},
    				DestinationTable: &bigquery.JobCopyDestinationTableArgs{
    					ProjectId: dest.Project,
    					DatasetId: dest.DatasetId,
    					TableId:   dest.TableId,
    				},
    				DestinationEncryptionConfiguration: &bigquery.JobCopyDestinationEncryptionConfigurationArgs{
    					KmsKeyName: cryptoKey.ID(),
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Gcp = Pulumi.Gcp;
    
    return await Deployment.RunAsync(() => 
    {
        var sourceDataset = new List<Gcp.BigQuery.Dataset>();
        for (var rangeIndex = 0; rangeIndex < 2; rangeIndex++)
        {
            var range = new { Value = rangeIndex };
            sourceDataset.Add(new Gcp.BigQuery.Dataset($"source-{range.Value}", new()
            {
                DatasetId = $"job_copy_{range.Value}_dataset",
                FriendlyName = "test",
                Description = "This is a test description",
                Location = "US",
            }));
        }
        var source = new List<Gcp.BigQuery.Table>();
        for (var rangeIndex = 0; rangeIndex < sourceDataset.Count; rangeIndex++)
        {
            var range = new { Value = rangeIndex };
            source.Add(new Gcp.BigQuery.Table($"source-{range.Value}", new()
            {
                DeletionProtection = false,
                DatasetId = sourceDataset[range.Value].DatasetId,
                TableId = $"job_copy_{range.Value}_table",
                Schema = @"[
      {
        ""name"": ""name"",
        ""type"": ""STRING"",
        ""mode"": ""NULLABLE""
      },
      {
        ""name"": ""post_abbr"",
        ""type"": ""STRING"",
        ""mode"": ""NULLABLE""
      },
      {
        ""name"": ""date"",
        ""type"": ""DATE"",
        ""mode"": ""NULLABLE""
      }
    ]
    ",
            }));
        }
        var destDataset = new Gcp.BigQuery.Dataset("dest", new()
        {
            DatasetId = "job_copy_dest_dataset",
            FriendlyName = "test",
            Description = "This is a test description",
            Location = "US",
        });
    
        var keyRing = new Gcp.Kms.KeyRing("key_ring", new()
        {
            Name = "example-keyring",
            Location = "global",
        });
    
        var cryptoKey = new Gcp.Kms.CryptoKey("crypto_key", new()
        {
            Name = "example-key",
            KeyRing = keyRing.Id,
        });
    
        var dest = new Gcp.BigQuery.Table("dest", new()
        {
            DeletionProtection = false,
            DatasetId = destDataset.DatasetId,
            TableId = "job_copy_dest_table",
            Schema = @"[
      {
        ""name"": ""name"",
        ""type"": ""STRING"",
        ""mode"": ""NULLABLE""
      },
      {
        ""name"": ""post_abbr"",
        ""type"": ""STRING"",
        ""mode"": ""NULLABLE""
      },
      {
        ""name"": ""date"",
        ""type"": ""DATE"",
        ""mode"": ""NULLABLE""
      }
    ]
    ",
            EncryptionConfiguration = new Gcp.BigQuery.Inputs.TableEncryptionConfigurationArgs
            {
                KmsKeyName = cryptoKey.Id,
            },
        });
    
        var project = Gcp.Organizations.GetProject.Invoke(new()
        {
            ProjectId = "my-project-name",
        });
    
        var encryptRole = new Gcp.Projects.IAMMember("encrypt_role", new()
        {
            Project = project.Apply(getProjectResult => getProjectResult.ProjectId),
            Role = "roles/cloudkms.cryptoKeyEncrypterDecrypter",
            Member = project.Apply(getProjectResult => $"serviceAccount:bq-{getProjectResult.Number}@bigquery-encryption.iam.gserviceaccount.com"),
        });
    
        var job = new Gcp.BigQuery.Job("job", new()
        {
            JobId = "job_copy",
            Copy = new Gcp.BigQuery.Inputs.JobCopyArgs
            {
                SourceTables = new[]
                {
                    new Gcp.BigQuery.Inputs.JobCopySourceTableArgs
                    {
                        ProjectId = source[0].Project,
                        DatasetId = source[0].DatasetId,
                        TableId = source[0].TableId,
                    },
                    new Gcp.BigQuery.Inputs.JobCopySourceTableArgs
                    {
                        ProjectId = source[1].Project,
                        DatasetId = source[1].DatasetId,
                        TableId = source[1].TableId,
                    },
                },
                DestinationTable = new Gcp.BigQuery.Inputs.JobCopyDestinationTableArgs
                {
                    ProjectId = dest.Project,
                    DatasetId = dest.DatasetId,
                    TableId = dest.TableId,
                },
                DestinationEncryptionConfiguration = new Gcp.BigQuery.Inputs.JobCopyDestinationEncryptionConfigurationArgs
                {
                    KmsKeyName = cryptoKey.Id,
                },
            },
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.gcp.bigquery.Dataset;
    import com.pulumi.gcp.bigquery.DatasetArgs;
    import com.pulumi.gcp.bigquery.Table;
    import com.pulumi.gcp.bigquery.TableArgs;
    import com.pulumi.gcp.kms.KeyRing;
    import com.pulumi.gcp.kms.KeyRingArgs;
    import com.pulumi.gcp.kms.CryptoKey;
    import com.pulumi.gcp.kms.CryptoKeyArgs;
    import com.pulumi.gcp.bigquery.inputs.TableEncryptionConfigurationArgs;
    import com.pulumi.gcp.organizations.OrganizationsFunctions;
    import com.pulumi.gcp.organizations.inputs.GetProjectArgs;
    import com.pulumi.gcp.projects.IAMMember;
    import com.pulumi.gcp.projects.IAMMemberArgs;
    import com.pulumi.gcp.bigquery.Job;
    import com.pulumi.gcp.bigquery.JobArgs;
    import com.pulumi.gcp.bigquery.inputs.JobCopyArgs;
    import com.pulumi.gcp.bigquery.inputs.JobCopySourceTableArgs;
    import com.pulumi.gcp.bigquery.inputs.JobCopyDestinationTableArgs;
    import com.pulumi.gcp.bigquery.inputs.JobCopyDestinationEncryptionConfigurationArgs;
    import com.pulumi.codegen.internal.KeyedValue;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var sourceDataset = new ArrayList<Dataset>();
            for (var i = 0; i < 2; i++) {
                sourceDataset.add(new Dataset("sourceDataset-" + i, DatasetArgs.builder()
                    .datasetId(String.format("job_copy_%s_dataset", i))
                    .friendlyName("test")
                    .description("This is a test description")
                    .location("US")
                    .build()));
            }
    
            var source = new ArrayList<Table>();
            for (var i = 0; i < sourceDataset.size(); i++) {
                source.add(new Table("source-" + i, TableArgs.builder()
                    .deletionProtection(false)
                    .datasetId(sourceDataset.get(i).datasetId())
                    .tableId(String.format("job_copy_%s_table", i))
                    .schema("""
    [
      {
        "name": "name",
        "type": "STRING",
        "mode": "NULLABLE"
      },
      {
        "name": "post_abbr",
        "type": "STRING",
        "mode": "NULLABLE"
      },
      {
        "name": "date",
        "type": "DATE",
        "mode": "NULLABLE"
      }
    ]
                    """)
                    .build()));
            }
            var destDataset = new Dataset("destDataset", DatasetArgs.builder()        
                .datasetId("job_copy_dest_dataset")
                .friendlyName("test")
                .description("This is a test description")
                .location("US")
                .build());
    
            var keyRing = new KeyRing("keyRing", KeyRingArgs.builder()        
                .name("example-keyring")
                .location("global")
                .build());
    
            var cryptoKey = new CryptoKey("cryptoKey", CryptoKeyArgs.builder()        
                .name("example-key")
                .keyRing(keyRing.id())
                .build());
    
            var dest = new Table("dest", TableArgs.builder()        
                .deletionProtection(false)
                .datasetId(destDataset.datasetId())
                .tableId("job_copy_dest_table")
                .schema("""
    [
      {
        "name": "name",
        "type": "STRING",
        "mode": "NULLABLE"
      },
      {
        "name": "post_abbr",
        "type": "STRING",
        "mode": "NULLABLE"
      },
      {
        "name": "date",
        "type": "DATE",
        "mode": "NULLABLE"
      }
    ]
                """)
                .encryptionConfiguration(TableEncryptionConfigurationArgs.builder()
                    .kmsKeyName(cryptoKey.id())
                    .build())
                .build());
    
            final var project = OrganizationsFunctions.getProject(GetProjectArgs.builder()
                .projectId("my-project-name")
                .build());
    
            var encryptRole = new IAMMember("encryptRole", IAMMemberArgs.builder()        
                .project(project.applyValue(getProjectResult -> getProjectResult.projectId()))
                .role("roles/cloudkms.cryptoKeyEncrypterDecrypter")
                .member(project.applyValue(getProjectResult -> String.format("serviceAccount:bq-%s@bigquery-encryption.iam.gserviceaccount.com", getProjectResult.number())))
                .build());
    
            var job = new Job("job", JobArgs.builder()        
                .jobId("job_copy")
                .copy(JobCopyArgs.builder()
                    .sourceTables(
                        JobCopySourceTableArgs.builder()
                            .projectId(source.get(0).project())
                            .datasetId(source.get(0).datasetId())
                            .tableId(source.get(0).tableId())
                            .build(),
                        JobCopySourceTableArgs.builder()
                            .projectId(source.get(1).project())
                            .datasetId(source.get(1).datasetId())
                            .tableId(source.get(1).tableId())
                            .build())
                    .destinationTable(JobCopyDestinationTableArgs.builder()
                        .projectId(dest.project())
                        .datasetId(dest.datasetId())
                        .tableId(dest.tableId())
                        .build())
                    .destinationEncryptionConfiguration(JobCopyDestinationEncryptionConfigurationArgs.builder()
                        .kmsKeyName(cryptoKey.id())
                        .build())
                    .build())
                .build());
    
        }
    }
    
    Coming soon!
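
    The copy job above writes into a table protected with a customer-managed KMS key, so the roles/cloudkms.cryptoKeyEncrypterDecrypter binding created by encrypt_role must exist before the job runs. Because the job's inputs reference the crypto key but not the IAM member resource, Pulumi cannot infer that ordering on its own. A minimal TypeScript sketch of one way to make the ordering explicit, reusing source, dest, cryptoKey, and encryptRole from the TypeScript example above (this sequencing note is an assumption for illustration, not an additional requirement stated by the provider docs):

    import * as gcp from "@pulumi/gcp";
    
    // Same copy job as above, with an explicit dependency on the IAM binding.
    // `source`, `dest`, `cryptoKey`, and `encryptRole` are the resources declared
    // in the preceding TypeScript example.
    const job = new gcp.bigquery.Job("job", {
        jobId: "job_copy",
        copy: {
            sourceTables: [
                {
                    projectId: source[0].project,
                    datasetId: source[0].datasetId,
                    tableId: source[0].tableId,
                },
                {
                    projectId: source[1].project,
                    datasetId: source[1].datasetId,
                    tableId: source[1].tableId,
                },
            ],
            destinationTable: {
                projectId: dest.project,
                datasetId: dest.datasetId,
                tableId: dest.tableId,
            },
            destinationEncryptionConfiguration: {
                kmsKeyName: cryptoKey.id,
            },
        },
    }, {
        dependsOn: [encryptRole],
    });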
    

    Bigquery Job Extract

    import * as pulumi from "@pulumi/pulumi";
    import * as gcp from "@pulumi/gcp";
    
    const source_oneDataset = new gcp.bigquery.Dataset("source-one", {
        datasetId: "job_extract_dataset",
        friendlyName: "test",
        description: "This is a test description",
        location: "US",
    });
    const source_one = new gcp.bigquery.Table("source-one", {
        deletionProtection: false,
        datasetId: source_oneDataset.datasetId,
        tableId: "job_extract_table",
        schema: `[
      {
        "name": "name",
        "type": "STRING",
        "mode": "NULLABLE"
      },
      {
        "name": "post_abbr",
        "type": "STRING",
        "mode": "NULLABLE"
      },
      {
        "name": "date",
        "type": "DATE",
        "mode": "NULLABLE"
      }
    ]
    `,
    });
    const dest = new gcp.storage.Bucket("dest", {
        name: "job_extract_bucket",
        location: "US",
        forceDestroy: true,
    });
    const job = new gcp.bigquery.Job("job", {
        jobId: "job_extract",
        extract: {
            destinationUris: [pulumi.interpolate`${dest.url}/extract`],
            sourceTable: {
                projectId: source_one.project,
                datasetId: source_one.datasetId,
                tableId: source_one.tableId,
            },
            destinationFormat: "NEWLINE_DELIMITED_JSON",
            compression: "GZIP",
        },
    });
    
    import pulumi
    import pulumi_gcp as gcp
    
    source_one_dataset = gcp.bigquery.Dataset("source-one",
        dataset_id="job_extract_dataset",
        friendly_name="test",
        description="This is a test description",
        location="US")
    source_one = gcp.bigquery.Table("source-one",
        deletion_protection=False,
        dataset_id=source_one_dataset.dataset_id,
        table_id="job_extract_table",
        schema="""[
      {
        "name": "name",
        "type": "STRING",
        "mode": "NULLABLE"
      },
      {
        "name": "post_abbr",
        "type": "STRING",
        "mode": "NULLABLE"
      },
      {
        "name": "date",
        "type": "DATE",
        "mode": "NULLABLE"
      }
    ]
    """)
    dest = gcp.storage.Bucket("dest",
        name="job_extract_bucket",
        location="US",
        force_destroy=True)
    job = gcp.bigquery.Job("job",
        job_id="job_extract",
        extract=gcp.bigquery.JobExtractArgs(
            destination_uris=[dest.url.apply(lambda url: f"{url}/extract")],
            source_table=gcp.bigquery.JobExtractSourceTableArgs(
                project_id=source_one.project,
                dataset_id=source_one.dataset_id,
                table_id=source_one.table_id,
            ),
            destination_format="NEWLINE_DELIMITED_JSON",
            compression="GZIP",
        ))
    
    package main
    
    import (
    	"fmt"
    
    	"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/bigquery"
    	"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/storage"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		source_oneDataset, err := bigquery.NewDataset(ctx, "source-one", &bigquery.DatasetArgs{
    			DatasetId:    pulumi.String("job_extract_dataset"),
    			FriendlyName: pulumi.String("test"),
    			Description:  pulumi.String("This is a test description"),
    			Location:     pulumi.String("US"),
    		})
    		if err != nil {
    			return err
    		}
    		source_one, err := bigquery.NewTable(ctx, "source-one", &bigquery.TableArgs{
    			DeletionProtection: pulumi.Bool(false),
    			DatasetId:          source_oneDataset.DatasetId,
    			TableId:            pulumi.String("job_extract_table"),
    			Schema: pulumi.String(`[
      {
        "name": "name",
        "type": "STRING",
        "mode": "NULLABLE"
      },
      {
        "name": "post_abbr",
        "type": "STRING",
        "mode": "NULLABLE"
      },
      {
        "name": "date",
        "type": "DATE",
        "mode": "NULLABLE"
      }
    ]
    `),
    		})
    		if err != nil {
    			return err
    		}
    		dest, err := storage.NewBucket(ctx, "dest", &storage.BucketArgs{
    			Name:         pulumi.String("job_extract_bucket"),
    			Location:     pulumi.String("US"),
    			ForceDestroy: pulumi.Bool(true),
    		})
    		if err != nil {
    			return err
    		}
    		_, err = bigquery.NewJob(ctx, "job", &bigquery.JobArgs{
    			JobId: pulumi.String("job_extract"),
    			Extract: &bigquery.JobExtractArgs{
    				DestinationUris: pulumi.StringArray{
    					dest.Url.ApplyT(func(url string) (string, error) {
    						return fmt.Sprintf("%v/extract", url), nil
    					}).(pulumi.StringOutput),
    				},
    				SourceTable: &bigquery.JobExtractSourceTableArgs{
    					ProjectId: source_one.Project,
    					DatasetId: source_one.DatasetId,
    					TableId:   source_one.TableId,
    				},
    				DestinationFormat: pulumi.String("NEWLINE_DELIMITED_JSON"),
    				Compression:       pulumi.String("GZIP"),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Gcp = Pulumi.Gcp;
    
    return await Deployment.RunAsync(() => 
    {
        var source_oneDataset = new Gcp.BigQuery.Dataset("source-one", new()
        {
            DatasetId = "job_extract_dataset",
            FriendlyName = "test",
            Description = "This is a test description",
            Location = "US",
        });
    
        var source_one = new Gcp.BigQuery.Table("source-one", new()
        {
            DeletionProtection = false,
            DatasetId = source_oneDataset.DatasetId,
            TableId = "job_extract_table",
            Schema = @"[
      {
        ""name"": ""name"",
        ""type"": ""STRING"",
        ""mode"": ""NULLABLE""
      },
      {
        ""name"": ""post_abbr"",
        ""type"": ""STRING"",
        ""mode"": ""NULLABLE""
      },
      {
        ""name"": ""date"",
        ""type"": ""DATE"",
        ""mode"": ""NULLABLE""
      }
    ]
    ",
        });
    
        var dest = new Gcp.Storage.Bucket("dest", new()
        {
            Name = "job_extract_bucket",
            Location = "US",
            ForceDestroy = true,
        });
    
        var job = new Gcp.BigQuery.Job("job", new()
        {
            JobId = "job_extract",
            Extract = new Gcp.BigQuery.Inputs.JobExtractArgs
            {
                DestinationUris = new[]
                {
                    dest.Url.Apply(url => $"{url}/extract"),
                },
                SourceTable = new Gcp.BigQuery.Inputs.JobExtractSourceTableArgs
                {
                    ProjectId = source_one.Project,
                    DatasetId = source_one.DatasetId,
                    TableId = source_one.TableId,
                },
                DestinationFormat = "NEWLINE_DELIMITED_JSON",
                Compression = "GZIP",
            },
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.gcp.bigquery.Dataset;
    import com.pulumi.gcp.bigquery.DatasetArgs;
    import com.pulumi.gcp.bigquery.Table;
    import com.pulumi.gcp.bigquery.TableArgs;
    import com.pulumi.gcp.storage.Bucket;
    import com.pulumi.gcp.storage.BucketArgs;
    import com.pulumi.gcp.bigquery.Job;
    import com.pulumi.gcp.bigquery.JobArgs;
    import com.pulumi.gcp.bigquery.inputs.JobExtractArgs;
    import com.pulumi.gcp.bigquery.inputs.JobExtractSourceTableArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var source_oneDataset = new Dataset("source-oneDataset", DatasetArgs.builder()        
                .datasetId("job_extract_dataset")
                .friendlyName("test")
                .description("This is a test description")
                .location("US")
                .build());
    
            var source_one = new Table("source-one", TableArgs.builder()        
                .deletionProtection(false)
                .datasetId(source_oneDataset.datasetId())
                .tableId("job_extract_table")
                .schema("""
    [
      {
        "name": "name",
        "type": "STRING",
        "mode": "NULLABLE"
      },
      {
        "name": "post_abbr",
        "type": "STRING",
        "mode": "NULLABLE"
      },
      {
        "name": "date",
        "type": "DATE",
        "mode": "NULLABLE"
      }
    ]
                """)
                .build());
    
            var dest = new Bucket("dest", BucketArgs.builder()        
                .name("job_extract_bucket")
                .location("US")
                .forceDestroy(true)
                .build());
    
            var job = new Job("job", JobArgs.builder()        
                .jobId("job_extract")
                .extract(JobExtractArgs.builder()
                    .destinationUris(dest.url().applyValue(url -> String.format("%s/extract", url)))
                    .sourceTable(JobExtractSourceTableArgs.builder()
                        .projectId(source_one.project())
                        .datasetId(source_one.datasetId())
                        .tableId(source_one.tableId())
                        .build())
                    .destinationFormat("NEWLINE_DELIMITED_JSON")
                    .compression("GZIP")
                    .build())
                .build());
    
        }
    }
    
    resources:
      source-one:
        type: gcp:bigquery:Table
        properties:
          deletionProtection: false
          datasetId: ${["source-oneDataset"].datasetId}
          tableId: job_extract_table
          schema: |
            [
              {
                "name": "name",
                "type": "STRING",
                "mode": "NULLABLE"
              },
              {
                "name": "post_abbr",
                "type": "STRING",
                "mode": "NULLABLE"
              },
              {
                "name": "date",
                "type": "DATE",
                "mode": "NULLABLE"
              }
            ]        
      source-oneDataset:
        type: gcp:bigquery:Dataset
        name: source-one
        properties:
          datasetId: job_extract_dataset
          friendlyName: test
          description: This is a test description
          location: US
      dest:
        type: gcp:storage:Bucket
        properties:
          name: job_extract_bucket
          location: US
          forceDestroy: true
      job:
        type: gcp:bigquery:Job
        properties:
          jobId: job_extract
          extract:
            destinationUris:
              - ${dest.url}/extract
            sourceTable:
              projectId: ${["source-one"].project}
              datasetId: ${["source-one"].datasetId}
              tableId: ${["source-one"].tableId}
            destinationFormat: NEWLINE_DELIMITED_JSON
            compression: GZIP
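
    The extract block above produces compressed newline-delimited JSON; CSV output is also possible, in which case fieldDelimiter and printHeader apply. A TypeScript sketch of such a variant, reusing source_one and dest from the example above (the job_extract_csv ID and the extract-csv path are assumed placeholders):

    import * as pulumi from "@pulumi/pulumi";
    import * as gcp from "@pulumi/gcp";
    
    // CSV variant of the extract job; `source_one` and `dest` come from the
    // preceding TypeScript example.
    const csvExtractJob = new gcp.bigquery.Job("csvExtractJob", {
        jobId: "job_extract_csv",
        extract: {
            destinationUris: [pulumi.interpolate`${dest.url}/extract-csv`],
            sourceTable: {
                projectId: source_one.project,
                datasetId: source_one.datasetId,
                tableId: source_one.tableId,
            },
            destinationFormat: "CSV",
            fieldDelimiter: ",",
            printHeader: true,
            compression: "GZIP",
        },
    });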
    

    Create Job Resource

    Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.

    Constructor syntax

    new Job(name: string, args: JobArgs, opts?: CustomResourceOptions);
    @overload
    def Job(resource_name: str,
            args: JobArgs,
            opts: Optional[ResourceOptions] = None)
    
    @overload
    def Job(resource_name: str,
            opts: Optional[ResourceOptions] = None,
            job_id: Optional[str] = None,
            copy: Optional[JobCopyArgs] = None,
            extract: Optional[JobExtractArgs] = None,
            job_timeout_ms: Optional[str] = None,
            labels: Optional[Mapping[str, str]] = None,
            load: Optional[JobLoadArgs] = None,
            location: Optional[str] = None,
            project: Optional[str] = None,
            query: Optional[JobQueryArgs] = None)
    func NewJob(ctx *Context, name string, args JobArgs, opts ...ResourceOption) (*Job, error)
    public Job(string name, JobArgs args, CustomResourceOptions? opts = null)
    public Job(String name, JobArgs args)
    public Job(String name, JobArgs args, CustomResourceOptions options)
    
    type: gcp:bigquery:Job
    properties: # The arguments to resource properties.
    options: # Bag of options to control resource's behavior.
    
    

    Parameters

    name string
    The unique name of the resource.
    args JobArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    resource_name str
    The unique name of the resource.
    args JobArgs
    The arguments to resource properties.
    opts ResourceOptions
    Bag of options to control resource's behavior.
    ctx Context
    Context object for the current deployment.
    name string
    The unique name of the resource.
    args JobArgs
    The arguments to resource properties.
    opts ResourceOption
    Bag of options to control resource's behavior.
    name string
    The unique name of the resource.
    args JobArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    name String
    The unique name of the resource.
    args JobArgs
    The arguments to resource properties.
    options CustomResourceOptions
    Bag of options to control resource's behavior.

    Example

    The following reference example uses placeholder values for all input properties.

    var jobResource = new Gcp.BigQuery.Job("jobResource", new()
    {
        JobId = "string",
        Copy = new Gcp.BigQuery.Inputs.JobCopyArgs
        {
            SourceTables = new[]
            {
                new Gcp.BigQuery.Inputs.JobCopySourceTableArgs
                {
                    TableId = "string",
                    DatasetId = "string",
                    ProjectId = "string",
                },
            },
            CreateDisposition = "string",
            DestinationEncryptionConfiguration = new Gcp.BigQuery.Inputs.JobCopyDestinationEncryptionConfigurationArgs
            {
                KmsKeyName = "string",
                KmsKeyVersion = "string",
            },
            DestinationTable = new Gcp.BigQuery.Inputs.JobCopyDestinationTableArgs
            {
                TableId = "string",
                DatasetId = "string",
                ProjectId = "string",
            },
            WriteDisposition = "string",
        },
        Extract = new Gcp.BigQuery.Inputs.JobExtractArgs
        {
            DestinationUris = new[]
            {
                "string",
            },
            Compression = "string",
            DestinationFormat = "string",
            FieldDelimiter = "string",
            PrintHeader = false,
            SourceModel = new Gcp.BigQuery.Inputs.JobExtractSourceModelArgs
            {
                DatasetId = "string",
                ModelId = "string",
                ProjectId = "string",
            },
            SourceTable = new Gcp.BigQuery.Inputs.JobExtractSourceTableArgs
            {
                TableId = "string",
                DatasetId = "string",
                ProjectId = "string",
            },
            UseAvroLogicalTypes = false,
        },
        JobTimeoutMs = "string",
        Labels = 
        {
            { "string", "string" },
        },
        Load = new Gcp.BigQuery.Inputs.JobLoadArgs
        {
            DestinationTable = new Gcp.BigQuery.Inputs.JobLoadDestinationTableArgs
            {
                TableId = "string",
                DatasetId = "string",
                ProjectId = "string",
            },
            SourceUris = new[]
            {
                "string",
            },
            MaxBadRecords = 0,
            NullMarker = "string",
            DestinationEncryptionConfiguration = new Gcp.BigQuery.Inputs.JobLoadDestinationEncryptionConfigurationArgs
            {
                KmsKeyName = "string",
                KmsKeyVersion = "string",
            },
            Autodetect = false,
            Encoding = "string",
            FieldDelimiter = "string",
            IgnoreUnknownValues = false,
            JsonExtension = "string",
            AllowJaggedRows = false,
            CreateDisposition = "string",
            ParquetOptions = new Gcp.BigQuery.Inputs.JobLoadParquetOptionsArgs
            {
                EnableListInference = false,
                EnumAsString = false,
            },
            ProjectionFields = new[]
            {
                "string",
            },
            Quote = "string",
            SchemaUpdateOptions = new[]
            {
                "string",
            },
            SkipLeadingRows = 0,
            SourceFormat = "string",
            AllowQuotedNewlines = false,
            TimePartitioning = new Gcp.BigQuery.Inputs.JobLoadTimePartitioningArgs
            {
                Type = "string",
                ExpirationMs = "string",
                Field = "string",
            },
            WriteDisposition = "string",
        },
        Location = "string",
        Project = "string",
        Query = new Gcp.BigQuery.Inputs.JobQueryArgs
        {
            Query = "string",
            ParameterMode = "string",
            MaximumBytesBilled = "string",
            DestinationEncryptionConfiguration = new Gcp.BigQuery.Inputs.JobQueryDestinationEncryptionConfigurationArgs
            {
                KmsKeyName = "string",
                KmsKeyVersion = "string",
            },
            DestinationTable = new Gcp.BigQuery.Inputs.JobQueryDestinationTableArgs
            {
                TableId = "string",
                DatasetId = "string",
                ProjectId = "string",
            },
            Priority = "string",
            MaximumBillingTier = 0,
            DefaultDataset = new Gcp.BigQuery.Inputs.JobQueryDefaultDatasetArgs
            {
                DatasetId = "string",
                ProjectId = "string",
            },
            AllowLargeResults = false,
            FlattenResults = false,
            CreateDisposition = "string",
            SchemaUpdateOptions = new[]
            {
                "string",
            },
            ScriptOptions = new Gcp.BigQuery.Inputs.JobQueryScriptOptionsArgs
            {
                KeyResultStatement = "string",
                StatementByteBudget = "string",
                StatementTimeoutMs = "string",
            },
            UseLegacySql = false,
            UseQueryCache = false,
            UserDefinedFunctionResources = new[]
            {
                new Gcp.BigQuery.Inputs.JobQueryUserDefinedFunctionResourceArgs
                {
                    InlineCode = "string",
                    ResourceUri = "string",
                },
            },
            WriteDisposition = "string",
        },
    });
    
    example, err := bigquery.NewJob(ctx, "jobResource", &bigquery.JobArgs{
    	JobId: pulumi.String("string"),
    	Copy: &bigquery.JobCopyArgs{
    		SourceTables: bigquery.JobCopySourceTableArray{
    			&bigquery.JobCopySourceTableArgs{
    				TableId:   pulumi.String("string"),
    				DatasetId: pulumi.String("string"),
    				ProjectId: pulumi.String("string"),
    			},
    		},
    		CreateDisposition: pulumi.String("string"),
    		DestinationEncryptionConfiguration: &bigquery.JobCopyDestinationEncryptionConfigurationArgs{
    			KmsKeyName:    pulumi.String("string"),
    			KmsKeyVersion: pulumi.String("string"),
    		},
    		DestinationTable: &bigquery.JobCopyDestinationTableArgs{
    			TableId:   pulumi.String("string"),
    			DatasetId: pulumi.String("string"),
    			ProjectId: pulumi.String("string"),
    		},
    		WriteDisposition: pulumi.String("string"),
    	},
    	Extract: &bigquery.JobExtractArgs{
    		DestinationUris: pulumi.StringArray{
    			pulumi.String("string"),
    		},
    		Compression:       pulumi.String("string"),
    		DestinationFormat: pulumi.String("string"),
    		FieldDelimiter:    pulumi.String("string"),
    		PrintHeader:       pulumi.Bool(false),
    		SourceModel: &bigquery.JobExtractSourceModelArgs{
    			DatasetId: pulumi.String("string"),
    			ModelId:   pulumi.String("string"),
    			ProjectId: pulumi.String("string"),
    		},
    		SourceTable: &bigquery.JobExtractSourceTableArgs{
    			TableId:   pulumi.String("string"),
    			DatasetId: pulumi.String("string"),
    			ProjectId: pulumi.String("string"),
    		},
    		UseAvroLogicalTypes: pulumi.Bool(false),
    	},
    	JobTimeoutMs: pulumi.String("string"),
    	Labels: pulumi.StringMap{
    		"string": pulumi.String("string"),
    	},
    	Load: &bigquery.JobLoadArgs{
    		DestinationTable: &bigquery.JobLoadDestinationTableArgs{
    			TableId:   pulumi.String("string"),
    			DatasetId: pulumi.String("string"),
    			ProjectId: pulumi.String("string"),
    		},
    		SourceUris: pulumi.StringArray{
    			pulumi.String("string"),
    		},
    		MaxBadRecords: pulumi.Int(0),
    		NullMarker:    pulumi.String("string"),
    		DestinationEncryptionConfiguration: &bigquery.JobLoadDestinationEncryptionConfigurationArgs{
    			KmsKeyName:    pulumi.String("string"),
    			KmsKeyVersion: pulumi.String("string"),
    		},
    		Autodetect:          pulumi.Bool(false),
    		Encoding:            pulumi.String("string"),
    		FieldDelimiter:      pulumi.String("string"),
    		IgnoreUnknownValues: pulumi.Bool(false),
    		JsonExtension:       pulumi.String("string"),
    		AllowJaggedRows:     pulumi.Bool(false),
    		CreateDisposition:   pulumi.String("string"),
    		ParquetOptions: &bigquery.JobLoadParquetOptionsArgs{
    			EnableListInference: pulumi.Bool(false),
    			EnumAsString:        pulumi.Bool(false),
    		},
    		ProjectionFields: pulumi.StringArray{
    			pulumi.String("string"),
    		},
    		Quote: pulumi.String("string"),
    		SchemaUpdateOptions: pulumi.StringArray{
    			pulumi.String("string"),
    		},
    		SkipLeadingRows:     pulumi.Int(0),
    		SourceFormat:        pulumi.String("string"),
    		AllowQuotedNewlines: pulumi.Bool(false),
    		TimePartitioning: &bigquery.JobLoadTimePartitioningArgs{
    			Type:         pulumi.String("string"),
    			ExpirationMs: pulumi.String("string"),
    			Field:        pulumi.String("string"),
    		},
    		WriteDisposition: pulumi.String("string"),
    	},
    	Location: pulumi.String("string"),
    	Project:  pulumi.String("string"),
    	Query: &bigquery.JobQueryArgs{
    		Query:              pulumi.String("string"),
    		ParameterMode:      pulumi.String("string"),
    		MaximumBytesBilled: pulumi.String("string"),
    		DestinationEncryptionConfiguration: &bigquery.JobQueryDestinationEncryptionConfigurationArgs{
    			KmsKeyName:    pulumi.String("string"),
    			KmsKeyVersion: pulumi.String("string"),
    		},
    		DestinationTable: &bigquery.JobQueryDestinationTableArgs{
    			TableId:   pulumi.String("string"),
    			DatasetId: pulumi.String("string"),
    			ProjectId: pulumi.String("string"),
    		},
    		Priority:           pulumi.String("string"),
    		MaximumBillingTier: pulumi.Int(0),
    		DefaultDataset: &bigquery.JobQueryDefaultDatasetArgs{
    			DatasetId: pulumi.String("string"),
    			ProjectId: pulumi.String("string"),
    		},
    		AllowLargeResults: pulumi.Bool(false),
    		FlattenResults:    pulumi.Bool(false),
    		CreateDisposition: pulumi.String("string"),
    		SchemaUpdateOptions: pulumi.StringArray{
    			pulumi.String("string"),
    		},
    		ScriptOptions: &bigquery.JobQueryScriptOptionsArgs{
    			KeyResultStatement:  pulumi.String("string"),
    			StatementByteBudget: pulumi.String("string"),
    			StatementTimeoutMs:  pulumi.String("string"),
    		},
    		UseLegacySql:  pulumi.Bool(false),
    		UseQueryCache: pulumi.Bool(false),
    		UserDefinedFunctionResources: bigquery.JobQueryUserDefinedFunctionResourceArray{
    			&bigquery.JobQueryUserDefinedFunctionResourceArgs{
    				InlineCode:  pulumi.String("string"),
    				ResourceUri: pulumi.String("string"),
    			},
    		},
    		WriteDisposition: pulumi.String("string"),
    	},
    })
    
    var jobResource = new Job("jobResource", JobArgs.builder()        
        .jobId("string")
        .copy(JobCopyArgs.builder()
            .sourceTables(JobCopySourceTableArgs.builder()
                .tableId("string")
                .datasetId("string")
                .projectId("string")
                .build())
            .createDisposition("string")
            .destinationEncryptionConfiguration(JobCopyDestinationEncryptionConfigurationArgs.builder()
                .kmsKeyName("string")
                .kmsKeyVersion("string")
                .build())
            .destinationTable(JobCopyDestinationTableArgs.builder()
                .tableId("string")
                .datasetId("string")
                .projectId("string")
                .build())
            .writeDisposition("string")
            .build())
        .extract(JobExtractArgs.builder()
            .destinationUris("string")
            .compression("string")
            .destinationFormat("string")
            .fieldDelimiter("string")
            .printHeader(false)
            .sourceModel(JobExtractSourceModelArgs.builder()
                .datasetId("string")
                .modelId("string")
                .projectId("string")
                .build())
            .sourceTable(JobExtractSourceTableArgs.builder()
                .tableId("string")
                .datasetId("string")
                .projectId("string")
                .build())
            .useAvroLogicalTypes(false)
            .build())
        .jobTimeoutMs("string")
        .labels(Map.of("string", "string"))
        .load(JobLoadArgs.builder()
            .destinationTable(JobLoadDestinationTableArgs.builder()
                .tableId("string")
                .datasetId("string")
                .projectId("string")
                .build())
            .sourceUris("string")
            .maxBadRecords(0)
            .nullMarker("string")
            .destinationEncryptionConfiguration(JobLoadDestinationEncryptionConfigurationArgs.builder()
                .kmsKeyName("string")
                .kmsKeyVersion("string")
                .build())
            .autodetect(false)
            .encoding("string")
            .fieldDelimiter("string")
            .ignoreUnknownValues(false)
            .jsonExtension("string")
            .allowJaggedRows(false)
            .createDisposition("string")
            .parquetOptions(JobLoadParquetOptionsArgs.builder()
                .enableListInference(false)
                .enumAsString(false)
                .build())
            .projectionFields("string")
            .quote("string")
            .schemaUpdateOptions("string")
            .skipLeadingRows(0)
            .sourceFormat("string")
            .allowQuotedNewlines(false)
            .timePartitioning(JobLoadTimePartitioningArgs.builder()
                .type("string")
                .expirationMs("string")
                .field("string")
                .build())
            .writeDisposition("string")
            .build())
        .location("string")
        .project("string")
        .query(JobQueryArgs.builder()
            .query("string")
            .parameterMode("string")
            .maximumBytesBilled("string")
            .destinationEncryptionConfiguration(JobQueryDestinationEncryptionConfigurationArgs.builder()
                .kmsKeyName("string")
                .kmsKeyVersion("string")
                .build())
            .destinationTable(JobQueryDestinationTableArgs.builder()
                .tableId("string")
                .datasetId("string")
                .projectId("string")
                .build())
            .priority("string")
            .maximumBillingTier(0)
            .defaultDataset(JobQueryDefaultDatasetArgs.builder()
                .datasetId("string")
                .projectId("string")
                .build())
            .allowLargeResults(false)
            .flattenResults(false)
            .createDisposition("string")
            .schemaUpdateOptions("string")
            .scriptOptions(JobQueryScriptOptionsArgs.builder()
                .keyResultStatement("string")
                .statementByteBudget("string")
                .statementTimeoutMs("string")
                .build())
            .useLegacySql(false)
            .useQueryCache(false)
            .userDefinedFunctionResources(JobQueryUserDefinedFunctionResourceArgs.builder()
                .inlineCode("string")
                .resourceUri("string")
                .build())
            .writeDisposition("string")
            .build())
        .build());
    
    job_resource = gcp.bigquery.Job("jobResource",
        job_id="string",
        copy=gcp.bigquery.JobCopyArgs(
            source_tables=[gcp.bigquery.JobCopySourceTableArgs(
                table_id="string",
                dataset_id="string",
                project_id="string",
            )],
            create_disposition="string",
            destination_encryption_configuration=gcp.bigquery.JobCopyDestinationEncryptionConfigurationArgs(
                kms_key_name="string",
                kms_key_version="string",
            ),
            destination_table=gcp.bigquery.JobCopyDestinationTableArgs(
                table_id="string",
                dataset_id="string",
                project_id="string",
            ),
            write_disposition="string",
        ),
        extract=gcp.bigquery.JobExtractArgs(
            destination_uris=["string"],
            compression="string",
            destination_format="string",
            field_delimiter="string",
            print_header=False,
            source_model=gcp.bigquery.JobExtractSourceModelArgs(
                dataset_id="string",
                model_id="string",
                project_id="string",
            ),
            source_table=gcp.bigquery.JobExtractSourceTableArgs(
                table_id="string",
                dataset_id="string",
                project_id="string",
            ),
            use_avro_logical_types=False,
        ),
        job_timeout_ms="string",
        labels={
            "string": "string",
        },
        load=gcp.bigquery.JobLoadArgs(
            destination_table=gcp.bigquery.JobLoadDestinationTableArgs(
                table_id="string",
                dataset_id="string",
                project_id="string",
            ),
            source_uris=["string"],
            max_bad_records=0,
            null_marker="string",
            destination_encryption_configuration=gcp.bigquery.JobLoadDestinationEncryptionConfigurationArgs(
                kms_key_name="string",
                kms_key_version="string",
            ),
            autodetect=False,
            encoding="string",
            field_delimiter="string",
            ignore_unknown_values=False,
            json_extension="string",
            allow_jagged_rows=False,
            create_disposition="string",
            parquet_options=gcp.bigquery.JobLoadParquetOptionsArgs(
                enable_list_inference=False,
                enum_as_string=False,
            ),
            projection_fields=["string"],
            quote="string",
            schema_update_options=["string"],
            skip_leading_rows=0,
            source_format="string",
            allow_quoted_newlines=False,
            time_partitioning=gcp.bigquery.JobLoadTimePartitioningArgs(
                type="string",
                expiration_ms="string",
                field="string",
            ),
            write_disposition="string",
        ),
        location="string",
        project="string",
        query=gcp.bigquery.JobQueryArgs(
            query="string",
            parameter_mode="string",
            maximum_bytes_billed="string",
            destination_encryption_configuration=gcp.bigquery.JobQueryDestinationEncryptionConfigurationArgs(
                kms_key_name="string",
                kms_key_version="string",
            ),
            destination_table=gcp.bigquery.JobQueryDestinationTableArgs(
                table_id="string",
                dataset_id="string",
                project_id="string",
            ),
            priority="string",
            maximum_billing_tier=0,
            default_dataset=gcp.bigquery.JobQueryDefaultDatasetArgs(
                dataset_id="string",
                project_id="string",
            ),
            allow_large_results=False,
            flatten_results=False,
            create_disposition="string",
            schema_update_options=["string"],
            script_options=gcp.bigquery.JobQueryScriptOptionsArgs(
                key_result_statement="string",
                statement_byte_budget="string",
                statement_timeout_ms="string",
            ),
            use_legacy_sql=False,
            use_query_cache=False,
            user_defined_function_resources=[gcp.bigquery.JobQueryUserDefinedFunctionResourceArgs(
                inline_code="string",
                resource_uri="string",
            )],
            write_disposition="string",
        ))
    
    const jobResource = new gcp.bigquery.Job("jobResource", {
        jobId: "string",
        copy: {
            sourceTables: [{
                tableId: "string",
                datasetId: "string",
                projectId: "string",
            }],
            createDisposition: "string",
            destinationEncryptionConfiguration: {
                kmsKeyName: "string",
                kmsKeyVersion: "string",
            },
            destinationTable: {
                tableId: "string",
                datasetId: "string",
                projectId: "string",
            },
            writeDisposition: "string",
        },
        extract: {
            destinationUris: ["string"],
            compression: "string",
            destinationFormat: "string",
            fieldDelimiter: "string",
            printHeader: false,
            sourceModel: {
                datasetId: "string",
                modelId: "string",
                projectId: "string",
            },
            sourceTable: {
                tableId: "string",
                datasetId: "string",
                projectId: "string",
            },
            useAvroLogicalTypes: false,
        },
        jobTimeoutMs: "string",
        labels: {
            string: "string",
        },
        load: {
            destinationTable: {
                tableId: "string",
                datasetId: "string",
                projectId: "string",
            },
            sourceUris: ["string"],
            maxBadRecords: 0,
            nullMarker: "string",
            destinationEncryptionConfiguration: {
                kmsKeyName: "string",
                kmsKeyVersion: "string",
            },
            autodetect: false,
            encoding: "string",
            fieldDelimiter: "string",
            ignoreUnknownValues: false,
            jsonExtension: "string",
            allowJaggedRows: false,
            createDisposition: "string",
            parquetOptions: {
                enableListInference: false,
                enumAsString: false,
            },
            projectionFields: ["string"],
            quote: "string",
            schemaUpdateOptions: ["string"],
            skipLeadingRows: 0,
            sourceFormat: "string",
            allowQuotedNewlines: false,
            timePartitioning: {
                type: "string",
                expirationMs: "string",
                field: "string",
            },
            writeDisposition: "string",
        },
        location: "string",
        project: "string",
        query: {
            query: "string",
            parameterMode: "string",
            maximumBytesBilled: "string",
            destinationEncryptionConfiguration: {
                kmsKeyName: "string",
                kmsKeyVersion: "string",
            },
            destinationTable: {
                tableId: "string",
                datasetId: "string",
                projectId: "string",
            },
            priority: "string",
            maximumBillingTier: 0,
            defaultDataset: {
                datasetId: "string",
                projectId: "string",
            },
            allowLargeResults: false,
            flattenResults: false,
            createDisposition: "string",
            schemaUpdateOptions: ["string"],
            scriptOptions: {
                keyResultStatement: "string",
                statementByteBudget: "string",
                statementTimeoutMs: "string",
            },
            useLegacySql: false,
            useQueryCache: false,
            userDefinedFunctionResources: [{
                inlineCode: "string",
                resourceUri: "string",
            }],
            writeDisposition: "string",
        },
    });
    
    type: gcp:bigquery:Job
    properties:
        copy:
            createDisposition: string
            destinationEncryptionConfiguration:
                kmsKeyName: string
                kmsKeyVersion: string
            destinationTable:
                datasetId: string
                projectId: string
                tableId: string
            sourceTables:
                - datasetId: string
                  projectId: string
                  tableId: string
            writeDisposition: string
        extract:
            compression: string
            destinationFormat: string
            destinationUris:
                - string
            fieldDelimiter: string
            printHeader: false
            sourceModel:
                datasetId: string
                modelId: string
                projectId: string
            sourceTable:
                datasetId: string
                projectId: string
                tableId: string
            useAvroLogicalTypes: false
        jobId: string
        jobTimeoutMs: string
        labels:
            string: string
        load:
            allowJaggedRows: false
            allowQuotedNewlines: false
            autodetect: false
            createDisposition: string
            destinationEncryptionConfiguration:
                kmsKeyName: string
                kmsKeyVersion: string
            destinationTable:
                datasetId: string
                projectId: string
                tableId: string
            encoding: string
            fieldDelimiter: string
            ignoreUnknownValues: false
            jsonExtension: string
            maxBadRecords: 0
            nullMarker: string
            parquetOptions:
                enableListInference: false
                enumAsString: false
            projectionFields:
                - string
            quote: string
            schemaUpdateOptions:
                - string
            skipLeadingRows: 0
            sourceFormat: string
            sourceUris:
                - string
            timePartitioning:
                expirationMs: string
                field: string
                type: string
            writeDisposition: string
        location: string
        project: string
        query:
            allowLargeResults: false
            createDisposition: string
            defaultDataset:
                datasetId: string
                projectId: string
            destinationEncryptionConfiguration:
                kmsKeyName: string
                kmsKeyVersion: string
            destinationTable:
                datasetId: string
                projectId: string
                tableId: string
            flattenResults: false
            maximumBillingTier: 0
            maximumBytesBilled: string
            parameterMode: string
            priority: string
            query: string
            schemaUpdateOptions:
                - string
            scriptOptions:
                keyResultStatement: string
                statementByteBudget: string
                statementTimeoutMs: string
            useLegacySql: false
            useQueryCache: false
            userDefinedFunctionResources:
                - inlineCode: string
                  resourceUri: string
            writeDisposition: string
    

    Job Resource Properties

    To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

    Inputs

    The Job resource accepts the following input properties:

    JobId string
    The ID of the job. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024 characters.
    Copy JobCopy
    Copies a table. Structure is documented below.
    Extract JobExtract
    Configures an extract job. Structure is documented below.
    JobTimeoutMs string
    Job timeout in milliseconds. If this time limit is exceeded, BigQuery may attempt to terminate the job.
    Labels Dictionary<string, string>

    The labels associated with this job. You can use these to organize and group your jobs.

    Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.

    Load JobLoad
    Configures a load job. Structure is documented below.
    Location string
    The geographic location of the job. The default value is US.
    Project string
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    Query JobQuery
    SQL query text to execute. The useLegacySql field can be used to indicate whether the query uses legacy SQL or standard SQL. NOTE: queries containing DML language (DELETE, UPDATE, MERGE, INSERT) must specify create_disposition = "" and write_disposition = "".
    JobId string
    The ID of the job. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024 characters.
    Copy JobCopyArgs
    Copies a table. Structure is documented below.
    Extract JobExtractArgs
    Configures an extract job. Structure is documented below.
    JobTimeoutMs string
    Job timeout in milliseconds. If this time limit is exceeded, BigQuery may attempt to terminate the job.
    Labels map[string]string

    The labels associated with this job. You can use these to organize and group your jobs.

    Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.

    Load JobLoadArgs
    Configures a load job. Structure is documented below.
    Location string
    The geographic location of the job. The default value is US.
    Project string
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    Query JobQueryArgs
    SQL query text to execute. The useLegacySql field can be used to indicate whether the query uses legacy SQL or standard SQL. NOTE: queries containing DML language (DELETE, UPDATE, MERGE, INSERT) must specify create_disposition = "" and write_disposition = "".
    jobId String
    The ID of the job. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024 characters.
    copy JobCopy
    Copies a table. Structure is documented below.
    extract JobExtract
    Configures an extract job. Structure is documented below.
    jobTimeoutMs String
    Job timeout in milliseconds. If this time limit is exceeded, BigQuery may attempt to terminate the job.
    labels Map<String,String>

    The labels associated with this job. You can use these to organize and group your jobs.

    Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.

    load JobLoad
    Configures a load job. Structure is documented below.
    location String
    The geographic location of the job. The default value is US.
    project String
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    query JobQuery
    SQL query text to execute. The useLegacySql field can be used to indicate whether the query uses legacy SQL or standard SQL. NOTE: queries containing DML language (DELETE, UPDATE, MERGE, INSERT) must specify create_disposition = "" and write_disposition = "".
    jobId string
    The ID of the job. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024 characters.
    copy JobCopy
    Copies a table. Structure is documented below.
    extract JobExtract
    Configures an extract job. Structure is documented below.
    jobTimeoutMs string
    Job timeout in milliseconds. If this time limit is exceeded, BigQuery may attempt to terminate the job.
    labels {[key: string]: string}

    The labels associated with this job. You can use these to organize and group your jobs.

    Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.

    load JobLoad
    Configures a load job. Structure is documented below.
    location string
    The geographic location of the job. The default value is US.
    project string
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    query JobQuery
    SQL query text to execute. The useLegacySql field can be used to indicate whether the query uses legacy SQL or standard SQL. NOTE: queries containing DML language (DELETE, UPDATE, MERGE, INSERT) must specify create_disposition = "" and write_disposition = "".
    job_id str
    The ID of the job. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024 characters.
    copy JobCopyArgs
    Copies a table. Structure is documented below.
    extract JobExtractArgs
    Configures an extract job. Structure is documented below.
    job_timeout_ms str
    Job timeout in milliseconds. If this time limit is exceeded, BigQuery may attempt to terminate the job.
    labels Mapping[str, str]

    The labels associated with this job. You can use these to organize and group your jobs.

    Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.

    load JobLoadArgs
    Configures a load job. Structure is documented below.
    location str
    The geographic location of the job. The default value is US.
    project str
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    query JobQueryArgs
    SQL query text to execute. The useLegacySql field can be used to indicate whether the query uses legacy SQL or standard SQL. NOTE: queries containing DML language (DELETE, UPDATE, MERGE, INSERT) must specify create_disposition = "" and write_disposition = "".
    jobId String
    The ID of the job. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024 characters.
    copy Property Map
    Copies a table. Structure is documented below.
    extract Property Map
    Configures an extract job. Structure is documented below.
    jobTimeoutMs String
    Job timeout in milliseconds. If this time limit is exceeded, BigQuery may attempt to terminate the job.
    labels Map<String>

    The labels associated with this job. You can use these to organize and group your jobs.

    Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.

    load Property Map
    Configures a load job. Structure is documented below.
    location String
    The geographic location of the job. The default value is US.
    project String
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    query Property Map
    SQL query text to execute. The useLegacySql field can be used to indicate whether the query uses legacy SQL or standard SQL. NOTE: queries containing DML language (DELETE, UPDATE, MERGE, INSERT) must specify create_disposition = "" and write_disposition = "".
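
    As a rough sketch of how these inputs fit together, the example below wires up a load job; a job runs a single operation, so in practice one of copy, extract, load, or query is set. The bucket, project, dataset, and table names are placeholders rather than values from this page.

    import * as gcp from "@pulumi/gcp";

    // Top-level shape of the inputs: jobId is required, and the load block
    // carries this job's configuration.
    const loadJob = new gcp.bigquery.Job("loadJob", {
        jobId: "job_load_example",
        location: "US",            // defaults to US when omitted
        jobTimeoutMs: "600000",    // allow up to ten minutes before BigQuery may terminate the job
        labels: {
            team: "analytics",
        },
        load: {
            sourceUris: ["gs://example-bucket/data/*.csv"],
            destinationTable: {
                projectId: "example-project",
                datasetId: "example_dataset",
                tableId: "example_table",
            },
            sourceFormat: "CSV",
            skipLeadingRows: 1,
            autodetect: true,
            writeDisposition: "WRITE_APPEND",
        },
    });

    Because a job cannot be changed once it has been created, editing these inputs later typically causes Pulumi to replace the job rather than update it in place.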

    Outputs

    All input properties are implicitly available as output properties. Additionally, the Job resource produces the following output properties:

    EffectiveLabels Dictionary<string, string>
    (Output) All of the labels (key/value pairs) present on the resource in GCP, including those configured through Pulumi, other clients, and services.
    Id string
    The provider-assigned unique ID for this managed resource.
    JobType string
    (Output) The type of the job.
    PulumiLabels Dictionary<string, string>
    (Output) The combination of labels configured directly on the resource and default labels configured on the provider.
    Statuses List<JobStatus>
    The status of this job. Examine this value when polling an asynchronous job to see if the job is complete. Structure is documented below.
    UserEmail string
    Email address of the user who ran the job.
    EffectiveLabels map[string]string
    (Output) All of the labels (key/value pairs) present on the resource in GCP, including those configured through Pulumi, other clients, and services.
    Id string
    The provider-assigned unique ID for this managed resource.
    JobType string
    (Output) The type of the job.
    PulumiLabels map[string]string
    (Output) The combination of labels configured directly on the resource and default labels configured on the provider.
    Statuses []JobStatus
    The status of this job. Examine this value when polling an asynchronous job to see if the job is complete. Structure is documented below.
    UserEmail string
    Email address of the user who ran the job.
    effectiveLabels Map<String,String>
    (Output) All of the labels (key/value pairs) present on the resource in GCP, including those configured through Pulumi, other clients, and services.
    id String
    The provider-assigned unique ID for this managed resource.
    jobType String
    (Output) The type of the job.
    pulumiLabels Map<String,String>
    (Output) The combination of labels configured directly on the resource and default labels configured on the provider.
    statuses List<JobStatus>
    The status of this job. Examine this value when polling an asynchronous job to see if the job is complete. Structure is documented below.
    userEmail String
    Email address of the user who ran the job.
    effectiveLabels {[key: string]: string}
    (Output) All of the labels (key/value pairs) present on the resource in GCP, including those configured through Pulumi, other clients, and services.
    id string
    The provider-assigned unique ID for this managed resource.
    jobType string
    (Output) The type of the job.
    pulumiLabels {[key: string]: string}
    (Output) The combination of labels configured directly on the resource and default labels configured on the provider.
    statuses JobStatus[]
    The status of this job. Examine this value when polling an asynchronous job to see if the job is complete. Structure is documented below.
    userEmail string
    Email address of the user who ran the job.
    effective_labels Mapping[str, str]
    (Output) All of the labels (key/value pairs) present on the resource in GCP, including those configured through Pulumi, other clients, and services.
    id str
    The provider-assigned unique ID for this managed resource.
    job_type str
    (Output) The type of the job.
    pulumi_labels Mapping[str, str]
    (Output) The combination of labels configured directly on the resource and default labels configured on the provider.
    statuses Sequence[JobStatus]
    The status of this job. Examine this value when polling an asynchronous job to see if the job is complete. Structure is documented below.
    user_email str
    Email address of the user who ran the job.
    effectiveLabels Map<String>
    (Output) All of the labels (key/value pairs) present on the resource in GCP, including those configured through Pulumi, other clients, and services.
    id String
    The provider-assigned unique ID for this managed resource.
    jobType String
    (Output) The type of the job.
    pulumiLabels Map<String>
    (Output) The combination of labels configured directly on the resource and default labels configured on the provider.
    statuses List<Property Map>
    The status of this job. Examine this value when polling an asynchronous job to see if the job is complete. Structure is documented below.
    userEmail String
    Email address of the user who ran the job.
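
    As a small sketch of consuming these outputs, and assuming the loadJob resource from the earlier load-job sketch, the output properties can be exported directly from the program:

    // Assumes `loadJob` is the gcp.bigquery.Job declared in the earlier sketch.
    export const jobType = loadJob.jobType;               // the job type reported by BigQuery
    export const jobUser = loadJob.userEmail;             // who ran the job
    export const jobStatuses = loadJob.statuses;          // inspect these to see whether the job completed
    export const allJobLabels = loadJob.effectiveLabels;  // labels from Pulumi plus other clients and services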

    Look up Existing Job Resource

    Get an existing Job resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

    public static get(name: string, id: Input<ID>, state?: JobState, opts?: CustomResourceOptions): Job
    @staticmethod
    def get(resource_name: str,
            id: str,
            opts: Optional[ResourceOptions] = None,
            copy: Optional[JobCopyArgs] = None,
            effective_labels: Optional[Mapping[str, str]] = None,
            extract: Optional[JobExtractArgs] = None,
            job_id: Optional[str] = None,
            job_timeout_ms: Optional[str] = None,
            job_type: Optional[str] = None,
            labels: Optional[Mapping[str, str]] = None,
            load: Optional[JobLoadArgs] = None,
            location: Optional[str] = None,
            project: Optional[str] = None,
            pulumi_labels: Optional[Mapping[str, str]] = None,
            query: Optional[JobQueryArgs] = None,
            statuses: Optional[Sequence[JobStatusArgs]] = None,
            user_email: Optional[str] = None) -> Job
    func GetJob(ctx *Context, name string, id IDInput, state *JobState, opts ...ResourceOption) (*Job, error)
    public static Job Get(string name, Input<string> id, JobState? state, CustomResourceOptions? opts = null)
    public static Job get(String name, Output<String> id, JobState state, CustomResourceOptions options)
    Resource lookup is not supported in YAML
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    resource_name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    The following state arguments are supported:
    Copy JobCopy
    Copies a table. Structure is documented below.
    EffectiveLabels Dictionary<string, string>
    (Output) All of the labels (key/value pairs) present on the resource in GCP, including those configured through Pulumi, other clients, and services.
    Extract JobExtract
    Configures an extract job. Structure is documented below.
    JobId string
    The ID of the job. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024 characters.
    JobTimeoutMs string
    Job timeout in milliseconds. If this time limit is exceeded, BigQuery may attempt to terminate the job.
    JobType string
    (Output) The type of the job.
    Labels Dictionary<string, string>

    The labels associated with this job. You can use these to organize and group your jobs.

    Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.

    Load JobLoad
    Configures a load job. Structure is documented below.
    Location string
    The geographic location of the job. The default value is US.
    Project string
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    PulumiLabels Dictionary<string, string>
    (Output) The combination of labels configured directly on the resource and default labels configured on the provider.
    Query JobQuery
    SQL query text to execute. The useLegacySql field can be used to indicate whether the query uses legacy SQL or standard SQL. NOTE: queries containing DML language (DELETE, UPDATE, MERGE, INSERT) must specify create_disposition = "" and write_disposition = "".
    Statuses List<JobStatus>
    The status of this job. Examine this value when polling an asynchronous job to see if the job is complete. Structure is documented below.
    UserEmail string
    Email address of the user who ran the job.
    Copy JobCopyArgs
    Copies a table. Structure is documented below.
    EffectiveLabels map[string]string
    (Output) All of the labels (key/value pairs) present on the resource in GCP, including those configured through Pulumi, other clients, and services.
    Extract JobExtractArgs
    Configures an extract job. Structure is documented below.
    JobId string
    The ID of the job. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024 characters.
    JobTimeoutMs string
    Job timeout in milliseconds. If this time limit is exceeded, BigQuery may attempt to terminate the job.
    JobType string
    (Output) The type of the job.
    Labels map[string]string

    The labels associated with this job. You can use these to organize and group your jobs.

    Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.

    Load JobLoadArgs
    Configures a load job. Structure is documented below.
    Location string
    The geographic location of the job. The default value is US.
    Project string
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    PulumiLabels map[string]string
    (Output) The combination of labels configured directly on the resource and default labels configured on the provider.
    Query JobQueryArgs
    SQL query text to execute. The useLegacySql field can be used to indicate whether the query uses legacy SQL or standard SQL. NOTE: queries containing DML language (DELETE, UPDATE, MERGE, INSERT) must specify create_disposition = "" and write_disposition = "".
    Statuses []JobStatusArgs
    The status of this job. Examine this value when polling an asynchronous job to see if the job is complete. Structure is documented below.
    UserEmail string
    Email address of the user who ran the job.
    copy JobCopy
    Copies a table. Structure is documented below.
    effectiveLabels Map<String,String>
    (Output) All of the labels (key/value pairs) present on the resource in GCP, including those configured through Pulumi, other clients, and services.
    extract JobExtract
    Configures an extract job. Structure is documented below.
    jobId String
    The ID of the job. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024 characters.
    jobTimeoutMs String
    Job timeout in milliseconds. If this time limit is exceeded, BigQuery may attempt to terminate the job.
    jobType String
    (Output) The type of the job.
    labels Map<String,String>

    The labels associated with this job. You can use these to organize and group your jobs.

    Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.

    load JobLoad
    Configures a load job. Structure is documented below.
    location String
    The geographic location of the job. The default value is US.
    project String
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    pulumiLabels Map<String,String>
    (Output) The combination of labels configured directly on the resource and default labels configured on the provider.
    query JobQuery
    SQL query text to execute. The useLegacySql field can be used to indicate whether the query uses legacy SQL or standard SQL. NOTE: queries containing DML language (DELETE, UPDATE, MERGE, INSERT) must specify create_disposition = "" and write_disposition = "".
    statuses List<JobStatus>
    The status of this job. Examine this value when polling an asynchronous job to see if the job is complete. Structure is documented below.
    userEmail String
    Email address of the user who ran the job.
    copy JobCopy
    Copies a table. Structure is documented below.
    effectiveLabels {[key: string]: string}
    (Output) All of the labels (key/value pairs) present on the resource in GCP, including those configured through Pulumi, other clients, and services.
    extract JobExtract
    Configures an extract job. Structure is documented below.
    jobId string
    The ID of the job. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024 characters.
    jobTimeoutMs string
    Job timeout in milliseconds. If this time limit is exceeded, BigQuery may attempt to terminate the job.
    jobType string
    (Output) The type of the job.
    labels {[key: string]: string}

    The labels associated with this job. You can use these to organize and group your jobs.

    Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.

    load JobLoad
    Configures a load job. Structure is documented below.
    location string
    The geographic location of the job. The default value is US.
    project string
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    pulumiLabels {[key: string]: string}
    (Output) The combination of labels configured directly on the resource and default labels configured on the provider.
    query JobQuery
    SQL query text to execute. The useLegacySql field can be used to indicate whether the query uses legacy SQL or standard SQL. NOTE: queries containing DML language (DELETE, UPDATE, MERGE, INSERT) must specify create_disposition = "" and write_disposition = "".
    statuses JobStatus[]
    The status of this job. Examine this value when polling an asynchronous job to see if the job is complete. Structure is documented below.
    userEmail string
    Email address of the user who ran the job.
    copy JobCopyArgs
    Copies a table. Structure is documented below.
    effective_labels Mapping[str, str]
    (Output) All of the labels (key/value pairs) present on the resource in GCP, including those configured through Pulumi, other clients, and services.
    extract JobExtractArgs
    Configures an extract job. Structure is documented below.
    job_id str
    The ID of the job. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024 characters.
    job_timeout_ms str
    Job timeout in milliseconds. If this time limit is exceeded, BigQuery may attempt to terminate the job.
    job_type str
    (Output) The type of the job.
    labels Mapping[str, str]

    The labels associated with this job. You can use these to organize and group your jobs.

    Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.

    load JobLoadArgs
    Configures a load job. Structure is documented below.
    location str
    The geographic location of the job. The default value is US.
    project str
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    pulumi_labels Mapping[str, str]
    (Output) The combination of labels configured directly on the resource and default labels configured on the provider.
    query JobQueryArgs
    SQL query text to execute. The useLegacySql field can be used to indicate whether the query uses legacy SQL or standard SQL. NOTE: queries containing DML language (DELETE, UPDATE, MERGE, INSERT) must specify create_disposition = "" and write_disposition = "".
    statuses Sequence[JobStatusArgs]
    The status of this job. Examine this value when polling an asynchronous job to see if the job is complete. Structure is documented below.
    user_email str
    Email address of the user who ran the job.
    copy Property Map
    Copies a table. Structure is documented below.
    effectiveLabels Map<String>
    (Output) All of the labels (key/value pairs) present on the resource in GCP, including those configured through Pulumi, other clients, and services.
    extract Property Map
    Configures an extract job. Structure is documented below.
    jobId String
    The ID of the job. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024 characters.
    jobTimeoutMs String
    Job timeout in milliseconds. If this time limit is exceeded, BigQuery may attempt to terminate the job.
    jobType String
    (Output) The type of the job.
    labels Map<String>

    The labels associated with this job. You can use these to organize and group your jobs.

    Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.

    load Property Map
    Configures a load job. Structure is documented below.
    location String
    The geographic location of the job. The default value is US.
    project String
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    pulumiLabels Map<String>
    (Output) The combination of labels configured directly on the resource and default labels configured on the provider.
    query Property Map
    SQL query text to execute. The useLegacySql field can be used to indicate whether the query uses legacy SQL or standard SQL. NOTE: queries containing DML language (DELETE, UPDATE, MERGE, INSERT) must specify create_disposition = "" and write_disposition = "".
    statuses List<Property Map>
    The status of this job. Examine this value when polling an asynchronous job to see if the job is complete. Structure is documented below.
    userEmail String
    Email address of the user who ran the job.
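
    As a sketch of the lookup shown above, an existing job can be pulled into a program by name and provider ID. The ID string below is a placeholder; substitute whatever ID your stack or the BigQuery API reports for the job.

    import * as gcp from "@pulumi/gcp";

    // Look up an existing Job's state without recreating it; the ID here is illustrative.
    const existing = gcp.bigquery.Job.get(
        "existingJob",
        "projects/example-project/jobs/job_query/location/US",
    );
    export const existingJobStatuses = existing.statuses;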

    Supporting Types

    JobCopy, JobCopyArgs

    SourceTables List<JobCopySourceTable>
    Source tables to copy. Structure is documented below.
    CreateDisposition string
    Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. Creation, truncation, and append actions occur as one atomic update upon job completion. Default value is CREATE_IF_NEEDED. Possible values are: CREATE_IF_NEEDED, CREATE_NEVER.
    DestinationEncryptionConfiguration JobCopyDestinationEncryptionConfiguration
    Custom encryption configuration (e.g., Cloud KMS keys) Structure is documented below.
    DestinationTable JobCopyDestinationTable
    The destination table. Structure is documented below.
    WriteDisposition string
    Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is WRITE_EMPTY. Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY.
    SourceTables []JobCopySourceTable
    Source tables to copy. Structure is documented below.
    CreateDisposition string
    Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. Creation, truncation, and append actions occur as one atomic update upon job completion. Default value is CREATE_IF_NEEDED. Possible values are: CREATE_IF_NEEDED, CREATE_NEVER.
    DestinationEncryptionConfiguration JobCopyDestinationEncryptionConfiguration
    Custom encryption configuration (e.g., Cloud KMS keys) Structure is documented below.
    DestinationTable JobCopyDestinationTable
    The destination table. Structure is documented below.
    WriteDisposition string
    Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is WRITE_EMPTY. Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY.
    sourceTables List<JobCopySourceTable>
    Source tables to copy. Structure is documented below.
    createDisposition String
    Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. Creation, truncation, and append actions occur as one atomic update upon job completion. Default value is CREATE_IF_NEEDED. Possible values are: CREATE_IF_NEEDED, CREATE_NEVER.
    destinationEncryptionConfiguration JobCopyDestinationEncryptionConfiguration
    Custom encryption configuration (e.g., Cloud KMS keys) Structure is documented below.
    destinationTable JobCopyDestinationTable
    The destination table. Structure is documented below.
    writeDisposition String
    Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is WRITE_EMPTY. Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY.
    sourceTables JobCopySourceTable[]
    Source tables to copy. Structure is documented below.
    createDisposition string
    Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. Creation, truncation, and append actions occur as one atomic update upon job completion. Default value is CREATE_IF_NEEDED. Possible values are: CREATE_IF_NEEDED, CREATE_NEVER.
    destinationEncryptionConfiguration JobCopyDestinationEncryptionConfiguration
    Custom encryption configuration (e.g., Cloud KMS keys) Structure is documented below.
    destinationTable JobCopyDestinationTable
    The destination table. Structure is documented below.
    writeDisposition string
    Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is WRITE_EMPTY. Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY.
    source_tables Sequence[JobCopySourceTable]
    Source tables to copy. Structure is documented below.
    create_disposition str
    Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. Creation, truncation, and append actions occur as one atomic update upon job completion. Default value is CREATE_IF_NEEDED. Possible values are: CREATE_IF_NEEDED, CREATE_NEVER.
    destination_encryption_configuration JobCopyDestinationEncryptionConfiguration
    Custom encryption configuration (e.g., Cloud KMS keys) Structure is documented below.
    destination_table JobCopyDestinationTable
    The destination table. Structure is documented below.
    write_disposition str
    Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is WRITE_EMPTY. Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY.
    sourceTables List<Property Map>
    Source tables to copy. Structure is documented below.
    createDisposition String
    Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. Creation, truncation, and append actions occur as one atomic update upon job completion. Default value is CREATE_IF_NEEDED. Possible values are: CREATE_IF_NEEDED, CREATE_NEVER.
    destinationEncryptionConfiguration Property Map
    Custom encryption configuration (e.g., Cloud KMS keys) Structure is documented below.
    destinationTable Property Map
    The destination table. Structure is documented below.
    writeDisposition String
    Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is WRITE_EMPTY. Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY.
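
    A minimal sketch of the copy configuration described above, using placeholder project, dataset, and table names:

    import * as gcp from "@pulumi/gcp";

    // Copy one source table into a destination table, creating it if needed
    // and overwriting any existing data.
    const copyJob = new gcp.bigquery.Job("copyJob", {
        jobId: "job_copy_example",
        copy: {
            sourceTables: [{
                projectId: "example-project",
                datasetId: "source_dataset",
                tableId: "source_table",
            }],
            destinationTable: {
                projectId: "example-project",
                datasetId: "dest_dataset",
                tableId: "dest_table",
            },
            createDisposition: "CREATE_IF_NEEDED",
            writeDisposition: "WRITE_TRUNCATE",
        },
    });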

    JobCopyDestinationEncryptionConfiguration, JobCopyDestinationEncryptionConfigurationArgs

    KmsKeyName string
    Describes the Cloud KMS encryption key that will be used to protect the destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.
    KmsKeyVersion string
    (Output) Describes the Cloud KMS encryption key version used to protect the destination BigQuery table.
    KmsKeyName string
    Describes the Cloud KMS encryption key that will be used to protect the destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.
    KmsKeyVersion string
    (Output) Describes the Cloud KMS encryption key version used to protect the destination BigQuery table.
    kmsKeyName String
    Describes the Cloud KMS encryption key that will be used to protect the destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.
    kmsKeyVersion String
    (Output) Describes the Cloud KMS encryption key version used to protect the destination BigQuery table.
    kmsKeyName string
    Describes the Cloud KMS encryption key that will be used to protect the destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.
    kmsKeyVersion string
    (Output) Describes the Cloud KMS encryption key version used to protect the destination BigQuery table.
    kms_key_name str
    Describes the Cloud KMS encryption key that will be used to protect the destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.
    kms_key_version str
    (Output) Describes the Cloud KMS encryption key version used to protect the destination BigQuery table.
    kmsKeyName String
    Describes the Cloud KMS encryption key that will be used to protect the destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.
    kmsKeyVersion String
    (Output) Describes the Cloud KMS encryption key version used to protect the destination BigQuery table.
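
    A sketch of attaching a customer-managed key to a copy job's destination. The key name is a placeholder, and the BigQuery service account for the project is assumed to already have permission to use it; kmsKeyVersion is output-only and is not set here.

    import * as gcp from "@pulumi/gcp";

    const encryptedCopy = new gcp.bigquery.Job("encryptedCopy", {
        jobId: "job_copy_cmek_example",
        copy: {
            sourceTables: [{ datasetId: "source_dataset", tableId: "source_table" }],
            destinationTable: { datasetId: "dest_dataset", tableId: "dest_table" },
            destinationEncryptionConfiguration: {
                // Placeholder CMEK resource name; grant the BigQuery service
                // account access to this key before running the job.
                kmsKeyName: "projects/example-project/locations/us/keyRings/example-ring/cryptoKeys/example-key",
            },
        },
    });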

    JobCopyDestinationTable, JobCopyDestinationTableArgs

    TableId string
    The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
    DatasetId string
    The ID of the dataset containing this table.
    ProjectId string
    The ID of the project containing this table.
    TableId string
    The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
    DatasetId string
    The ID of the dataset containing this table.
    ProjectId string
    The ID of the project containing this table.
    tableId String
    The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
    datasetId String
    The ID of the dataset containing this table.
    projectId String
    The ID of the project containing this table.
    tableId string
    The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
    datasetId string
    The ID of the dataset containing this table.
    projectId string
    The ID of the project containing this table.
    table_id str
    The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
    dataset_id str
    The ID of the dataset containing this table.
    project_id str
    The ID of the project containing this table.
    tableId String
    The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
    datasetId String
    The ID of the dataset containing this table.
    projectId String
    The ID of the project containing this table.
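
    To illustrate the two accepted forms described above (all values are placeholders), the same destination can be written either split across the three ID fields or as a single fully qualified tableId:

    // Split form: project_id and dataset_id are set alongside a bare table_id.
    const splitReference = {
        projectId: "example-project",
        datasetId: "example_dataset",
        tableId: "example_table",
    };

    // Fully qualified form: everything is carried in tableId.
    const qualifiedReference = {
        tableId: "projects/example-project/datasets/example_dataset/tables/example_table",
    };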

    JobCopySourceTable, JobCopySourceTableArgs

    TableId string
    The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
    DatasetId string
    The ID of the dataset containing this table.
    ProjectId string
    The ID of the project containing this table.
    TableId string
    The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
    DatasetId string
    The ID of the dataset containing this table.
    ProjectId string
    The ID of the project containing this table.
    tableId String
    The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
    datasetId String
    The ID of the dataset containing this table.
    projectId String
    The ID of the project containing this table.
    tableId string
    The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
    datasetId string
    The ID of the dataset containing this table.
    projectId string
    The ID of the project containing this table.
    table_id str
    The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
    dataset_id str
    The ID of the dataset containing this table.
    project_id str
    The ID of the project containing this table.
    tableId String
    The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
    datasetId String
    The ID of the dataset containing this table.
    projectId String
    The ID of the project containing this table.

    JobExtract, JobExtractArgs

    DestinationUris List<string>
    A list of fully-qualified Google Cloud Storage URIs where the extracted table should be written.
    Compression string
    The compression type to use for exported files. Possible values include GZIP, DEFLATE, SNAPPY, and NONE. The default value is NONE. DEFLATE and SNAPPY are only supported for Avro.
    DestinationFormat string
    The exported file format. Possible values include CSV, NEWLINE_DELIMITED_JSON and AVRO for tables and SAVED_MODEL for models. The default value for tables is CSV. Tables with nested or repeated fields cannot be exported as CSV. The default value for models is SAVED_MODEL.
    FieldDelimiter string
    When extracting data in CSV format, this defines the delimiter to use between fields in the exported data. Default is ','
    PrintHeader bool
    Whether to print out a header row in the results. Default is true.
    SourceModel JobExtractSourceModel
    A reference to the model being exported. Structure is documented below.
    SourceTable JobExtractSourceTable
    A reference to the table being exported. Structure is documented below.
    UseAvroLogicalTypes bool
    Whether to use logical types when extracting to AVRO format.
    DestinationUris []string
    A list of fully-qualified Google Cloud Storage URIs where the extracted table should be written.
    Compression string
    The compression type to use for exported files. Possible values include GZIP, DEFLATE, SNAPPY, and NONE. The default value is NONE. DEFLATE and SNAPPY are only supported for Avro.
    DestinationFormat string
    The exported file format. Possible values include CSV, NEWLINE_DELIMITED_JSON and AVRO for tables and SAVED_MODEL for models. The default value for tables is CSV. Tables with nested or repeated fields cannot be exported as CSV. The default value for models is SAVED_MODEL.
    FieldDelimiter string
    When extracting data in CSV format, this defines the delimiter to use between fields in the exported data. Default is ','
    PrintHeader bool
    Whether to print out a header row in the results. Default is true.
    SourceModel JobExtractSourceModel
    A reference to the model being exported. Structure is documented below.
    SourceTable JobExtractSourceTable
    A reference to the table being exported. Structure is documented below.
    UseAvroLogicalTypes bool
    Whether to use logical types when extracting to AVRO format.
    destinationUris List<String>
    A list of fully-qualified Google Cloud Storage URIs where the extracted table should be written.
    compression String
    The compression type to use for exported files. Possible values include GZIP, DEFLATE, SNAPPY, and NONE. The default value is NONE. DEFLATE and SNAPPY are only supported for Avro.
    destinationFormat String
    The exported file format. Possible values include CSV, NEWLINE_DELIMITED_JSON and AVRO for tables and SAVED_MODEL for models. The default value for tables is CSV. Tables with nested or repeated fields cannot be exported as CSV. The default value for models is SAVED_MODEL.
    fieldDelimiter String
    When extracting data in CSV format, this defines the delimiter to use between fields in the exported data. Default is ','
    printHeader Boolean
    Whether to print out a header row in the results. Default is true.
    sourceModel JobExtractSourceModel
    A reference to the model being exported. Structure is documented below.
    sourceTable JobExtractSourceTable
    A reference to the table being exported. Structure is documented below.
    useAvroLogicalTypes Boolean
    Whether to use logical types when extracting to AVRO format.
    destinationUris string[]
    A list of fully-qualified Google Cloud Storage URIs where the extracted table should be written.
    compression string
    The compression type to use for exported files. Possible values include GZIP, DEFLATE, SNAPPY, and NONE. The default value is NONE. DEFLATE and SNAPPY are only supported for Avro.
    destinationFormat string
    The exported file format. Possible values include CSV, NEWLINE_DELIMITED_JSON and AVRO for tables and SAVED_MODEL for models. The default value for tables is CSV. Tables with nested or repeated fields cannot be exported as CSV. The default value for models is SAVED_MODEL.
    fieldDelimiter string
    When extracting data in CSV format, this defines the delimiter to use between fields in the exported data. Default is ','
    printHeader boolean
    Whether to print out a header row in the results. Default is true.
    sourceModel JobExtractSourceModel
    A reference to the model being exported. Structure is documented below.
    sourceTable JobExtractSourceTable
    A reference to the table being exported. Structure is documented below.
    useAvroLogicalTypes boolean
    Whether to use logical types when extracting to AVRO format.
    destination_uris Sequence[str]
    A list of fully-qualified Google Cloud Storage URIs where the extracted table should be written.
    compression str
    The compression type to use for exported files. Possible values include GZIP, DEFLATE, SNAPPY, and NONE. The default value is NONE. DEFLATE and SNAPPY are only supported for Avro.
    destination_format str
    The exported file format. Possible values include CSV, NEWLINE_DELIMITED_JSON and AVRO for tables and SAVED_MODEL for models. The default value for tables is CSV. Tables with nested or repeated fields cannot be exported as CSV. The default value for models is SAVED_MODEL.
    field_delimiter str
    When extracting data in CSV format, this defines the delimiter to use between fields in the exported data. Default is ','
    print_header bool
    Whether to print out a header row in the results. Default is true.
    source_model JobExtractSourceModel
    A reference to the model being exported. Structure is documented below.
    source_table JobExtractSourceTable
    A reference to the table being exported. Structure is documented below.
    use_avro_logical_types bool
    Whether to use logical types when extracting to AVRO format.
    destinationUris List<String>
    A list of fully-qualified Google Cloud Storage URIs where the extracted table should be written.
    compression String
    The compression type to use for exported files. Possible values include GZIP, DEFLATE, SNAPPY, and NONE. The default value is NONE. DEFLATE and SNAPPY are only supported for Avro.
    destinationFormat String
    The exported file format. Possible values include CSV, NEWLINE_DELIMITED_JSON and AVRO for tables and SAVED_MODEL for models. The default value for tables is CSV. Tables with nested or repeated fields cannot be exported as CSV. The default value for models is SAVED_MODEL.
    fieldDelimiter String
    When extracting data in CSV format, this defines the delimiter to use between fields in the exported data. Default is ','
    printHeader Boolean
    Whether to print out a header row in the results. Default is true.
    sourceModel Property Map
    A reference to the model being exported. Structure is documented below.
    sourceTable Property Map
    A reference to the table being exported. Structure is documented below.
    useAvroLogicalTypes Boolean
    Whether to use logical types when extracting to AVRO format.
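
    A minimal sketch of the extract configuration above, exporting a table to Cloud Storage as gzipped CSV; bucket and table names are placeholders.

    import * as gcp from "@pulumi/gcp";

    const extractJob = new gcp.bigquery.Job("extractJob", {
        jobId: "job_extract_example",
        extract: {
            sourceTable: {
                projectId: "example-project",
                datasetId: "example_dataset",
                tableId: "example_table",
            },
            // The wildcard lets BigQuery shard large exports across multiple files.
            destinationUris: ["gs://example-bucket/exports/example_table-*.csv.gz"],
            destinationFormat: "CSV",
            compression: "GZIP",
            fieldDelimiter: ",",
            printHeader: true,
        },
    });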

    JobExtractSourceModel, JobExtractSourceModelArgs

    DatasetId string
    The ID of the dataset containing this model.
    ModelId string
    The ID of the model.
    ProjectId string
    The ID of the project containing this model.
    DatasetId string
    The ID of the dataset containing this model.
    ModelId string
    The ID of the model.
    ProjectId string
    The ID of the project containing this model.
    datasetId String
    The ID of the dataset containing this model.
    modelId String
    The ID of the model.
    projectId String
    The ID of the project containing this model.
    datasetId string
    The ID of the dataset containing this model.
    modelId string
    The ID of the model.
    projectId string
    The ID of the project containing this model.
    dataset_id str
    The ID of the dataset containing this model.
    model_id str
    The ID of the model.
    project_id str
    The ID of the project containing this model.
    datasetId String
    The ID of the dataset containing this model.
    modelId String
    The ID of the model.
    projectId String
    The ID of the project containing this model.
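
    For comparison with the table case, here is a hedged TypeScript sketch of an extract job that references a BigQuery ML model through sourceModel; the model and bucket names are hypothetical.

    import * as gcp from "@pulumi/gcp";

    // Export a (hypothetical) BigQuery ML model; models are exported as SAVED_MODEL.
    const modelExtract = new gcp.bigquery.Job("model_extract", {
        jobId: "job_extract_model",
        extract: {
            destinationUris: ["gs://my-export-bucket/model/"], // hypothetical bucket
            destinationFormat: "SAVED_MODEL",
            sourceModel: {
                projectId: "my-project", // hypothetical project
                datasetId: "my_dataset", // hypothetical dataset
                modelId: "my_model",     // hypothetical model
            },
        },
    });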

    JobExtractSourceTable, JobExtractSourceTableArgs

    TableId string
    The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or in the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
    DatasetId string
    The ID of the dataset containing this table.
    ProjectId string
    The ID of the project containing this table.
    TableId string
    The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or in the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
    DatasetId string
    The ID of the dataset containing this table.
    ProjectId string
    The ID of the project containing this table.
    tableId String
    The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or in the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
    datasetId String
    The ID of the dataset containing this table.
    projectId String
    The ID of the project containing this table.
    tableId string
    The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or in the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
    datasetId string
    The ID of the dataset containing this table.
    projectId string
    The ID of the project containing this table.
    table_id str
    The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or in the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
    dataset_id str
    The ID of the dataset containing this table.
    project_id str
    The ID of the project containing this table.
    tableId String
    The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or in the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
    datasetId String
    The ID of the dataset containing this table.
    projectId String
    The ID of the project containing this table.
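
    Since table_id accepts either a bare ID (with project_id and dataset_id set alongside it) or a full resource path, the two sourceTable forms in the sketch below are equivalent; all identifiers are hypothetical.

    import * as gcp from "@pulumi/gcp";

    const tableExtract = new gcp.bigquery.Job("table_extract", {
        jobId: "job_extract_table",
        extract: {
            destinationUris: ["gs://my-export-bucket/table-*.csv"], // hypothetical bucket
            // Fully-qualified form: everything in tableId.
            sourceTable: {
                tableId: "projects/my-project/datasets/my_dataset/tables/my_table",
            },
            // Component form (equivalent):
            // sourceTable: { projectId: "my-project", datasetId: "my_dataset", tableId: "my_table" },
        },
    });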

    JobLoad, JobLoadArgs

    DestinationTable JobLoadDestinationTable
    The destination table to load the data into. Structure is documented below.
    SourceUris List<string>
    The fully-qualified URIs that point to your data in Google Cloud. For Google Cloud Storage URIs: Each URI can contain one '*' wildcard character and it must come after the 'bucket' name. Size limits related to load jobs apply to external data sources. For Google Cloud Bigtable URIs: Exactly one URI can be specified and it has to be a fully specified and valid HTTPS URL for a Google Cloud Bigtable table. For Google Cloud Datastore backups: Exactly one URI can be specified. Also, the '*' wildcard character is not allowed.
    AllowJaggedRows bool
    Accept rows that are missing trailing optional columns. The missing values are treated as nulls. If false, records with missing trailing columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. Only applicable to CSV, ignored for other formats.
    AllowQuotedNewlines bool
    Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false.
    Autodetect bool
    Indicates if we should automatically infer the options and schema for CSV and JSON sources.
    CreateDisposition string
    Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is CREATE_IF_NEEDED. Possible values are: CREATE_IF_NEEDED, CREATE_NEVER.
    DestinationEncryptionConfiguration JobLoadDestinationEncryptionConfiguration
    Custom encryption configuration (e.g., Cloud KMS keys) Structure is documented below.
    Encoding string
    The character encoding of the data. The supported values are UTF-8 or ISO-8859-1. The default value is UTF-8. BigQuery decodes the data after the raw, binary data has been split using the values of the quote and fieldDelimiter properties.
    FieldDelimiter string
    The separator for fields in a CSV file. The separator can be any ISO-8859-1 single-byte character. To use a character in the range 128-255, you must encode the character as UTF8. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. BigQuery also supports the escape sequence "\t" to specify a tab separator. The default value is a comma (',').
    IgnoreUnknownValues bool
    Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value: for CSV, trailing columns; for JSON, named values that don't match any column names.
    JsonExtension string
    If sourceFormat is set to newline-delimited JSON, indicates whether it should be processed as a JSON variant such as GeoJSON. For a sourceFormat other than JSON, omit this field. For newline-delimited GeoJSON, set this to GEOJSON.
    MaxBadRecords int
    The maximum number of bad records that BigQuery can ignore when running the job. If the number of bad records exceeds this value, an invalid error is returned in the job result. The default value is 0, which requires that all records are valid.
    NullMarker string
    Specifies a string that represents a null value in a CSV file. The default value is the empty string. If you set this property to a custom value, BigQuery throws an error if an empty string is present for all data types except for STRING and BYTE. For STRING and BYTE columns, BigQuery interprets the empty string as an empty value.
    ParquetOptions JobLoadParquetOptions
    Parquet options for load jobs and for creating external tables. Structure is documented below.
    ProjectionFields List<string>
    If sourceFormat is set to "DATASTORE_BACKUP", indicates which entity properties to load into BigQuery from a Cloud Datastore backup. Property names are case sensitive and must be top-level properties. If no properties are specified, BigQuery loads all properties. If any named property isn't found in the Cloud Datastore backup, an invalid error is returned in the job result.
    Quote string
    The value that is used to quote data sections in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. The default value is a double-quote ('"'). If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set the allowQuotedNewlines property to true.
    SchemaUpdateOptions List<string>
    Allows the schema of the destination table to be updated as a side effect of the load job if a schema is autodetected or supplied in the job configuration. Schema update options are supported in two cases: when writeDisposition is WRITE_APPEND; when writeDisposition is WRITE_TRUNCATE and the destination table is a partition of a table, specified by partition decorators. For normal tables, WRITE_TRUNCATE will always overwrite the schema. One or more of the following values are specified: ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema. ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original schema to nullable.
    SkipLeadingRows int
    The number of rows at the top of a CSV file that BigQuery will skip when loading the data. The default value is 0. This property is useful if you have header rows in the file that should be skipped. When autodetect is on, the behavior is the following: skipLeadingRows unspecified - Autodetect tries to detect headers in the first row. If they are not detected, the row is read as data. Otherwise data is read starting from the second row. skipLeadingRows is 0 - Instructs autodetect that there are no headers and data should be read starting from the first row. skipLeadingRows = N > 0 - Autodetect skips N-1 rows and tries to detect headers in row N. If headers are not detected, row N is just skipped. Otherwise row N is used to extract column names for the detected schema.
    SourceFormat string
    The format of the data files. For CSV files, specify "CSV". For datastore backups, specify "DATASTORE_BACKUP". For newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON". For Avro, specify "AVRO". For parquet, specify "PARQUET". For orc, specify "ORC". [Beta] For Bigtable, specify "BIGTABLE". The default value is CSV.
    TimePartitioning JobLoadTimePartitioning
    Time-based partitioning specification for the destination table. Structure is documented below.
    WriteDisposition string
    Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is WRITE_EMPTY. Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY.
    DestinationTable JobLoadDestinationTable
    The destination table to load the data into. Structure is documented below.
    SourceUris []string
    The fully-qualified URIs that point to your data in Google Cloud. For Google Cloud Storage URIs: Each URI can contain one '*' wildcard character and it must come after the 'bucket' name. Size limits related to load jobs apply to external data sources. For Google Cloud Bigtable URIs: Exactly one URI can be specified and it has to be a fully specified and valid HTTPS URL for a Google Cloud Bigtable table. For Google Cloud Datastore backups: Exactly one URI can be specified. Also, the '*' wildcard character is not allowed.
    AllowJaggedRows bool
    Accept rows that are missing trailing optional columns. The missing values are treated as nulls. If false, records with missing trailing columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. Only applicable to CSV, ignored for other formats.
    AllowQuotedNewlines bool
    Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false.
    Autodetect bool
    Indicates if we should automatically infer the options and schema for CSV and JSON sources.
    CreateDisposition string
    Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is CREATE_IF_NEEDED. Possible values are: CREATE_IF_NEEDED, CREATE_NEVER.
    DestinationEncryptionConfiguration JobLoadDestinationEncryptionConfiguration
    Custom encryption configuration (e.g., Cloud KMS keys) Structure is documented below.
    Encoding string
    The character encoding of the data. The supported values are UTF-8 or ISO-8859-1. The default value is UTF-8. BigQuery decodes the data after the raw, binary data has been split using the values of the quote and fieldDelimiter properties.
    FieldDelimiter string
    The separator for fields in a CSV file. The separator can be any ISO-8859-1 single-byte character. To use a character in the range 128-255, you must encode the character as UTF8. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. BigQuery also supports the escape sequence "\t" to specify a tab separator. The default value is a comma (',').
    IgnoreUnknownValues bool
    Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value: for CSV, trailing columns; for JSON, named values that don't match any column names.
    JsonExtension string
    If sourceFormat is set to newline-delimited JSON, indicates whether it should be processed as a JSON variant such as GeoJSON. For a sourceFormat other than JSON, omit this field. For newline-delimited GeoJSON, set this to GEOJSON.
    MaxBadRecords int
    The maximum number of bad records that BigQuery can ignore when running the job. If the number of bad records exceeds this value, an invalid error is returned in the job result. The default value is 0, which requires that all records are valid.
    NullMarker string
    Specifies a string that represents a null value in a CSV file. The default value is the empty string. If you set this property to a custom value, BigQuery throws an error if an empty string is present for all data types except for STRING and BYTE. For STRING and BYTE columns, BigQuery interprets the empty string as an empty value.
    ParquetOptions JobLoadParquetOptions
    Parquet options for load jobs and for creating external tables. Structure is documented below.
    ProjectionFields []string
    If sourceFormat is set to "DATASTORE_BACKUP", indicates which entity properties to load into BigQuery from a Cloud Datastore backup. Property names are case sensitive and must be top-level properties. If no properties are specified, BigQuery loads all properties. If any named property isn't found in the Cloud Datastore backup, an invalid error is returned in the job result.
    Quote string
    The value that is used to quote data sections in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. The default value is a double-quote ('"'). If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set the allowQuotedNewlines property to true.
    SchemaUpdateOptions []string
    Allows the schema of the destination table to be updated as a side effect of the load job if a schema is autodetected or supplied in the job configuration. Schema update options are supported in two cases: when writeDisposition is WRITE_APPEND; when writeDisposition is WRITE_TRUNCATE and the destination table is a partition of a table, specified by partition decorators. For normal tables, WRITE_TRUNCATE will always overwrite the schema. One or more of the following values are specified: ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema. ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original schema to nullable.
    SkipLeadingRows int
    The number of rows at the top of a CSV file that BigQuery will skip when loading the data. The default value is 0. This property is useful if you have header rows in the file that should be skipped. When autodetect is on, the behavior is the following: skipLeadingRows unspecified - Autodetect tries to detect headers in the first row. If they are not detected, the row is read as data. Otherwise data is read starting from the second row. skipLeadingRows is 0 - Instructs autodetect that there are no headers and data should be read starting from the first row. skipLeadingRows = N > 0 - Autodetect skips N-1 rows and tries to detect headers in row N. If headers are not detected, row N is just skipped. Otherwise row N is used to extract column names for the detected schema.
    SourceFormat string
    The format of the data files. For CSV files, specify "CSV". For datastore backups, specify "DATASTORE_BACKUP". For newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON". For Avro, specify "AVRO". For parquet, specify "PARQUET". For orc, specify "ORC". [Beta] For Bigtable, specify "BIGTABLE". The default value is CSV.
    TimePartitioning JobLoadTimePartitioning
    Time-based partitioning specification for the destination table. Structure is documented below.
    WriteDisposition string
    Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is WRITE_EMPTY. Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY.
    destinationTable JobLoadDestinationTable
    The destination table to load the data into. Structure is documented below.
    sourceUris List<String>
    The fully-qualified URIs that point to your data in Google Cloud. For Google Cloud Storage URIs: Each URI can contain one '*' wildcard character and it must come after the 'bucket' name. Size limits related to load jobs apply to external data sources. For Google Cloud Bigtable URIs: Exactly one URI can be specified and it has to be a fully specified and valid HTTPS URL for a Google Cloud Bigtable table. For Google Cloud Datastore backups: Exactly one URI can be specified. Also, the '*' wildcard character is not allowed.
    allowJaggedRows Boolean
    Accept rows that are missing trailing optional columns. The missing values are treated as nulls. If false, records with missing trailing columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. Only applicable to CSV, ignored for other formats.
    allowQuotedNewlines Boolean
    Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false.
    autodetect Boolean
    Indicates if we should automatically infer the options and schema for CSV and JSON sources.
    createDisposition String
    Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is CREATE_IF_NEEDED. Possible values are: CREATE_IF_NEEDED, CREATE_NEVER.
    destinationEncryptionConfiguration JobLoadDestinationEncryptionConfiguration
    Custom encryption configuration (e.g., Cloud KMS keys) Structure is documented below.
    encoding String
    The character encoding of the data. The supported values are UTF-8 or ISO-8859-1. The default value is UTF-8. BigQuery decodes the data after the raw, binary data has been split using the values of the quote and fieldDelimiter properties.
    fieldDelimiter String
    The separator for fields in a CSV file. The separator can be any ISO-8859-1 single-byte character. To use a character in the range 128-255, you must encode the character as UTF8. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. BigQuery also supports the escape sequence "\t" to specify a tab separator. The default value is a comma (',').
    ignoreUnknownValues Boolean
    Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value: for CSV, trailing columns; for JSON, named values that don't match any column names.
    jsonExtension String
    If sourceFormat is set to newline-delimited JSON, indicates whether it should be processed as a JSON variant such as GeoJSON. For a sourceFormat other than JSON, omit this field. For newline-delimited GeoJSON, set this to GEOJSON.
    maxBadRecords Integer
    The maximum number of bad records that BigQuery can ignore when running the job. If the number of bad records exceeds this value, an invalid error is returned in the job result. The default value is 0, which requires that all records are valid.
    nullMarker String
    Specifies a string that represents a null value in a CSV file. The default value is the empty string. If you set this property to a custom value, BigQuery throws an error if an empty string is present for all data types except for STRING and BYTE. For STRING and BYTE columns, BigQuery interprets the empty string as an empty value.
    parquetOptions JobLoadParquetOptions
    Parquet options for load jobs and for creating external tables. Structure is documented below.
    projectionFields List<String>
    If sourceFormat is set to "DATASTORE_BACKUP", indicates which entity properties to load into BigQuery from a Cloud Datastore backup. Property names are case sensitive and must be top-level properties. If no properties are specified, BigQuery loads all properties. If any named property isn't found in the Cloud Datastore backup, an invalid error is returned in the job result.
    quote String
    The value that is used to quote data sections in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. The default value is a double-quote ('"'). If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set the allowQuotedNewlines property to true.
    schemaUpdateOptions List<String>
    Allows the schema of the destination table to be updated as a side effect of the load job if a schema is autodetected or supplied in the job configuration. Schema update options are supported in two cases: when writeDisposition is WRITE_APPEND; when writeDisposition is WRITE_TRUNCATE and the destination table is a partition of a table, specified by partition decorators. For normal tables, WRITE_TRUNCATE will always overwrite the schema. One or more of the following values are specified: ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema. ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original schema to nullable.
    skipLeadingRows Integer
    The number of rows at the top of a CSV file that BigQuery will skip when loading the data. The default value is 0. This property is useful if you have header rows in the file that should be skipped. When autodetect is on, the behavior is the following: skipLeadingRows unspecified - Autodetect tries to detect headers in the first row. If they are not detected, the row is read as data. Otherwise data is read starting from the second row. skipLeadingRows is 0 - Instructs autodetect that there are no headers and data should be read starting from the first row. skipLeadingRows = N > 0 - Autodetect skips N-1 rows and tries to detect headers in row N. If headers are not detected, row N is just skipped. Otherwise row N is used to extract column names for the detected schema.
    sourceFormat String
    The format of the data files. For CSV files, specify "CSV". For datastore backups, specify "DATASTORE_BACKUP". For newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON". For Avro, specify "AVRO". For parquet, specify "PARQUET". For orc, specify "ORC". [Beta] For Bigtable, specify "BIGTABLE". The default value is CSV.
    timePartitioning JobLoadTimePartitioning
    Time-based partitioning specification for the destination table. Structure is documented below.
    writeDisposition String
    Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is WRITE_EMPTY. Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY.
    destinationTable JobLoadDestinationTable
    The destination table to load the data into. Structure is documented below.
    sourceUris string[]
    The fully-qualified URIs that point to your data in Google Cloud. For Google Cloud Storage URIs: Each URI can contain one '*' wildcard character and it must come after the 'bucket' name. Size limits related to load jobs apply to external data sources. For Google Cloud Bigtable URIs: Exactly one URI can be specified and it has to be a fully specified and valid HTTPS URL for a Google Cloud Bigtable table. For Google Cloud Datastore backups: Exactly one URI can be specified. Also, the '*' wildcard character is not allowed.
    allowJaggedRows boolean
    Accept rows that are missing trailing optional columns. The missing values are treated as nulls. If false, records with missing trailing columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. Only applicable to CSV, ignored for other formats.
    allowQuotedNewlines boolean
    Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false.
    autodetect boolean
    Indicates if we should automatically infer the options and schema for CSV and JSON sources.
    createDisposition string
    Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is CREATE_IF_NEEDED. Possible values are: CREATE_IF_NEEDED, CREATE_NEVER.
    destinationEncryptionConfiguration JobLoadDestinationEncryptionConfiguration
    Custom encryption configuration (e.g., Cloud KMS keys) Structure is documented below.
    encoding string
    The character encoding of the data. The supported values are UTF-8 or ISO-8859-1. The default value is UTF-8. BigQuery decodes the data after the raw, binary data has been split using the values of the quote and fieldDelimiter properties.
    fieldDelimiter string
    The separator for fields in a CSV file. The separator can be any ISO-8859-1 single-byte character. To use a character in the range 128-255, you must encode the character as UTF8. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. BigQuery also supports the escape sequence "\t" to specify a tab separator. The default value is a comma (',').
    ignoreUnknownValues boolean
    Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value: for CSV, trailing columns; for JSON, named values that don't match any column names.
    jsonExtension string
    If sourceFormat is set to newline-delimited JSON, indicates whether it should be processed as a JSON variant such as GeoJSON. For a sourceFormat other than JSON, omit this field. For newline-delimited GeoJSON, set this to GEOJSON.
    maxBadRecords number
    The maximum number of bad records that BigQuery can ignore when running the job. If the number of bad records exceeds this value, an invalid error is returned in the job result. The default value is 0, which requires that all records are valid.
    nullMarker string
    Specifies a string that represents a null value in a CSV file. The default value is the empty string. If you set this property to a custom value, BigQuery throws an error if an empty string is present for all data types except for STRING and BYTE. For STRING and BYTE columns, BigQuery interprets the empty string as an empty value.
    parquetOptions JobLoadParquetOptions
    Parquet options for load jobs and for creating external tables. Structure is documented below.
    projectionFields string[]
    If sourceFormat is set to "DATASTORE_BACKUP", indicates which entity properties to load into BigQuery from a Cloud Datastore backup. Property names are case sensitive and must be top-level properties. If no properties are specified, BigQuery loads all properties. If any named property isn't found in the Cloud Datastore backup, an invalid error is returned in the job result.
    quote string
    The value that is used to quote data sections in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. The default value is a double-quote ('"'). If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set the allowQuotedNewlines property to true.
    schemaUpdateOptions string[]
    Allows the schema of the destination table to be updated as a side effect of the load job if a schema is autodetected or supplied in the job configuration. Schema update options are supported in two cases: when writeDisposition is WRITE_APPEND; when writeDisposition is WRITE_TRUNCATE and the destination table is a partition of a table, specified by partition decorators. For normal tables, WRITE_TRUNCATE will always overwrite the schema. One or more of the following values are specified: ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema. ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original schema to nullable.
    skipLeadingRows number
    The number of rows at the top of a CSV file that BigQuery will skip when loading the data. The default value is 0. This property is useful if you have header rows in the file that should be skipped. When autodetect is on, the behavior is the following: skipLeadingRows unspecified - Autodetect tries to detect headers in the first row. If they are not detected, the row is read as data. Otherwise data is read starting from the second row. skipLeadingRows is 0 - Instructs autodetect that there are no headers and data should be read starting from the first row. skipLeadingRows = N > 0 - Autodetect skips N-1 rows and tries to detect headers in row N. If headers are not detected, row N is just skipped. Otherwise row N is used to extract column names for the detected schema.
    sourceFormat string
    The format of the data files. For CSV files, specify "CSV". For datastore backups, specify "DATASTORE_BACKUP". For newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON". For Avro, specify "AVRO". For parquet, specify "PARQUET". For orc, specify "ORC". [Beta] For Bigtable, specify "BIGTABLE". The default value is CSV.
    timePartitioning JobLoadTimePartitioning
    Time-based partitioning specification for the destination table. Structure is documented below.
    writeDisposition string
    Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is WRITE_EMPTY. Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY.
    destination_table JobLoadDestinationTable
    The destination table to load the data into. Structure is documented below.
    source_uris Sequence[str]
    The fully-qualified URIs that point to your data in Google Cloud. For Google Cloud Storage URIs: Each URI can contain one '*' wildcard character and it must come after the 'bucket' name. Size limits related to load jobs apply to external data sources. For Google Cloud Bigtable URIs: Exactly one URI can be specified and it has to be a fully specified and valid HTTPS URL for a Google Cloud Bigtable table. For Google Cloud Datastore backups: Exactly one URI can be specified. Also, the '*' wildcard character is not allowed.
    allow_jagged_rows bool
    Accept rows that are missing trailing optional columns. The missing values are treated as nulls. If false, records with missing trailing columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. Only applicable to CSV, ignored for other formats.
    allow_quoted_newlines bool
    Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false.
    autodetect bool
    Indicates if we should automatically infer the options and schema for CSV and JSON sources.
    create_disposition str
    Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is CREATE_IF_NEEDED. Possible values are: CREATE_IF_NEEDED, CREATE_NEVER.
    destination_encryption_configuration JobLoadDestinationEncryptionConfiguration
    Custom encryption configuration (e.g., Cloud KMS keys) Structure is documented below.
    encoding str
    The character encoding of the data. The supported values are UTF-8 or ISO-8859-1. The default value is UTF-8. BigQuery decodes the data after the raw, binary data has been split using the values of the quote and fieldDelimiter properties.
    field_delimiter str
    The separator for fields in a CSV file. The separator can be any ISO-8859-1 single-byte character. To use a character in the range 128-255, you must encode the character as UTF8. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. BigQuery also supports the escape sequence "\t" to specify a tab separator. The default value is a comma (',').
    ignore_unknown_values bool
    Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value: for CSV, trailing columns; for JSON, named values that don't match any column names.
    json_extension str
    If sourceFormat is set to newline-delimited JSON, indicates whether it should be processed as a JSON variant such as GeoJSON. For a sourceFormat other than JSON, omit this field. For newline-delimited GeoJSON, set this to GEOJSON.
    max_bad_records int
    The maximum number of bad records that BigQuery can ignore when running the job. If the number of bad records exceeds this value, an invalid error is returned in the job result. The default value is 0, which requires that all records are valid.
    null_marker str
    Specifies a string that represents a null value in a CSV file. The default value is the empty string. If you set this property to a custom value, BigQuery throws an error if an empty string is present for all data types except for STRING and BYTE. For STRING and BYTE columns, BigQuery interprets the empty string as an empty value.
    parquet_options JobLoadParquetOptions
    Parquet options for load jobs and for creating external tables. Structure is documented below.
    projection_fields Sequence[str]
    If sourceFormat is set to "DATASTORE_BACKUP", indicates which entity properties to load into BigQuery from a Cloud Datastore backup. Property names are case sensitive and must be top-level properties. If no properties are specified, BigQuery loads all properties. If any named property isn't found in the Cloud Datastore backup, an invalid error is returned in the job result.
    quote str
    The value that is used to quote data sections in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. The default value is a double-quote ('"'). If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set the allowQuotedNewlines property to true.
    schema_update_options Sequence[str]
    Allows the schema of the destination table to be updated as a side effect of the load job if a schema is autodetected or supplied in the job configuration. Schema update options are supported in two cases: when writeDisposition is WRITE_APPEND; when writeDisposition is WRITE_TRUNCATE and the destination table is a partition of a table, specified by partition decorators. For normal tables, WRITE_TRUNCATE will always overwrite the schema. One or more of the following values are specified: ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema. ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original schema to nullable.
    skip_leading_rows int
    The number of rows at the top of a CSV file that BigQuery will skip when loading the data. The default value is 0. This property is useful if you have header rows in the file that should be skipped. When autodetect is on, the behavior is the following: skipLeadingRows unspecified - Autodetect tries to detect headers in the first row. If they are not detected, the row is read as data. Otherwise data is read starting from the second row. skipLeadingRows is 0 - Instructs autodetect that there are no headers and data should be read starting from the first row. skipLeadingRows = N > 0 - Autodetect skips N-1 rows and tries to detect headers in row N. If headers are not detected, row N is just skipped. Otherwise row N is used to extract column names for the detected schema.
    source_format str
    The format of the data files. For CSV files, specify "CSV". For datastore backups, specify "DATASTORE_BACKUP". For newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON". For Avro, specify "AVRO". For parquet, specify "PARQUET". For orc, specify "ORC". [Beta] For Bigtable, specify "BIGTABLE". The default value is CSV.
    time_partitioning JobLoadTimePartitioning
    Time-based partitioning specification for the destination table. Structure is documented below.
    write_disposition str
    Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is WRITE_EMPTY. Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY.
    destinationTable Property Map
    The destination table to load the data into. Structure is documented below.
    sourceUris List<String>
    The fully-qualified URIs that point to your data in Google Cloud. For Google Cloud Storage URIs: Each URI can contain one '*' wildcard character and it must come after the 'bucket' name. Size limits related to load jobs apply to external data sources. For Google Cloud Bigtable URIs: Exactly one URI can be specified and it has to be a fully specified and valid HTTPS URL for a Google Cloud Bigtable table. For Google Cloud Datastore backups: Exactly one URI can be specified. Also, the '*' wildcard character is not allowed.
    allowJaggedRows Boolean
    Accept rows that are missing trailing optional columns. The missing values are treated as nulls. If false, records with missing trailing columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. Only applicable to CSV, ignored for other formats.
    allowQuotedNewlines Boolean
    Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false.
    autodetect Boolean
    Indicates if we should automatically infer the options and schema for CSV and JSON sources.
    createDisposition String
    Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is CREATE_IF_NEEDED. Possible values are: CREATE_IF_NEEDED, CREATE_NEVER.
    destinationEncryptionConfiguration Property Map
    Custom encryption configuration (e.g., Cloud KMS keys) Structure is documented below.
    encoding String
    The character encoding of the data. The supported values are UTF-8 or ISO-8859-1. The default value is UTF-8. BigQuery decodes the data after the raw, binary data has been split using the values of the quote and fieldDelimiter properties.
    fieldDelimiter String
    The separator for fields in a CSV file. The separator can be any ISO-8859-1 single-byte character. To use a character in the range 128-255, you must encode the character as UTF8. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. BigQuery also supports the escape sequence "\t" to specify a tab separator. The default value is a comma (',').
    ignoreUnknownValues Boolean
    Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value: for CSV, trailing columns; for JSON, named values that don't match any column names.
    jsonExtension String
    If sourceFormat is set to newline-delimited JSON, indicates whether it should be processed as a JSON variant such as GeoJSON. For a sourceFormat other than JSON, omit this field. For newline-delimited GeoJSON, set this to GEOJSON.
    maxBadRecords Number
    The maximum number of bad records that BigQuery can ignore when running the job. If the number of bad records exceeds this value, an invalid error is returned in the job result. The default value is 0, which requires that all records are valid.
    nullMarker String
    Specifies a string that represents a null value in a CSV file. The default value is the empty string. If you set this property to a custom value, BigQuery throws an error if an empty string is present for all data types except for STRING and BYTE. For STRING and BYTE columns, BigQuery interprets the empty string as an empty value.
    parquetOptions Property Map
    Parquet options for load jobs and for creating external tables. Structure is documented below.
    projectionFields List<String>
    If sourceFormat is set to "DATASTORE_BACKUP", indicates which entity properties to load into BigQuery from a Cloud Datastore backup. Property names are case sensitive and must be top-level properties. If no properties are specified, BigQuery loads all properties. If any named property isn't found in the Cloud Datastore backup, an invalid error is returned in the job result.
    quote String
    The value that is used to quote data sections in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. The default value is a double-quote ('"'). If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set the allowQuotedNewlines property to true.
    schemaUpdateOptions List<String>
    Allows the schema of the destination table to be updated as a side effect of the load job if a schema is autodetected or supplied in the job configuration. Schema update options are supported in two cases: when writeDisposition is WRITE_APPEND; when writeDisposition is WRITE_TRUNCATE and the destination table is a partition of a table, specified by partition decorators. For normal tables, WRITE_TRUNCATE will always overwrite the schema. One or more of the following values are specified: ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema. ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original schema to nullable.
    skipLeadingRows Number
    The number of rows at the top of a CSV file that BigQuery will skip when loading the data. The default value is 0. This property is useful if you have header rows in the file that should be skipped. When autodetect is on, the behavior is the following: skipLeadingRows unspecified - Autodetect tries to detect headers in the first row. If they are not detected, the row is read as data. Otherwise data is read starting from the second row. skipLeadingRows is 0 - Instructs autodetect that there are no headers and data should be read starting from the first row. skipLeadingRows = N > 0 - Autodetect skips N-1 rows and tries to detect headers in row N. If headers are not detected, row N is just skipped. Otherwise row N is used to extract column names for the detected schema.
    sourceFormat String
    The format of the data files. For CSV files, specify "CSV". For datastore backups, specify "DATASTORE_BACKUP". For newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON". For Avro, specify "AVRO". For parquet, specify "PARQUET". For orc, specify "ORC". [Beta] For Bigtable, specify "BIGTABLE". The default value is CSV.
    timePartitioning Property Map
    Time-based partitioning specification for the destination table. Structure is documented below.
    writeDisposition String
    Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is WRITE_EMPTY. Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY.
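
    To show how these load fields fit together, here is a minimal TypeScript sketch of a CSV load job; the bucket, project, dataset and table identifiers are hypothetical.

    import * as gcp from "@pulumi/gcp";

    // Append CSV files from Cloud Storage into a destination table,
    // creating the table and inferring its schema if needed.
    const loadJob = new gcp.bigquery.Job("load_job", {
        jobId: "job_load_csv",
        load: {
            sourceUris: ["gs://my-source-bucket/data/*.csv"], // hypothetical bucket
            destinationTable: {
                projectId: "my-project", // hypothetical project
                datasetId: "my_dataset", // hypothetical dataset
                tableId: "my_table",     // hypothetical table
            },
            sourceFormat: "CSV",
            skipLeadingRows: 1,                    // skip the header row
            autodetect: true,                      // infer the schema from the files
            createDisposition: "CREATE_IF_NEEDED",
            writeDisposition: "WRITE_APPEND",
        },
    });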

    JobLoadDestinationEncryptionConfiguration, JobLoadDestinationEncryptionConfigurationArgs

    KmsKeyName string
    Describes the Cloud KMS encryption key that will be used to protect the destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.
    KmsKeyVersion string
    (Output) Describes the Cloud KMS encryption key version used to protect the destination BigQuery table.
    KmsKeyName string
    Describes the Cloud KMS encryption key that will be used to protect the destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.
    KmsKeyVersion string
    (Output) Describes the Cloud KMS encryption key version used to protect the destination BigQuery table.
    kmsKeyName String
    Describes the Cloud KMS encryption key that will be used to protect the destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.
    kmsKeyVersion String
    (Output) Describes the Cloud KMS encryption key version used to protect the destination BigQuery table.
    kmsKeyName string
    Describes the Cloud KMS encryption key that will be used to protect the destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.
    kmsKeyVersion string
    (Output) Describes the Cloud KMS encryption key version used to protect the destination BigQuery table.
    kms_key_name str
    Describes the Cloud KMS encryption key that will be used to protect the destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.
    kms_key_version str
    (Output) Describes the Cloud KMS encryption key version used to protect the destination BigQuery table.
    kmsKeyName String
    Describes the Cloud KMS encryption key that will be used to protect the destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.
    kmsKeyVersion String
    (Output) Describes the Cloud KMS encryption key version used to protect the destination BigQuery table.
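
    As a hedged sketch, a load job can write to a CMEK-protected destination by setting kmsKeyName; the key path below is a hypothetical example, and the BigQuery service account must already have access to that key.

    import * as gcp from "@pulumi/gcp";

    const encryptedLoad = new gcp.bigquery.Job("encrypted_load", {
        jobId: "job_load_cmek",
        load: {
            sourceUris: ["gs://my-source-bucket/data/*.avro"], // hypothetical bucket
            sourceFormat: "AVRO",
            destinationTable: {
                projectId: "my-project",       // hypothetical project
                datasetId: "my_dataset",       // hypothetical dataset
                tableId: "my_encrypted_table", // hypothetical table
            },
            destinationEncryptionConfiguration: {
                // Hypothetical Cloud KMS key path.
                kmsKeyName: "projects/my-project/locations/us/keyRings/my-ring/cryptoKeys/my-key",
            },
        },
    });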

    JobLoadDestinationTable, JobLoadDestinationTableArgs

    TableId string
    The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or in the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
    DatasetId string
    The ID of the dataset containing this table.
    ProjectId string
    The ID of the project containing this table.
    TableId string
    The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or in the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
    DatasetId string
    The ID of the dataset containing this table.
    ProjectId string
    The ID of the project containing this table.
    tableId String
    The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or in the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
    datasetId String
    The ID of the dataset containing this table.
    projectId String
    The ID of the project containing this table.
    tableId string
    The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or in the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
    datasetId string
    The ID of the dataset containing this table.
    projectId string
    The ID of the project containing this table.
    table_id str
    The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or in the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
    dataset_id str
    The ID of the dataset containing this table.
    project_id str
    The ID of the project containing this table.
    tableId String
    The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or in the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
    datasetId String
    The ID of the dataset containing this table.
    projectId String
    The ID of the project containing this table.
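
    Rather than hard-coding identifiers, the destination table is often wired from an existing Table resource's outputs; the sketch below assumes hypothetical resource names.

    import * as gcp from "@pulumi/gcp";

    const dataset = new gcp.bigquery.Dataset("load_dataset", {
        datasetId: "load_dataset",
        location: "US",
    });
    const table = new gcp.bigquery.Table("load_table", {
        deletionProtection: false,
        datasetId: dataset.datasetId,
        tableId: "load_table",
    });

    const loadIntoTable = new gcp.bigquery.Job("load_into_table", {
        jobId: "job_load_into_table",
        load: {
            sourceUris: ["gs://my-source-bucket/data/*.json"], // hypothetical bucket
            sourceFormat: "NEWLINE_DELIMITED_JSON",
            autodetect: true,
            destinationTable: {
                projectId: table.project,
                datasetId: table.datasetId,
                tableId: table.tableId,
            },
        },
    });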

    JobLoadParquetOptions, JobLoadParquetOptionsArgs

    EnableListInference bool
    If sourceFormat is set to PARQUET, indicates whether to use schema inference specifically for Parquet LIST logical type.
    EnumAsString bool
    If sourceFormat is set to PARQUET, indicates whether to infer Parquet ENUM logical type as STRING instead of BYTES by default.
    EnableListInference bool
    If sourceFormat is set to PARQUET, indicates whether to use schema inference specifically for Parquet LIST logical type.
    EnumAsString bool
    If sourceFormat is set to PARQUET, indicates whether to infer Parquet ENUM logical type as STRING instead of BYTES by default.
    enableListInference Boolean
    If sourceFormat is set to PARQUET, indicates whether to use schema inference specifically for Parquet LIST logical type.
    enumAsString Boolean
    If sourceFormat is set to PARQUET, indicates whether to infer Parquet ENUM logical type as STRING instead of BYTES by default.
    enableListInference boolean
    If sourceFormat is set to PARQUET, indicates whether to use schema inference specifically for Parquet LIST logical type.
    enumAsString boolean
    If sourceFormat is set to PARQUET, indicates whether to infer Parquet ENUM logical type as STRING instead of BYTES by default.
    enable_list_inference bool
    If sourceFormat is set to PARQUET, indicates whether to use schema inference specifically for Parquet LIST logical type.
    enum_as_string bool
    If sourceFormat is set to PARQUET, indicates whether to infer Parquet ENUM logical type as STRING instead of BYTES by default.
    enableListInference Boolean
    If sourceFormat is set to PARQUET, indicates whether to use schema inference specifically for Parquet LIST logical type.
    enumAsString Boolean
    If sourceFormat is set to PARQUET, indicates whether to infer Parquet ENUM logical type as STRING instead of BYTES by default.
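
    A short TypeScript sketch of a Parquet load that sets both flags; the bucket and table identifiers are hypothetical.

    import * as gcp from "@pulumi/gcp";

    const parquetLoad = new gcp.bigquery.Job("parquet_load", {
        jobId: "job_load_parquet",
        load: {
            sourceUris: ["gs://my-source-bucket/data/*.parquet"], // hypothetical bucket
            sourceFormat: "PARQUET",
            destinationTable: {
                projectId: "my-project",     // hypothetical project
                datasetId: "my_dataset",     // hypothetical dataset
                tableId: "my_parquet_table", // hypothetical table
            },
            parquetOptions: {
                enableListInference: true, // use schema inference for Parquet LIST logical types
                enumAsString: true,        // load Parquet ENUM values as STRING rather than BYTES
            },
        },
    });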

    JobLoadTimePartitioning, JobLoadTimePartitioningArgs

    Type string
    The only type supported is DAY, which will generate one partition per day. Providing an empty string used to cause an error, but in OnePlatform the field will be treated as unset.
    ExpirationMs string
    Number of milliseconds for which to keep the storage for a partition. A wrapper is used here because 0 is an invalid value.
    Field string
    If not set, the table is partitioned by pseudo column '_PARTITIONTIME'; if set, the table is partitioned by this field. The field must be a top-level TIMESTAMP or DATE field. Its mode must be NULLABLE or REQUIRED. A wrapper is used here because an empty string is an invalid value.
    Type string
    The only type supported is DAY, which will generate one partition per day. Providing an empty string used to cause an error, but in OnePlatform the field will be treated as unset.
    ExpirationMs string
    Number of milliseconds for which to keep the storage for a partition. A wrapper is used here because 0 is an invalid value.
    Field string
    If not set, the table is partitioned by pseudo column '_PARTITIONTIME'; if set, the table is partitioned by this field. The field must be a top-level TIMESTAMP or DATE field. Its mode must be NULLABLE or REQUIRED. A wrapper is used here because an empty string is an invalid value.
    type String
    The only type supported is DAY, which will generate one partition per day. Providing an empty string used to cause an error, but in OnePlatform the field will be treated as unset.
    expirationMs String
    Number of milliseconds for which to keep the storage for a partition. A wrapper is used here because 0 is an invalid value.
    field String
    If not set, the table is partitioned by pseudo column '_PARTITIONTIME'; if set, the table is partitioned by this field. The field must be a top-level TIMESTAMP or DATE field. Its mode must be NULLABLE or REQUIRED. A wrapper is used here because an empty string is an invalid value.
    type string
    The only type supported is DAY, which will generate one partition per day. Providing an empty string used to cause an error, but in OnePlatform the field will be treated as unset.
    expirationMs string
    Number of milliseconds for which to keep the storage for a partition. A wrapper is used here because 0 is an invalid value.
    field string
    If not set, the table is partitioned by pseudo column '_PARTITIONTIME'; if set, the table is partitioned by this field. The field must be a top-level TIMESTAMP or DATE field. Its mode must be NULLABLE or REQUIRED. A wrapper is used here because an empty string is an invalid value.
    type str
    The only type supported is DAY, which will generate one partition per day. Providing an empty string used to cause an error, but in OnePlatform the field will be treated as unset.
    expiration_ms str
    Number of milliseconds for which to keep the storage for a partition. A wrapper is used here because 0 is an invalid value.
    field str
    If not set, the table is partitioned by pseudo column '_PARTITIONTIME'; if set, the table is partitioned by this field. The field must be a top-level TIMESTAMP or DATE field. Its mode must be NULLABLE or REQUIRED. A wrapper is used here because an empty string is an invalid value.
    type String
    The only type supported is DAY, which will generate one partition per day. Providing an empty string used to cause an error, but in OnePlatform the field will be treated as unset.
    expirationMs String
    Number of milliseconds for which to keep the storage for a partition. A wrapper is used here because 0 is an invalid value.
    field String
    If not set, the table is partitioned by pseudo column '_PARTITIONTIME'; if set, the table is partitioned by this field. The field must be a top-level TIMESTAMP or DATE field. Its mode must be NULLABLE or REQUIRED. A wrapper is used here because an empty string is an invalid value.
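
    A short sketch of how these time-partitioning settings might be applied on a load job; the bucket, dataset, table, and column names are hypothetical, and autodetect is used only to keep the example small.

    import * as gcp from "@pulumi/gcp";

    // Hypothetical load into a day-partitioned table; names are placeholders.
    const partitionedLoad = new gcp.bigquery.Job("partitioned-load", {
        jobId: "job_load_partitioned",
        load: {
            sourceUris: ["gs://example-bucket/events/*.csv"],
            autodetect: true,
            destinationTable: {
                projectId: "my-project",
                datasetId: "example_dataset",
                tableId: "events",
            },
            timePartitioning: {
                type: "DAY",
                // Partition by a top-level TIMESTAMP or DATE column instead of
                // the _PARTITIONTIME pseudo column.
                field: "event_date",
                // Keep each partition's storage for 30 days (in milliseconds).
                expirationMs: "2592000000",
            },
        },
    });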

    JobQuery, JobQueryArgs

    Query string
    SQL query text to execute. The useLegacySql field can be used to indicate whether the query uses legacy SQL or standard SQL. NOTE: queries containing DML language (DELETE, UPDATE, MERGE, INSERT) must specify create_disposition = "" and write_disposition = "".
    AllowLargeResults bool
    If true and query uses legacy SQL dialect, allows the query to produce arbitrarily large result tables at a slight cost in performance. Requires destinationTable to be set. For standard SQL queries, this flag is ignored and large results are always allowed. However, you must still set destinationTable when result size exceeds the allowed maximum response size.
    CreateDisposition string
    Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is CREATE_IF_NEEDED. Possible values are: CREATE_IF_NEEDED, CREATE_NEVER.
    DefaultDataset JobQueryDefaultDataset
    Specifies the default dataset to use for unqualified table names in the query. Note that this does not alter behavior of unqualified dataset names. Structure is documented below.
    DestinationEncryptionConfiguration JobQueryDestinationEncryptionConfiguration
    Custom encryption configuration (e.g., Cloud KMS keys). Structure is documented below.
    DestinationTable JobQueryDestinationTable
    Describes the table where the query results should be stored. This property must be set for large results that exceed the maximum response size. For queries that produce anonymous (cached) results, this field will be populated by BigQuery. Structure is documented below.
    FlattenResults bool
    If true and query uses legacy SQL dialect, flattens all nested and repeated fields in the query results. allowLargeResults must be true if this is set to false. For standard SQL queries, this flag is ignored and results are never flattened.
    MaximumBillingTier int
    Limits the billing tier for this job. Queries that have resource usage beyond this tier will fail (without incurring a charge). If unspecified, this will be set to your project default.
    MaximumBytesBilled string
    Limits the bytes billed for this job. Queries that will have bytes billed beyond this limit will fail (without incurring a charge). If unspecified, this will be set to your project default.
    ParameterMode string
    Standard SQL only. Set to POSITIONAL to use positional (?) query parameters or to NAMED to use named (@myparam) query parameters in this query.
    Priority string
    Specifies a priority for the query. Default value is INTERACTIVE. Possible values are: INTERACTIVE, BATCH.
    SchemaUpdateOptions List<string>
    Allows the schema of the destination table to be updated as a side effect of the query job. Schema update options are supported in two cases: when writeDisposition is WRITE_APPEND; when writeDisposition is WRITE_TRUNCATE and the destination table is a partition of a table, specified by partition decorators. For normal tables, WRITE_TRUNCATE will always overwrite the schema. One or more of the following values are specified: ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema. ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original schema to nullable.
    ScriptOptions JobQueryScriptOptions
    Options controlling the execution of scripts. Structure is documented below.
    UseLegacySql bool
    Specifies whether to use BigQuery's legacy SQL dialect for this query. The default value is true. If set to false, the query will use BigQuery's standard SQL.
    UseQueryCache bool
    Whether to look for the result in the query cache. The query cache is a best-effort cache that will be flushed whenever tables in the query are modified. Moreover, the query cache is only available when a query does not have a destination table specified. The default value is true.
    UserDefinedFunctionResources List<JobQueryUserDefinedFunctionResource>
    Describes user-defined function resources used in the query. Structure is documented below.
    WriteDisposition string
    Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is WRITE_EMPTY. Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY.
    Query string
    SQL query text to execute. The useLegacySql field can be used to indicate whether the query uses legacy SQL or standard SQL. NOTE: queries containing DML language (DELETE, UPDATE, MERGE, INSERT) must specify create_disposition = "" and write_disposition = "".
    AllowLargeResults bool
    If true and query uses legacy SQL dialect, allows the query to produce arbitrarily large result tables at a slight cost in performance. Requires destinationTable to be set. For standard SQL queries, this flag is ignored and large results are always allowed. However, you must still set destinationTable when result size exceeds the allowed maximum response size.
    CreateDisposition string
    Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is CREATE_IF_NEEDED. Possible values are: CREATE_IF_NEEDED, CREATE_NEVER.
    DefaultDataset JobQueryDefaultDataset
    Specifies the default dataset to use for unqualified table names in the query. Note that this does not alter behavior of unqualified dataset names. Structure is documented below.
    DestinationEncryptionConfiguration JobQueryDestinationEncryptionConfiguration
    Custom encryption configuration (e.g., Cloud KMS keys). Structure is documented below.
    DestinationTable JobQueryDestinationTable
    Describes the table where the query results should be stored. This property must be set for large results that exceed the maximum response size. For queries that produce anonymous (cached) results, this field will be populated by BigQuery. Structure is documented below.
    FlattenResults bool
    If true and query uses legacy SQL dialect, flattens all nested and repeated fields in the query results. allowLargeResults must be true if this is set to false. For standard SQL queries, this flag is ignored and results are never flattened.
    MaximumBillingTier int
    Limits the billing tier for this job. Queries that have resource usage beyond this tier will fail (without incurring a charge). If unspecified, this will be set to your project default.
    MaximumBytesBilled string
    Limits the bytes billed for this job. Queries that will have bytes billed beyond this limit will fail (without incurring a charge). If unspecified, this will be set to your project default.
    ParameterMode string
    Standard SQL only. Set to POSITIONAL to use positional (?) query parameters or to NAMED to use named (@myparam) query parameters in this query.
    Priority string
    Specifies a priority for the query. Default value is INTERACTIVE. Possible values are: INTERACTIVE, BATCH.
    SchemaUpdateOptions []string
    Allows the schema of the destination table to be updated as a side effect of the query job. Schema update options are supported in two cases: when writeDisposition is WRITE_APPEND; when writeDisposition is WRITE_TRUNCATE and the destination table is a partition of a table, specified by partition decorators. For normal tables, WRITE_TRUNCATE will always overwrite the schema. One or more of the following values are specified: ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema. ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original schema to nullable.
    ScriptOptions JobQueryScriptOptions
    Options controlling the execution of scripts. Structure is documented below.
    UseLegacySql bool
    Specifies whether to use BigQuery's legacy SQL dialect for this query. The default value is true. If set to false, the query will use BigQuery's standard SQL.
    UseQueryCache bool
    Whether to look for the result in the query cache. The query cache is a best-effort cache that will be flushed whenever tables in the query are modified. Moreover, the query cache is only available when a query does not have a destination table specified. The default value is true.
    UserDefinedFunctionResources []JobQueryUserDefinedFunctionResource
    Describes user-defined function resources used in the query. Structure is documented below.
    WriteDisposition string
    Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is WRITE_EMPTY. Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY.
    query String
    SQL query text to execute. The useLegacySql field can be used to indicate whether the query uses legacy SQL or standard SQL. NOTE: queries containing DML language (DELETE, UPDATE, MERGE, INSERT) must specify create_disposition = "" and write_disposition = "".
    allowLargeResults Boolean
    If true and query uses legacy SQL dialect, allows the query to produce arbitrarily large result tables at a slight cost in performance. Requires destinationTable to be set. For standard SQL queries, this flag is ignored and large results are always allowed. However, you must still set destinationTable when result size exceeds the allowed maximum response size.
    createDisposition String
    Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is CREATE_IF_NEEDED. Possible values are: CREATE_IF_NEEDED, CREATE_NEVER.
    defaultDataset JobQueryDefaultDataset
    Specifies the default dataset to use for unqualified table names in the query. Note that this does not alter behavior of unqualified dataset names. Structure is documented below.
    destinationEncryptionConfiguration JobQueryDestinationEncryptionConfiguration
    Custom encryption configuration (e.g., Cloud KMS keys). Structure is documented below.
    destinationTable JobQueryDestinationTable
    Describes the table where the query results should be stored. This property must be set for large results that exceed the maximum response size. For queries that produce anonymous (cached) results, this field will be populated by BigQuery. Structure is documented below.
    flattenResults Boolean
    If true and query uses legacy SQL dialect, flattens all nested and repeated fields in the query results. allowLargeResults must be true if this is set to false. For standard SQL queries, this flag is ignored and results are never flattened.
    maximumBillingTier Integer
    Limits the billing tier for this job. Queries that have resource usage beyond this tier will fail (without incurring a charge). If unspecified, this will be set to your project default.
    maximumBytesBilled String
    Limits the bytes billed for this job. Queries that will have bytes billed beyond this limit will fail (without incurring a charge). If unspecified, this will be set to your project default.
    parameterMode String
    Standard SQL only. Set to POSITIONAL to use positional (?) query parameters or to NAMED to use named (@myparam) query parameters in this query.
    priority String
    Specifies a priority for the query. Default value is INTERACTIVE. Possible values are: INTERACTIVE, BATCH.
    schemaUpdateOptions List<String>
    Allows the schema of the destination table to be updated as a side effect of the query job. Schema update options are supported in two cases: when writeDisposition is WRITE_APPEND; when writeDisposition is WRITE_TRUNCATE and the destination table is a partition of a table, specified by partition decorators. For normal tables, WRITE_TRUNCATE will always overwrite the schema. One or more of the following values are specified: ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema. ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original schema to nullable.
    scriptOptions JobQueryScriptOptions
    Options controlling the execution of scripts. Structure is documented below.
    useLegacySql Boolean
    Specifies whether to use BigQuery's legacy SQL dialect for this query. The default value is true. If set to false, the query will use BigQuery's standard SQL.
    useQueryCache Boolean
    Whether to look for the result in the query cache. The query cache is a best-effort cache that will be flushed whenever tables in the query are modified. Moreover, the query cache is only available when a query does not have a destination table specified. The default value is true.
    userDefinedFunctionResources List<JobQueryUserDefinedFunctionResource>
    Describes user-defined function resources used in the query. Structure is documented below.
    writeDisposition String
    Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is WRITE_EMPTY. Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY.
    query string
    SQL query text to execute. The useLegacySql field can be used to indicate whether the query uses legacy SQL or standard SQL. NOTE: queries containing DML language (DELETE, UPDATE, MERGE, INSERT) must specify create_disposition = "" and write_disposition = "".
    allowLargeResults boolean
    If true and query uses legacy SQL dialect, allows the query to produce arbitrarily large result tables at a slight cost in performance. Requires destinationTable to be set. For standard SQL queries, this flag is ignored and large results are always allowed. However, you must still set destinationTable when result size exceeds the allowed maximum response size.
    createDisposition string
    Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is CREATE_IF_NEEDED. Possible values are: CREATE_IF_NEEDED, CREATE_NEVER.
    defaultDataset JobQueryDefaultDataset
    Specifies the default dataset to use for unqualified table names in the query. Note that this does not alter behavior of unqualified dataset names. Structure is documented below.
    destinationEncryptionConfiguration JobQueryDestinationEncryptionConfiguration
    Custom encryption configuration (e.g., Cloud KMS keys). Structure is documented below.
    destinationTable JobQueryDestinationTable
    Describes the table where the query results should be stored. This property must be set for large results that exceed the maximum response size. For queries that produce anonymous (cached) results, this field will be populated by BigQuery. Structure is documented below.
    flattenResults boolean
    If true and query uses legacy SQL dialect, flattens all nested and repeated fields in the query results. allowLargeResults must be true if this is set to false. For standard SQL queries, this flag is ignored and results are never flattened.
    maximumBillingTier number
    Limits the billing tier for this job. Queries that have resource usage beyond this tier will fail (without incurring a charge). If unspecified, this will be set to your project default.
    maximumBytesBilled string
    Limits the bytes billed for this job. Queries that will have bytes billed beyond this limit will fail (without incurring a charge). If unspecified, this will be set to your project default.
    parameterMode string
    Standard SQL only. Set to POSITIONAL to use positional (?) query parameters or to NAMED to use named (@myparam) query parameters in this query.
    priority string
    Specifies a priority for the query. Default value is INTERACTIVE. Possible values are: INTERACTIVE, BATCH.
    schemaUpdateOptions string[]
    Allows the schema of the destination table to be updated as a side effect of the query job. Schema update options are supported in two cases: when writeDisposition is WRITE_APPEND; when writeDisposition is WRITE_TRUNCATE and the destination table is a partition of a table, specified by partition decorators. For normal tables, WRITE_TRUNCATE will always overwrite the schema. One or more of the following values are specified: ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema. ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original schema to nullable.
    scriptOptions JobQueryScriptOptions
    Options controlling the execution of scripts. Structure is documented below.
    useLegacySql boolean
    Specifies whether to use BigQuery's legacy SQL dialect for this query. The default value is true. If set to false, the query will use BigQuery's standard SQL.
    useQueryCache boolean
    Whether to look for the result in the query cache. The query cache is a best-effort cache that will be flushed whenever tables in the query are modified. Moreover, the query cache is only available when a query does not have a destination table specified. The default value is true.
    userDefinedFunctionResources JobQueryUserDefinedFunctionResource[]
    Describes user-defined function resources used in the query. Structure is documented below.
    writeDisposition string
    Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is WRITE_EMPTY. Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY.
    query str
    SQL query text to execute. The useLegacySql field can be used to indicate whether the query uses legacy SQL or standard SQL. NOTE: queries containing DML language (DELETE, UPDATE, MERGE, INSERT) must specify create_disposition = "" and write_disposition = "".
    allow_large_results bool
    If true and query uses legacy SQL dialect, allows the query to produce arbitrarily large result tables at a slight cost in performance. Requires destinationTable to be set. For standard SQL queries, this flag is ignored and large results are always allowed. However, you must still set destinationTable when result size exceeds the allowed maximum response size.
    create_disposition str
    Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is CREATE_IF_NEEDED. Possible values are: CREATE_IF_NEEDED, CREATE_NEVER.
    default_dataset JobQueryDefaultDataset
    Specifies the default dataset to use for unqualified table names in the query. Note that this does not alter behavior of unqualified dataset names. Structure is documented below.
    destination_encryption_configuration JobQueryDestinationEncryptionConfiguration
    Custom encryption configuration (e.g., Cloud KMS keys). Structure is documented below.
    destination_table JobQueryDestinationTable
    Describes the table where the query results should be stored. This property must be set for large results that exceed the maximum response size. For queries that produce anonymous (cached) results, this field will be populated by BigQuery. Structure is documented below.
    flatten_results bool
    If true and query uses legacy SQL dialect, flattens all nested and repeated fields in the query results. allowLargeResults must be true if this is set to false. For standard SQL queries, this flag is ignored and results are never flattened.
    maximum_billing_tier int
    Limits the billing tier for this job. Queries that have resource usage beyond this tier will fail (without incurring a charge). If unspecified, this will be set to your project default.
    maximum_bytes_billed str
    Limits the bytes billed for this job. Queries that will have bytes billed beyond this limit will fail (without incurring a charge). If unspecified, this will be set to your project default.
    parameter_mode str
    Standard SQL only. Set to POSITIONAL to use positional (?) query parameters or to NAMED to use named (@myparam) query parameters in this query.
    priority str
    Specifies a priority for the query. Default value is INTERACTIVE. Possible values are: INTERACTIVE, BATCH.
    schema_update_options Sequence[str]
    Allows the schema of the destination table to be updated as a side effect of the query job. Schema update options are supported in two cases: when writeDisposition is WRITE_APPEND; when writeDisposition is WRITE_TRUNCATE and the destination table is a partition of a table, specified by partition decorators. For normal tables, WRITE_TRUNCATE will always overwrite the schema. One or more of the following values are specified: ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema. ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original schema to nullable.
    script_options JobQueryScriptOptions
    Options controlling the execution of scripts. Structure is documented below.
    use_legacy_sql bool
    Specifies whether to use BigQuery's legacy SQL dialect for this query. The default value is true. If set to false, the query will use BigQuery's standard SQL.
    use_query_cache bool
    Whether to look for the result in the query cache. The query cache is a best-effort cache that will be flushed whenever tables in the query are modified. Moreover, the query cache is only available when a query does not have a destination table specified. The default value is true.
    user_defined_function_resources Sequence[JobQueryUserDefinedFunctionResource]
    Describes user-defined function resources used in the query. Structure is documented below.
    write_disposition str
    Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is WRITE_EMPTY. Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY.
    query String
    SQL query text to execute. The useLegacySql field can be used to indicate whether the query uses legacy SQL or standard SQL. NOTE: queries containing DML language (DELETE, UPDATE, MERGE, INSERT) must specify create_disposition = "" and write_disposition = "".
    allowLargeResults Boolean
    If true and query uses legacy SQL dialect, allows the query to produce arbitrarily large result tables at a slight cost in performance. Requires destinationTable to be set. For standard SQL queries, this flag is ignored and large results are always allowed. However, you must still set destinationTable when result size exceeds the allowed maximum response size.
    createDisposition String
    Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is CREATE_IF_NEEDED. Possible values are: CREATE_IF_NEEDED, CREATE_NEVER.
    defaultDataset Property Map
    Specifies the default dataset to use for unqualified table names in the query. Note that this does not alter behavior of unqualified dataset names. Structure is documented below.
    destinationEncryptionConfiguration Property Map
    Custom encryption configuration (e.g., Cloud KMS keys). Structure is documented below.
    destinationTable Property Map
    Describes the table where the query results should be stored. This property must be set for large results that exceed the maximum response size. For queries that produce anonymous (cached) results, this field will be populated by BigQuery. Structure is documented below.
    flattenResults Boolean
    If true and query uses legacy SQL dialect, flattens all nested and repeated fields in the query results. allowLargeResults must be true if this is set to false. For standard SQL queries, this flag is ignored and results are never flattened.
    maximumBillingTier Number
    Limits the billing tier for this job. Queries that have resource usage beyond this tier will fail (without incurring a charge). If unspecified, this will be set to your project default.
    maximumBytesBilled String
    Limits the bytes billed for this job. Queries that will have bytes billed beyond this limit will fail (without incurring a charge). If unspecified, this will be set to your project default.
    parameterMode String
    Standard SQL only. Set to POSITIONAL to use positional (?) query parameters or to NAMED to use named (@myparam) query parameters in this query.
    priority String
    Specifies a priority for the query. Default value is INTERACTIVE. Possible values are: INTERACTIVE, BATCH.
    schemaUpdateOptions List<String>
    Allows the schema of the destination table to be updated as a side effect of the query job. Schema update options are supported in two cases: when writeDisposition is WRITE_APPEND; when writeDisposition is WRITE_TRUNCATE and the destination table is a partition of a table, specified by partition decorators. For normal tables, WRITE_TRUNCATE will always overwrite the schema. One or more of the following values are specified: ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema. ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original schema to nullable.
    scriptOptions Property Map
    Options controlling the execution of scripts. Structure is documented below.
    useLegacySql Boolean
    Specifies whether to use BigQuery's legacy SQL dialect for this query. The default value is true. If set to false, the query will use BigQuery's standard SQL.
    useQueryCache Boolean
    Whether to look for the result in the query cache. The query cache is a best-effort cache that will be flushed whenever tables in the query are modified. Moreover, the query cache is only available when a query does not have a destination table specified. The default value is true.
    userDefinedFunctionResources List<Property Map>
    Describes user-defined function resources used in the query. Structure is documented below.
    writeDisposition String
    Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is WRITE_EMPTY. Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY.
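
    As a complement to the query example near the top of this page, here is a hedged sketch that exercises the disposition and default-dataset settings with standard SQL; every project, dataset, and table name is a placeholder for resources assumed to exist already.

    import * as gcp from "@pulumi/gcp";

    // Hypothetical standard-SQL query job that appends results to an existing table.
    const appendQuery = new gcp.bigquery.Job("append-query", {
        jobId: "job_query_append",
        query: {
            // Unqualified table names in the query resolve against defaultDataset below.
            query: "SELECT name, total FROM source_table",
            useLegacySql: false,
            defaultDataset: {
                projectId: "my-project",
                datasetId: "example_dataset",
            },
            destinationTable: {
                projectId: "my-project",
                datasetId: "example_dataset",
                tableId: "destination_table",
            },
            createDisposition: "CREATE_IF_NEEDED",
            writeDisposition: "WRITE_APPEND",
        },
    });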

    JobQueryDefaultDataset, JobQueryDefaultDatasetArgs

    DatasetId string
    The dataset. Can be specified as {{dataset_id}} if project_id is also set, or of the form projects/{{project}}/datasets/{{dataset_id}} if not.
    ProjectId string
    The ID of the project containing this dataset.
    DatasetId string
    The dataset. Can be specified as {{dataset_id}} if project_id is also set, or of the form projects/{{project}}/datasets/{{dataset_id}} if not.
    ProjectId string
    The ID of the project containing this dataset.
    datasetId String
    The dataset. Can be specified as {{dataset_id}} if project_id is also set, or of the form projects/{{project}}/datasets/{{dataset_id}} if not.
    projectId String
    The ID of the project containing this dataset.
    datasetId string
    The dataset. Can be specified as {{dataset_id}} if project_id is also set, or of the form projects/{{project}}/datasets/{{dataset_id}} if not.
    projectId string
    The ID of the project containing this dataset.
    dataset_id str
    The dataset. Can be specified as {{dataset_id}} if project_id is also set, or of the form projects/{{project}}/datasets/{{dataset_id}} if not.
    project_id str
    The ID of the project containing this dataset.
    datasetId String
    The dataset. Can be specified as {{dataset_id}} if project_id is also set, or of the form projects/{{project}}/datasets/{{dataset_id}} if not.
    projectId String
    The ID of the project containing this dataset.

    JobQueryDestinationEncryptionConfiguration, JobQueryDestinationEncryptionConfigurationArgs

    KmsKeyName string
    Describes the Cloud KMS encryption key that will be used to protect the destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.
    KmsKeyVersion string
    (Output) Describes the Cloud KMS encryption key version used to protect the destination BigQuery table.
    KmsKeyName string
    Describes the Cloud KMS encryption key that will be used to protect the destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.
    KmsKeyVersion string
    (Output) Describes the Cloud KMS encryption key version used to protect the destination BigQuery table.
    kmsKeyName String
    Describes the Cloud KMS encryption key that will be used to protect the destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.
    kmsKeyVersion String
    (Output) Describes the Cloud KMS encryption key version used to protect the destination BigQuery table.
    kmsKeyName string
    Describes the Cloud KMS encryption key that will be used to protect the destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.
    kmsKeyVersion string
    (Output) Describes the Cloud KMS encryption key version used to protect the destination BigQuery table.
    kms_key_name str
    Describes the Cloud KMS encryption key that will be used to protect the destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.
    kms_key_version str
    (Output) Describes the Cloud KMS encryption key version used to protect the destination BigQuery table.
    kmsKeyName String
    Describes the Cloud KMS encryption key that will be used to protect the destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.
    kmsKeyVersion String
    (Output) Describes the Cloud KMS encryption key version used to protect the destination BigQuery table.
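
    A minimal sketch of wiring a customer-managed Cloud KMS key into a query job's destination table; the key path is a placeholder, and the project's BigQuery service account is assumed to already have permission to use it.

    import * as gcp from "@pulumi/gcp";

    // Hypothetical query job writing CMEK-protected results; names are placeholders.
    const encryptedQuery = new gcp.bigquery.Job("encrypted-query", {
        jobId: "job_query_encrypted",
        query: {
            query: "SELECT 1 AS one",
            useLegacySql: false,
            destinationTable: {
                projectId: "my-project",
                datasetId: "example_dataset",
                tableId: "encrypted_results",
            },
            destinationEncryptionConfiguration: {
                // The BigQuery service account must be granted access to this key.
                kmsKeyName: "projects/my-project/locations/us/keyRings/example-ring/cryptoKeys/example-key",
            },
        },
    });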

    JobQueryDestinationTable, JobQueryDestinationTableArgs

    TableId string
    The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
    DatasetId string
    The ID of the dataset containing this table.
    ProjectId string
    The ID of the project containing this table.
    TableId string
    The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
    DatasetId string
    The ID of the dataset containing this table.
    ProjectId string
    The ID of the project containing this table.
    tableId String
    The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
    datasetId String
    The ID of the dataset containing this table.
    projectId String
    The ID of the project containing this table.
    tableId string
    The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
    datasetId string
    The ID of the dataset containing this table.
    projectId string
    The ID of the project containing this table.
    table_id str
    The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
    dataset_id str
    The ID of the dataset containing this table.
    project_id str
    The ID of the project containing this table.
    tableId String
    The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
    datasetId String
    The ID of the dataset containing this table.
    projectId String
    The ID of the project containing this table.

    JobQueryScriptOptions, JobQueryScriptOptionsArgs

    KeyResultStatement string
    Determines which statement in the script represents the "key result", used to populate the schema and query results of the script job. Possible values are: LAST, FIRST_SELECT.
    StatementByteBudget string
    Limit on the number of bytes billed per statement. Exceeding this budget results in an error.
    StatementTimeoutMs string
    Timeout period for each statement in a script.
    KeyResultStatement string
    Determines which statement in the script represents the "key result", used to populate the schema and query results of the script job. Possible values are: LAST, FIRST_SELECT.
    StatementByteBudget string
    Limit on the number of bytes billed per statement. Exceeding this budget results in an error.
    StatementTimeoutMs string
    Timeout period for each statement in a script.
    keyResultStatement String
    Determines which statement in the script represents the "key result", used to populate the schema and query results of the script job. Possible values are: LAST, FIRST_SELECT.
    statementByteBudget String
    Limit on the number of bytes billed per statement. Exceeding this budget results in an error.
    statementTimeoutMs String
    Timeout period for each statement in a script.
    keyResultStatement string
    Determines which statement in the script represents the "key result", used to populate the schema and query results of the script job. Possible values are: LAST, FIRST_SELECT.
    statementByteBudget string
    Limit on the number of bytes billed per statement. Exceeding this budget results in an error.
    statementTimeoutMs string
    Timeout period for each statement in a script.
    key_result_statement str
    Determines which statement in the script represents the "key result", used to populate the schema and query results of the script job. Possible values are: LAST, FIRST_SELECT.
    statement_byte_budget str
    Limit on the number of bytes billed per statement. Exceeding this budget results in an error.
    statement_timeout_ms str
    Timeout period for each statement in a script.
    keyResultStatement String
    Determines which statement in the script represents the "key result", used to populate the schema and query results of the script job. Possible values are: LAST, FIRST_SELECT.
    statementByteBudget String
    Limit on the number of bytes billed per statement. Exceeding this budget results in an error.
    statementTimeoutMs String
    Timeout period for each statement in a script.

    JobQueryUserDefinedFunctionResource, JobQueryUserDefinedFunctionResourceArgs

    InlineCode string
    An inline resource that contains code for a user-defined function (UDF). Providing an inline code resource is equivalent to providing a URI for a file containing the same code.
    ResourceUri string
    A code resource to load from a Google Cloud Storage URI (gs://bucket/path).
    InlineCode string
    An inline resource that contains code for a user-defined function (UDF). Providing an inline code resource is equivalent to providing a URI for a file containing the same code.
    ResourceUri string
    A code resource to load from a Google Cloud Storage URI (gs://bucket/path).
    inlineCode String
    An inline resource that contains code for a user-defined function (UDF). Providing an inline code resource is equivalent to providing a URI for a file containing the same code.
    resourceUri String
    A code resource to load from a Google Cloud Storage URI (gs://bucket/path).
    inlineCode string
    An inline resource that contains code for a user-defined function (UDF). Providing an inline code resource is equivalent to providing a URI for a file containing the same code.
    resourceUri string
    A code resource to load from a Google Cloud Storage URI (gs://bucket/path).
    inline_code str
    An inline resource that contains code for a user-defined function (UDF). Providing an inline code resource is equivalent to providing a URI for a file containing the same code.
    resource_uri str
    A code resource to load from a Google Cloud Storage URI (gs://bucket/path).
    inlineCode String
    An inline resource that contains code for a user-defined function (UDF). Providing an inline code resource is equivalent to providing a URI for a file containing the same code.
    resourceUri String
    A code resource to load from a Google Cloud Storage URI (gs://bucket/path).
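
    User-defined function resources apply to legacy SQL queries. The sketch below registers both an inline JavaScript snippet and a Cloud Storage file; the bucket path, function body, and destination names are illustrative only.

    import * as gcp from "@pulumi/gcp";

    // Hypothetical legacy-SQL query job with UDF resources attached.
    const udfQuery = new gcp.bigquery.Job("udf-query", {
        jobId: "job_query_udf",
        query: {
            query: "SELECT word FROM [bigquery-public-data:samples.shakespeare] LIMIT 10",
            useLegacySql: true,
            allowLargeResults: true,
            destinationTable: {
                projectId: "my-project",
                datasetId: "example_dataset",
                tableId: "udf_results",
            },
            userDefinedFunctionResources: [
                // Inline code is equivalent to pointing at a file with the same contents.
                { inlineCode: "function pass(row, emit) { emit(row); }" },
                { resourceUri: "gs://example-bucket/udfs/helpers.js" },
            ],
        },
    });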

    JobStatus, JobStatusArgs

    ErrorResults List<JobStatusErrorResult>
    (Output) Final error result of the job. If present, indicates that the job has completed and was unsuccessful. Structure is documented below.
    Errors List<JobStatusError>
    (Output) The first errors encountered during the running of the job. The final message includes the number of errors that caused the process to stop. Errors here do not necessarily mean that the job has not completed or was unsuccessful. Structure is documented below.
    State string
    (Output) Running state of the job. Valid states include 'PENDING', 'RUNNING', and 'DONE'.
    ErrorResults []JobStatusErrorResult
    (Output) Final error result of the job. If present, indicates that the job has completed and was unsuccessful. Structure is documented below.
    Errors []JobStatusError
    (Output) The first errors encountered during the running of the job. The final message includes the number of errors that caused the process to stop. Errors here do not necessarily mean that the job has not completed or was unsuccessful. Structure is documented below.
    State string
    (Output) Running state of the job. Valid states include 'PENDING', 'RUNNING', and 'DONE'.
    errorResults List<JobStatusErrorResult>
    (Output) Final error result of the job. If present, indicates that the job has completed and was unsuccessful. Structure is documented below.
    errors List<JobStatusError>
    (Output) The first errors encountered during the running of the job. The final message includes the number of errors that caused the process to stop. Errors here do not necessarily mean that the job has not completed or was unsuccessful. Structure is documented below.
    state String
    (Output) Running state of the job. Valid states include 'PENDING', 'RUNNING', and 'DONE'.
    errorResults JobStatusErrorResult[]
    (Output) Final error result of the job. If present, indicates that the job has completed and was unsuccessful. Structure is documented below.
    errors JobStatusError[]
    (Output) The first errors encountered during the running of the job. The final message includes the number of errors that caused the process to stop. Errors here do not necessarily mean that the job has not completed or was unsuccessful. Structure is documented below.
    state string
    (Output) Running state of the job. Valid states include 'PENDING', 'RUNNING', and 'DONE'.
    error_results Sequence[JobStatusErrorResult]
    (Output) Final error result of the job. If present, indicates that the job has completed and was unsuccessful. Structure is documented below.
    errors Sequence[JobStatusError]
    (Output) The first errors encountered during the running of the job. The final message includes the number of errors that caused the process to stop. Errors here do not necessarily mean that the job has not completed or was unsuccessful. Structure is documented below.
    state str
    (Output) Running state of the job. Valid states include 'PENDING', 'RUNNING', and 'DONE'.
    errorResults List<Property Map>
    (Output) Final error result of the job. If present, indicates that the job has completed and was unsuccessful. Structure is documented below.
    errors List<Property Map>
    (Output) The first errors encountered during the running of the job. The final message includes the number of errors that caused the process to stop. Errors here do not necessarily mean that the job has not completed or was unsuccessful. Structure is documented below.
    state String
    (Output) Running state of the job. Valid states include 'PENDING', 'RUNNING', and 'DONE'.
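
    These status fields are read back from BigQuery once the job runs. Assuming the Job resource surfaces them through a statuses output (the plural property name here is an assumption based on the type above, not a documented guarantee), the final state could be exported like this:

    import * as gcp from "@pulumi/gcp";

    // `job` stands in for any gcp.bigquery.Job resource defined elsewhere in the program.
    declare const job: gcp.bigquery.Job;

    // Assumed output name: `statuses`; each entry carries state, errors, and errorResults.
    export const jobState = job.statuses.apply(statuses => statuses[0]?.state);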

    JobStatusError, JobStatusErrorArgs

    Location string
    The geographic location of the job. The default value is US.
    Message string
    A human-readable description of the error.
    Reason string
    A short error code that summarizes the error.
    Location string
    The geographic location of the job. The default value is US.
    Message string
    A human-readable description of the error.
    Reason string
    A short error code that summarizes the error.
    location String
    The geographic location of the job. The default value is US.
    message String
    A human-readable description of the error.
    reason String
    A short error code that summarizes the error.
    location string
    The geographic location of the job. The default value is US.
    message string
    A human-readable description of the error.
    reason string
    A short error code that summarizes the error.
    location str
    The geographic location of the job. The default value is US.
    message str
    A human-readable description of the error.
    reason str
    A short error code that summarizes the error.
    location String
    The geographic location of the job. The default value is US.
    message String
    A human-readable description of the error.
    reason String
    A short error code that summarizes the error.

    JobStatusErrorResult, JobStatusErrorResultArgs

    Location string
    The geographic location of the job. The default value is US.
    Message string
    A human-readable description of the error.
    Reason string
    A short error code that summarizes the error.
    Location string
    The geographic location of the job. The default value is US.
    Message string
    A human-readable description of the error.
    Reason string
    A short error code that summarizes the error.
    location String
    The geographic location of the job. The default value is US.
    message String
    A human-readable description of the error.
    reason String
    A short error code that summarizes the error.
    location string
    The geographic location of the job. The default value is US.
    message string
    A human-readable description of the error.
    reason string
    A short error code that summarizes the error.
    location str
    The geographic location of the job. The default value is US.
    message str
    A human-readable description of the error.
    reason str
    A short error code that summarizes the error.
    location String
    The geographic location of the job. The default value is US.
    message String
    A human-readable description of the error.
    reason String
    A short error code that summarizes the error.

    Import

    Job can be imported using any of these accepted formats:

    • projects/{{project}}/jobs/{{job_id}}/location/{{location}}

    • projects/{{project}}/jobs/{{job_id}}

    • {{project}}/{{job_id}}/{{location}}

    • {{job_id}}/{{location}}

    • {{project}}/{{job_id}}

    • {{job_id}}

    When using the pulumi import command, Job can be imported using one of the formats above. For example:

    $ pulumi import gcp:bigquery/job:Job default projects/{{project}}/jobs/{{job_id}}/location/{{location}}
    
    $ pulumi import gcp:bigquery/job:Job default projects/{{project}}/jobs/{{job_id}}
    
    $ pulumi import gcp:bigquery/job:Job default {{project}}/{{job_id}}/{{location}}
    
    $ pulumi import gcp:bigquery/job:Job default {{job_id}}/{{location}}
    
    $ pulumi import gcp:bigquery/job:Job default {{project}}/{{job_id}}
    
    $ pulumi import gcp:bigquery/job:Job default {{job_id}}
    

    To learn more about importing existing cloud resources, see Importing resources.

    Package Details

    Repository
    Google Cloud (GCP) Classic pulumi/pulumi-gcp
    License
    Apache-2.0
    Notes
    This Pulumi package is based on the google-beta Terraform Provider.