Deploy Google Cloud Dataflow Jobs

The gcp:dataflow/job:Job resource, part of the Pulumi GCP provider, launches and manages Dataflow jobs that execute Apache Beam pipelines on Google Compute Engine. This guide focuses on three capabilities: template-based deployment, Streaming Engine configuration, and job lifecycle management.

Dataflow jobs require a pre-built template stored in GCS and a temporary GCS location for intermediate data, and they may also reference Pub/Sub topics or VPC networks. The examples are intentionally small. Combine them with your own pipeline templates, networking, and monitoring configuration.

Launch a batch job from a template

Most deployments start by launching a job from a pre-built template stored in GCS, passing runtime parameters to customize behavior.

import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";

const bigDataJob = new gcp.dataflow.Job("big_data_job", {
    name: "dataflow-job",
    templateGcsPath: "gs://my-bucket/templates/template_file",
    tempGcsLocation: "gs://my-bucket/tmp_dir",
    parameters: {
        foo: "bar",
        baz: "qux",
    },
});
import pulumi
import pulumi_gcp as gcp

big_data_job = gcp.dataflow.Job("big_data_job",
    name="dataflow-job",
    template_gcs_path="gs://my-bucket/templates/template_file",
    temp_gcs_location="gs://my-bucket/tmp_dir",
    parameters={
        "foo": "bar",
        "baz": "qux",
    })
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/dataflow"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := dataflow.NewJob(ctx, "big_data_job", &dataflow.JobArgs{
			Name:            pulumi.String("dataflow-job"),
			TemplateGcsPath: pulumi.String("gs://my-bucket/templates/template_file"),
			TempGcsLocation: pulumi.String("gs://my-bucket/tmp_dir"),
			Parameters: pulumi.StringMap{
				"foo": pulumi.String("bar"),
				"baz": pulumi.String("qux"),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;

return await Deployment.RunAsync(() => 
{
    var bigDataJob = new Gcp.Dataflow.Job("big_data_job", new()
    {
        Name = "dataflow-job",
        TemplateGcsPath = "gs://my-bucket/templates/template_file",
        TempGcsLocation = "gs://my-bucket/tmp_dir",
        Parameters = 
        {
            { "foo", "bar" },
            { "baz", "qux" },
        },
    });

});
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.dataflow.Job;
import com.pulumi.gcp.dataflow.JobArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var bigDataJob = new Job("bigDataJob", JobArgs.builder()
            .name("dataflow-job")
            .templateGcsPath("gs://my-bucket/templates/template_file")
            .tempGcsLocation("gs://my-bucket/tmp_dir")
            .parameters(Map.ofEntries(
                Map.entry("foo", "bar"),
                Map.entry("baz", "qux")
            ))
            .build());

    }
}
resources:
  bigDataJob:
    type: gcp:dataflow:Job
    name: big_data_job
    properties:
      name: dataflow-job
      templateGcsPath: gs://my-bucket/templates/template_file
      tempGcsLocation: gs://my-bucket/tmp_dir
      parameters:
        foo: bar
        baz: qux

The templateGcsPath argument points to your packaged pipeline template, and tempGcsLocation provides scratch space for intermediate data. The parameters map passes key-value pairs through to your pipeline code; keys are case-sensitive and must match the option names your template defines.
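
Once the job is created, the resource exports attributes such as jobId and state that you can surface as stack outputs. A minimal TypeScript sketch, reusing the bigDataJob constant from the example above (the output names are arbitrary):

// Expose the server-assigned job ID and current state via `pulumi stack output`.
export const dataflowJobId = bigDataJob.jobId;
export const dataflowJobState = bigDataJob.state;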

Process streaming data with Streaming Engine

Streaming pipelines continuously process data from sources like Pub/Sub. Streaming Engine offloads state management from workers to the Dataflow service, reducing operational overhead.

import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";

const topic = new gcp.pubsub.Topic("topic", {name: "dataflow-job1"});
const bucket1 = new gcp.storage.Bucket("bucket1", {
    name: "tf-test-bucket1",
    location: "US",
    forceDestroy: true,
});
const bucket2 = new gcp.storage.Bucket("bucket2", {
    name: "tf-test-bucket2",
    location: "US",
    forceDestroy: true,
});
const pubsubStream = new gcp.dataflow.Job("pubsub_stream", {
    name: "tf-test-dataflow-job1",
    templateGcsPath: "gs://my-bucket/templates/template_file",
    tempGcsLocation: "gs://my-bucket/tmp_dir",
    enableStreamingEngine: true,
    parameters: {
        inputFilePattern: pulumi.interpolate`${bucket1.url}/*.json`,
        outputTopic: topic.id,
    },
    transformNameMapping: {
        name: "test_job",
        env: "test",
    },
    onDelete: "cancel",
});
import pulumi
import pulumi_gcp as gcp

topic = gcp.pubsub.Topic("topic", name="dataflow-job1")
bucket1 = gcp.storage.Bucket("bucket1",
    name="tf-test-bucket1",
    location="US",
    force_destroy=True)
bucket2 = gcp.storage.Bucket("bucket2",
    name="tf-test-bucket2",
    location="US",
    force_destroy=True)
pubsub_stream = gcp.dataflow.Job("pubsub_stream",
    name="tf-test-dataflow-job1",
    template_gcs_path="gs://my-bucket/templates/template_file",
    temp_gcs_location="gs://my-bucket/tmp_dir",
    enable_streaming_engine=True,
    parameters={
        "inputFilePattern": bucket1.url.apply(lambda url: f"{url}/*.json"),
        "outputTopic": topic.id,
    },
    transform_name_mapping={
        "name": "test_job",
        "env": "test",
    },
    on_delete="cancel")
package main

import (
	"fmt"

	"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/dataflow"
	"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/pubsub"
	"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/storage"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		topic, err := pubsub.NewTopic(ctx, "topic", &pubsub.TopicArgs{
			Name: pulumi.String("dataflow-job1"),
		})
		if err != nil {
			return err
		}
		bucket1, err := storage.NewBucket(ctx, "bucket1", &storage.BucketArgs{
			Name:         pulumi.String("tf-test-bucket1"),
			Location:     pulumi.String("US"),
			ForceDestroy: pulumi.Bool(true),
		})
		if err != nil {
			return err
		}
		_, err = storage.NewBucket(ctx, "bucket2", &storage.BucketArgs{
			Name:         pulumi.String("tf-test-bucket2"),
			Location:     pulumi.String("US"),
			ForceDestroy: pulumi.Bool(true),
		})
		if err != nil {
			return err
		}
		_, err = dataflow.NewJob(ctx, "pubsub_stream", &dataflow.JobArgs{
			Name:                  pulumi.String("tf-test-dataflow-job1"),
			TemplateGcsPath:       pulumi.String("gs://my-bucket/templates/template_file"),
			TempGcsLocation:       pulumi.String("gs://my-bucket/tmp_dir"),
			EnableStreamingEngine: pulumi.Bool(true),
			Parameters: pulumi.StringMap{
				"inputFilePattern": bucket1.Url.ApplyT(func(url string) (string, error) {
					return fmt.Sprintf("%v/*.json", url), nil
				}).(pulumi.StringOutput),
				"outputTopic": topic.ID(),
			},
			TransformNameMapping: pulumi.StringMap{
				"name": pulumi.String("test_job"),
				"env":  pulumi.String("test"),
			},
			OnDelete: pulumi.String("cancel"),
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;

return await Deployment.RunAsync(() => 
{
    var topic = new Gcp.PubSub.Topic("topic", new()
    {
        Name = "dataflow-job1",
    });

    var bucket1 = new Gcp.Storage.Bucket("bucket1", new()
    {
        Name = "tf-test-bucket1",
        Location = "US",
        ForceDestroy = true,
    });

    var bucket2 = new Gcp.Storage.Bucket("bucket2", new()
    {
        Name = "tf-test-bucket2",
        Location = "US",
        ForceDestroy = true,
    });

    var pubsubStream = new Gcp.Dataflow.Job("pubsub_stream", new()
    {
        Name = "tf-test-dataflow-job1",
        TemplateGcsPath = "gs://my-bucket/templates/template_file",
        TempGcsLocation = "gs://my-bucket/tmp_dir",
        EnableStreamingEngine = true,
        Parameters = 
        {
            { "inputFilePattern", bucket1.Url.Apply(url => $"{url}/*.json") },
            { "outputTopic", topic.Id },
        },
        TransformNameMapping = 
        {
            { "name", "test_job" },
            { "env", "test" },
        },
        OnDelete = "cancel",
    });

});
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.pubsub.Topic;
import com.pulumi.gcp.pubsub.TopicArgs;
import com.pulumi.gcp.storage.Bucket;
import com.pulumi.gcp.storage.BucketArgs;
import com.pulumi.gcp.dataflow.Job;
import com.pulumi.gcp.dataflow.JobArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var topic = new Topic("topic", TopicArgs.builder()
            .name("dataflow-job1")
            .build());

        var bucket1 = new Bucket("bucket1", BucketArgs.builder()
            .name("tf-test-bucket1")
            .location("US")
            .forceDestroy(true)
            .build());

        var bucket2 = new Bucket("bucket2", BucketArgs.builder()
            .name("tf-test-bucket2")
            .location("US")
            .forceDestroy(true)
            .build());

        var pubsubStream = new Job("pubsubStream", JobArgs.builder()
            .name("tf-test-dataflow-job1")
            .templateGcsPath("gs://my-bucket/templates/template_file")
            .tempGcsLocation("gs://my-bucket/tmp_dir")
            .enableStreamingEngine(true)
            .parameters(Map.ofEntries(
                Map.entry("inputFilePattern", bucket1.url().applyValue(_url -> String.format("%s/*.json", _url))),
                Map.entry("outputTopic", topic.id())
            ))
            .transformNameMapping(Map.ofEntries(
                Map.entry("name", "test_job"),
                Map.entry("env", "test")
            ))
            .onDelete("cancel")
            .build());

    }
}
resources:
  topic:
    type: gcp:pubsub:Topic
    properties:
      name: dataflow-job1
  bucket1:
    type: gcp:storage:Bucket
    properties:
      name: tf-test-bucket1
      location: US
      forceDestroy: true
  bucket2:
    type: gcp:storage:Bucket
    properties:
      name: tf-test-bucket2
      location: US
      forceDestroy: true
  pubsubStream:
    type: gcp:dataflow:Job
    name: pubsub_stream
    properties:
      name: tf-test-dataflow-job1
      templateGcsPath: gs://my-bucket/templates/template_file
      tempGcsLocation: gs://my-bucket/tmp_dir
      enableStreamingEngine: true
      parameters:
        inputFilePattern: ${bucket1.url}/*.json
        outputTopic: ${topic.id}
      transformNameMapping:
        name: test_job
        env: test
      onDelete: cancel

When enableStreamingEngine is true, Dataflow manages pipeline state in its service backend rather than on worker VMs. The onDelete property controls termination behavior: “drain” (the default) finishes processing in-flight data before stopping, while “cancel” terminates immediately. The transformNameMapping property supports in-place pipeline updates by mapping old transform names to new ones, allowing Dataflow to preserve state across deployments.
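
If you later update a running streaming job after renaming a transform inside the pipeline, transformNameMapping is what lets Dataflow carry state from the old step to the new one. A hedged TypeScript sketch of such an update; the transform names ParseEvents and DecodeEvents are hypothetical placeholders, and in practice you would edit the existing pubsub_stream resource rather than add a new one:

import * as gcp from "@pulumi/gcp";

// Revised declaration of the streaming job above, deployed with `pulumi up`
// after the pipeline's "ParseEvents" transform was renamed to "DecodeEvents".
const pubsubStream = new gcp.dataflow.Job("pubsub_stream", {
    name: "tf-test-dataflow-job1",
    templateGcsPath: "gs://my-bucket/templates/template_file",
    tempGcsLocation: "gs://my-bucket/tmp_dir",
    enableStreamingEngine: true,
    // Keys are transform names in the currently running job; values are the
    // corresponding names in the replacement pipeline.
    transformNameMapping: {
        ParseEvents: "DecodeEvents",
    },
    onDelete: "drain", // let in-flight data finish if the job is ever destroyed
});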

Handle job name conflicts with random suffixes

When skipWaitOnJobTermination is enabled, Pulumi doesn’t wait for the job to finish terminating during a destroy or replacement. This speeds things up but can cause name conflicts if the replacement job is created before the old one has fully terminated.

import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
import * as random from "@pulumi/random";

const config = new pulumi.Config();
const bigDataJobSubscriptionId = config.get("bigDataJobSubscriptionId") || "projects/myproject/subscriptions/messages";
const bigDataJobNameSuffix = new random.index.Id("big_data_job_name_suffix", {
    byteLength: 4,
    keepers: {
        region: region,
        subscriptionId: bigDataJobSubscriptionId,
    },
});
const bigDataJob = new gcp.dataflow.FlexTemplateJob("big_data_job", {
    name: `dataflow-flextemplates-job-${bigDataJobNameSuffix.dec}`,
    region: region,
    containerSpecGcsPath: "gs://my-bucket/templates/template.json",
    skipWaitOnJobTermination: true,
    parameters: {
        inputSubscription: bigDataJobSubscriptionId,
    },
});
import pulumi
import pulumi_gcp as gcp
import pulumi_random as random

config = pulumi.Config()
big_data_job_subscription_id = config.get("bigDataJobSubscriptionId")
if big_data_job_subscription_id is None:
    big_data_job_subscription_id = "projects/myproject/subscriptions/messages"
big_data_job_name_suffix = random.index.Id("big_data_job_name_suffix",
    byte_length=4,
    keepers={
        "region": region,
        "subscriptionId": big_data_job_subscription_id,
    })
big_data_job = gcp.dataflow.FlexTemplateJob("big_data_job",
    name=f"dataflow-flextemplates-job-{big_data_job_name_suffix['dec']}",
    region=region,
    container_spec_gcs_path="gs://my-bucket/templates/template.json",
    skip_wait_on_job_termination=True,
    parameters={
        "inputSubscription": big_data_job_subscription_id,
    })
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/dataflow"
	"github.com/pulumi/pulumi-random/sdk/v4/go/random"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi/config"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		cfg := config.New(ctx, "")
		bigDataJobSubscriptionId := "projects/myproject/subscriptions/messages"
		if param := cfg.Get("bigDataJobSubscriptionId"); param != "" {
			bigDataJobSubscriptionId = param
		}
		bigDataJobNameSuffix, err := random.NewId(ctx, "big_data_job_name_suffix", &random.IdArgs{
			ByteLength: 4,
			Keepers: map[string]interface{}{
				"region":         region,
				"subscriptionId": bigDataJobSubscriptionId,
			},
		})
		if err != nil {
			return err
		}
		_, err = dataflow.NewFlexTemplateJob(ctx, "big_data_job", &dataflow.FlexTemplateJobArgs{
			Name:                     pulumi.Sprintf("dataflow-flextemplates-job-%v", bigDataJobNameSuffix.Dec),
			Region:                   pulumi.Any(region),
			ContainerSpecGcsPath:     pulumi.String("gs://my-bucket/templates/template.json"),
			SkipWaitOnJobTermination: pulumi.Bool(true),
			Parameters: pulumi.StringMap{
				"inputSubscription": pulumi.String(bigDataJobSubscriptionId),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
using Random = Pulumi.Random;

return await Deployment.RunAsync(() => 
{
    var config = new Config();
    var bigDataJobSubscriptionId = config.Get("bigDataJobSubscriptionId") ?? "projects/myproject/subscriptions/messages";
    var bigDataJobNameSuffix = new Random.Index.Id("big_data_job_name_suffix", new()
    {
        ByteLength = 4,
        Keepers = 
        {
            { "region", region },
            { "subscriptionId", bigDataJobSubscriptionId },
        },
    });

    var bigDataJob = new Gcp.Dataflow.FlexTemplateJob("big_data_job", new()
    {
        Name = $"dataflow-flextemplates-job-{bigDataJobNameSuffix.Dec}",
        Region = region,
        ContainerSpecGcsPath = "gs://my-bucket/templates/template.json",
        SkipWaitOnJobTermination = true,
        Parameters = 
        {
            { "inputSubscription", bigDataJobSubscriptionId },
        },
    });

});
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.random.Id;
import com.pulumi.random.IdArgs;
import com.pulumi.gcp.dataflow.FlexTemplateJob;
import com.pulumi.gcp.dataflow.FlexTemplateJobArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        final var config = ctx.config();
        final var bigDataJobSubscriptionId = config.get("bigDataJobSubscriptionId").orElse("projects/myproject/subscriptions/messages");
        var bigDataJobNameSuffix = new Id("bigDataJobNameSuffix", IdArgs.builder()
            .byteLength(4)
            .keepers(Map.ofEntries(
                Map.entry("region", region),
                Map.entry("subscriptionId", bigDataJobSubscriptionId)
            ))
            .build());

        var bigDataJob = new FlexTemplateJob("bigDataJob", FlexTemplateJobArgs.builder()
            .name(String.format("dataflow-flextemplates-job-%s", bigDataJobNameSuffix.dec()))
            .region(region)
            .containerSpecGcsPath("gs://my-bucket/templates/template.json")
            .skipWaitOnJobTermination(true)
            .parameters(Map.of("inputSubscription", bigDataJobSubscriptionId))
            .build());

    }
}
configuration:
  bigDataJobSubscriptionId:
    type: string
    default: projects/myproject/subscriptions/messages
resources:
  bigDataJobNameSuffix:
    type: random:Id
    name: big_data_job_name_suffix
    properties:
      byteLength: 4
      keepers:
        region: ${region}
        subscriptionId: ${bigDataJobSubscriptionId}
  bigDataJob:
    type: gcp:dataflow:FlexTemplateJob
    name: big_data_job
    properties:
      name: dataflow-flextemplates-job-${bigDataJobNameSuffix.dec}
      region: ${region}
      containerSpecGcsPath: gs://my-bucket/templates/template.json
      skipWaitOnJobTermination: true
      parameters:
        inputSubscription: ${bigDataJobSubscriptionId}

The random ID resource generates a random suffix for the job name. Its keepers property forces a new suffix whenever the region or subscription changes, so the job name changes along with its infrastructure dependencies. This pattern prevents “job already exists” errors when rapidly iterating on pipeline deployments.
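
The random.index.Id form shown above comes from automated conversion. In hand-written TypeScript the same pattern is usually expressed with the RandomId resource from @pulumi/random, which exposes the decimal suffix as dec. A sketch under that assumption, with the region read from stack configuration instead of a pre-existing variable:

import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
import * as random from "@pulumi/random";

const config = new pulumi.Config();
const region = config.get("region") || "us-central1"; // assumed stack config key
const subscriptionId = config.get("bigDataJobSubscriptionId") ||
    "projects/myproject/subscriptions/messages";

// Regenerate the suffix whenever the region or subscription changes.
const suffix = new random.RandomId("big_data_job_name_suffix", {
    byteLength: 4,
    keepers: {
        region: region,
        subscriptionId: subscriptionId,
    },
});

const flexJob = new gcp.dataflow.FlexTemplateJob("big_data_job", {
    name: pulumi.interpolate`dataflow-flextemplates-job-${suffix.dec}`,
    region: region,
    containerSpecGcsPath: "gs://my-bucket/templates/template.json",
    skipWaitOnJobTermination: true,
    parameters: {
        inputSubscription: subscriptionId,
    },
});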

Beyond these examples

These snippets focus on specific job-level features: template-based deployment, Streaming Engine and lifecycle control, and name conflict resolution. They’re intentionally minimal rather than full pipeline deployments.

The examples may reference pre-existing infrastructure such as GCS buckets for templates and temporary storage, Pub/Sub topics or subscriptions for streaming jobs, and VPC networks and subnets when not using defaults. They focus on launching the job rather than provisioning everything around it.

To keep things focused, common job patterns are omitted, including:

  • Network and subnetwork configuration (network, subnetwork, ipConfiguration)
  • Worker tuning (machineType, maxWorkers, zone)
  • Security controls (serviceAccountEmail, kmsKeyName)
  • Monitoring and experiments (additionalExperiments, labels)

These omissions are intentional: the goal is to illustrate how each job feature is wired, not to provide drop-in pipeline modules. The sketch below shows where these arguments attach; see the Dataflow Job resource reference for all available configuration options.
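
If you do need those options, they attach directly to the Job resource rather than going through parameters. A hedged TypeScript sketch; every value shown (network names, machine type, service account, KMS key, experiment, labels) is a placeholder, not tested configuration:

import * as gcp from "@pulumi/gcp";

const tunedJob = new gcp.dataflow.Job("tuned_job", {
    name: "dataflow-tuned-job",
    templateGcsPath: "gs://my-bucket/templates/template_file",
    tempGcsLocation: "gs://my-bucket/tmp_dir",
    // Networking: run workers on a custom VPC with private IPs only.
    network: "my-vpc",
    subnetwork: "regions/us-central1/subnetworks/dataflow-subnet",
    ipConfiguration: "WORKER_IP_PRIVATE",
    // Worker tuning.
    machineType: "n1-standard-2",
    maxWorkers: 10,
    zone: "us-central1-a",
    // Security controls.
    serviceAccountEmail: "dataflow-runner@myproject.iam.gserviceaccount.com",
    kmsKeyName: "projects/myproject/locations/us-central1/keyRings/my-ring/cryptoKeys/my-key",
    // Monitoring and experiments.
    additionalExperiments: ["enable_stackdriver_agent_metrics"],
    labels: {
        team: "data-platform",
        env: "dev",
    },
});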

Frequently Asked Questions

Job Lifecycle & Deletion
Why does my Dataflow job keep recreating after it completes or fails?
Dataflow jobs are treated as existing only while they are in a non-terminal state. Once a job reaches a terminal state such as FAILED, COMPLETE, or CANCELLED, the next pulumi up recreates it. This behavior is expected for continuously running streaming jobs but can surprise users working with batch jobs.
What's the difference between drain and cancel when deleting a job?
When you destroy a Dataflow job, onDelete controls the behavior. With drain (the default), no new data enters the pipeline, but existing data finishes processing before termination. With cancel, the job terminates immediately and any in-flight data stops processing.
Why is my pulumi destroy taking so long?
If onDelete is set to drain (the default), Pulumi waits for all in-flight data to finish processing before completing the destroy operation. This can take a long time for jobs with significant data in the pipeline.
How can I speed up job deletion without waiting for drain to complete?
Set skipWaitOnJobTermination to true to treat DRAINING and CANCELLING as terminal states, removing the resource from state immediately. You must then ensure the job name changes between instances to avoid conflicts; use a random ID resource with keepers to generate unique name suffixes, as shown in the random-suffix example above.
Configuration & Parameters
What are the required fields for creating a Dataflow job?
You must provide name, templateGcsPath (the GCS path to your job template), and tempGcsLocation (a writable GCS location for temporary data). A project is also needed, but it defaults to the provider’s project when not set explicitly.
Can I configure Dataflow pipeline options in the parameters field?
No. The parameters field is only for template-specific key/value pairs. Standard Dataflow service options such as machine type, worker count, and network have dedicated arguments on the Job resource and should not be set through parameters.
What format should I use for serviceAccountEmail?
Use just the email address, like myserviceaccount@myproject.iam.gserviceaccount.com. Do not include any serviceAccount: prefix or other qualifiers.
Networking & Security
How do I configure a Dataflow job to use a Shared VPC network?
When the subnetwork is in a Shared VPC network, you must use the complete URL format: googleapis.com/compute/v1/projects/PROJECT_ID/regions/REGION/subnetworks/SUBNET_NAME. The short form regions/REGION/subnetworks/SUBNETWORK only works for non-shared VPCs.
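For example, in TypeScript (the host project, region, and subnet names are placeholders):

import * as gcp from "@pulumi/gcp";

// Shared VPC: the subnetwork must be the complete URL, including the host project.
const sharedVpcJob = new gcp.dataflow.Job("shared_vpc_job", {
    name: "dataflow-shared-vpc-job",
    templateGcsPath: "gs://my-bucket/templates/template_file",
    tempGcsLocation: "gs://my-bucket/tmp_dir",
    subnetwork: "googleapis.com/compute/v1/projects/host-project/regions/us-central1/subnetworks/shared-subnet",
});
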
Immutability & Updates
What properties can't I change after creating a Dataflow job?
The following properties are immutable: name, project, region, zone, and maxWorkers. Changing any of these requires recreating the job.
When is Streaming Engine enabled by default?
Streaming Engine is automatically enabled for pipelines developed against the Beam SDK for Python v2.21.0 or later when using Python 3. For other configurations, you must explicitly set enableStreamingEngine to true.
