Job Args
Creates a job on Dataflow, which is an implementation of Apache Beam running on Google Compute Engine. For more information see the official documentation for Beam and Dataflow.
Example Usage
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const bigDataJob = new gcp.dataflow.Job("big_data_job", {
name: "dataflow-job",
templateGcsPath: "gs://my-bucket/templates/template_file",
tempGcsLocation: "gs://my-bucket/tmp_dir",
parameters: {
foo: "bar",
baz: "qux",
},
});
import pulumi
import pulumi_gcp as gcp
big_data_job = gcp.dataflow.Job("big_data_job",
name="dataflow-job",
template_gcs_path="gs://my-bucket/templates/template_file",
temp_gcs_location="gs://my-bucket/tmp_dir",
parameters={
"foo": "bar",
"baz": "qux",
})
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() =>
{
var bigDataJob = new Gcp.Dataflow.Job("big_data_job", new()
{
Name = "dataflow-job",
TemplateGcsPath = "gs://my-bucket/templates/template_file",
TempGcsLocation = "gs://my-bucket/tmp_dir",
Parameters =
{
{ "foo", "bar" },
{ "baz", "qux" },
},
});
});
package main
import (
"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/dataflow"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := dataflow.NewJob(ctx, "big_data_job", &dataflow.JobArgs{
Name: pulumi.String("dataflow-job"),
TemplateGcsPath: pulumi.String("gs://my-bucket/templates/template_file"),
TempGcsLocation: pulumi.String("gs://my-bucket/tmp_dir"),
Parameters: pulumi.StringMap{
"foo": pulumi.String("bar"),
"baz": pulumi.String("qux"),
},
})
if err != nil {
return err
}
return nil
})
}
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.dataflow.Job;
import com.pulumi.gcp.dataflow.JobArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var bigDataJob = new Job("bigDataJob", JobArgs.builder()
.name("dataflow-job")
.templateGcsPath("gs://my-bucket/templates/template_file")
.tempGcsLocation("gs://my-bucket/tmp_dir")
.parameters(Map.ofEntries(
Map.entry("foo", "bar"),
Map.entry("baz", "qux")
))
.build());
}
}
resources:
  bigDataJob:
    type: gcp:dataflow:Job
    name: big_data_job
    properties:
      name: dataflow-job
      templateGcsPath: gs://my-bucket/templates/template_file
      tempGcsLocation: gs://my-bucket/tmp_dir
      parameters:
        foo: bar
        baz: qux
Streaming Job
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const topic = new gcp.pubsub.Topic("topic", {name: "dataflow-job1"});
const bucket1 = new gcp.storage.Bucket("bucket1", {
name: "tf-test-bucket1",
location: "US",
forceDestroy: true,
});
const bucket2 = new gcp.storage.Bucket("bucket2", {
name: "tf-test-bucket2",
location: "US",
forceDestroy: true,
});
const pubsubStream = new gcp.dataflow.Job("pubsub_stream", {
name: "tf-test-dataflow-job1",
templateGcsPath: "gs://my-bucket/templates/template_file",
tempGcsLocation: "gs://my-bucket/tmp_dir",
enableStreamingEngine: true,
parameters: {
inputFilePattern: pulumi.interpolate`${bucket1.url}/*.json`,
outputTopic: topic.id,
},
transformNameMapping: {
name: "test_job",
env: "test",
},
onDelete: "cancel",
});
import pulumi
import pulumi_gcp as gcp
topic = gcp.pubsub.Topic("topic", name="dataflow-job1")
bucket1 = gcp.storage.Bucket("bucket1",
name="tf-test-bucket1",
location="US",
force_destroy=True)
bucket2 = gcp.storage.Bucket("bucket2",
name="tf-test-bucket2",
location="US",
force_destroy=True)
pubsub_stream = gcp.dataflow.Job("pubsub_stream",
name="tf-test-dataflow-job1",
template_gcs_path="gs://my-bucket/templates/template_file",
temp_gcs_location="gs://my-bucket/tmp_dir",
enable_streaming_engine=True,
parameters={
"inputFilePattern": bucket1.url.apply(lambda url: f"{url}/*.json"),
"outputTopic": topic.id,
},
transform_name_mapping={
"name": "test_job",
"env": "test",
},
on_delete="cancel")
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() =>
{
var topic = new Gcp.PubSub.Topic("topic", new()
{
Name = "dataflow-job1",
});
var bucket1 = new Gcp.Storage.Bucket("bucket1", new()
{
Name = "tf-test-bucket1",
Location = "US",
ForceDestroy = true,
});
var bucket2 = new Gcp.Storage.Bucket("bucket2", new()
{
Name = "tf-test-bucket2",
Location = "US",
ForceDestroy = true,
});
var pubsubStream = new Gcp.Dataflow.Job("pubsub_stream", new()
{
Name = "tf-test-dataflow-job1",
TemplateGcsPath = "gs://my-bucket/templates/template_file",
TempGcsLocation = "gs://my-bucket/tmp_dir",
EnableStreamingEngine = true,
Parameters =
{
{ "inputFilePattern", bucket1.Url.Apply(url => $"{url}/*.json") },
{ "outputTopic", topic.Id },
},
TransformNameMapping =
{
{ "name", "test_job" },
{ "env", "test" },
},
OnDelete = "cancel",
});
});
package main
import (
"fmt"
"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/dataflow"
"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/pubsub"
"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/storage"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
topic, err := pubsub.NewTopic(ctx, "topic", &pubsub.TopicArgs{
Name: pulumi.String("dataflow-job1"),
})
if err != nil {
return err
}
bucket1, err := storage.NewBucket(ctx, "bucket1", &storage.BucketArgs{
Name: pulumi.String("tf-test-bucket1"),
Location: pulumi.String("US"),
ForceDestroy: pulumi.Bool(true),
})
if err != nil {
return err
}
_, err = storage.NewBucket(ctx, "bucket2", &storage.BucketArgs{
Name: pulumi.String("tf-test-bucket2"),
Location: pulumi.String("US"),
ForceDestroy: pulumi.Bool(true),
})
if err != nil {
return err
}
_, err = dataflow.NewJob(ctx, "pubsub_stream", &dataflow.JobArgs{
Name: pulumi.String("tf-test-dataflow-job1"),
TemplateGcsPath: pulumi.String("gs://my-bucket/templates/template_file"),
TempGcsLocation: pulumi.String("gs://my-bucket/tmp_dir"),
EnableStreamingEngine: pulumi.Bool(true),
Parameters: pulumi.StringMap{
"inputFilePattern": bucket1.Url.ApplyT(func(url string) (string, error) {
return fmt.Sprintf("%v/*.json", url), nil
}).(pulumi.StringOutput),
"outputTopic": topic.ID(),
},
TransformNameMapping: pulumi.StringMap{
"name": pulumi.String("test_job"),
"env": pulumi.String("test"),
},
OnDelete: pulumi.String("cancel"),
})
if err != nil {
return err
}
return nil
})
}
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.pubsub.Topic;
import com.pulumi.gcp.pubsub.TopicArgs;
import com.pulumi.gcp.storage.Bucket;
import com.pulumi.gcp.storage.BucketArgs;
import com.pulumi.gcp.dataflow.Job;
import com.pulumi.gcp.dataflow.JobArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var topic = new Topic("topic", TopicArgs.builder()
.name("dataflow-job1")
.build());
var bucket1 = new Bucket("bucket1", BucketArgs.builder()
.name("tf-test-bucket1")
.location("US")
.forceDestroy(true)
.build());
var bucket2 = new Bucket("bucket2", BucketArgs.builder()
.name("tf-test-bucket2")
.location("US")
.forceDestroy(true)
.build());
var pubsubStream = new Job("pubsubStream", JobArgs.builder()
.name("tf-test-dataflow-job1")
.templateGcsPath("gs://my-bucket/templates/template_file")
.tempGcsLocation("gs://my-bucket/tmp_dir")
.enableStreamingEngine(true)
.parameters(Map.ofEntries(
Map.entry("inputFilePattern", bucket1.url().applyValue(url -> String.format("%s/*.json", url))),
Map.entry("outputTopic", topic.id())
))
.transformNameMapping(Map.ofEntries(
Map.entry("name", "test_job"),
Map.entry("env", "test")
))
.onDelete("cancel")
.build());
}
}
resources:
  topic:
    type: gcp:pubsub:Topic
    properties:
      name: dataflow-job1
  bucket1:
    type: gcp:storage:Bucket
    properties:
      name: tf-test-bucket1
      location: US
      forceDestroy: true
  bucket2:
    type: gcp:storage:Bucket
    properties:
      name: tf-test-bucket2
      location: US
      forceDestroy: true
  pubsubStream:
    type: gcp:dataflow:Job
    name: pubsub_stream
    properties:
      name: tf-test-dataflow-job1
      templateGcsPath: gs://my-bucket/templates/template_file
      tempGcsLocation: gs://my-bucket/tmp_dir
      enableStreamingEngine: true
      parameters:
        inputFilePattern: ${bucket1.url}/*.json
        outputTopic: ${topic.id}
      transformNameMapping:
        name: test_job
        env: test
      onDelete: cancel
Note on "destroy" / "apply"
There are many types of Dataflow jobs. Some Dataflow jobs run constantly, getting new data from (e.g.) a GCS bucket, and outputting data continuously. Some jobs process a set amount of data then terminate. All jobs can fail while running due to programming errors or other issues. In this way, Dataflow jobs are different from most other Google resources.

The Dataflow resource is considered 'existing' while it is in a nonterminal state. If it reaches a terminal state (e.g. 'FAILED', 'COMPLETE', 'CANCELLED'), it will be recreated on the next 'apply'. This is as expected for jobs which run continuously, but may surprise users who use this resource for other kinds of Dataflow jobs.

A Dataflow job which is 'destroyed' may be "cancelled" or "drained". If "cancelled", the job terminates: any data written remains where it is, but no new data will be processed. If "drained", no new data will enter the pipeline, but any data currently in the pipeline will finish being processed. The default is "drain". When on_delete is set to "drain" in the configuration, you may experience a long wait for your pulumi destroy to complete.

You can potentially short-circuit the wait by setting skip_wait_on_job_termination to true, but beware that unless you take active steps to ensure that the job name parameter changes between instances, the name will conflict and the launch of the new job will fail. One way to do this is with a random_id resource, for example:
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
import * as random from "@pulumi/random";
const config = new pulumi.Config();
const bigDataJobSubscriptionId = config.get("bigDataJobSubscriptionId") || "projects/myproject/subscriptions/messages";
const bigDataJobNameSuffix = new random.RandomId("big_data_job_name_suffix", {
byteLength: 4,
keepers: {
region: region,
subscription_id: bigDataJobSubscriptionId,
},
});
const bigDataJob = new gcp.dataflow.FlexTemplateJob("big_data_job", {
name: pulumi.interpolate`dataflow-flextemplates-job-${bigDataJobNameSuffix.dec}`,
region: region,
containerSpecGcsPath: "gs://my-bucket/templates/template.json",
skipWaitOnJobTermination: true,
parameters: {
inputSubscription: bigDataJobSubscriptionId,
},
});
import pulumi
import pulumi_gcp as gcp
import pulumi_random as random
config = pulumi.Config()
big_data_job_subscription_id = config.get("bigDataJobSubscriptionId")
if big_data_job_subscription_id is None:
big_data_job_subscription_id = "projects/myproject/subscriptions/messages"
big_data_job_name_suffix = random.RandomId("big_data_job_name_suffix",
byte_length=4,
keepers={
"region": region,
"subscription_id": big_data_job_subscription_id,
})
big_data_job = gcp.dataflow.FlexTemplateJob("big_data_job",
name=big_data_job_name_suffix.dec.apply(lambda dec: f"dataflow-flextemplates-job-{dec}"),
region=region,
container_spec_gcs_path="gs://my-bucket/templates/template.json",
skip_wait_on_job_termination=True,
parameters={
"inputSubscription": big_data_job_subscription_id,
})
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
using Random = Pulumi.Random;
return await Deployment.RunAsync(() =>
{
var config = new Config();
var bigDataJobSubscriptionId = config.Get("bigDataJobSubscriptionId") ?? "projects/myproject/subscriptions/messages";
var bigDataJobNameSuffix = new Random.RandomId("big_data_job_name_suffix", new()
{
ByteLength = 4,
Keepers =
{
{ "region", region },
{ "subscription_id", bigDataJobSubscriptionId },
},
});
var bigDataJob = new Gcp.Dataflow.FlexTemplateJob("big_data_job", new()
{
Name = bigDataJobNameSuffix.Dec.Apply(dec => $"dataflow-flextemplates-job-{dec}"),
Region = region,
ContainerSpecGcsPath = "gs://my-bucket/templates/template.json",
SkipWaitOnJobTermination = true,
Parameters =
{
{ "inputSubscription", bigDataJobSubscriptionId },
},
});
});
package main
import (
"fmt"
"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/dataflow"
"github.com/pulumi/pulumi-random/sdk/v4/go/random"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi/config"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
cfg := config.New(ctx, "")
bigDataJobSubscriptionId := "projects/myproject/subscriptions/messages"
if param := cfg.Get("bigDataJobSubscriptionId"); param != "" {
bigDataJobSubscriptionId = param
}
bigDataJobNameSuffix, err := random.NewRandomId(ctx, "big_data_job_name_suffix", &random.RandomIdArgs{
ByteLength: pulumi.Int(4),
Keepers: pulumi.StringMap{
"region": pulumi.Any(region),
"subscription_id": pulumi.String(bigDataJobSubscriptionId),
},
})
if err != nil {
return err
}
_, err = dataflow.NewFlexTemplateJob(ctx, "big_data_job", &dataflow.FlexTemplateJobArgs{
Name: bigDataJobNameSuffix.Dec.ApplyT(func(dec string) (string, error) {
return fmt.Sprintf("dataflow-flextemplates-job-%v", dec), nil
}).(pulumi.StringOutput),
Region: pulumi.Any(region),
ContainerSpecGcsPath: pulumi.String("gs://my-bucket/templates/template.json"),
SkipWaitOnJobTermination: pulumi.Bool(true),
Parameters: pulumi.StringMap{
"inputSubscription": pulumi.String(bigDataJobSubscriptionId),
},
})
if err != nil {
return err
}
return nil
})
}
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.random.RandomId;
import com.pulumi.random.RandomIdArgs;
import com.pulumi.gcp.dataflow.FlexTemplateJob;
import com.pulumi.gcp.dataflow.FlexTemplateJobArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
final var config = ctx.config();
final var bigDataJobSubscriptionId = config.get("bigDataJobSubscriptionId").orElse("projects/myproject/subscriptions/messages");
var bigDataJobNameSuffix = new RandomId("bigDataJobNameSuffix", RandomIdArgs.builder()
.byteLength(4)
.keepers(Map.ofEntries(
Map.entry("region", region),
Map.entry("subscription_id", bigDataJobSubscriptionId)
))
.build());
var bigDataJob = new FlexTemplateJob("bigDataJob", FlexTemplateJobArgs.builder()
.name(bigDataJobNameSuffix.dec().applyValue(dec -> String.format("dataflow-flextemplates-job-%s", dec)))
.region(region)
.containerSpecGcsPath("gs://my-bucket/templates/template.json")
.skipWaitOnJobTermination(true)
.parameters(Map.of("inputSubscription", bigDataJobSubscriptionId))
.build());
}
}
configuration:
  bigDataJobSubscriptionId:
    type: string
    default: projects/myproject/subscriptions/messages
resources:
  bigDataJobNameSuffix:
    type: random:RandomId
    name: big_data_job_name_suffix
    properties:
      byteLength: 4
      keepers:
        region: ${region}
        subscription_id: ${bigDataJobSubscriptionId}
  bigDataJob:
    type: gcp:dataflow:FlexTemplateJob
    name: big_data_job
    properties:
      name: dataflow-flextemplates-job-${bigDataJobNameSuffix.dec}
      region: ${region}
      containerSpecGcsPath: gs://my-bucket/templates/template.json
      skipWaitOnJobTermination: true
      parameters:
        inputSubscription: ${bigDataJobSubscriptionId}
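For completeness, here is a minimal sketch of the same pattern applied to a classic templated job rather than a Flex Template. It is illustrative only: the job name, template path, and bucket paths are placeholders, and it simply makes the default "drain" behavior explicit alongside skipWaitOnJobTermination.

import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
import * as random from "@pulumi/random";

// Hypothetical example: drain the job on destroy instead of cancelling it.
// Because skipWaitOnJobTermination is true, Pulumi will not wait for DRAINING
// to finish, so the job name must change between instances; the random suffix
// below avoids a name conflict when the replacement job is launched.
const nameSuffix = new random.RandomId("streaming_job_name_suffix", {
    byteLength: 4,
});
const streamingJob = new gcp.dataflow.Job("streaming_job", {
    name: pulumi.interpolate`my-streaming-job-${nameSuffix.dec}`, // placeholder name
    templateGcsPath: "gs://my-bucket/templates/template_file", // placeholder path
    tempGcsLocation: "gs://my-bucket/tmp_dir", // placeholder path
    onDelete: "drain", // let in-flight data finish processing when the job is destroyed
    skipWaitOnJobTermination: true, // do not block `pulumi destroy` on the drain
});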
Import
Dataflow jobs can be imported using the job id, e.g.
{{id}}
When using the pulumi import command, Dataflow jobs can be imported using one of the formats above. For example:
$ pulumi import gcp:dataflow/job:Job default {{id}}
Properties
additionalExperiments: List of experiments that should be used by the job. An example value is ["enable_stackdriver_agent_metrics"].
enableStreamingEngine: Enable/disable the use of Streaming Engine for the job. Note that Streaming Engine is enabled by default for pipelines developed against the Beam SDK for Python v2.21.0 or later when using Python 3.
ipConfiguration: The configuration for VM IPs. Options are "WORKER_IP_PUBLIC" or "WORKER_IP_PRIVATE".
kmsKeyName: The name of the Cloud KMS key for the job. Key format is: projects/PROJECT_ID/locations/LOCATION/keyRings/KEY_RING/cryptoKeys/KEY
labels: User labels to be specified for the job. Keys and values should follow the restrictions specified in the labeling restrictions page. Note: this field is non-authoritative and will only manage the labels present in your configuration. Please refer to the field effectiveLabels for all of the labels present on the resource.
machineType: The machine type to use for the job.
maxWorkers: The number of workers permitted to work on the job. More workers may improve processing speed at additional cost.
parameters: Template-specific key/value pairs to be forwarded to the pipeline's options; keys are case-sensitive based on the language in which the pipeline is coded, mostly Java. Note: do not configure Dataflow options here in parameters.
serviceAccountEmail: The Service Account email used to create the job. This should be just an email, e.g. myserviceaccount@myproject.iam.gserviceaccount.com. Do not include any serviceAccount: or other prefix.
skipWaitOnJobTermination: If set to true, Pulumi will treat DRAINING and CANCELLING as terminal states when deleting the resource, and will remove the resource from Pulumi state and move on. See the note above.
subnetwork: The subnetwork to which VMs will be assigned. Should be of the form "regions/REGION/subnetworks/SUBNETWORK". If the subnetwork is located in a Shared VPC network, you must use the complete URL. For example "googleapis.com/compute/v1/projects/PROJECT_ID/regions/REGION/subnetworks/SUBNET_NAME"
tempGcsLocation: A writeable location on GCS for the Dataflow job to dump its temporary data.
templateGcsPath: The GCS path to the Dataflow job template.
transformNameMapping: Only applicable when updating a pipeline. Map of transform name prefixes of the job to be replaced with the corresponding name prefixes of the new job. This field is not used outside of update.
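To make the list above concrete, the sketch below combines several of these arguments on a single templated job. It is an illustrative example only: the machine type, subnetwork, service account, KMS key, labels, and template parameters are placeholder assumptions, not values taken from the examples above.

import * as gcp from "@pulumi/gcp";

// All names and values below are hypothetical placeholders.
const tunedJob = new gcp.dataflow.Job("tuned_job", {
    name: "dataflow-tuned-job",
    templateGcsPath: "gs://my-bucket/templates/template_file",
    tempGcsLocation: "gs://my-bucket/tmp_dir",
    machineType: "n1-standard-2", // worker machine type
    maxWorkers: 8, // upper bound on worker count
    ipConfiguration: "WORKER_IP_PRIVATE", // workers receive private IPs only
    subnetwork: "regions/us-central1/subnetworks/my-subnet",
    serviceAccountEmail: "myserviceaccount@myproject.iam.gserviceaccount.com",
    kmsKeyName: "projects/myproject/locations/us-central1/keyRings/my-ring/cryptoKeys/my-key",
    additionalExperiments: ["enable_stackdriver_agent_metrics"],
    labels: {
        team: "data-eng", // only labels declared here are managed
    },
    parameters: {
        // template parameters only; do not put Dataflow pipeline options here
        foo: "bar",
    },
});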