FlexTemplateJob

class FlexTemplateJob : KotlinCustomResource

Example Usage

package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.gcp.dataflow.FlexTemplateJob;
import com.pulumi.gcp.dataflow.FlexTemplateJobArgs;
import com.pulumi.resources.CustomResourceOptions;
import java.util.Map;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        // `google_beta` is assumed to be an explicitly instantiated google-beta
        // provider defined elsewhere in the program.
        var bigDataJob = new FlexTemplateJob("bigDataJob", FlexTemplateJobArgs.builder()
            .containerSpecGcsPath("gs://my-bucket/templates/template.json")
            .parameters(Map.of("inputSubscription", "messages"))
            .build(), CustomResourceOptions.builder()
                .provider(google_beta)
                .build());
    }
}

Note on "destroy" / "apply"

There are many types of Dataflow jobs. Some Dataflow jobs run constantly, getting new data from (e.g.) a GCS bucket, and outputting data continuously. Some jobs process a set amount of data then terminate. All jobs can fail while running due to programming errors or other issues. In this way, Dataflow jobs are different from most other provider / Google resources.

The Dataflow resource is considered 'existing' while it is in a nonterminal state. If it reaches a terminal state (e.g. 'FAILED', 'COMPLETE', 'CANCELLED'), it will be recreated on the next 'apply'. This is as expected for jobs which run continuously, but may surprise users who use this resource for other kinds of Dataflow jobs.

A Dataflow job which is 'destroyed' may be "cancelled" or "drained". If "cancelled", the job terminates - any data written remains where it is, but no new data will be processed. If "drained", no new data will enter the pipeline, but any data currently in the pipeline will finish being processed. The default is "cancelled", but if a user sets on_delete to "drain" in the configuration, you may experience a long wait for your pulumi destroy to complete.

You can potentially short-circuit the wait by setting skip_wait_on_job_termination to true, but beware that unless you take active steps to ensure that the job name parameter changes between instances, the name will conflict and the launch of the new job will fail. One way to do this is with a random_id resource, for example:

import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
import * as random from "@pulumi/random";

const config = new pulumi.Config();
const region = config.require("region");
const bigDataJobSubscriptionId = config.get("bigDataJobSubscriptionId") || "projects/myproject/subscriptions/messages";
const bigDataJobNameSuffix = new random.RandomId("bigDataJobNameSuffix", {
    byteLength: 4,
    keepers: {
        region: region,
        subscription_id: bigDataJobSubscriptionId,
    },
});
// `google_beta` is assumed to be an explicitly instantiated google-beta
// provider defined elsewhere in the program.
const bigDataJob = new gcp.dataflow.FlexTemplateJob("bigDataJob", {
    region: region,
    containerSpecGcsPath: "gs://my-bucket/templates/template.json",
    skipWaitOnJobTermination: true,
    parameters: {
        inputSubscription: bigDataJobSubscriptionId,
    },
}, {
    provider: google_beta,
});

import pulumi
import pulumi_gcp as gcp
import pulumi_random as random

config = pulumi.Config()
region = config.require("region")
big_data_job_subscription_id = config.get("bigDataJobSubscriptionId")
if big_data_job_subscription_id is None:
    big_data_job_subscription_id = "projects/myproject/subscriptions/messages"
big_data_job_name_suffix = random.RandomId("bigDataJobNameSuffix",
    byte_length=4,
    keepers={
        "region": region,
        "subscription_id": big_data_job_subscription_id,
    })
# `google_beta` is assumed to be an explicitly instantiated google-beta
# provider defined elsewhere in the program.
big_data_job = gcp.dataflow.FlexTemplateJob("bigDataJob",
    region=region,
    container_spec_gcs_path="gs://my-bucket/templates/template.json",
    skip_wait_on_job_termination=True,
    parameters={
        "inputSubscription": big_data_job_subscription_id,
    },
    opts=pulumi.ResourceOptions(provider=google_beta))

using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
using Random = Pulumi.Random;

return await Deployment.RunAsync(() =>
{
    var config = new Config();
    var region = config.Require("region");
    var bigDataJobSubscriptionId = config.Get("bigDataJobSubscriptionId") ?? "projects/myproject/subscriptions/messages";
    var bigDataJobNameSuffix = new Random.RandomId("bigDataJobNameSuffix", new()
    {
        ByteLength = 4,
        Keepers =
        {
            { "region", region },
            { "subscription_id", bigDataJobSubscriptionId },
        },
    });
    // `google_beta` is assumed to be an explicitly instantiated google-beta
    // provider defined elsewhere in the program.
    var bigDataJob = new Gcp.Dataflow.FlexTemplateJob("bigDataJob", new()
    {
        Region = region,
        ContainerSpecGcsPath = "gs://my-bucket/templates/template.json",
        SkipWaitOnJobTermination = true,
        Parameters =
        {
            { "inputSubscription", bigDataJobSubscriptionId },
        },
    }, new CustomResourceOptions
    {
        Provider = google_beta,
    });
});

package main

import (
    "github.com/pulumi/pulumi-gcp/sdk/v6/go/gcp/dataflow"
    "github.com/pulumi/pulumi-random/sdk/v4/go/random"
    "github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    "github.com/pulumi/pulumi/sdk/v3/go/pulumi/config"
)

func main() {
    pulumi.Run(func(ctx *pulumi.Context) error {
        cfg := config.New(ctx, "")
        region := cfg.Require("region")
        bigDataJobSubscriptionId := "projects/myproject/subscriptions/messages"
        if param := cfg.Get("bigDataJobSubscriptionId"); param != "" {
            bigDataJobSubscriptionId = param
        }
        _, err := random.NewRandomId(ctx, "bigDataJobNameSuffix", &random.RandomIdArgs{
            ByteLength: pulumi.Int(4),
            Keepers: pulumi.Map{
                "region":          pulumi.String(region),
                "subscription_id": pulumi.String(bigDataJobSubscriptionId),
            },
        })
        if err != nil {
            return err
        }
        // `google_beta` is assumed to be an explicitly instantiated google-beta
        // provider defined elsewhere in the program.
        _, err = dataflow.NewFlexTemplateJob(ctx, "bigDataJob", &dataflow.FlexTemplateJobArgs{
            Region:                   pulumi.String(region),
            ContainerSpecGcsPath:     pulumi.String("gs://my-bucket/templates/template.json"),
            SkipWaitOnJobTermination: pulumi.Bool(true),
            Parameters: pulumi.Map{
                "inputSubscription": pulumi.String(bigDataJobSubscriptionId),
            },
        }, pulumi.Provider(google_beta))
        if err != nil {
            return err
        }
        return nil
    })
}

package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.random.RandomId;
import com.pulumi.random.RandomIdArgs;
import com.pulumi.gcp.dataflow.FlexTemplateJob;
import com.pulumi.gcp.dataflow.FlexTemplateJobArgs;
import com.pulumi.resources.CustomResourceOptions;
import java.util.Map;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        final var config = ctx.config();
        final var region = config.require("region");
        final var bigDataJobSubscriptionId = config.get("bigDataJobSubscriptionId").orElse("projects/myproject/subscriptions/messages");
        var bigDataJobNameSuffix = new RandomId("bigDataJobNameSuffix", RandomIdArgs.builder()
            .byteLength(4)
            .keepers(Map.ofEntries(
                Map.entry("region", region),
                Map.entry("subscription_id", bigDataJobSubscriptionId)
            ))
            .build());
        // `google_beta` is assumed to be an explicitly instantiated google-beta
        // provider defined elsewhere in the program.
        var bigDataJob = new FlexTemplateJob("bigDataJob", FlexTemplateJobArgs.builder()
            .region(region)
            .containerSpecGcsPath("gs://my-bucket/templates/template.json")
            .skipWaitOnJobTermination(true)
            .parameters(Map.of("inputSubscription", bigDataJobSubscriptionId))
            .build(), CustomResourceOptions.builder()
                .provider(google_beta)
                .build());
    }
}

configuration:
  region:
    type: string
  bigDataJobSubscriptionId:
    type: string
    default: projects/myproject/subscriptions/messages
resources:
  bigDataJobNameSuffix:
    type: random:RandomId
    properties:
      byteLength: 4
      keepers:
        region: ${region}
        subscription_id: ${bigDataJobSubscriptionId}
  bigDataJob:
    type: gcp:dataflow:FlexTemplateJob
    properties:
      region: ${region}
      containerSpecGcsPath: gs://my-bucket/templates/template.json
      skipWaitOnJobTermination: true
      parameters:
        inputSubscription: ${bigDataJobSubscriptionId}
    options:
      # `google-beta` is assumed to be an explicitly instantiated google-beta
      # provider resource defined elsewhere in the program.
      provider: ${["google-beta"]}
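
Note that these examples create the RandomId but do not show it feeding the job name. A minimal sketch of that wiring, in Kotlin against the same Java SDK classes used above, follows; the big-data-job- prefix is an illustrative assumption, and region, keepers, and provider options are omitted for brevity.

import com.pulumi.Pulumi
import com.pulumi.gcp.dataflow.FlexTemplateJob
import com.pulumi.gcp.dataflow.FlexTemplateJobArgs
import com.pulumi.random.RandomId
import com.pulumi.random.RandomIdArgs

fun main() {
    Pulumi.run { _ ->
        // Regenerated whenever its keepers change; stable otherwise.
        val suffix = RandomId("bigDataJobNameSuffix", RandomIdArgs.builder()
            .byteLength(4)
            .build())
        FlexTemplateJob("bigDataJob", FlexTemplateJobArgs.builder()
            // Embed the random hex suffix in the Dataflow job name so a new
            // instance never collides with a still-terminating predecessor.
            .name(suffix.hex().applyValue { hex -> "big-data-job-$hex" })
            .containerSpecGcsPath("gs://my-bucket/templates/template.json")
            .skipWaitOnJobTermination(true)
            .parameters(mapOf<String, Any>("inputSubscription" to "messages"))
            .build())
    }
}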

Import

This resource does not support import.

Properties

val additionalExperiments: Output<List<String>>

List of experiments that should be used by the job. An example value is "enable_stackdriver_agent_metrics".

val autoscalingAlgorithm: Output<String>

The algorithm to use for autoscaling.

val containerSpecGcsPath: Output<String>

The GCS path to the Dataflow job Flex Template.

val enableStreamingEngine: Output<Boolean>?

Indicates if the job should use the streaming engine feature.

val id: Output<String>

val ipConfiguration: Output<String>?

The configuration for VM IPs. Options are "WORKER_IP_PUBLIC" or "WORKER_IP_PRIVATE".

val jobId: Output<String>

The unique ID of this job.

val kmsKeyName: Output<String>?

The name for the Cloud KMS key for the job. Key format is: projects/PROJECT_ID/locations/LOCATION/keyRings/KEY_RING/cryptoKeys/KEY

val labels: Output<Map<String, Any>>?

User labels to be specified for the job. Keys and values should follow the restrictions specified in the labeling restrictions page. Note: This field is marked as deprecated as the API does not currently support adding labels. NOTE: Google-provided Dataflow templates often provide default labels that begin with goog-dataflow-provided. Unless explicitly set in config, these labels will be ignored to prevent diffs on re-apply.

val launcherMachineType: Output<String>

The machine type to use for launching the job. The default is n1-standard-1.

val machineType: Output<String>?

The machine type to use for the job.

val maxWorkers: Output<Int>?

The maximum number of Google Compute Engine instances to be made available to your pipeline during execution, from 1 to 1000.

val name: Output<String>

A unique name for the resource, required by Dataflow.

val network: Output<String>?

The network to which VMs will be assigned. If it is not provided, "default" will be used.

val numWorkers: Output<Int>?

The initial number of Google Compute Engine instances for the job.

val onDelete: Output<String>?

One of "drain" or "cancel". Specifies behavior of deletion during pulumi destroy. See above note.

val parameters: Output<Map<String, Any>>?

Key/Value pairs to be passed to the Dataflow job (as used in the template). Additional pipeline options such as serviceAccount, workerMachineType, etc. can be specified here.

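For instance, template inputs and the launcher's pipeline options can travel in the same map (a Kotlin sketch; the values are placeholders, and serviceAccount / workerMachineType are the options named above):

// Sketch: template parameters and launcher pipeline options share one map.
val parameters = mapOf<String, Any>(
    "inputSubscription" to "messages", // consumed by the template itself
    "serviceAccount" to "runner@my-project.iam.gserviceaccount.com", // pipeline option
    "workerMachineType" to "n1-standard-2", // pipeline option
)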
val project: Output<String>

The project in which the resource belongs. If it is not provided, the provider project is used.

val pulumiChildResources: Set<KotlinResource>
val region: Output<String>

The region in which the created job should run.

val sdkContainerImage: Output<String>

Docker registry location of the container image to use for the worker harness. Default is the container for the version of the SDK. Note this field is only valid for portable pipelines.

val serviceAccountEmail: Output<String>

The Service Account email used to create the job.

val skipWaitOnJobTermination: Output<Boolean>?

If true, treat DRAINING and CANCELLING as terminal job states and do not wait for further changes before removing the job from state and moving on. WARNING: this will lead to job name conflicts if you do not ensure that the job names are different, e.g. by embedding a release ID or by using a random_id.

val stagingLocation: Output<String>

The Cloud Storage path to use for staging files. Must be a valid Cloud Storage URL, beginning with gs://.

val state: Output<String>

The current state of the resource, selected from the JobState enum.

val subnetwork: Output<String>?

The subnetwork to which VMs will be assigned. Should be of the form "regions/REGION/subnetworks/SUBNETWORK".

val tempLocation: Output<String>

The Cloud Storage path to use for temporary files. Must be a valid Cloud Storage URL, beginning with gs://.

val transformNameMapping: Output<Map<String, Any>>?

Only applicable when updating a pipeline. Map of transform name prefixes of the job to be replaced with the corresponding name prefixes of the new job.

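For example, a job whose read transform was renamed between releases might pass a mapping like the following when updating (a Kotlin sketch; both transform names are illustrative):

// Sketch: old transform name prefixes mapped to their replacements in the new job.
val transformNameMapping = mapOf<String, Any>(
    "oldReadFromPubSub" to "readFromPubSub",
)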
val type: Output<String>

The type of this job, selected from the JobType enum.

val urn: Output<String>