Job Args
Jobs are actions that BigQuery runs on your behalf to load data, export data, query data, or copy data. Once a BigQuery job is created, it cannot be changed or deleted. To get more information about Job, see:
How-to Guides: BigQuery Jobs Intro (https://cloud.google.com/bigquery/docs/jobs-overview)
Example Usage
Bigquery Job Query
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.bigquery.Dataset;
import com.pulumi.gcp.bigquery.DatasetArgs;
import com.pulumi.gcp.bigquery.Table;
import com.pulumi.gcp.bigquery.TableArgs;
import com.pulumi.gcp.bigquery.Job;
import com.pulumi.gcp.bigquery.JobArgs;
import com.pulumi.gcp.bigquery.inputs.JobQueryArgs;
import com.pulumi.gcp.bigquery.inputs.JobQueryDestinationTableArgs;
import com.pulumi.gcp.bigquery.inputs.JobQueryScriptOptionsArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        // Dataset and table that receive the query results.
        var bar = new Dataset("bar", DatasetArgs.builder()
            .datasetId("job_query_dataset")
            .friendlyName("test")
            .description("This is a test description")
            .location("US")
            .build());

        var foo = new Table("foo", TableArgs.builder()
            .deletionProtection(false)
            .datasetId(bar.datasetId())
            .tableId("job_query_table")
            .build());

        // The bracketed table name is legacy SQL syntax; allowLargeResults and
        // flattenResults only apply to legacy SQL queries.
        var job = new Job("job", JobArgs.builder()
            .jobId("job_query")
            .labels(Map.of("example-label", "example-value"))
            .query(JobQueryArgs.builder()
                .query("SELECT state FROM [lookerdata:cdc.project_tycho_reports]")
                .destinationTable(JobQueryDestinationTableArgs.builder()
                    .projectId(foo.project())
                    .datasetId(foo.datasetId())
                    .tableId(foo.tableId())
                    .build())
                .allowLargeResults(true)
                .flattenResults(true)
                // For multi-statement scripts, keyResultStatement selects which
                // statement's result becomes the job result.
                .scriptOptions(JobQueryScriptOptionsArgs.builder()
                    .keyResultStatement("LAST")
                    .build())
                .build())
            .build());
    }
}
Bigquery Job Query Table Reference
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.bigquery.Dataset;
import com.pulumi.gcp.bigquery.DatasetArgs;
import com.pulumi.gcp.bigquery.Table;
import com.pulumi.gcp.bigquery.TableArgs;
import com.pulumi.gcp.bigquery.Job;
import com.pulumi.gcp.bigquery.JobArgs;
import com.pulumi.gcp.bigquery.inputs.JobQueryArgs;
import com.pulumi.gcp.bigquery.inputs.JobQueryDestinationTableArgs;
import com.pulumi.gcp.bigquery.inputs.JobQueryDefaultDatasetArgs;
import com.pulumi.gcp.bigquery.inputs.JobQueryScriptOptionsArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var bar = new Dataset("bar", DatasetArgs.builder()
            .datasetId("job_query_dataset")
            .friendlyName("test")
            .description("This is a test description")
            .location("US")
            .build());

        var foo = new Table("foo", TableArgs.builder()
            .deletionProtection(false)
            .datasetId(bar.datasetId())
            .tableId("job_query_table")
            .build());

        // Same query job as above, but the destination table and default dataset
        // are set from the resources' full IDs rather than from individual fields.
        var job = new Job("job", JobArgs.builder()
            .jobId("job_query")
            .labels(Map.of("example-label", "example-value"))
            .query(JobQueryArgs.builder()
                .query("SELECT state FROM [lookerdata:cdc.project_tycho_reports]")
                .destinationTable(JobQueryDestinationTableArgs.builder()
                    .tableId(foo.id())
                    .build())
                .defaultDataset(JobQueryDefaultDatasetArgs.builder()
                    .datasetId(bar.id())
                    .build())
                .allowLargeResults(true)
                .flattenResults(true)
                .scriptOptions(JobQueryScriptOptionsArgs.builder()
                    .keyResultStatement("LAST")
                    .build())
                .build())
            .build());
    }
}
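The only difference from the first example is how the destination is specified: destinationTable and defaultDataset take the foo.id() and bar.id() outputs, which are full resource references of the form projects/{{project}}/datasets/{{dataset}}/tables/{{table}} (and projects/{{project}}/datasets/{{dataset}}), and the provider parses those references instead of requiring projectId, datasetId, and tableId to be set individually.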
Bigquery Job Load
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.bigquery.Dataset;
import com.pulumi.gcp.bigquery.DatasetArgs;
import com.pulumi.gcp.bigquery.Table;
import com.pulumi.gcp.bigquery.TableArgs;
import com.pulumi.gcp.bigquery.Job;
import com.pulumi.gcp.bigquery.JobArgs;
import com.pulumi.gcp.bigquery.inputs.JobLoadArgs;
import com.pulumi.gcp.bigquery.inputs.JobLoadDestinationTableArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        // Dataset and destination table for the loaded data.
        var bar = new Dataset("bar", DatasetArgs.builder()
            .datasetId("job_load_dataset")
            .friendlyName("test")
            .description("This is a test description")
            .location("US")
            .build());

        var foo = new Table("foo", TableArgs.builder()
            .deletionProtection(false)
            .datasetId(bar.datasetId())
            .tableId("job_load_table")
            .build());

        // Load a public sample CSV, skipping its header row; BigQuery autodetects
        // the schema and appends to any existing rows.
        var job = new Job("job", JobArgs.builder()
            .jobId("job_load")
            .labels(Map.of("my_job", "load"))
            .load(JobLoadArgs.builder()
                .sourceUris("gs://cloud-samples-data/bigquery/us-states/us-states-by-date.csv")
                .destinationTable(JobLoadDestinationTableArgs.builder()
                    .projectId(foo.project())
                    .datasetId(foo.datasetId())
                    .tableId(foo.tableId())
                    .build())
                .skipLeadingRows(1)
                // Allow the load to relax required fields and add new columns.
                .schemaUpdateOptions(
                    "ALLOW_FIELD_RELAXATION",
                    "ALLOW_FIELD_ADDITION")
                .writeDisposition("WRITE_APPEND")
                .autodetect(true)
                .build())
            .build());
    }
}
Bigquery Job Load Parquet
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.storage.Bucket;
import com.pulumi.gcp.storage.BucketArgs;
import com.pulumi.gcp.storage.BucketObject;
import com.pulumi.gcp.storage.BucketObjectArgs;
import com.pulumi.gcp.bigquery.Dataset;
import com.pulumi.gcp.bigquery.DatasetArgs;
import com.pulumi.gcp.bigquery.Table;
import com.pulumi.gcp.bigquery.TableArgs;
import com.pulumi.gcp.bigquery.Job;
import com.pulumi.gcp.bigquery.JobArgs;
import com.pulumi.gcp.bigquery.inputs.JobLoadArgs;
import com.pulumi.gcp.bigquery.inputs.JobLoadDestinationTableArgs;
import com.pulumi.gcp.bigquery.inputs.JobLoadParquetOptionsArgs;
import com.pulumi.asset.FileAsset;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        // Bucket and object holding the Parquet file to load.
        var testBucket = new Bucket("testBucket", BucketArgs.builder()
            .location("US")
            .uniformBucketLevelAccess(true)
            .build());

        var testBucketObject = new BucketObject("testBucketObject", BucketObjectArgs.builder()
            .source(new FileAsset("./test-fixtures/test.parquet.gzip"))
            .bucket(testBucket.name())
            .build());

        var testDataset = new Dataset("testDataset", DatasetArgs.builder()
            .datasetId("job_load_dataset")
            .friendlyName("test")
            .description("This is a test description")
            .location("US")
            .build());

        var testTable = new Table("testTable", TableArgs.builder()
            .deletionProtection(false)
            .tableId("job_load_table")
            .datasetId(testDataset.datasetId())
            .build());

        var job = new Job("job", JobArgs.builder()
            .jobId("job_load")
            .labels(Map.of("my_job", "load"))
            .load(JobLoadArgs.builder()
                // Build the gs:// URI from the uploaded object's bucket and name.
                .sourceUris(Output.tuple(testBucketObject.bucket(), testBucketObject.name()).applyValue(values -> {
                    var bucket = values.t1;
                    var name = values.t2;
                    return String.format("gs://%s/%s", bucket, name);
                }))
                .destinationTable(JobLoadDestinationTableArgs.builder()
                    .projectId(testTable.project())
                    .datasetId(testTable.datasetId())
                    .tableId(testTable.tableId())
                    .build())
                .schemaUpdateOptions(
                    "ALLOW_FIELD_RELAXATION",
                    "ALLOW_FIELD_ADDITION")
                .writeDisposition("WRITE_APPEND")
                .sourceFormat("PARQUET")
                .autodetect(true)
                // Parquet reader options: read enums as strings, infer LIST types.
                .parquetOptions(JobLoadParquetOptionsArgs.builder()
                    .enumAsString(true)
                    .enableListInference(true)
                    .build())
                .build())
            .build());
    }
}
Bigquery Job Extract
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.bigquery.Dataset;
import com.pulumi.gcp.bigquery.DatasetArgs;
import com.pulumi.gcp.bigquery.Table;
import com.pulumi.gcp.bigquery.TableArgs;
import com.pulumi.gcp.storage.Bucket;
import com.pulumi.gcp.storage.BucketArgs;
import com.pulumi.gcp.bigquery.Job;
import com.pulumi.gcp.bigquery.JobArgs;
import com.pulumi.gcp.bigquery.inputs.JobExtractArgs;
import com.pulumi.gcp.bigquery.inputs.JobExtractSourceTableArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        // Source dataset and table with an explicit schema.
        var source_oneDataset = new Dataset("source-oneDataset", DatasetArgs.builder()
            .datasetId("job_extract_dataset")
            .friendlyName("test")
            .description("This is a test description")
            .location("US")
            .build());

        var source_oneTable = new Table("source-oneTable", TableArgs.builder()
            .deletionProtection(false)
            .datasetId(source_oneDataset.datasetId())
            .tableId("job_extract_table")
            .schema("""
                [
                  {
                    "name": "name",
                    "type": "STRING",
                    "mode": "NULLABLE"
                  },
                  {
                    "name": "post_abbr",
                    "type": "STRING",
                    "mode": "NULLABLE"
                  },
                  {
                    "name": "date",
                    "type": "DATE",
                    "mode": "NULLABLE"
                  }
                ]
            """)
            .build());

        // Destination bucket; forceDestroy lets it be deleted along with its contents.
        var dest = new Bucket("dest", BucketArgs.builder()
            .location("US")
            .forceDestroy(true)
            .build());

        // Extract the table as gzip-compressed newline-delimited JSON under /extract.
        var job = new Job("job", JobArgs.builder()
            .jobId("job_extract")
            .extract(JobExtractArgs.builder()
                .destinationUris(dest.url().applyValue(url -> String.format("%s/extract", url)))
                .sourceTable(JobExtractSourceTableArgs.builder()
                    .projectId(source_oneTable.project())
                    .datasetId(source_oneTable.datasetId())
                    .tableId(source_oneTable.tableId())
                    .build())
                .destinationFormat("NEWLINE_DELIMITED_JSON")
                .compression("GZIP")
                .build())
            .build());
    }
}
Import
Job can be imported using any of these accepted formats:
$ pulumi import gcp:bigquery/job:Job default projects/{{project}}/jobs/{{job_id}}/location/{{location}}
$ pulumi import gcp:bigquery/job:Job default projects/{{project}}/jobs/{{job_id}}
$ pulumi import gcp:bigquery/job:Job default {{project}}/{{job_id}}/{{location}}
$ pulumi import gcp:bigquery/job:Job default {{job_id}}/{{location}}
$ pulumi import gcp:bigquery/job:Job default {{project}}/{{job_id}}
$ pulumi import gcp:bigquery/job:Job default {{job_id}}
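For example, using the {{project}}/{{job_id}}/{{location}} form with illustrative values (a job named job_query in project my-project running in the US location):
$ pulumi import gcp:bigquery/job:Job default my-project/job_query/US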
Properties
copy: Copies a table. Structure is documented below; a sketch follows this list.
extract: Configures an extract job. Structure is documented below.
jobTimeoutMs: Job timeout in milliseconds. If this time limit is exceeded, BigQuery may attempt to terminate the job.
load: Configures a load job. Structure is documented below.
query: SQL query text to execute. The useLegacySql field can be used to indicate whether the query uses legacy SQL or standard SQL. NOTE: queries containing DML statements (DELETE, UPDATE, MERGE, INSERT) must specify createDisposition = "" and writeDisposition = ""; a sketch follows this list.
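The copy block has no example above, so here is a minimal sketch of a copy job, assuming the JobCopyArgs, JobCopySourceTableArgs, and JobCopyDestinationTableArgs input classes mirror the pattern of the other job types shown above (all resource names and IDs are illustrative). It also sets jobTimeoutMs.

package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.gcp.bigquery.Dataset;
import com.pulumi.gcp.bigquery.DatasetArgs;
import com.pulumi.gcp.bigquery.Table;
import com.pulumi.gcp.bigquery.TableArgs;
import com.pulumi.gcp.bigquery.Job;
import com.pulumi.gcp.bigquery.JobArgs;
import com.pulumi.gcp.bigquery.inputs.JobCopyArgs;
import com.pulumi.gcp.bigquery.inputs.JobCopySourceTableArgs;
import com.pulumi.gcp.bigquery.inputs.JobCopyDestinationTableArgs;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        // Source and destination datasets/tables (IDs are illustrative).
        var sourceDataset = new Dataset("sourceDataset", DatasetArgs.builder()
            .datasetId("job_copy_source_dataset")
            .location("US")
            .build());

        var sourceTable = new Table("sourceTable", TableArgs.builder()
            .deletionProtection(false)
            .datasetId(sourceDataset.datasetId())
            .tableId("job_copy_source_table")
            .build());

        var destDataset = new Dataset("destDataset", DatasetArgs.builder()
            .datasetId("job_copy_dest_dataset")
            .location("US")
            .build());

        var destTable = new Table("destTable", TableArgs.builder()
            .deletionProtection(false)
            .datasetId(destDataset.datasetId())
            .tableId("job_copy_dest_table")
            .build());

        var job = new Job("job", JobArgs.builder()
            .jobId("job_copy")
            // jobTimeoutMs is a string value; terminate if the job runs past 10 minutes.
            .jobTimeoutMs("600000")
            .copy(JobCopyArgs.builder()
                .sourceTables(JobCopySourceTableArgs.builder()
                    .projectId(sourceTable.project())
                    .datasetId(sourceTable.datasetId())
                    .tableId(sourceTable.tableId())
                    .build())
                .destinationTable(JobCopyDestinationTableArgs.builder()
                    .projectId(destTable.project())
                    .datasetId(destTable.datasetId())
                    .tableId(destTable.tableId())
                    .build())
                .build())
            .build());
    }
}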
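Likewise, a minimal sketch of the DML requirement noted under query, with both dispositions set to empty strings (the project, dataset, and table in the statement are hypothetical):

package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.gcp.bigquery.Job;
import com.pulumi.gcp.bigquery.JobArgs;
import com.pulumi.gcp.bigquery.inputs.JobQueryArgs;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        // DML queries must set createDisposition and writeDisposition to "".
        var dmlJob = new Job("dmlJob", JobArgs.builder()
            .jobId("job_dml")
            .query(JobQueryArgs.builder()
                // Standard SQL DELETE against a hypothetical table.
                .query("DELETE FROM `my-project.my_dataset.my_table` WHERE state = 'XX'")
                .createDisposition("")
                .writeDisposition("")
                .build())
            .build());
    }
}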