Table Args
data class TableArgs(val database: Output<String>? = null, val hiveOptions: Output<TableHiveOptionsArgs>? = null, val name: Output<String>? = null, val type: Output<String>? = null) : ConvertibleToJava<TableArgs>
Represents a table. To get more information about Table, see the API documentation.
Example Usage
Biglake Table
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const catalog = new gcp.biglake.Catalog("catalog", {
name: "my_catalog",
location: "US",
});
const bucket = new gcp.storage.Bucket("bucket", {
name: "my_bucket",
location: "US",
forceDestroy: true,
uniformBucketLevelAccess: true,
});
const metadataFolder = new gcp.storage.BucketObject("metadata_folder", {
name: "metadata/",
content: " ",
bucket: bucket.name,
});
const dataFolder = new gcp.storage.BucketObject("data_folder", {
name: "data/",
content: " ",
bucket: bucket.name,
});
const database = new gcp.biglake.Database("database", {
name: "my_database",
catalog: catalog.id,
type: "HIVE",
hiveOptions: {
locationUri: pulumi.interpolate`gs://${bucket.name}/${metadataFolder.name}`,
parameters: {
owner: "Alex",
},
},
});
const table = new gcp.biglake.Table("table", {
name: "my_table",
database: database.id,
type: "HIVE",
hiveOptions: {
tableType: "MANAGED_TABLE",
storageDescriptor: {
locationUri: pulumi.interpolate`gs://${bucket.name}/${dataFolder.name}`,
inputFormat: "org.apache.hadoop.mapred.SequenceFileInputFormat",
outputFormat: "org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat",
},
parameters: {
"spark.sql.create.version": "3.1.3",
"spark.sql.sources.schema.numParts": "1",
transient_lastDdlTime: "1680894197",
"spark.sql.partitionProvider": "catalog",
owner: "John Doe",
"spark.sql.sources.schema.part.0": "{\"type\":\"struct\",\"fields\":[{\"name\":\"id\",\"type\":\"integer\",\"nullable\":true,\"metadata\":{}},{\"name\":\"name\",\"type\":\"string\",\"nullable\":true,\"metadata\":{}},{\"name\":\"age\",\"type\":\"integer\",\"nullable\":true,\"metadata\":{}}]}",
"spark.sql.sources.provider": "iceberg",
provider: "iceberg",
},
},
});
import pulumi
import pulumi_gcp as gcp
catalog = gcp.biglake.Catalog("catalog",
name="my_catalog",
location="US")
bucket = gcp.storage.Bucket("bucket",
name="my_bucket",
location="US",
force_destroy=True,
uniform_bucket_level_access=True)
metadata_folder = gcp.storage.BucketObject("metadata_folder",
name="metadata/",
content=" ",
bucket=bucket.name)
data_folder = gcp.storage.BucketObject("data_folder",
name="data/",
content=" ",
bucket=bucket.name)
database = gcp.biglake.Database("database",
name="my_database",
catalog=catalog.id,
type="HIVE",
hive_options={
"location_uri": pulumi.Output.all(
bucketName=bucket.name,
metadataFolderName=metadata_folder.name
).apply(lambda resolved_outputs: f"gs://{resolved_outputs['bucketName']}/{resolved_outputs['metadataFolderName']}"),
"parameters": {
"owner": "Alex",
},
})
table = gcp.biglake.Table("table",
name="my_table",
database=database.id,
type="HIVE",
hive_options={
"table_type": "MANAGED_TABLE",
"storage_descriptor": {
"location_uri": pulumi.Output.all(
bucketName=bucket.name,
dataFolderName=data_folder.name
).apply(lambda resolved_outputs: f"gs://{resolved_outputs['bucketName']}/{resolved_outputs['dataFolderName']}"),
"input_format": "org.apache.hadoop.mapred.SequenceFileInputFormat",
"output_format": "org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat",
},
"parameters": {
"spark_sql_create_version": "3.1.3",
"spark_sql_sources_schema_num_parts": "1",
"transient_last_ddl_time": "1680894197",
"spark_sql_partition_provider": "catalog",
"owner": "John Doe",
"spark_sql_sources_schema_part_0": "{\"type\":\"struct\",\"fields\":[{\"name\":\"id\",\"type\":\"integer\",\"nullable\":true,\"metadata\":{}},{\"name\":\"name\",\"type\":\"string\",\"nullable\":true,\"metadata\":{}},{\"name\":\"age\",\"type\":\"integer\",\"nullable\":true,\"metadata\":{}}]}",
"spark_sql_sources_provider": "iceberg",
"provider": "iceberg",
},
})
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() =>
{
var catalog = new Gcp.BigLake.Catalog("catalog", new()
{
Name = "my_catalog",
Location = "US",
});
var bucket = new Gcp.Storage.Bucket("bucket", new()
{
Name = "my_bucket",
Location = "US",
ForceDestroy = true,
UniformBucketLevelAccess = true,
});
var metadataFolder = new Gcp.Storage.BucketObject("metadata_folder", new()
{
Name = "metadata/",
Content = " ",
Bucket = bucket.Name,
});
var dataFolder = new Gcp.Storage.BucketObject("data_folder", new()
{
Name = "data/",
Content = " ",
Bucket = bucket.Name,
});
var database = new Gcp.BigLake.Database("database", new()
{
Name = "my_database",
Catalog = catalog.Id,
Type = "HIVE",
HiveOptions = new Gcp.BigLake.Inputs.DatabaseHiveOptionsArgs
{
LocationUri = Output.Tuple(bucket.Name, metadataFolder.Name).Apply(values =>
{
var bucketName = values.Item1;
var metadataFolderName = values.Item2;
return $"gs://{bucketName}/{metadataFolderName}";
}),
Parameters =
{
{ "owner", "Alex" },
},
},
});
var table = new Gcp.BigLake.Table("table", new()
{
Name = "my_table",
Database = database.Id,
Type = "HIVE",
HiveOptions = new Gcp.BigLake.Inputs.TableHiveOptionsArgs
{
TableType = "MANAGED_TABLE",
StorageDescriptor = new Gcp.BigLake.Inputs.TableHiveOptionsStorageDescriptorArgs
{
LocationUri = Output.Tuple(bucket.Name, dataFolder.Name).Apply(values =>
{
var bucketName = values.Item1;
var dataFolderName = values.Item2;
return $"gs://{bucketName}/{dataFolderName}";
}),
InputFormat = "org.apache.hadoop.mapred.SequenceFileInputFormat",
OutputFormat = "org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat",
},
Parameters =
{
{ "spark.sql.create.version", "3.1.3" },
{ "spark.sql.sources.schema.numParts", "1" },
{ "transient_lastDdlTime", "1680894197" },
{ "spark.sql.partitionProvider", "catalog" },
{ "owner", "John Doe" },
{ "spark.sql.sources.schema.part.0", "{\"type\":\"struct\",\"fields\":[{\"name\":\"id\",\"type\":\"integer\",\"nullable\":true,\"metadata\":{}},{\"name\":\"name\",\"type\":\"string\",\"nullable\":true,\"metadata\":{}},{\"name\":\"age\",\"type\":\"integer\",\"nullable\":true,\"metadata\":{}}]}" },
{ "spark.sql.sources.provider", "iceberg" },
{ "provider", "iceberg" },
},
},
});
});
package main
import (
"fmt"
"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/biglake"
"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/storage"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
catalog, err := biglake.NewCatalog(ctx, "catalog", &biglake.CatalogArgs{
Name: pulumi.String("my_catalog"),
Location: pulumi.String("US"),
})
if err != nil {
return err
}
bucket, err := storage.NewBucket(ctx, "bucket", &storage.BucketArgs{
Name: pulumi.String("my_bucket"),
Location: pulumi.String("US"),
ForceDestroy: pulumi.Bool(true),
UniformBucketLevelAccess: pulumi.Bool(true),
})
if err != nil {
return err
}
metadataFolder, err := storage.NewBucketObject(ctx, "metadata_folder", &storage.BucketObjectArgs{
Name: pulumi.String("metadata/"),
Content: pulumi.String(" "),
Bucket: bucket.Name,
})
if err != nil {
return err
}
dataFolder, err := storage.NewBucketObject(ctx, "data_folder", &storage.BucketObjectArgs{
Name: pulumi.String("data/"),
Content: pulumi.String(" "),
Bucket: bucket.Name,
})
if err != nil {
return err
}
database, err := biglake.NewDatabase(ctx, "database", &biglake.DatabaseArgs{
Name: pulumi.String("my_database"),
Catalog: catalog.ID(),
Type: pulumi.String("HIVE"),
HiveOptions: &biglake.DatabaseHiveOptionsArgs{
LocationUri: pulumi.All(bucket.Name, metadataFolder.Name).ApplyT(func(_args []interface{}) (string, error) {
bucketName := _args[0].(string)
metadataFolderName := _args[1].(string)
return fmt.Sprintf("gs://%v/%v", bucketName, metadataFolderName), nil
}).(pulumi.StringOutput),
Parameters: pulumi.StringMap{
"owner": pulumi.String("Alex"),
},
},
})
if err != nil {
return err
}
_, err = biglake.NewTable(ctx, "table", &biglake.TableArgs{
Name: pulumi.String("my_table"),
Database: database.ID(),
Type: pulumi.String("HIVE"),
HiveOptions: &biglake.TableHiveOptionsArgs{
TableType: pulumi.String("MANAGED_TABLE"),
StorageDescriptor: &biglake.TableHiveOptionsStorageDescriptorArgs{
LocationUri: pulumi.All(bucket.Name, dataFolder.Name).ApplyT(func(_args []interface{}) (string, error) {
bucketName := _args[0].(string)
dataFolderName := _args[1].(string)
return fmt.Sprintf("gs://%v/%v", bucketName, dataFolderName), nil
}).(pulumi.StringOutput),
InputFormat: pulumi.String("org.apache.hadoop.mapred.SequenceFileInputFormat"),
OutputFormat: pulumi.String("org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat"),
},
Parameters: pulumi.StringMap{
"spark.sql.create.version": pulumi.String("3.1.3"),
"spark.sql.sources.schema.numParts": pulumi.String("1"),
"transient_lastDdlTime": pulumi.String("1680894197"),
"spark.sql.partitionProvider": pulumi.String("catalog"),
"owner": pulumi.String("John Doe"),
"spark.sql.sources.schema.part.0": pulumi.String("{\"type\":\"struct\",\"fields\":[{\"name\":\"id\",\"type\":\"integer\",\"nullable\":true,\"metadata\":{}},{\"name\":\"name\",\"type\":\"string\",\"nullable\":true,\"metadata\":{}},{\"name\":\"age\",\"type\":\"integer\",\"nullable\":true,\"metadata\":{}}]}"),
"spark.sql.sources.provider": pulumi.String("iceberg"),
"provider": pulumi.String("iceberg"),
},
},
})
if err != nil {
return err
}
return nil
})
}
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.biglake.Catalog;
import com.pulumi.gcp.biglake.CatalogArgs;
import com.pulumi.gcp.storage.Bucket;
import com.pulumi.gcp.storage.BucketArgs;
import com.pulumi.gcp.storage.BucketObject;
import com.pulumi.gcp.storage.BucketObjectArgs;
import com.pulumi.gcp.biglake.Database;
import com.pulumi.gcp.biglake.DatabaseArgs;
import com.pulumi.gcp.biglake.inputs.DatabaseHiveOptionsArgs;
import com.pulumi.gcp.biglake.Table;
import com.pulumi.gcp.biglake.TableArgs;
import com.pulumi.gcp.biglake.inputs.TableHiveOptionsArgs;
import com.pulumi.gcp.biglake.inputs.TableHiveOptionsStorageDescriptorArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var catalog = new Catalog("catalog", CatalogArgs.builder()
.name("my_catalog")
.location("US")
.build());
var bucket = new Bucket("bucket", BucketArgs.builder()
.name("my_bucket")
.location("US")
.forceDestroy(true)
.uniformBucketLevelAccess(true)
.build());
var metadataFolder = new BucketObject("metadataFolder", BucketObjectArgs.builder()
.name("metadata/")
.content(" ")
.bucket(bucket.name())
.build());
var dataFolder = new BucketObject("dataFolder", BucketObjectArgs.builder()
.name("data/")
.content(" ")
.bucket(bucket.name())
.build());
var database = new Database("database", DatabaseArgs.builder()
.name("my_database")
.catalog(catalog.id())
.type("HIVE")
.hiveOptions(DatabaseHiveOptionsArgs.builder()
.locationUri(Output.tuple(bucket.name(), metadataFolder.name()).applyValue(values -> {
var bucketName = values.t1;
var metadataFolderName = values.t2;
return String.format("gs://%s/%s", bucketName,metadataFolderName);
}))
.parameters(Map.of("owner", "Alex"))
.build())
.build());
var table = new Table("table", TableArgs.builder()
.name("my_table")
.database(database.id())
.type("HIVE")
.hiveOptions(TableHiveOptionsArgs.builder()
.tableType("MANAGED_TABLE")
.storageDescriptor(TableHiveOptionsStorageDescriptorArgs.builder()
.locationUri(Output.tuple(bucket.name(), dataFolder.name()).applyValue(values -> {
var bucketName = values.t1;
var dataFolderName = values.t2;
return String.format("gs://%s/%s", bucketName,dataFolderName);
}))
.inputFormat("org.apache.hadoop.mapred.SequenceFileInputFormat")
.outputFormat("org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat")
.build())
.parameters(Map.ofEntries(
Map.entry("spark.sql.create.version", "3.1.3"),
Map.entry("spark.sql.sources.schema.numParts", "1"),
Map.entry("transient_lastDdlTime", "1680894197"),
Map.entry("spark.sql.partitionProvider", "catalog"),
Map.entry("owner", "John Doe"),
Map.entry("spark.sql.sources.schema.part.0", "{\"type\":\"struct\",\"fields\":[{\"name\":\"id\",\"type\":\"integer\",\"nullable\":true,\"metadata\":{}},{\"name\":\"name\",\"type\":\"string\",\"nullable\":true,\"metadata\":{}},{\"name\":\"age\",\"type\":\"integer\",\"nullable\":true,\"metadata\":{}}]}"),
Map.entry("spark.sql.sources.provider", "iceberg"),
Map.entry("provider", "iceberg")
))
.build())
.build());
}
}
resources:
catalog:
type: gcp:biglake:Catalog
properties:
name: my_catalog
location: US
bucket:
type: gcp:storage:Bucket
properties:
name: my_bucket
location: US
forceDestroy: true
uniformBucketLevelAccess: true
metadataFolder:
type: gcp:storage:BucketObject
name: metadata_folder
properties:
name: metadata/
content: ' '
bucket: ${bucket.name}
dataFolder:
type: gcp:storage:BucketObject
name: data_folder
properties:
name: data/
content: ' '
bucket: ${bucket.name}
database:
type: gcp:biglake:Database
properties:
name: my_database
catalog: ${catalog.id}
type: HIVE
hiveOptions:
locationUri: gs://${bucket.name}/${metadataFolder.name}
parameters:
owner: Alex
table:
type: gcp:biglake:Table
properties:
name: my_table
database: ${database.id}
type: HIVE
hiveOptions:
tableType: MANAGED_TABLE
storageDescriptor:
locationUri: gs://${bucket.name}/${dataFolder.name}
inputFormat: org.apache.hadoop.mapred.SequenceFileInputFormat
outputFormat: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
parameters:
spark.sql.create.version: 3.1.3
spark.sql.sources.schema.numParts: '1'
transient_lastDdlTime: '1680894197'
spark.sql.partitionProvider: catalog
owner: John Doe
spark.sql.sources.schema.part.0: '{"type":"struct","fields":[{"name":"id","type":"integer","nullable":true,"metadata":{}},{"name":"name","type":"string","nullable":true,"metadata":{}},{"name":"age","type":"integer","nullable":true,"metadata":{}}]}'
spark.sql.sources.provider: iceberg
provider: iceberg
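The examples above stop at YAML; since this page documents the Kotlin SDK, a hedged Kotlin sketch of the table declaration follows. It assumes the SDK's generated tableResource builder in com.pulumi.gcp.biglake.kotlin and its usual args { } nested-builder pattern; the database id and storage URI are placeholders rather than values wired from other resources.
import com.pulumi.kotlin.Pulumi
import com.pulumi.gcp.biglake.kotlin.tableResource
fun main() {
    Pulumi.run { ctx ->
        // Sketch only: in a real program the database id and GCS location would come
        // from Database and Bucket resources, as in the examples above.
        val table = tableResource("table") {
            args {
                name("my_table")
                database("projects/my-project/locations/US/catalogs/my_catalog/databases/my_database") // placeholder id
                type("HIVE")
                hiveOptions {
                    tableType("MANAGED_TABLE")
                    storageDescriptor {
                        locationUri("gs://my_bucket/data/") // placeholder bucket path
                        inputFormat("org.apache.hadoop.mapred.SequenceFileInputFormat")
                        outputFormat("org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat")
                    }
                    // map-valued properties accept vararg pairs in the generated builders
                    parameters("owner" to "John Doe", "provider" to "iceberg")
                }
            }
        }
        ctx.export("tableName", table.name)
    }
}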
Import
Table can be imported using the following format:
{{database}}/tables/{{name}}
When using the pulumi import command, Table can be imported using the format above. For example:
$ pulumi import gcp:biglake/table:Table default {{database}}/tables/{{name}}
Constructors
constructor(database: Output<String>? = null, hiveOptions: Output<TableHiveOptionsArgs>? = null, name: Output<String>? = null, type: Output<String>? = null)
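For a quick sanity check of the signature above, here is a minimal sketch constructing the args directly; the package path and values are assumptions, not taken from this page.
import com.pulumi.core.Output
import com.pulumi.gcp.biglake.kotlin.TableArgs // package path as typically generated by the Kotlin SDK
// Placeholder values; hiveOptions is omitted and would take an Output<TableHiveOptionsArgs>.
val tableArgs = TableArgs(
    database = Output.of("projects/my-project/locations/US/catalogs/my_catalog/databases/my_database"),
    name = Output.of("my_table"),
    type = Output.of("HIVE"),
)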