Table Args
Creates a Google Cloud Bigtable table inside an instance. For more information see the official documentation and API.
Example Usage
TypeScript
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const instance = new gcp.bigtable.Instance("instance", {
name: "tf-instance",
clusters: [{
clusterId: "tf-instance-cluster",
zone: "us-central1-b",
numNodes: 3,
storageType: "HDD",
}],
});
const table = new gcp.bigtable.Table("table", {
name: "tf-table",
instanceName: instance.name,
splitKeys: [
"a",
"b",
"c",
],
columnFamilies: [
{
family: "family-first",
},
{
family: "family-second",
},
],
changeStreamRetention: "24h0m0s",
automatedBackupPolicy: {
retentionPeriod: "72h0m0s",
frequency: "24h0m0s",
},
});
Python
import pulumi
import pulumi_gcp as gcp
instance = gcp.bigtable.Instance("instance",
name="tf-instance",
clusters=[{
"cluster_id": "tf-instance-cluster",
"zone": "us-central1-b",
"num_nodes": 3,
"storage_type": "HDD",
}])
table = gcp.bigtable.Table("table",
name="tf-table",
instance_name=instance.name,
split_keys=[
"a",
"b",
"c",
],
column_families=[
{
"family": "family-first",
},
{
"family": "family-second",
},
],
change_stream_retention="24h0m0s",
automated_backup_policy={
"retention_period": "72h0m0s",
"frequency": "24h0m0s",
})
C#
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() =>
{
var instance = new Gcp.BigTable.Instance("instance", new()
{
Name = "tf-instance",
Clusters = new[]
{
new Gcp.BigTable.Inputs.InstanceClusterArgs
{
ClusterId = "tf-instance-cluster",
Zone = "us-central1-b",
NumNodes = 3,
StorageType = "HDD",
},
},
});
var table = new Gcp.BigTable.Table("table", new()
{
Name = "tf-table",
InstanceName = instance.Name,
SplitKeys = new[]
{
"a",
"b",
"c",
},
ColumnFamilies = new[]
{
new Gcp.BigTable.Inputs.TableColumnFamilyArgs
{
Family = "family-first",
},
new Gcp.BigTable.Inputs.TableColumnFamilyArgs
{
Family = "family-second",
},
},
ChangeStreamRetention = "24h0m0s",
AutomatedBackupPolicy = new Gcp.BigTable.Inputs.TableAutomatedBackupPolicyArgs
{
RetentionPeriod = "72h0m0s",
Frequency = "24h0m0s",
},
});
});
Go
package main
import (
"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/bigtable"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
instance, err := bigtable.NewInstance(ctx, "instance", &bigtable.InstanceArgs{
Name: pulumi.String("tf-instance"),
Clusters: bigtable.InstanceClusterArray{
&bigtable.InstanceClusterArgs{
ClusterId: pulumi.String("tf-instance-cluster"),
Zone: pulumi.String("us-central1-b"),
NumNodes: pulumi.Int(3),
StorageType: pulumi.String("HDD"),
},
},
})
if err != nil {
return err
}
_, err = bigtable.NewTable(ctx, "table", &bigtable.TableArgs{
Name: pulumi.String("tf-table"),
InstanceName: instance.Name,
SplitKeys: pulumi.StringArray{
pulumi.String("a"),
pulumi.String("b"),
pulumi.String("c"),
},
ColumnFamilies: bigtable.TableColumnFamilyArray{
&bigtable.TableColumnFamilyArgs{
Family: pulumi.String("family-first"),
},
&bigtable.TableColumnFamilyArgs{
Family: pulumi.String("family-second"),
},
},
ChangeStreamRetention: pulumi.String("24h0m0s"),
AutomatedBackupPolicy: &bigtable.TableAutomatedBackupPolicyArgs{
RetentionPeriod: pulumi.String("72h0m0s"),
Frequency: pulumi.String("24h0m0s"),
},
})
if err != nil {
return err
}
return nil
})
}
Java
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.bigtable.Instance;
import com.pulumi.gcp.bigtable.InstanceArgs;
import com.pulumi.gcp.bigtable.inputs.InstanceClusterArgs;
import com.pulumi.gcp.bigtable.Table;
import com.pulumi.gcp.bigtable.TableArgs;
import com.pulumi.gcp.bigtable.inputs.TableColumnFamilyArgs;
import com.pulumi.gcp.bigtable.inputs.TableAutomatedBackupPolicyArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var instance = new Instance("instance", InstanceArgs.builder()
.name("tf-instance")
.clusters(InstanceClusterArgs.builder()
.clusterId("tf-instance-cluster")
.zone("us-central1-b")
.numNodes(3)
.storageType("HDD")
.build())
.build());
var table = new Table("table", TableArgs.builder()
.name("tf-table")
.instanceName(instance.name())
.splitKeys(
"a",
"b",
"c")
.columnFamilies(
TableColumnFamilyArgs.builder()
.family("family-first")
.build(),
TableColumnFamilyArgs.builder()
.family("family-second")
.build())
.changeStreamRetention("24h0m0s")
.automatedBackupPolicy(TableAutomatedBackupPolicyArgs.builder()
.retentionPeriod("72h0m0s")
.frequency("24h0m0s")
.build())
.build());
}
}
YAML
resources:
instance:
type: gcp:bigtable:Instance
properties:
name: tf-instance
clusters:
- clusterId: tf-instance-cluster
zone: us-central1-b
numNodes: 3
storageType: HDD
table:
type: gcp:bigtable:Table
properties:
name: tf-table
instanceName: ${instance.name}
splitKeys:
- a
- b
- c
columnFamilies:
- family: family-first
- family: family-second
changeStreamRetention: 24h0m0s
automatedBackupPolicy:
retentionPeriod: 72h0m0s
frequency: 24h0m0s
Import
Note: Fields affected by import. The following fields can't be read and will show diffs if set in config when imported: split_keys
Bigtable Tables can be imported using any of these accepted formats:
projects/{{project}}/instances/{{instance_name}}/tables/{{name}}
{{project}}/{{instance_name}}/{{name}}
{{instance_name}}/{{name}}
When using the pulumi import command, Bigtable Tables can be imported using one of the formats above. For example:
$ pulumi import gcp:bigtable/table:Table default projects/{{project}}/instances/{{instance_name}}/tables/{{name}}
$ pulumi import gcp:bigtable/table:Table default {{project}}/{{instance_name}}/{{name}}
$ pulumi import gcp:bigtable/table:Table default {{instance_name}}/{{name}}
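Because split_keys can't be read back (see the note above), a table imported with splitKeys set in its program will keep showing a diff on that field. One way to suppress it is Pulumi's ignoreChanges resource option; the following is a minimal TypeScript sketch, assuming the table was imported into a program matching the example above.
import * as gcp from "@pulumi/gcp";

// Sketch only: after `pulumi import`, tell Pulumi to ignore the unreadable
// splitKeys field so it no longer produces a spurious diff on updates.
const imported = new gcp.bigtable.Table("table", {
    name: "tf-table",
    instanceName: "tf-instance",
    splitKeys: ["a", "b", "c"],
}, { ignoreChanges: ["splitKeys"] });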
Constructors
Properties
automatedBackupPolicy
Defines an automated backup policy for a table, specified by Retention Period and Frequency. To disable, set both Retention Period and Frequency to 0.
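A minimal TypeScript sketch of the disable case described above, assuming the literal duration string "0" is what "set to 0" means for both fields (resource names here are hypothetical):
import * as gcp from "@pulumi/gcp";

// Sketch only: zero out both durations to turn automated backups off,
// per the description above. The "0" literal is an assumption.
const tableWithoutBackups = new gcp.bigtable.Table("table-without-backups", {
    name: "tf-table-no-backups",
    instanceName: "tf-instance",
    automatedBackupPolicy: {
        retentionPeriod: "0",
        frequency: "0",
    },
});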
changeStreamRetention
Duration to retain change stream data for the table. Set to 0 to disable. Must be between 1 and 7 days.
columnFamilies
A group of columns within a table which share a common configuration. This can be specified multiple times. Structure is documented below.
deletionProtection
A field to make the table protected against data loss. When set to PROTECTED, deleting the table, the column families in the table, and the instance containing the table is prohibited. If not provided, deletion protection will be set to UNPROTECTED.
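The example at the top of this page doesn't set this field; the following is a minimal TypeScript sketch of opting a table into deletion protection (resource names are hypothetical):
import * as gcp from "@pulumi/gcp";

// Sketch only: PROTECTED blocks deletion of the table, its column families,
// and the containing instance until the field is switched back to UNPROTECTED.
const protectedTable = new gcp.bigtable.Table("protected-table", {
    name: "tf-protected-table",
    instanceName: "tf-instance",
    deletionProtection: "PROTECTED",
    columnFamilies: [{ family: "family-first" }],
});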
instanceName
The name of the Bigtable instance.
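When the instance is managed elsewhere, this can also be a plain string rather than the instance.name output used in the example above; a minimal TypeScript sketch (the instance name is hypothetical):
import * as gcp from "@pulumi/gcp";

// Sketch only: attach the table to a pre-existing instance by name instead
// of referencing another resource in the same program.
const tableOnExistingInstance = new gcp.bigtable.Table("table-on-existing", {
    name: "tf-table",
    instanceName: "existing-bigtable-instance", // hypothetical existing instance
    columnFamilies: [{ family: "family-first" }],
});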