Connect Cluster Args
Example Usage
Managedkafka Connect Cluster Basic
TypeScript
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const mkcNetwork = new gcp.compute.Network("mkc_network", {
name: "my-network",
autoCreateSubnetworks: false,
});
const mkcSubnet = new gcp.compute.Subnetwork("mkc_subnet", {
name: "my-subnetwork",
ipCidrRange: "10.2.0.0/16",
region: "us-central1",
network: mkcNetwork.id,
});
const mkcAdditionalSubnet = new gcp.compute.Subnetwork("mkc_additional_subnet", {
name: "my-additional-subnetwork-0",
ipCidrRange: "10.3.0.0/16",
region: "us-central1",
network: mkcNetwork.id,
});
const project = gcp.organizations.getProject({});
const gmkCluster = new gcp.managedkafka.Cluster("gmk_cluster", {
clusterId: "my-cluster",
location: "us-central1",
capacityConfig: {
vcpuCount: "3",
memoryBytes: "3221225472",
},
gcpConfig: {
accessConfig: {
networkConfigs: [{
subnet: pulumi.all([project, mkcSubnet.id]).apply(([project, id]) => `projects/${project.projectId}/regions/us-central1/subnetworks/${id}`),
}],
},
},
});
const example = new gcp.managedkafka.ConnectCluster("example", {
connectClusterId: "my-connect-cluster",
kafkaCluster: pulumi.all([project, gmkCluster.clusterId]).apply(([project, clusterId]) => `projects/${project.projectId}/locations/us-central1/clusters/${clusterId}`),
location: "us-central1",
capacityConfig: {
vcpuCount: "12",
memoryBytes: "21474836480",
},
gcpConfig: {
accessConfig: {
networkConfigs: [{
primarySubnet: pulumi.all([project, mkcSubnet.id]).apply(([project, id]) => `projects/${project.projectId}/regions/us-central1/subnetworks/${id}`),
additionalSubnets: [mkcAdditionalSubnet.id],
dnsDomainNames: [pulumi.all([gmkCluster.clusterId, project]).apply(([clusterId, project]) => `${clusterId}.us-central1.managedkafka-staging.${project.projectId}.cloud-staging.goog`)],
}],
},
},
labels: {
key: "value",
},
});
Python
import pulumi
import pulumi_gcp as gcp
mkc_network = gcp.compute.Network("mkc_network",
name="my-network",
auto_create_subnetworks=False)
mkc_subnet = gcp.compute.Subnetwork("mkc_subnet",
name="my-subnetwork",
ip_cidr_range="10.2.0.0/16",
region="us-central1",
network=mkc_network.id)
mkc_additional_subnet = gcp.compute.Subnetwork("mkc_additional_subnet",
name="my-additional-subnetwork-0",
ip_cidr_range="10.3.0.0/16",
region="us-central1",
network=mkc_network.id)
project = gcp.organizations.get_project()
gmk_cluster = gcp.managedkafka.Cluster("gmk_cluster",
cluster_id="my-cluster",
location="us-central1",
capacity_config={
"vcpu_count": "3",
"memory_bytes": "3221225472",
},
gcp_config={
"access_config": {
"network_configs": [{
"subnet": mkc_subnet.id.apply(lambda id: f"projects/{project.project_id}/regions/us-central1/subnetworks/{id}"),
}],
},
})
example = gcp.managedkafka.ConnectCluster("example",
connect_cluster_id="my-connect-cluster",
kafka_cluster=gmk_cluster.cluster_id.apply(lambda cluster_id: f"projects/{project.project_id}/locations/us-central1/clusters/{cluster_id}"),
location="us-central1",
capacity_config={
"vcpu_count": "12",
"memory_bytes": "21474836480",
},
gcp_config={
"access_config": {
"network_configs": [{
"primary_subnet": mkc_subnet.id.apply(lambda id: f"projects/{project.project_id}/regions/us-central1/subnetworks/{id}"),
"additional_subnets": [mkc_additional_subnet.id],
"dns_domain_names": [gmk_cluster.cluster_id.apply(lambda cluster_id: f"{cluster_id}.us-central1.managedkafka-staging.{project.project_id}.cloud-staging.goog")],
}],
},
},
labels={
"key": "value",
})
C#
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() =>
{
var mkcNetwork = new Gcp.Compute.Network("mkc_network", new()
{
Name = "my-network",
AutoCreateSubnetworks = false,
});
var mkcSubnet = new Gcp.Compute.Subnetwork("mkc_subnet", new()
{
Name = "my-subnetwork",
IpCidrRange = "10.2.0.0/16",
Region = "us-central1",
Network = mkcNetwork.Id,
});
var mkcAdditionalSubnet = new Gcp.Compute.Subnetwork("mkc_additional_subnet", new()
{
Name = "my-additional-subnetwork-0",
IpCidrRange = "10.3.0.0/16",
Region = "us-central1",
Network = mkcNetwork.Id,
});
var project = Gcp.Organizations.GetProject.Invoke();
var gmkCluster = new Gcp.ManagedKafka.Cluster("gmk_cluster", new()
{
ClusterId = "my-cluster",
Location = "us-central1",
CapacityConfig = new Gcp.ManagedKafka.Inputs.ClusterCapacityConfigArgs
{
VcpuCount = "3",
MemoryBytes = "3221225472",
},
GcpConfig = new Gcp.ManagedKafka.Inputs.ClusterGcpConfigArgs
{
AccessConfig = new Gcp.ManagedKafka.Inputs.ClusterGcpConfigAccessConfigArgs
{
NetworkConfigs = new[]
{
new Gcp.ManagedKafka.Inputs.ClusterGcpConfigAccessConfigNetworkConfigArgs
{
Subnet = Output.Tuple(project, mkcSubnet.Id).Apply(values =>
{
var project = values.Item1;
var id = values.Item2;
return $"projects/{project.Apply(getProjectResult => getProjectResult.ProjectId)}/regions/us-central1/subnetworks/{id}";
}),
},
},
},
},
});
var example = new Gcp.ManagedKafka.ConnectCluster("example", new()
{
ConnectClusterId = "my-connect-cluster",
KafkaCluster = Output.Tuple(project, gmkCluster.ClusterId).Apply(values =>
{
var project = values.Item1;
var clusterId = values.Item2;
return $"projects/{project.Apply(getProjectResult => getProjectResult.ProjectId)}/locations/us-central1/clusters/{clusterId}";
}),
Location = "us-central1",
CapacityConfig = new Gcp.ManagedKafka.Inputs.ConnectClusterCapacityConfigArgs
{
VcpuCount = "12",
MemoryBytes = "21474836480",
},
GcpConfig = new Gcp.ManagedKafka.Inputs.ConnectClusterGcpConfigArgs
{
AccessConfig = new Gcp.ManagedKafka.Inputs.ConnectClusterGcpConfigAccessConfigArgs
{
NetworkConfigs = new[]
{
new Gcp.ManagedKafka.Inputs.ConnectClusterGcpConfigAccessConfigNetworkConfigArgs
{
PrimarySubnet = Output.Tuple(project, mkcSubnet.Id).Apply(values =>
{
var project = values.Item1;
var id = values.Item2;
return $"projects/{project.Apply(getProjectResult => getProjectResult.ProjectId)}/regions/us-central1/subnetworks/{id}";
}),
AdditionalSubnets = new[]
{
mkcAdditionalSubnet.Id,
},
DnsDomainNames = new[]
{
Output.Tuple(gmkCluster.ClusterId, project).Apply(values =>
{
var clusterId = values.Item1;
var project = values.Item2;
return $"{clusterId}.us-central1.managedkafka-staging.{project.Apply(getProjectResult => getProjectResult.ProjectId)}.cloud-staging.goog";
}),
},
},
},
},
},
Labels =
{
{ "key", "value" },
},
});
});
Go
package main
import (
"fmt"
"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute"
"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/managedkafka"
"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/organizations"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
mkcNetwork, err := compute.NewNetwork(ctx, "mkc_network", &compute.NetworkArgs{
Name: pulumi.String("my-network"),
AutoCreateSubnetworks: pulumi.Bool(false),
})
if err != nil {
return err
}
mkcSubnet, err := compute.NewSubnetwork(ctx, "mkc_subnet", &compute.SubnetworkArgs{
Name: pulumi.String("my-subnetwork"),
IpCidrRange: pulumi.String("10.2.0.0/16"),
Region: pulumi.String("us-central1"),
Network: mkcNetwork.ID(),
})
if err != nil {
return err
}
mkcAdditionalSubnet, err := compute.NewSubnetwork(ctx, "mkc_additional_subnet", &compute.SubnetworkArgs{
Name: pulumi.String("my-additional-subnetwork-0"),
IpCidrRange: pulumi.String("10.3.0.0/16"),
Region: pulumi.String("us-central1"),
Network: mkcNetwork.ID(),
})
if err != nil {
return err
}
project, err := organizations.LookupProject(ctx, &organizations.LookupProjectArgs{}, nil)
if err != nil {
return err
}
gmkCluster, err := managedkafka.NewCluster(ctx, "gmk_cluster", &managedkafka.ClusterArgs{
ClusterId: pulumi.String("my-cluster"),
Location: pulumi.String("us-central1"),
CapacityConfig: &managedkafka.ClusterCapacityConfigArgs{
VcpuCount: pulumi.String("3"),
MemoryBytes: pulumi.String("3221225472"),
},
GcpConfig: &managedkafka.ClusterGcpConfigArgs{
AccessConfig: &managedkafka.ClusterGcpConfigAccessConfigArgs{
NetworkConfigs: managedkafka.ClusterGcpConfigAccessConfigNetworkConfigArray{
&managedkafka.ClusterGcpConfigAccessConfigNetworkConfigArgs{
Subnet: mkcSubnet.ID().ApplyT(func(id string) (string, error) {
return fmt.Sprintf("projects/%v/regions/us-central1/subnetworks/%v", project.ProjectId, id), nil
}).(pulumi.StringOutput),
},
},
},
},
})
if err != nil {
return err
}
_, err = managedkafka.NewConnectCluster(ctx, "example", &managedkafka.ConnectClusterArgs{
ConnectClusterId: pulumi.String("my-connect-cluster"),
KafkaCluster: gmkCluster.ClusterId.ApplyT(func(clusterId string) (string, error) {
return fmt.Sprintf("projects/%v/locations/us-central1/clusters/%v", project.ProjectId, clusterId), nil
}).(pulumi.StringOutput),
Location: pulumi.String("us-central1"),
CapacityConfig: &managedkafka.ConnectClusterCapacityConfigArgs{
VcpuCount: pulumi.String("12"),
MemoryBytes: pulumi.String("21474836480"),
},
GcpConfig: &managedkafka.ConnectClusterGcpConfigArgs{
AccessConfig: &managedkafka.ConnectClusterGcpConfigAccessConfigArgs{
NetworkConfigs: managedkafka.ConnectClusterGcpConfigAccessConfigNetworkConfigArray{
&managedkafka.ConnectClusterGcpConfigAccessConfigNetworkConfigArgs{
PrimarySubnet: mkcSubnet.ID().ApplyT(func(id string) (string, error) {
return fmt.Sprintf("projects/%v/regions/us-central1/subnetworks/%v", project.ProjectId, id), nil
}).(pulumi.StringOutput),
AdditionalSubnets: pulumi.StringArray{
mkcAdditionalSubnet.ID(),
},
DnsDomainNames: pulumi.StringArray{
gmkCluster.ClusterId.ApplyT(func(clusterId string) (string, error) {
return fmt.Sprintf("%v.us-central1.managedkafka-staging.%v.cloud-staging.goog", clusterId, project.ProjectId), nil
}).(pulumi.StringOutput),
},
},
},
},
},
Labels: pulumi.StringMap{
"key": pulumi.String("value"),
},
})
if err != nil {
return err
}
return nil
})
}
Java
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.compute.Network;
import com.pulumi.gcp.compute.NetworkArgs;
import com.pulumi.gcp.compute.Subnetwork;
import com.pulumi.gcp.compute.SubnetworkArgs;
import com.pulumi.gcp.organizations.OrganizationsFunctions;
import com.pulumi.gcp.organizations.inputs.GetProjectArgs;
import com.pulumi.gcp.managedkafka.Cluster;
import com.pulumi.gcp.managedkafka.ClusterArgs;
import com.pulumi.gcp.managedkafka.inputs.ClusterCapacityConfigArgs;
import com.pulumi.gcp.managedkafka.inputs.ClusterGcpConfigArgs;
import com.pulumi.gcp.managedkafka.inputs.ClusterGcpConfigAccessConfigArgs;
import com.pulumi.gcp.managedkafka.inputs.ClusterGcpConfigAccessConfigNetworkConfigArgs;
import com.pulumi.gcp.managedkafka.ConnectCluster;
import com.pulumi.gcp.managedkafka.ConnectClusterArgs;
import com.pulumi.gcp.managedkafka.inputs.ConnectClusterCapacityConfigArgs;
import com.pulumi.gcp.managedkafka.inputs.ConnectClusterGcpConfigArgs;
import com.pulumi.gcp.managedkafka.inputs.ConnectClusterGcpConfigAccessConfigArgs;
import com.pulumi.gcp.managedkafka.inputs.ConnectClusterGcpConfigAccessConfigNetworkConfigArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var mkcNetwork = new Network("mkcNetwork", NetworkArgs.builder()
.name("my-network")
.autoCreateSubnetworks(false)
.build());
var mkcSubnet = new Subnetwork("mkcSubnet", SubnetworkArgs.builder()
.name("my-subnetwork")
.ipCidrRange("10.2.0.0/16")
.region("us-central1")
.network(mkcNetwork.id())
.build());
var mkcAdditionalSubnet = new Subnetwork("mkcAdditionalSubnet", SubnetworkArgs.builder()
.name("my-additional-subnetwork-0")
.ipCidrRange("10.3.0.0/16")
.region("us-central1")
.network(mkcNetwork.id())
.build());
final var project = OrganizationsFunctions.getProject(GetProjectArgs.builder()
.build());
var gmkCluster = new Cluster("gmkCluster", ClusterArgs.builder()
.clusterId("my-cluster")
.location("us-central1")
.capacityConfig(ClusterCapacityConfigArgs.builder()
.vcpuCount("3")
.memoryBytes("3221225472")
.build())
.gcpConfig(ClusterGcpConfigArgs.builder()
.accessConfig(ClusterGcpConfigAccessConfigArgs.builder()
.networkConfigs(ClusterGcpConfigAccessConfigNetworkConfigArgs.builder()
.subnet(Output.tuple(project, mkcSubnet.id()).applyValue(values -> String.format("projects/%s/regions/us-central1/subnetworks/%s", values.t1.projectId(), values.t2)))
.build())
.build())
.build())
.build());
var example = new ConnectCluster("example", ConnectClusterArgs.builder()
.connectClusterId("my-connect-cluster")
.kafkaCluster(Output.tuple(project, gmkCluster.clusterId()).applyValue(values -> String.format("projects/%s/locations/us-central1/clusters/%s", values.t1.projectId(), values.t2)))
.location("us-central1")
.capacityConfig(ConnectClusterCapacityConfigArgs.builder()
.vcpuCount("12")
.memoryBytes("21474836480")
.build())
.gcpConfig(ConnectClusterGcpConfigArgs.builder()
.accessConfig(ConnectClusterGcpConfigAccessConfigArgs.builder()
.networkConfigs(ConnectClusterGcpConfigAccessConfigNetworkConfigArgs.builder()
.primarySubnet(Output.tuple(project, mkcSubnet.id()).applyValue(values -> String.format("projects/%s/regions/us-central1/subnetworks/%s", values.t1.projectId(), values.t2)))
.additionalSubnets(mkcAdditionalSubnet.id().applyValue(subnetId -> List.of(subnetId)))
.dnsDomainNames(Output.tuple(gmkCluster.clusterId(), project).applyValue(values -> List.of(String.format("%s.us-central1.managedkafka-staging.%s.cloud-staging.goog", values.t1, values.t2.projectId()))))
.build())
.build())
.build())
.labels(Map.of("key", "value"))
.build());
}
}
YAML
resources:
mkcNetwork:
type: gcp:compute:Network
name: mkc_network
properties:
name: my-network
autoCreateSubnetworks: false
mkcSubnet:
type: gcp:compute:Subnetwork
name: mkc_subnet
properties:
name: my-subnetwork
ipCidrRange: 10.2.0.0/16
region: us-central1
network: ${mkcNetwork.id}
mkcAdditionalSubnet:
type: gcp:compute:Subnetwork
name: mkc_additional_subnet
properties:
name: my-additional-subnetwork-0
ipCidrRange: 10.3.0.0/16
region: us-central1
network: ${mkcNetwork.id}
gmkCluster:
type: gcp:managedkafka:Cluster
name: gmk_cluster
properties:
clusterId: my-cluster
location: us-central1
capacityConfig:
vcpuCount: 3
memoryBytes: 3221225472
gcpConfig:
accessConfig:
networkConfigs:
- subnet: projects/${project.projectId}/regions/us-central1/subnetworks/${mkcSubnet.id}
example:
type: gcp:managedkafka:ConnectCluster
properties:
connectClusterId: my-connect-cluster
kafkaCluster: projects/${project.projectId}/locations/us-central1/clusters/${gmkCluster.clusterId}
location: us-central1
capacityConfig:
vcpuCount: 12
memoryBytes: 21474836480
gcpConfig:
accessConfig:
networkConfigs:
- primarySubnet: projects/${project.projectId}/regions/us-central1/subnetworks/${mkcSubnet.id}
additionalSubnets:
- ${mkcAdditionalSubnet.id}
dnsDomainNames:
- ${gmkCluster.clusterId}.us-central1.managedkafka-staging.${project.projectId}.cloud-staging.goog
labels:
key: value
variables:
project:
fn::invoke:
function: gcp:organizations:getProject
arguments: {}
Import
ConnectCluster can be imported using any of these accepted formats:
projects/{{project}}/locations/{{location}}/connectClusters/{{connect_cluster_id}}
{{project}}/{{location}}/{{connect_cluster_id}}
{{location}}/{{connect_cluster_id}}
When using the pulumi import command, ConnectCluster can be imported using one of the formats above. For example:
$ pulumi import gcp:managedkafka/connectCluster:ConnectCluster default projects/{{project}}/locations/{{location}}/connectClusters/{{connect_cluster_id}}
$ pulumi import gcp:managedkafka/connectCluster:ConnectCluster default {{project}}/{{location}}/{{connect_cluster_id}}
$ pulumi import gcp:managedkafka/connectCluster:ConnectCluster default {{location}}/{{connect_cluster_id}}
Properties
capacityConfig: A capacity configuration of the Kafka Connect cluster. Structure is documented below.
connectClusterId: The ID to use for the Connect Cluster, which will become the final component of the connect cluster's name. This value is structured like: my-connect-cluster-id.
gcpConfig: Configuration properties for a Kafka Connect cluster deployed to Google Cloud Platform. Structure is documented below.
kafkaCluster: The name of the Kafka cluster this Kafka Connect cluster is attached to. Structured like: projects/PROJECT_ID/locations/LOCATION/clusters/CLUSTER_ID.
labels: List of label KEY=VALUE pairs to add. Keys must start with a lowercase character and contain only hyphens (-), underscores (_), lowercase characters, and numbers. Values must contain only hyphens (-), underscores (_), lowercase characters, and numbers. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
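For orientation, here is the same resource reduced to just the arguments described above, as a minimal TypeScript sketch. The project (my-project), Kafka cluster (my-cluster), and subnetwork (my-subnetwork) names are illustrative placeholders for pre-existing resources, not values taken from this page.
import * as gcp from "@pulumi/gcp";
// Minimal ConnectCluster wiring; all names below are illustrative placeholders.
const sketch = new gcp.managedkafka.ConnectCluster("sketch", {
    connectClusterId: "my-connect-cluster-id",
    location: "us-central1",
    // kafkaCluster is structured like projects/PROJECT_ID/locations/LOCATION/clusters/CLUSTER_ID.
    kafkaCluster: "projects/my-project/locations/us-central1/clusters/my-cluster",
    capacityConfig: {
        vcpuCount: "12",
        memoryBytes: "21474836480",
    },
    gcpConfig: {
        accessConfig: {
            networkConfigs: [{
                primarySubnet: "projects/my-project/regions/us-central1/subnetworks/my-subnetwork",
            }],
        },
    },
    // labels is non-authoritative; effectiveLabels reports everything on the resource.
    labels: {
        environment: "dev",
    },
});
The full example above additionally routes additionalSubnets and dnsDomainNames through the same networkConfigs block.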