Cluster Args
Cluster contains information about a Google Distributed Cloud Edge Kubernetes cluster. To get more information about Cluster, see the Google Distributed Cloud Edge how-to guides.
Example Usage
Edgecontainer Cluster
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const project = gcp.organizations.getProject({});
const _default = new gcp.edgecontainer.Cluster("default", {
name: "basic-cluster",
location: "us-central1",
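    // Grant cluster-admin access to this user through GDCE-managed RBAC.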
authorization: {
adminUsers: {
username: "admin@hashicorptest.com",
},
},
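    // Pod and Service IPv4 CIDR ranges for the cluster.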
networking: {
clusterIpv4CidrBlocks: ["10.0.0.0/16"],
servicesIpv4CidrBlocks: ["10.1.0.0/16"],
},
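    // Register the cluster to a fleet in this project.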
fleet: {
project: project.then(project => `projects/${project.number}`),
},
labels: {
my_key: "my_val",
other_key: "other_val",
},
});
import pulumi
import pulumi_gcp as gcp
project = gcp.organizations.get_project()
default = gcp.edgecontainer.Cluster("default",
name="basic-cluster",
location="us-central1",
authorization={
"admin_users": {
"username": "admin@hashicorptest.com",
},
},
networking={
"cluster_ipv4_cidr_blocks": ["10.0.0.0/16"],
"services_ipv4_cidr_blocks": ["10.1.0.0/16"],
},
fleet={
"project": f"projects/{project.number}",
},
labels={
"my_key": "my_val",
"other_key": "other_val",
})
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() =>
{
var project = Gcp.Organizations.GetProject.Invoke();
var @default = new Gcp.EdgeContainer.Cluster("default", new()
{
Name = "basic-cluster",
Location = "us-central1",
Authorization = new Gcp.EdgeContainer.Inputs.ClusterAuthorizationArgs
{
AdminUsers = new Gcp.EdgeContainer.Inputs.ClusterAuthorizationAdminUsersArgs
{
Username = "admin@hashicorptest.com",
},
},
Networking = new Gcp.EdgeContainer.Inputs.ClusterNetworkingArgs
{
ClusterIpv4CidrBlocks = new[]
{
"10.0.0.0/16",
},
ServicesIpv4CidrBlocks = new[]
{
"10.1.0.0/16",
},
},
Fleet = new Gcp.EdgeContainer.Inputs.ClusterFleetArgs
{
Project = $"projects/{project.Apply(getProjectResult => getProjectResult.Number)}",
},
Labels =
{
{ "my_key", "my_val" },
{ "other_key", "other_val" },
},
});
});
package main
import (
"fmt"
"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/edgecontainer"
"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/organizations"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
project, err := organizations.LookupProject(ctx, nil, nil)
if err != nil {
return err
}
_, err = edgecontainer.NewCluster(ctx, "default", &edgecontainer.ClusterArgs{
Name: pulumi.String("basic-cluster"),
Location: pulumi.String("us-central1"),
Authorization: &edgecontainer.ClusterAuthorizationArgs{
AdminUsers: &edgecontainer.ClusterAuthorizationAdminUsersArgs{
Username: pulumi.String("admin@hashicorptest.com"),
},
},
Networking: &edgecontainer.ClusterNetworkingArgs{
ClusterIpv4CidrBlocks: pulumi.StringArray{
pulumi.String("10.0.0.0/16"),
},
ServicesIpv4CidrBlocks: pulumi.StringArray{
pulumi.String("10.1.0.0/16"),
},
},
Fleet: &edgecontainer.ClusterFleetArgs{
Project: pulumi.Sprintf("projects/%v", project.Number),
},
Labels: pulumi.StringMap{
"my_key": pulumi.String("my_val"),
"other_key": pulumi.String("other_val"),
},
})
if err != nil {
return err
}
return nil
})
}
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.organizations.OrganizationsFunctions;
import com.pulumi.gcp.organizations.inputs.GetProjectArgs;
import com.pulumi.gcp.edgecontainer.Cluster;
import com.pulumi.gcp.edgecontainer.ClusterArgs;
import com.pulumi.gcp.edgecontainer.inputs.ClusterAuthorizationArgs;
import com.pulumi.gcp.edgecontainer.inputs.ClusterAuthorizationAdminUsersArgs;
import com.pulumi.gcp.edgecontainer.inputs.ClusterNetworkingArgs;
import com.pulumi.gcp.edgecontainer.inputs.ClusterFleetArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
final var project = OrganizationsFunctions.getProject();
var default_ = new Cluster("default", ClusterArgs.builder()
.name("basic-cluster")
.location("us-central1")
.authorization(ClusterAuthorizationArgs.builder()
.adminUsers(ClusterAuthorizationAdminUsersArgs.builder()
.username("admin@hashicorptest.com")
.build())
.build())
.networking(ClusterNetworkingArgs.builder()
.clusterIpv4CidrBlocks("10.0.0.0/16")
.servicesIpv4CidrBlocks("10.1.0.0/16")
.build())
.fleet(ClusterFleetArgs.builder()
.project(project.applyValue(getProjectResult -> String.format("projects/%s", getProjectResult.number())))
.build())
.labels(Map.ofEntries(
Map.entry("my_key", "my_val"),
Map.entry("other_key", "other_val")
))
.build());
}
}
resources:
default:
type: gcp:edgecontainer:Cluster
properties:
name: basic-cluster
location: us-central1
authorization:
adminUsers:
username: admin@hashicorptest.com
networking:
clusterIpv4CidrBlocks:
- 10.0.0.0/16
servicesIpv4CidrBlocks:
- 10.1.0.0/16
fleet:
project: projects/${project.number}
labels:
my_key: my_val
other_key: other_val
variables:
project:
fn::invoke:
Function: gcp:organizations:getProject
Arguments: {}
Edgecontainer Cluster With Maintenance Window
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const project = gcp.organizations.getProject({});
const _default = new gcp.edgecontainer.Cluster("default", {
name: "cluster-with-maintenance",
location: "us-central1",
authorization: {
adminUsers: {
username: "admin@hashicorptest.com",
},
},
networking: {
clusterIpv4CidrBlocks: ["10.0.0.0/16"],
servicesIpv4CidrBlocks: ["10.1.0.0/16"],
},
fleet: {
project: project.then(project => `projects/${project.number}`),
},
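    // Recurring weekly maintenance window: Saturdays, 08:00-17:00 UTC.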
maintenancePolicy: {
window: {
recurringWindow: {
window: {
startTime: "2023-01-01T08:00:00Z",
endTime: "2023-01-01T17:00:00Z",
},
recurrence: "FREQ=WEEKLY;BYDAY=SA",
},
},
},
});
import pulumi
import pulumi_gcp as gcp
project = gcp.organizations.get_project()
default = gcp.edgecontainer.Cluster("default",
name="cluster-with-maintenance",
location="us-central1",
authorization={
"admin_users": {
"username": "admin@hashicorptest.com",
},
},
networking={
"cluster_ipv4_cidr_blocks": ["10.0.0.0/16"],
"services_ipv4_cidr_blocks": ["10.1.0.0/16"],
},
fleet={
"project": f"projects/{project.number}",
},
maintenance_policy={
"window": {
"recurring_window": {
"window": {
"start_time": "2023-01-01T08:00:00Z",
"end_time": "2023-01-01T17:00:00Z",
},
"recurrence": "FREQ=WEEKLY;BYDAY=SA",
},
},
})
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() =>
{
var project = Gcp.Organizations.GetProject.Invoke();
var @default = new Gcp.EdgeContainer.Cluster("default", new()
{
Name = "cluster-with-maintenance",
Location = "us-central1",
Authorization = new Gcp.EdgeContainer.Inputs.ClusterAuthorizationArgs
{
AdminUsers = new Gcp.EdgeContainer.Inputs.ClusterAuthorizationAdminUsersArgs
{
Username = "admin@hashicorptest.com",
},
},
Networking = new Gcp.EdgeContainer.Inputs.ClusterNetworkingArgs
{
ClusterIpv4CidrBlocks = new[]
{
"10.0.0.0/16",
},
ServicesIpv4CidrBlocks = new[]
{
"10.1.0.0/16",
},
},
Fleet = new Gcp.EdgeContainer.Inputs.ClusterFleetArgs
{
Project = $"projects/{project.Apply(getProjectResult => getProjectResult.Number)}",
},
MaintenancePolicy = new Gcp.EdgeContainer.Inputs.ClusterMaintenancePolicyArgs
{
Window = new Gcp.EdgeContainer.Inputs.ClusterMaintenancePolicyWindowArgs
{
RecurringWindow = new Gcp.EdgeContainer.Inputs.ClusterMaintenancePolicyWindowRecurringWindowArgs
{
Window = new Gcp.EdgeContainer.Inputs.ClusterMaintenancePolicyWindowRecurringWindowWindowArgs
{
StartTime = "2023-01-01T08:00:00Z",
EndTime = "2023-01-01T17:00:00Z",
},
Recurrence = "FREQ=WEEKLY;BYDAY=SA",
},
},
},
});
});
package main
import (
"fmt"
"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/edgecontainer"
"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/organizations"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
project, err := organizations.LookupProject(ctx, nil, nil)
if err != nil {
return err
}
_, err = edgecontainer.NewCluster(ctx, "default", &edgecontainer.ClusterArgs{
Name: pulumi.String("cluster-with-maintenance"),
Location: pulumi.String("us-central1"),
Authorization: &edgecontainer.ClusterAuthorizationArgs{
AdminUsers: &edgecontainer.ClusterAuthorizationAdminUsersArgs{
Username: pulumi.String("admin@hashicorptest.com"),
},
},
Networking: &edgecontainer.ClusterNetworkingArgs{
ClusterIpv4CidrBlocks: pulumi.StringArray{
pulumi.String("10.0.0.0/16"),
},
ServicesIpv4CidrBlocks: pulumi.StringArray{
pulumi.String("10.1.0.0/16"),
},
},
Fleet: &edgecontainer.ClusterFleetArgs{
Project: pulumi.Sprintf("projects/%v", project.Number),
},
MaintenancePolicy: &edgecontainer.ClusterMaintenancePolicyArgs{
Window: &edgecontainer.ClusterMaintenancePolicyWindowArgs{
RecurringWindow: &edgecontainer.ClusterMaintenancePolicyWindowRecurringWindowArgs{
Window: &edgecontainer.ClusterMaintenancePolicyWindowRecurringWindowWindowArgs{
StartTime: pulumi.String("2023-01-01T08:00:00Z"),
EndTime: pulumi.String("2023-01-01T17:00:00Z"),
},
Recurrence: pulumi.String("FREQ=WEEKLY;BYDAY=SA"),
},
},
},
})
if err != nil {
return err
}
return nil
})
}
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.organizations.OrganizationsFunctions;
import com.pulumi.gcp.organizations.inputs.GetProjectArgs;
import com.pulumi.gcp.edgecontainer.Cluster;
import com.pulumi.gcp.edgecontainer.ClusterArgs;
import com.pulumi.gcp.edgecontainer.inputs.ClusterAuthorizationArgs;
import com.pulumi.gcp.edgecontainer.inputs.ClusterAuthorizationAdminUsersArgs;
import com.pulumi.gcp.edgecontainer.inputs.ClusterNetworkingArgs;
import com.pulumi.gcp.edgecontainer.inputs.ClusterFleetArgs;
import com.pulumi.gcp.edgecontainer.inputs.ClusterMaintenancePolicyArgs;
import com.pulumi.gcp.edgecontainer.inputs.ClusterMaintenancePolicyWindowArgs;
import com.pulumi.gcp.edgecontainer.inputs.ClusterMaintenancePolicyWindowRecurringWindowArgs;
import com.pulumi.gcp.edgecontainer.inputs.ClusterMaintenancePolicyWindowRecurringWindowWindowArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
final var project = OrganizationsFunctions.getProject();
var default_ = new Cluster("default", ClusterArgs.builder()
.name("cluster-with-maintenance")
.location("us-central1")
.authorization(ClusterAuthorizationArgs.builder()
.adminUsers(ClusterAuthorizationAdminUsersArgs.builder()
.username("admin@hashicorptest.com")
.build())
.build())
.networking(ClusterNetworkingArgs.builder()
.clusterIpv4CidrBlocks("10.0.0.0/16")
.servicesIpv4CidrBlocks("10.1.0.0/16")
.build())
.fleet(ClusterFleetArgs.builder()
.project(project.applyValue(getProjectResult -> String.format("projects/%s", getProjectResult.number())))
.build())
.maintenancePolicy(ClusterMaintenancePolicyArgs.builder()
.window(ClusterMaintenancePolicyWindowArgs.builder()
.recurringWindow(ClusterMaintenancePolicyWindowRecurringWindowArgs.builder()
.window(ClusterMaintenancePolicyWindowRecurringWindowWindowArgs.builder()
.startTime("2023-01-01T08:00:00Z")
.endTime("2023-01-01T17:00:00Z")
.build())
.recurrence("FREQ=WEEKLY;BYDAY=SA")
.build())
.build())
.build())
.build());
}
}
resources:
default:
type: gcp:edgecontainer:Cluster
properties:
name: cluster-with-maintenance
location: us-central1
authorization:
adminUsers:
username: admin@hashicorptest.com
networking:
clusterIpv4CidrBlocks:
- 10.0.0.0/16
servicesIpv4CidrBlocks:
- 10.1.0.0/16
fleet:
project: projects/${project.number}
maintenancePolicy:
window:
recurringWindow:
window:
startTime: 2023-01-01T08:00:00Z
endTime: 2023-01-01T17:00:00Z
recurrence: FREQ=WEEKLY;BYDAY=SA
variables:
project:
fn::invoke:
Function: gcp:organizations:getProject
Arguments: {}
Edgecontainer Local Control Plane Cluster
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const project = gcp.organizations.getProject({});
const _default = new gcp.edgecontainer.Cluster("default", {
name: "local-control-plane-cluster",
location: "us-central1",
authorization: {
adminUsers: {
username: "admin@hashicorptest.com",
},
},
networking: {
clusterIpv4CidrBlocks: ["10.0.0.0/16"],
servicesIpv4CidrBlocks: ["10.1.0.0/16"],
},
fleet: {
project: project.then(project => `projects/${project.number}`),
},
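    // IPv4 address pool used for data-plane external load balancing.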
externalLoadBalancerIpv4AddressPools: ["10.100.0.0-10.100.0.10"],
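    // Run the control plane locally on a node at the named edge site.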
controlPlane: {
local: {
nodeLocation: "us-central1-edge-example-edgesite",
nodeCount: 1,
machineFilter: "machine-name",
sharedDeploymentPolicy: "ALLOWED",
},
},
});
import pulumi
import pulumi_gcp as gcp
project = gcp.organizations.get_project()
default = gcp.edgecontainer.Cluster("default",
name="local-control-plane-cluster",
location="us-central1",
authorization={
"admin_users": {
"username": "admin@hashicorptest.com",
},
},
networking={
"cluster_ipv4_cidr_blocks": ["10.0.0.0/16"],
"services_ipv4_cidr_blocks": ["10.1.0.0/16"],
},
fleet={
"project": f"projects/{project.number}",
},
external_load_balancer_ipv4_address_pools=["10.100.0.0-10.100.0.10"],
control_plane={
"local": {
"node_location": "us-central1-edge-example-edgesite",
"node_count": 1,
"machine_filter": "machine-name",
"shared_deployment_policy": "ALLOWED",
},
})
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() =>
{
var project = Gcp.Organizations.GetProject.Invoke();
var @default = new Gcp.EdgeContainer.Cluster("default", new()
{
Name = "local-control-plane-cluster",
Location = "us-central1",
Authorization = new Gcp.EdgeContainer.Inputs.ClusterAuthorizationArgs
{
AdminUsers = new Gcp.EdgeContainer.Inputs.ClusterAuthorizationAdminUsersArgs
{
Username = "admin@hashicorptest.com",
},
},
Networking = new Gcp.EdgeContainer.Inputs.ClusterNetworkingArgs
{
ClusterIpv4CidrBlocks = new[]
{
"10.0.0.0/16",
},
ServicesIpv4CidrBlocks = new[]
{
"10.1.0.0/16",
},
},
Fleet = new Gcp.EdgeContainer.Inputs.ClusterFleetArgs
{
Project = $"projects/{project.Apply(getProjectResult => getProjectResult.Number)}",
},
ExternalLoadBalancerIpv4AddressPools = new[]
{
"10.100.0.0-10.100.0.10",
},
ControlPlane = new Gcp.EdgeContainer.Inputs.ClusterControlPlaneArgs
{
Local = new Gcp.EdgeContainer.Inputs.ClusterControlPlaneLocalArgs
{
NodeLocation = "us-central1-edge-example-edgesite",
NodeCount = 1,
MachineFilter = "machine-name",
SharedDeploymentPolicy = "ALLOWED",
},
},
});
});
package main
import (
"fmt"
"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/edgecontainer"
"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/organizations"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
project, err := organizations.LookupProject(ctx, nil, nil)
if err != nil {
return err
}
_, err = edgecontainer.NewCluster(ctx, "default", &edgecontainer.ClusterArgs{
Name: pulumi.String("local-control-plane-cluster"),
Location: pulumi.String("us-central1"),
Authorization: &edgecontainer.ClusterAuthorizationArgs{
AdminUsers: &edgecontainer.ClusterAuthorizationAdminUsersArgs{
Username: pulumi.String("admin@hashicorptest.com"),
},
},
Networking: &edgecontainer.ClusterNetworkingArgs{
ClusterIpv4CidrBlocks: pulumi.StringArray{
pulumi.String("10.0.0.0/16"),
},
ServicesIpv4CidrBlocks: pulumi.StringArray{
pulumi.String("10.1.0.0/16"),
},
},
Fleet: &edgecontainer.ClusterFleetArgs{
Project: pulumi.Sprintf("projects/%v", project.Number),
},
ExternalLoadBalancerIpv4AddressPools: pulumi.StringArray{
pulumi.String("10.100.0.0-10.100.0.10"),
},
ControlPlane: &edgecontainer.ClusterControlPlaneArgs{
Local: &edgecontainer.ClusterControlPlaneLocalArgs{
NodeLocation: pulumi.String("us-central1-edge-example-edgesite"),
NodeCount: pulumi.Int(1),
MachineFilter: pulumi.String("machine-name"),
SharedDeploymentPolicy: pulumi.String("ALLOWED"),
},
},
})
if err != nil {
return err
}
return nil
})
}
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.organizations.OrganizationsFunctions;
import com.pulumi.gcp.organizations.inputs.GetProjectArgs;
import com.pulumi.gcp.edgecontainer.Cluster;
import com.pulumi.gcp.edgecontainer.ClusterArgs;
import com.pulumi.gcp.edgecontainer.inputs.ClusterAuthorizationArgs;
import com.pulumi.gcp.edgecontainer.inputs.ClusterAuthorizationAdminUsersArgs;
import com.pulumi.gcp.edgecontainer.inputs.ClusterNetworkingArgs;
import com.pulumi.gcp.edgecontainer.inputs.ClusterFleetArgs;
import com.pulumi.gcp.edgecontainer.inputs.ClusterControlPlaneArgs;
import com.pulumi.gcp.edgecontainer.inputs.ClusterControlPlaneLocalArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
final var project = OrganizationsFunctions.getProject();
var default_ = new Cluster("default", ClusterArgs.builder()
.name("local-control-plane-cluster")
.location("us-central1")
.authorization(ClusterAuthorizationArgs.builder()
.adminUsers(ClusterAuthorizationAdminUsersArgs.builder()
.username("admin@hashicorptest.com")
.build())
.build())
.networking(ClusterNetworkingArgs.builder()
.clusterIpv4CidrBlocks("10.0.0.0/16")
.servicesIpv4CidrBlocks("10.1.0.0/16")
.build())
.fleet(ClusterFleetArgs.builder()
.project(project.applyValue(getProjectResult -> String.format("projects/%s", getProjectResult.number())))
.build())
.externalLoadBalancerIpv4AddressPools("10.100.0.0-10.100.0.10")
.controlPlane(ClusterControlPlaneArgs.builder()
.local(ClusterControlPlaneLocalArgs.builder()
.nodeLocation("us-central1-edge-example-edgesite")
.nodeCount(1)
.machineFilter("machine-name")
.sharedDeploymentPolicy("ALLOWED")
.build())
.build())
.build());
}
}
resources:
default:
type: gcp:edgecontainer:Cluster
properties:
name: local-control-plane-cluster
location: us-central1
authorization:
adminUsers:
username: admin@hashicorptest.com
networking:
clusterIpv4CidrBlocks:
- 10.0.0.0/16
servicesIpv4CidrBlocks:
- 10.1.0.0/16
fleet:
project: projects/${project.number}
externalLoadBalancerIpv4AddressPools:
- 10.100.0.0-10.100.0.10
controlPlane:
local:
nodeLocation: us-central1-edge-example-edgesite
nodeCount: 1
machineFilter: machine-name
sharedDeploymentPolicy: ALLOWED
variables:
project:
fn::invoke:
Function: gcp:organizations:getProject
Arguments: {}
Import
Cluster can be imported using any of these accepted formats:
projects/{{project}}/locations/{{location}}/clusters/{{name}}
{{project}}/{{location}}/{{name}}
{{location}}/{{name}}
When using the pulumi import command, Cluster can be imported using one of the formats above. For example:
$ pulumi import gcp:edgecontainer/cluster:Cluster default projects/{{project}}/locations/{{location}}/clusters/{{name}}
$ pulumi import gcp:edgecontainer/cluster:Cluster default {{project}}/{{location}}/{{name}}
$ pulumi import gcp:edgecontainer/cluster:Cluster default {{location}}/{{name}}
Properties
authorization: RBAC policy that will be applied and managed by GEC. Structure is documented below.
controlPlane: The configuration of the cluster control plane.
controlPlaneEncryption: Remote control plane disk encryption options. This field is only used when enabling CMEK support (see the sketch after this list).
defaultMaxPodsPerNode: The default maximum number of pods per node used if a maximum value is not specified explicitly for a node pool in this cluster. If unspecified, the Kubernetes default value will be used.
externalLoadBalancerIpv4AddressPools: Address pools for cluster data plane external load balancing.
fleet: Fleet related configuration. Fleets are a Google Cloud concept for logically organizing clusters, letting you use and manage multi-cluster capabilities and apply consistent policies across your systems. Structure is documented below.
maintenancePolicy: Cluster-wide maintenance policy configuration.
releaseChannel: The release channel a cluster is subscribed to. Possible values: "RELEASE_CHANNEL_UNSPECIFIED", "NONE", "REGULAR".
systemAddonsConfig: Config that customers are allowed to define for GDCE system add-ons (see the sketch after this list).
targetVersion: (Output) The target version of the cluster.
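The properties above that the examples do not exercise (controlPlaneEncryption, defaultMaxPodsPerNode, releaseChannel, and systemAddonsConfig) plug into the same cluster arguments. The TypeScript sketch below is illustrative only: it extends the basic example, the KMS key path, Pod limit, and ingress VIP are placeholder values, and the nested field names should be verified against the current provider schema.

import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";

const project = gcp.organizations.getProject({});
const cluster = new gcp.edgecontainer.Cluster("default", {
    name: "basic-cluster",
    location: "us-central1",
    authorization: {
        adminUsers: {
            username: "admin@hashicorptest.com",
        },
    },
    networking: {
        clusterIpv4CidrBlocks: ["10.0.0.0/16"],
        servicesIpv4CidrBlocks: ["10.1.0.0/16"],
    },
    fleet: {
        project: project.then(project => `projects/${project.number}`),
    },
    // CMEK for the remote control plane; the key path is a placeholder.
    controlPlaneEncryption: {
        kmsKey: "projects/my-project/locations/us-central1/keyRings/my-key-ring/cryptoKeys/my-key",
    },
    // Cluster-wide default Pod limit per node (illustrative value).
    defaultMaxPodsPerNode: 110,
    // Subscribe the cluster to the REGULAR release channel.
    releaseChannel: "REGULAR",
    // GDCE system add-on configuration; the ingress VIP is a placeholder.
    systemAddonsConfig: {
        ingress: {
            disabled: false,
            ipv4Vip: "10.0.1.1",
        },
    },
});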