SparkClusterArgs

data class SparkClusterArgs(val clusterVersion: Output<String>? = null, val componentVersion: Output<SparkClusterComponentVersionArgs>? = null, val computeIsolation: Output<SparkClusterComputeIsolationArgs>? = null, val diskEncryptions: Output<List<SparkClusterDiskEncryptionArgs>>? = null, val encryptionInTransitEnabled: Output<Boolean>? = null, val extension: Output<SparkClusterExtensionArgs>? = null, val gateway: Output<SparkClusterGatewayArgs>? = null, val location: Output<String>? = null, val metastores: Output<SparkClusterMetastoresArgs>? = null, val monitor: Output<SparkClusterMonitorArgs>? = null, val name: Output<String>? = null, val network: Output<SparkClusterNetworkArgs>? = null, val privateLinkConfiguration: Output<SparkClusterPrivateLinkConfigurationArgs>? = null, val resourceGroupName: Output<String>? = null, val roles: Output<SparkClusterRolesArgs>? = null, val securityProfile: Output<SparkClusterSecurityProfileArgs>? = null, val storageAccountGen2: Output<SparkClusterStorageAccountGen2Args>? = null, val storageAccounts: Output<List<SparkClusterStorageAccountArgs>>? = null, val tags: Output<Map<String, String>>? = null, val tier: Output<String>? = null, val tlsMinVersion: Output<String>? = null, val zones: Output<List<String>>? = null) : ConvertibleToJava<SparkClusterArgs>

Manages an HDInsight Spark Cluster.

Example Usage

TypeScript

import * as pulumi from "@pulumi/pulumi";
import * as azure from "@pulumi/azure";

const example = new azure.core.ResourceGroup("example", {
    name: "example-resources",
    location: "West Europe",
});
const exampleAccount = new azure.storage.Account("example", {
    name: "hdinsightstor",
    resourceGroupName: example.name,
    location: example.location,
    accountTier: "Standard",
    accountReplicationType: "LRS",
});
const exampleContainer = new azure.storage.Container("example", {
    name: "hdinsight",
    storageAccountName: exampleAccount.name,
    containerAccessType: "private",
});
const exampleSparkCluster = new azure.hdinsight.SparkCluster("example", {
    name: "example-hdicluster",
    resourceGroupName: example.name,
    location: example.location,
    clusterVersion: "3.6",
    tier: "Standard",
    componentVersion: {
        spark: "2.3",
    },
    gateway: {
        username: "acctestusrgw",
        password: "Password123!",
    },
    storageAccounts: [{
        storageContainerId: exampleContainer.id,
        storageAccountKey: exampleAccount.primaryAccessKey,
        isDefault: true,
    }],
    roles: {
        headNode: {
            vmSize: "Standard_A3",
            username: "acctestusrvm",
            password: "AccTestvdSC4daf986!",
        },
        workerNode: {
            vmSize: "Standard_A3",
            username: "acctestusrvm",
            password: "AccTestvdSC4daf986!",
            targetInstanceCount: 3,
        },
        zookeeperNode: {
            vmSize: "Medium",
            username: "acctestusrvm",
            password: "AccTestvdSC4daf986!",
        },
    },
});

Python

import pulumi
import pulumi_azure as azure

example = azure.core.ResourceGroup("example",
    name="example-resources",
    location="West Europe")
example_account = azure.storage.Account("example",
    name="hdinsightstor",
    resource_group_name=example.name,
    location=example.location,
    account_tier="Standard",
    account_replication_type="LRS")
example_container = azure.storage.Container("example",
    name="hdinsight",
    storage_account_name=example_account.name,
    container_access_type="private")
example_spark_cluster = azure.hdinsight.SparkCluster("example",
    name="example-hdicluster",
    resource_group_name=example.name,
    location=example.location,
    cluster_version="3.6",
    tier="Standard",
    component_version={
        "spark": "2.3",
    },
    gateway={
        "username": "acctestusrgw",
        "password": "Password123!",
    },
    storage_accounts=[{
        "storage_container_id": example_container.id,
        "storage_account_key": example_account.primary_access_key,
        "is_default": True,
    }],
    roles={
        "head_node": {
            "vm_size": "Standard_A3",
            "username": "acctestusrvm",
            "password": "AccTestvdSC4daf986!",
        },
        "worker_node": {
            "vm_size": "Standard_A3",
            "username": "acctestusrvm",
            "password": "AccTestvdSC4daf986!",
            "target_instance_count": 3,
        },
        "zookeeper_node": {
            "vm_size": "Medium",
            "username": "acctestusrvm",
            "password": "AccTestvdSC4daf986!",
        },
    })

C#

using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Azure = Pulumi.Azure;

return await Deployment.RunAsync(() =>
{
    var example = new Azure.Core.ResourceGroup("example", new()
    {
        Name = "example-resources",
        Location = "West Europe",
    });
    var exampleAccount = new Azure.Storage.Account("example", new()
    {
        Name = "hdinsightstor",
        ResourceGroupName = example.Name,
        Location = example.Location,
        AccountTier = "Standard",
        AccountReplicationType = "LRS",
    });
    var exampleContainer = new Azure.Storage.Container("example", new()
    {
        Name = "hdinsight",
        StorageAccountName = exampleAccount.Name,
        ContainerAccessType = "private",
    });
    var exampleSparkCluster = new Azure.HDInsight.SparkCluster("example", new()
    {
        Name = "example-hdicluster",
        ResourceGroupName = example.Name,
        Location = example.Location,
        ClusterVersion = "3.6",
        Tier = "Standard",
        ComponentVersion = new Azure.HDInsight.Inputs.SparkClusterComponentVersionArgs
        {
            Spark = "2.3",
        },
        Gateway = new Azure.HDInsight.Inputs.SparkClusterGatewayArgs
        {
            Username = "acctestusrgw",
            Password = "Password123!",
        },
        StorageAccounts = new[]
        {
            new Azure.HDInsight.Inputs.SparkClusterStorageAccountArgs
            {
                StorageContainerId = exampleContainer.Id,
                StorageAccountKey = exampleAccount.PrimaryAccessKey,
                IsDefault = true,
            },
        },
        Roles = new Azure.HDInsight.Inputs.SparkClusterRolesArgs
        {
            HeadNode = new Azure.HDInsight.Inputs.SparkClusterRolesHeadNodeArgs
            {
                VmSize = "Standard_A3",
                Username = "acctestusrvm",
                Password = "AccTestvdSC4daf986!",
            },
            WorkerNode = new Azure.HDInsight.Inputs.SparkClusterRolesWorkerNodeArgs
            {
                VmSize = "Standard_A3",
                Username = "acctestusrvm",
                Password = "AccTestvdSC4daf986!",
                TargetInstanceCount = 3,
            },
            ZookeeperNode = new Azure.HDInsight.Inputs.SparkClusterRolesZookeeperNodeArgs
            {
                VmSize = "Medium",
                Username = "acctestusrvm",
                Password = "AccTestvdSC4daf986!",
            },
        },
    });
});

Go

package main

import (
    "github.com/pulumi/pulumi-azure/sdk/v6/go/azure/core"
    "github.com/pulumi/pulumi-azure/sdk/v6/go/azure/hdinsight"
    "github.com/pulumi/pulumi-azure/sdk/v6/go/azure/storage"
    "github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
    pulumi.Run(func(ctx *pulumi.Context) error {
        example, err := core.NewResourceGroup(ctx, "example", &core.ResourceGroupArgs{
            Name: pulumi.String("example-resources"),
            Location: pulumi.String("West Europe"),
        })
        if err != nil {
            return err
        }
        exampleAccount, err := storage.NewAccount(ctx, "example", &storage.AccountArgs{
            Name: pulumi.String("hdinsightstor"),
            ResourceGroupName: example.Name,
            Location: example.Location,
            AccountTier: pulumi.String("Standard"),
            AccountReplicationType: pulumi.String("LRS"),
        })
        if err != nil {
            return err
        }
        exampleContainer, err := storage.NewContainer(ctx, "example", &storage.ContainerArgs{
            Name: pulumi.String("hdinsight"),
            StorageAccountName: exampleAccount.Name,
            ContainerAccessType: pulumi.String("private"),
        })
        if err != nil {
            return err
        }
        _, err = hdinsight.NewSparkCluster(ctx, "example", &hdinsight.SparkClusterArgs{
            Name: pulumi.String("example-hdicluster"),
            ResourceGroupName: example.Name,
            Location: example.Location,
            ClusterVersion: pulumi.String("3.6"),
            Tier: pulumi.String("Standard"),
            ComponentVersion: &hdinsight.SparkClusterComponentVersionArgs{
                Spark: pulumi.String("2.3"),
            },
            Gateway: &hdinsight.SparkClusterGatewayArgs{
                Username: pulumi.String("acctestusrgw"),
                Password: pulumi.String("Password123!"),
            },
            StorageAccounts: hdinsight.SparkClusterStorageAccountArray{
                &hdinsight.SparkClusterStorageAccountArgs{
                    StorageContainerId: exampleContainer.ID(),
                    StorageAccountKey: exampleAccount.PrimaryAccessKey,
                    IsDefault: pulumi.Bool(true),
                },
            },
            Roles: &hdinsight.SparkClusterRolesArgs{
                HeadNode: &hdinsight.SparkClusterRolesHeadNodeArgs{
                    VmSize: pulumi.String("Standard_A3"),
                    Username: pulumi.String("acctestusrvm"),
                    Password: pulumi.String("AccTestvdSC4daf986!"),
                },
                WorkerNode: &hdinsight.SparkClusterRolesWorkerNodeArgs{
                    VmSize: pulumi.String("Standard_A3"),
                    Username: pulumi.String("acctestusrvm"),
                    Password: pulumi.String("AccTestvdSC4daf986!"),
                    TargetInstanceCount: pulumi.Int(3),
                },
                ZookeeperNode: &hdinsight.SparkClusterRolesZookeeperNodeArgs{
                    VmSize: pulumi.String("Medium"),
                    Username: pulumi.String("acctestusrvm"),
                    Password: pulumi.String("AccTestvdSC4daf986!"),
                },
            },
        })
        if err != nil {
            return err
        }
        return nil
    })
}

Java

package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.azure.core.ResourceGroup;
import com.pulumi.azure.core.ResourceGroupArgs;
import com.pulumi.azure.storage.Account;
import com.pulumi.azure.storage.AccountArgs;
import com.pulumi.azure.storage.Container;
import com.pulumi.azure.storage.ContainerArgs;
import com.pulumi.azure.hdinsight.SparkCluster;
import com.pulumi.azure.hdinsight.SparkClusterArgs;
import com.pulumi.azure.hdinsight.inputs.SparkClusterComponentVersionArgs;
import com.pulumi.azure.hdinsight.inputs.SparkClusterGatewayArgs;
import com.pulumi.azure.hdinsight.inputs.SparkClusterStorageAccountArgs;
import com.pulumi.azure.hdinsight.inputs.SparkClusterRolesArgs;
import com.pulumi.azure.hdinsight.inputs.SparkClusterRolesHeadNodeArgs;
import com.pulumi.azure.hdinsight.inputs.SparkClusterRolesWorkerNodeArgs;
import com.pulumi.azure.hdinsight.inputs.SparkClusterRolesZookeeperNodeArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var example = new ResourceGroup("example", ResourceGroupArgs.builder()
            .name("example-resources")
            .location("West Europe")
            .build());
        var exampleAccount = new Account("exampleAccount", AccountArgs.builder()
            .name("hdinsightstor")
            .resourceGroupName(example.name())
            .location(example.location())
            .accountTier("Standard")
            .accountReplicationType("LRS")
            .build());
        var exampleContainer = new Container("exampleContainer", ContainerArgs.builder()
            .name("hdinsight")
            .storageAccountName(exampleAccount.name())
            .containerAccessType("private")
            .build());
        var exampleSparkCluster = new SparkCluster("exampleSparkCluster", SparkClusterArgs.builder()
            .name("example-hdicluster")
            .resourceGroupName(example.name())
            .location(example.location())
            .clusterVersion("3.6")
            .tier("Standard")
            .componentVersion(SparkClusterComponentVersionArgs.builder()
                .spark("2.3")
                .build())
            .gateway(SparkClusterGatewayArgs.builder()
                .username("acctestusrgw")
                .password("Password123!")
                .build())
            .storageAccounts(SparkClusterStorageAccountArgs.builder()
                .storageContainerId(exampleContainer.id())
                .storageAccountKey(exampleAccount.primaryAccessKey())
                .isDefault(true)
                .build())
            .roles(SparkClusterRolesArgs.builder()
                .headNode(SparkClusterRolesHeadNodeArgs.builder()
                    .vmSize("Standard_A3")
                    .username("acctestusrvm")
                    .password("AccTestvdSC4daf986!")
                    .build())
                .workerNode(SparkClusterRolesWorkerNodeArgs.builder()
                    .vmSize("Standard_A3")
                    .username("acctestusrvm")
                    .password("AccTestvdSC4daf986!")
                    .targetInstanceCount(3)
                    .build())
                .zookeeperNode(SparkClusterRolesZookeeperNodeArgs.builder()
                    .vmSize("Medium")
                    .username("acctestusrvm")
                    .password("AccTestvdSC4daf986!")
                    .build())
                .build())
            .build());
    }
}

YAML

resources:
  example:
    type: azure:core:ResourceGroup
    properties:
      name: example-resources
      location: West Europe
  exampleAccount:
    type: azure:storage:Account
    name: example
    properties:
      name: hdinsightstor
      resourceGroupName: ${example.name}
      location: ${example.location}
      accountTier: Standard
      accountReplicationType: LRS
  exampleContainer:
    type: azure:storage:Container
    name: example
    properties:
      name: hdinsight
      storageAccountName: ${exampleAccount.name}
      containerAccessType: private
  exampleSparkCluster:
    type: azure:hdinsight:SparkCluster
    name: example
    properties:
      name: example-hdicluster
      resourceGroupName: ${example.name}
      location: ${example.location}
      clusterVersion: '3.6'
      tier: Standard
      componentVersion:
        spark: '2.3'
      gateway:
        username: acctestusrgw
        password: Password123!
      storageAccounts:
        - storageContainerId: ${exampleContainer.id}
          storageAccountKey: ${exampleAccount.primaryAccessKey}
          isDefault: true
      roles:
        headNode:
          vmSize: Standard_A3
          username: acctestusrvm
          password: AccTestvdSC4daf986!
        workerNode:
          vmSize: Standard_A3
          username: acctestusrvm
          password: AccTestvdSC4daf986!
          targetInstanceCount: 3
        zookeeperNode:
          vmSize: Medium
          username: acctestusrvm
          password: AccTestvdSC4daf986!
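
Since this page documents the Kotlin SDK, a Kotlin sketch of the same arguments may also be useful. The snippet below is a minimal, illustrative sketch only: it builds a SparkClusterArgs value directly through the constructor documented on this page, using Output.of from com.pulumi.core. The import path for SparkClusterArgs is an assumption based on the usual pulumi-azure Kotlin package layout, and a working cluster additionally requires the componentVersion, gateway, roles, and storageAccounts blocks shown in the examples above.

Kotlin (sketch)

import com.pulumi.core.Output
// Assumed package path; adjust to match the package of this page in your SDK version.
import com.pulumi.azure.hdinsight.kotlin.SparkClusterArgs

// Sketch: only the scalar and map arguments are set here. The block arguments
// (componentVersion, gateway, roles, storageAccounts) are omitted for brevity
// and must be supplied for a real cluster, as in the examples above.
val clusterArgs = SparkClusterArgs(
    name = Output.of("example-hdicluster"),
    resourceGroupName = Output.of("example-resources"),
    location = Output.of("West Europe"),
    clusterVersion = Output.of("3.6"),
    tier = Output.of("Standard"),
    tags = Output.of(mapOf("environment" to "example"))
)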

API Providers

This resource uses the following Azure API Providers:

  • Microsoft.HDInsight: 2021-06-01

Import

HDInsight Spark Clusters can be imported using the resource id, e.g.

$ pulumi import azure:hdinsight/sparkCluster:SparkCluster example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.HDInsight/clusters/cluster1

Constructors

constructor(clusterVersion: Output<String>? = null, componentVersion: Output<SparkClusterComponentVersionArgs>? = null, computeIsolation: Output<SparkClusterComputeIsolationArgs>? = null, diskEncryptions: Output<List<SparkClusterDiskEncryptionArgs>>? = null, encryptionInTransitEnabled: Output<Boolean>? = null, extension: Output<SparkClusterExtensionArgs>? = null, gateway: Output<SparkClusterGatewayArgs>? = null, location: Output<String>? = null, metastores: Output<SparkClusterMetastoresArgs>? = null, monitor: Output<SparkClusterMonitorArgs>? = null, name: Output<String>? = null, network: Output<SparkClusterNetworkArgs>? = null, privateLinkConfiguration: Output<SparkClusterPrivateLinkConfigurationArgs>? = null, resourceGroupName: Output<String>? = null, roles: Output<SparkClusterRolesArgs>? = null, securityProfile: Output<SparkClusterSecurityProfileArgs>? = null, storageAccountGen2: Output<SparkClusterStorageAccountGen2Args>? = null, storageAccounts: Output<List<SparkClusterStorageAccountArgs>>? = null, tags: Output<Map<String, String>>? = null, tier: Output<String>? = null, tlsMinVersion: Output<String>? = null, zones: Output<List<String>>? = null)

Properties

val clusterVersion: Output<String>? = null

Specifies the version of HDInsight which should be used for this Cluster. Changing this forces a new resource to be created.

val componentVersion: Output<SparkClusterComponentVersionArgs>? = null

A component_version block as defined below.

val computeIsolation: Output<SparkClusterComputeIsolationArgs>? = null

A compute_isolation block as defined below.

val diskEncryptions: Output<List<SparkClusterDiskEncryptionArgs>>? = null

One or more disk_encryption blocks as defined below.

val encryptionInTransitEnabled: Output<Boolean>? = null

Whether encryption in transit is enabled for this Cluster. Changing this forces a new resource to be created.

val extension: Output<SparkClusterExtensionArgs>? = null

An extension block as defined below.

val gateway: Output<SparkClusterGatewayArgs>? = null

A gateway block as defined below.

val location: Output<String>? = null

Specifies the Azure Region in which this HDInsight Spark Cluster should exist. Changing this forces a new resource to be created.

val metastores: Output<SparkClusterMetastoresArgs>? = null

A metastores block as defined below.

val monitor: Output<SparkClusterMonitorArgs>? = null

A monitor block as defined below.

val name: Output<String>? = null

Specifies the name for this HDInsight Spark Cluster. Changing this forces a new resource to be created.

val network: Output<SparkClusterNetworkArgs>? = null

A network block as defined below.

val privateLinkConfiguration: Output<SparkClusterPrivateLinkConfigurationArgs>? = null

A private_link_configuration block as defined below.

val resourceGroupName: Output<String>? = null

Specifies the name of the Resource Group in which this HDInsight Spark Cluster should exist. Changing this forces a new resource to be created.

val roles: Output<SparkClusterRolesArgs>? = null

A roles block as defined below.

val securityProfile: Output<SparkClusterSecurityProfileArgs>? = null

A security_profile block as defined below. Changing this forces a new resource to be created.

val storageAccountGen2: Output<SparkClusterStorageAccountGen2Args>? = null

A storage_account_gen2 block as defined below.

val storageAccounts: Output<List<SparkClusterStorageAccountArgs>>? = null

One or more storage_account blocks as defined below.

val tags: Output<Map<String, String>>? = null

A map of Tags which should be assigned to this HDInsight Spark Cluster.

val tier: Output<String>? = null

Specifies the Tier which should be used for this HDInsight Spark Cluster. Possible values are Standard or Premium. Changing this forces a new resource to be created.

val tlsMinVersion: Output<String>? = null

The minimum supported TLS version. Possible values are 1.0, 1.1 or 1.2. Changing this forces a new resource to be created.

val zones: Output<List<String>>? = null

A list of Availability Zones which should be used for this HDInsight Spark Cluster. Changing this forces a new resource to be created.

Functions

open override fun toJava(): SparkClusterArgs
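
Converts this Kotlin args value into the SparkClusterArgs type referenced by ConvertibleToJava; in the pulumi-kotlin SDKs this is the underlying Java-SDK args class. A minimal sketch, assuming clusterArgs was built as in the Kotlin example above:

// Hand the Kotlin-built arguments to APIs that expect the Java args type.
val javaArgs = clusterArgs.toJava()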