Spark Cluster Args
Manages an HDInsight Spark Cluster.
Example Usage
TypeScript

import * as pulumi from "@pulumi/pulumi";
import * as azure from "@pulumi/azure";

const example = new azure.core.ResourceGroup("example", {
    name: "example-resources",
    location: "West Europe",
});
const exampleAccount = new azure.storage.Account("example", {
    name: "hdinsightstor",
    resourceGroupName: example.name,
    location: example.location,
    accountTier: "Standard",
    accountReplicationType: "LRS",
});
const exampleContainer = new azure.storage.Container("example", {
    name: "hdinsight",
    storageAccountName: exampleAccount.name,
    containerAccessType: "private",
});
const exampleSparkCluster = new azure.hdinsight.SparkCluster("example", {
    name: "example-hdicluster",
    resourceGroupName: example.name,
    location: example.location,
    clusterVersion: "3.6",
    tier: "Standard",
    componentVersion: {
        spark: "2.3",
    },
    gateway: {
        username: "acctestusrgw",
        password: "Password123!",
    },
    storageAccounts: [{
        storageContainerId: exampleContainer.id,
        storageAccountKey: exampleAccount.primaryAccessKey,
        isDefault: true,
    }],
    roles: {
        headNode: {
            vmSize: "Standard_A3",
            username: "acctestusrvm",
            password: "AccTestvdSC4daf986!",
        },
        workerNode: {
            vmSize: "Standard_A3",
            username: "acctestusrvm",
            password: "AccTestvdSC4daf986!",
            targetInstanceCount: 3,
        },
        zookeeperNode: {
            vmSize: "Medium",
            username: "acctestusrvm",
            password: "AccTestvdSC4daf986!",
        },
    },
});
Python

import pulumi
import pulumi_azure as azure

example = azure.core.ResourceGroup("example",
    name="example-resources",
    location="West Europe")
example_account = azure.storage.Account("example",
    name="hdinsightstor",
    resource_group_name=example.name,
    location=example.location,
    account_tier="Standard",
    account_replication_type="LRS")
example_container = azure.storage.Container("example",
    name="hdinsight",
    storage_account_name=example_account.name,
    container_access_type="private")
example_spark_cluster = azure.hdinsight.SparkCluster("example",
    name="example-hdicluster",
    resource_group_name=example.name,
    location=example.location,
    cluster_version="3.6",
    tier="Standard",
    component_version={
        "spark": "2.3",
    },
    gateway={
        "username": "acctestusrgw",
        "password": "Password123!",
    },
    storage_accounts=[{
        "storage_container_id": example_container.id,
        "storage_account_key": example_account.primary_access_key,
        "is_default": True,
    }],
    roles={
        "head_node": {
            "vm_size": "Standard_A3",
            "username": "acctestusrvm",
            "password": "AccTestvdSC4daf986!",
        },
        "worker_node": {
            "vm_size": "Standard_A3",
            "username": "acctestusrvm",
            "password": "AccTestvdSC4daf986!",
            "target_instance_count": 3,
        },
        "zookeeper_node": {
            "vm_size": "Medium",
            "username": "acctestusrvm",
            "password": "AccTestvdSC4daf986!",
        },
    })
C#

using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Azure = Pulumi.Azure;

return await Deployment.RunAsync(() =>
{
    var example = new Azure.Core.ResourceGroup("example", new()
    {
        Name = "example-resources",
        Location = "West Europe",
    });

    var exampleAccount = new Azure.Storage.Account("example", new()
    {
        Name = "hdinsightstor",
        ResourceGroupName = example.Name,
        Location = example.Location,
        AccountTier = "Standard",
        AccountReplicationType = "LRS",
    });

    var exampleContainer = new Azure.Storage.Container("example", new()
    {
        Name = "hdinsight",
        StorageAccountName = exampleAccount.Name,
        ContainerAccessType = "private",
    });

    var exampleSparkCluster = new Azure.HDInsight.SparkCluster("example", new()
    {
        Name = "example-hdicluster",
        ResourceGroupName = example.Name,
        Location = example.Location,
        ClusterVersion = "3.6",
        Tier = "Standard",
        ComponentVersion = new Azure.HDInsight.Inputs.SparkClusterComponentVersionArgs
        {
            Spark = "2.3",
        },
        Gateway = new Azure.HDInsight.Inputs.SparkClusterGatewayArgs
        {
            Username = "acctestusrgw",
            Password = "Password123!",
        },
        StorageAccounts = new[]
        {
            new Azure.HDInsight.Inputs.SparkClusterStorageAccountArgs
            {
                StorageContainerId = exampleContainer.Id,
                StorageAccountKey = exampleAccount.PrimaryAccessKey,
                IsDefault = true,
            },
        },
        Roles = new Azure.HDInsight.Inputs.SparkClusterRolesArgs
        {
            HeadNode = new Azure.HDInsight.Inputs.SparkClusterRolesHeadNodeArgs
            {
                VmSize = "Standard_A3",
                Username = "acctestusrvm",
                Password = "AccTestvdSC4daf986!",
            },
            WorkerNode = new Azure.HDInsight.Inputs.SparkClusterRolesWorkerNodeArgs
            {
                VmSize = "Standard_A3",
                Username = "acctestusrvm",
                Password = "AccTestvdSC4daf986!",
                TargetInstanceCount = 3,
            },
            ZookeeperNode = new Azure.HDInsight.Inputs.SparkClusterRolesZookeeperNodeArgs
            {
                VmSize = "Medium",
                Username = "acctestusrvm",
                Password = "AccTestvdSC4daf986!",
            },
        },
    });
});
Go

package main

import (
	"github.com/pulumi/pulumi-azure/sdk/v6/go/azure/core"
	"github.com/pulumi/pulumi-azure/sdk/v6/go/azure/hdinsight"
	"github.com/pulumi/pulumi-azure/sdk/v6/go/azure/storage"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		example, err := core.NewResourceGroup(ctx, "example", &core.ResourceGroupArgs{
			Name:     pulumi.String("example-resources"),
			Location: pulumi.String("West Europe"),
		})
		if err != nil {
			return err
		}
		exampleAccount, err := storage.NewAccount(ctx, "example", &storage.AccountArgs{
			Name:                   pulumi.String("hdinsightstor"),
			ResourceGroupName:      example.Name,
			Location:               example.Location,
			AccountTier:            pulumi.String("Standard"),
			AccountReplicationType: pulumi.String("LRS"),
		})
		if err != nil {
			return err
		}
		exampleContainer, err := storage.NewContainer(ctx, "example", &storage.ContainerArgs{
			Name:                pulumi.String("hdinsight"),
			StorageAccountName:  exampleAccount.Name,
			ContainerAccessType: pulumi.String("private"),
		})
		if err != nil {
			return err
		}
		_, err = hdinsight.NewSparkCluster(ctx, "example", &hdinsight.SparkClusterArgs{
			Name:              pulumi.String("example-hdicluster"),
			ResourceGroupName: example.Name,
			Location:          example.Location,
			ClusterVersion:    pulumi.String("3.6"),
			Tier:              pulumi.String("Standard"),
			ComponentVersion: &hdinsight.SparkClusterComponentVersionArgs{
				Spark: pulumi.String("2.3"),
			},
			Gateway: &hdinsight.SparkClusterGatewayArgs{
				Username: pulumi.String("acctestusrgw"),
				Password: pulumi.String("Password123!"),
			},
			StorageAccounts: hdinsight.SparkClusterStorageAccountArray{
				&hdinsight.SparkClusterStorageAccountArgs{
					StorageContainerId: exampleContainer.ID(),
					StorageAccountKey:  exampleAccount.PrimaryAccessKey,
					IsDefault:          pulumi.Bool(true),
				},
			},
			Roles: &hdinsight.SparkClusterRolesArgs{
				HeadNode: &hdinsight.SparkClusterRolesHeadNodeArgs{
					VmSize:   pulumi.String("Standard_A3"),
					Username: pulumi.String("acctestusrvm"),
					Password: pulumi.String("AccTestvdSC4daf986!"),
				},
				WorkerNode: &hdinsight.SparkClusterRolesWorkerNodeArgs{
					VmSize:              pulumi.String("Standard_A3"),
					Username:            pulumi.String("acctestusrvm"),
					Password:            pulumi.String("AccTestvdSC4daf986!"),
					TargetInstanceCount: pulumi.Int(3),
				},
				ZookeeperNode: &hdinsight.SparkClusterRolesZookeeperNodeArgs{
					VmSize:   pulumi.String("Medium"),
					Username: pulumi.String("acctestusrvm"),
					Password: pulumi.String("AccTestvdSC4daf986!"),
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
Java

package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.azure.core.ResourceGroup;
import com.pulumi.azure.core.ResourceGroupArgs;
import com.pulumi.azure.storage.Account;
import com.pulumi.azure.storage.AccountArgs;
import com.pulumi.azure.storage.Container;
import com.pulumi.azure.storage.ContainerArgs;
import com.pulumi.azure.hdinsight.SparkCluster;
import com.pulumi.azure.hdinsight.SparkClusterArgs;
import com.pulumi.azure.hdinsight.inputs.SparkClusterComponentVersionArgs;
import com.pulumi.azure.hdinsight.inputs.SparkClusterGatewayArgs;
import com.pulumi.azure.hdinsight.inputs.SparkClusterStorageAccountArgs;
import com.pulumi.azure.hdinsight.inputs.SparkClusterRolesArgs;
import com.pulumi.azure.hdinsight.inputs.SparkClusterRolesHeadNodeArgs;
import com.pulumi.azure.hdinsight.inputs.SparkClusterRolesWorkerNodeArgs;
import com.pulumi.azure.hdinsight.inputs.SparkClusterRolesZookeeperNodeArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var example = new ResourceGroup("example", ResourceGroupArgs.builder()
            .name("example-resources")
            .location("West Europe")
            .build());

        var exampleAccount = new Account("exampleAccount", AccountArgs.builder()
            .name("hdinsightstor")
            .resourceGroupName(example.name())
            .location(example.location())
            .accountTier("Standard")
            .accountReplicationType("LRS")
            .build());

        var exampleContainer = new Container("exampleContainer", ContainerArgs.builder()
            .name("hdinsight")
            .storageAccountName(exampleAccount.name())
            .containerAccessType("private")
            .build());

        var exampleSparkCluster = new SparkCluster("exampleSparkCluster", SparkClusterArgs.builder()
            .name("example-hdicluster")
            .resourceGroupName(example.name())
            .location(example.location())
            .clusterVersion("3.6")
            .tier("Standard")
            .componentVersion(SparkClusterComponentVersionArgs.builder()
                .spark("2.3")
                .build())
            .gateway(SparkClusterGatewayArgs.builder()
                .username("acctestusrgw")
                .password("Password123!")
                .build())
            .storageAccounts(SparkClusterStorageAccountArgs.builder()
                .storageContainerId(exampleContainer.id())
                .storageAccountKey(exampleAccount.primaryAccessKey())
                .isDefault(true)
                .build())
            .roles(SparkClusterRolesArgs.builder()
                .headNode(SparkClusterRolesHeadNodeArgs.builder()
                    .vmSize("Standard_A3")
                    .username("acctestusrvm")
                    .password("AccTestvdSC4daf986!")
                    .build())
                .workerNode(SparkClusterRolesWorkerNodeArgs.builder()
                    .vmSize("Standard_A3")
                    .username("acctestusrvm")
                    .password("AccTestvdSC4daf986!")
                    .targetInstanceCount(3)
                    .build())
                .zookeeperNode(SparkClusterRolesZookeeperNodeArgs.builder()
                    .vmSize("Medium")
                    .username("acctestusrvm")
                    .password("AccTestvdSC4daf986!")
                    .build())
                .build())
            .build());
    }
}
YAML

resources:
  example:
    type: azure:core:ResourceGroup
    properties:
      name: example-resources
      location: West Europe
  exampleAccount:
    type: azure:storage:Account
    name: example
    properties:
      name: hdinsightstor
      resourceGroupName: ${example.name}
      location: ${example.location}
      accountTier: Standard
      accountReplicationType: LRS
  exampleContainer:
    type: azure:storage:Container
    name: example
    properties:
      name: hdinsight
      storageAccountName: ${exampleAccount.name}
      containerAccessType: private
  exampleSparkCluster:
    type: azure:hdinsight:SparkCluster
    name: example
    properties:
      name: example-hdicluster
      resourceGroupName: ${example.name}
      location: ${example.location}
      clusterVersion: '3.6'
      tier: Standard
      componentVersion:
        spark: '2.3'
      gateway:
        username: acctestusrgw
        password: Password123!
      storageAccounts:
        - storageContainerId: ${exampleContainer.id}
          storageAccountKey: ${exampleAccount.primaryAccessKey}
          isDefault: true
      roles:
        headNode:
          vmSize: Standard_A3
          username: acctestusrvm
          password: AccTestvdSC4daf986!
        workerNode:
          vmSize: Standard_A3
          username: acctestusrvm
          password: AccTestvdSC4daf986!
          targetInstanceCount: 3
        zookeeperNode:
          vmSize: Medium
          username: acctestusrvm
          password: AccTestvdSC4daf986!
API Providers
This resource uses the following Azure API Providers:
Microsoft.HDInsight: 2021-06-01
Import
HDInsight Spark Clusters can be imported using the resource id, e.g.
$ pulumi import azure:hdinsight/sparkCluster:SparkCluster example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.HDInsight/clusters/cluster1
Properties
clusterVersion
Specifies the version of HDInsight which should be used for this Cluster. Changing this forces a new resource to be created.
componentVersion
A component_version block as defined below.
computeIsolation
A compute_isolation block as defined below.
diskEncryptions
One or more disk_encryption blocks as defined below.
encryptionInTransitEnabled
Whether encryption in transit is enabled for this Cluster. Changing this forces a new resource to be created.
extension
An extension block as defined below.
gateway
A gateway block as defined below.
metastores
A metastores block as defined below.
monitor
A monitor block as defined below.
network
A network block as defined below.
privateLinkConfiguration
A private_link_configuration block as defined below.
resourceGroupName
Specifies the name of the Resource Group in which this HDInsight Spark Cluster should exist. Changing this forces a new resource to be created.
roles
A roles block as defined below.
securityProfile
A security_profile block as defined below. Changing this forces a new resource to be created.
storageAccountGen2
A storage_account_gen2 block as defined below.
storageAccounts
One or more storage_account blocks as defined below.
tlsMinVersion
The minimal supported TLS version. Possible values are 1.0, 1.1 or 1.2. Changing this forces a new resource to be created. (A sketch showing this and other optional settings follows this list.)
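The optional arguments above are easiest to see in context. The TypeScript fragment below is a minimal sketch, not taken verbatim from the provider documentation: it assumes the camelCase property names exposed by the @pulumi/azure SDK for the blocks described in this list (encryptionInTransitEnabled, tlsMinVersion, and a network block with connectionDirection and privateLinkEnabled), and the values shown are purely illustrative.

import * as azure from "@pulumi/azure";

// Hypothetical fragment: optional settings to merge into the SparkCluster
// arguments shown in the Example Usage above. All values are illustrative;
// check the provider documentation for the blocks you actually need.
const optionalClusterArgs: Partial<azure.hdinsight.SparkClusterArgs> = {
    encryptionInTransitEnabled: true, // enable encryption in transit (forces a new resource)
    tlsMinVersion: "1.2",             // minimal supported TLS version: "1.0", "1.1" or "1.2"
    network: {
        connectionDirection: "Outbound", // assumed value for the network block
        privateLinkEnabled: false,
    },
};

These fields could then be combined with the required arguments (gateway, storageAccounts, roles and so on) via an object spread, e.g. new azure.hdinsight.SparkCluster("example", { ...requiredArgs, ...optionalClusterArgs }), where requiredArgs stands for the argument object from the Example Usage.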