SparkPool

class SparkPool : KotlinCustomResource

Manages a Synapse Spark Pool.

Example Usage

TypeScript

import * as pulumi from "@pulumi/pulumi";
import * as azure from "@pulumi/azure";

const example = new azure.core.ResourceGroup("example", {
    name: "example-resources",
    location: "West Europe",
});
const exampleAccount = new azure.storage.Account("example", {
    name: "examplestorageacc",
    resourceGroupName: example.name,
    location: example.location,
    accountTier: "Standard",
    accountReplicationType: "LRS",
    accountKind: "StorageV2",
    isHnsEnabled: true,
});
const exampleDataLakeGen2Filesystem = new azure.storage.DataLakeGen2Filesystem("example", {
    name: "example",
    storageAccountId: exampleAccount.id,
});
const exampleWorkspace = new azure.synapse.Workspace("example", {
    name: "example",
    resourceGroupName: example.name,
    location: example.location,
    storageDataLakeGen2FilesystemId: exampleDataLakeGen2Filesystem.id,
    sqlAdministratorLogin: "sqladminuser",
    sqlAdministratorLoginPassword: "H@Sh1CoR3!",
    identity: {
        type: "SystemAssigned",
    },
});
const exampleSparkPool = new azure.synapse.SparkPool("example", {
    name: "example",
    synapseWorkspaceId: exampleWorkspace.id,
    nodeSizeFamily: "MemoryOptimized",
    nodeSize: "Small",
    cacheSize: 100,
    autoScale: {
        maxNodeCount: 50,
        minNodeCount: 3,
    },
    autoPause: {
        delayInMinutes: 15,
    },
    libraryRequirement: {
        content: `appnope==0.1.0
beautifulsoup4==4.6.3
`,
        filename: "requirements.txt",
    },
    sparkConfig: {
        content: "spark.shuffle.spill true\n",
        filename: "config.txt",
    },
    tags: {
        ENV: "Production",
    },
});

Python

import pulumi
import pulumi_azure as azure

example = azure.core.ResourceGroup("example",
    name="example-resources",
    location="West Europe")
example_account = azure.storage.Account("example",
    name="examplestorageacc",
    resource_group_name=example.name,
    location=example.location,
    account_tier="Standard",
    account_replication_type="LRS",
    account_kind="StorageV2",
    is_hns_enabled=True)
example_data_lake_gen2_filesystem = azure.storage.DataLakeGen2Filesystem("example",
    name="example",
    storage_account_id=example_account.id)
example_workspace = azure.synapse.Workspace("example",
    name="example",
    resource_group_name=example.name,
    location=example.location,
    storage_data_lake_gen2_filesystem_id=example_data_lake_gen2_filesystem.id,
    sql_administrator_login="sqladminuser",
    sql_administrator_login_password="H@Sh1CoR3!",
    identity={
        "type": "SystemAssigned",
    })
example_spark_pool = azure.synapse.SparkPool("example",
    name="example",
    synapse_workspace_id=example_workspace.id,
    node_size_family="MemoryOptimized",
    node_size="Small",
    cache_size=100,
    auto_scale={
        "max_node_count": 50,
        "min_node_count": 3,
    },
    auto_pause={
        "delay_in_minutes": 15,
    },
    library_requirement={
        "content": """appnope==0.1.0
beautifulsoup4==4.6.3
""",
        "filename": "requirements.txt",
    },
    spark_config={
        "content": "spark.shuffle.spill true\n",
        "filename": "config.txt",
    },
    tags={
        "ENV": "Production",
    })

C#

using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Azure = Pulumi.Azure;

return await Deployment.RunAsync(() =>
{
    var example = new Azure.Core.ResourceGroup("example", new()
    {
        Name = "example-resources",
        Location = "West Europe",
    });

    var exampleAccount = new Azure.Storage.Account("example", new()
    {
        Name = "examplestorageacc",
        ResourceGroupName = example.Name,
        Location = example.Location,
        AccountTier = "Standard",
        AccountReplicationType = "LRS",
        AccountKind = "StorageV2",
        IsHnsEnabled = true,
    });

    var exampleDataLakeGen2Filesystem = new Azure.Storage.DataLakeGen2Filesystem("example", new()
    {
        Name = "example",
        StorageAccountId = exampleAccount.Id,
    });

    var exampleWorkspace = new Azure.Synapse.Workspace("example", new()
    {
        Name = "example",
        ResourceGroupName = example.Name,
        Location = example.Location,
        StorageDataLakeGen2FilesystemId = exampleDataLakeGen2Filesystem.Id,
        SqlAdministratorLogin = "sqladminuser",
        SqlAdministratorLoginPassword = "H@Sh1CoR3!",
        Identity = new Azure.Synapse.Inputs.WorkspaceIdentityArgs
        {
            Type = "SystemAssigned",
        },
    });

    var exampleSparkPool = new Azure.Synapse.SparkPool("example", new()
    {
        Name = "example",
        SynapseWorkspaceId = exampleWorkspace.Id,
        NodeSizeFamily = "MemoryOptimized",
        NodeSize = "Small",
        CacheSize = 100,
        AutoScale = new Azure.Synapse.Inputs.SparkPoolAutoScaleArgs
        {
            MaxNodeCount = 50,
            MinNodeCount = 3,
        },
        AutoPause = new Azure.Synapse.Inputs.SparkPoolAutoPauseArgs
        {
            DelayInMinutes = 15,
        },
        LibraryRequirement = new Azure.Synapse.Inputs.SparkPoolLibraryRequirementArgs
        {
            Content = @"appnope==0.1.0
beautifulsoup4==4.6.3
",
            Filename = "requirements.txt",
        },
        SparkConfig = new Azure.Synapse.Inputs.SparkPoolSparkConfigArgs
        {
            Content = @"spark.shuffle.spill true
",
            Filename = "config.txt",
        },
        Tags =
        {
            { "ENV", "Production" },
        },
    });
});

Go

package main

import (
	"github.com/pulumi/pulumi-azure/sdk/v5/go/azure/core"
	"github.com/pulumi/pulumi-azure/sdk/v5/go/azure/storage"
	"github.com/pulumi/pulumi-azure/sdk/v5/go/azure/synapse"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		example, err := core.NewResourceGroup(ctx, "example", &core.ResourceGroupArgs{
			Name:     pulumi.String("example-resources"),
			Location: pulumi.String("West Europe"),
		})
		if err != nil {
			return err
		}
		exampleAccount, err := storage.NewAccount(ctx, "example", &storage.AccountArgs{
			Name:                   pulumi.String("examplestorageacc"),
			ResourceGroupName:      example.Name,
			Location:               example.Location,
			AccountTier:            pulumi.String("Standard"),
			AccountReplicationType: pulumi.String("LRS"),
			AccountKind:            pulumi.String("StorageV2"),
			IsHnsEnabled:           pulumi.Bool(true),
		})
		if err != nil {
			return err
		}
		exampleDataLakeGen2Filesystem, err := storage.NewDataLakeGen2Filesystem(ctx, "example", &storage.DataLakeGen2FilesystemArgs{
			Name:             pulumi.String("example"),
			StorageAccountId: exampleAccount.ID(),
		})
		if err != nil {
			return err
		}
		exampleWorkspace, err := synapse.NewWorkspace(ctx, "example", &synapse.WorkspaceArgs{
			Name:                            pulumi.String("example"),
			ResourceGroupName:               example.Name,
			Location:                        example.Location,
			StorageDataLakeGen2FilesystemId: exampleDataLakeGen2Filesystem.ID(),
			SqlAdministratorLogin:           pulumi.String("sqladminuser"),
			SqlAdministratorLoginPassword:   pulumi.String("H@Sh1CoR3!"),
			Identity: &synapse.WorkspaceIdentityArgs{
				Type: pulumi.String("SystemAssigned"),
			},
		})
		if err != nil {
			return err
		}
		_, err = synapse.NewSparkPool(ctx, "example", &synapse.SparkPoolArgs{
			Name:               pulumi.String("example"),
			SynapseWorkspaceId: exampleWorkspace.ID(),
			NodeSizeFamily:     pulumi.String("MemoryOptimized"),
			NodeSize:           pulumi.String("Small"),
			CacheSize:          pulumi.Int(100),
			AutoScale: &synapse.SparkPoolAutoScaleArgs{
				MaxNodeCount: pulumi.Int(50),
				MinNodeCount: pulumi.Int(3),
			},
			AutoPause: &synapse.SparkPoolAutoPauseArgs{
				DelayInMinutes: pulumi.Int(15),
			},
			LibraryRequirement: &synapse.SparkPoolLibraryRequirementArgs{
				Content:  pulumi.String("appnope==0.1.0\nbeautifulsoup4==4.6.3\n"),
				Filename: pulumi.String("requirements.txt"),
			},
			SparkConfig: &synapse.SparkPoolSparkConfigArgs{
				Content:  pulumi.String("spark.shuffle.spill true\n"),
				Filename: pulumi.String("config.txt"),
			},
			Tags: pulumi.StringMap{
				"ENV": pulumi.String("Production"),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}

Java

package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.azure.core.ResourceGroup;
import com.pulumi.azure.core.ResourceGroupArgs;
import com.pulumi.azure.storage.Account;
import com.pulumi.azure.storage.AccountArgs;
import com.pulumi.azure.storage.DataLakeGen2Filesystem;
import com.pulumi.azure.storage.DataLakeGen2FilesystemArgs;
import com.pulumi.azure.synapse.Workspace;
import com.pulumi.azure.synapse.WorkspaceArgs;
import com.pulumi.azure.synapse.inputs.WorkspaceIdentityArgs;
import com.pulumi.azure.synapse.SparkPool;
import com.pulumi.azure.synapse.SparkPoolArgs;
import com.pulumi.azure.synapse.inputs.SparkPoolAutoScaleArgs;
import com.pulumi.azure.synapse.inputs.SparkPoolAutoPauseArgs;
import com.pulumi.azure.synapse.inputs.SparkPoolLibraryRequirementArgs;
import com.pulumi.azure.synapse.inputs.SparkPoolSparkConfigArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var example = new ResourceGroup("example", ResourceGroupArgs.builder()
            .name("example-resources")
            .location("West Europe")
            .build());

        var exampleAccount = new Account("exampleAccount", AccountArgs.builder()
            .name("examplestorageacc")
            .resourceGroupName(example.name())
            .location(example.location())
            .accountTier("Standard")
            .accountReplicationType("LRS")
            .accountKind("StorageV2")
            .isHnsEnabled(true)
            .build());

        var exampleDataLakeGen2Filesystem = new DataLakeGen2Filesystem("exampleDataLakeGen2Filesystem", DataLakeGen2FilesystemArgs.builder()
            .name("example")
            .storageAccountId(exampleAccount.id())
            .build());

        var exampleWorkspace = new Workspace("exampleWorkspace", WorkspaceArgs.builder()
            .name("example")
            .resourceGroupName(example.name())
            .location(example.location())
            .storageDataLakeGen2FilesystemId(exampleDataLakeGen2Filesystem.id())
            .sqlAdministratorLogin("sqladminuser")
            .sqlAdministratorLoginPassword("H@Sh1CoR3!")
            .identity(WorkspaceIdentityArgs.builder()
                .type("SystemAssigned")
                .build())
            .build());

        var exampleSparkPool = new SparkPool("exampleSparkPool", SparkPoolArgs.builder()
            .name("example")
            .synapseWorkspaceId(exampleWorkspace.id())
            .nodeSizeFamily("MemoryOptimized")
            .nodeSize("Small")
            .cacheSize(100)
            .autoScale(SparkPoolAutoScaleArgs.builder()
                .maxNodeCount(50)
                .minNodeCount(3)
                .build())
            .autoPause(SparkPoolAutoPauseArgs.builder()
                .delayInMinutes(15)
                .build())
            .libraryRequirement(SparkPoolLibraryRequirementArgs.builder()
                .content("""
appnope==0.1.0
beautifulsoup4==4.6.3
""")
                .filename("requirements.txt")
                .build())
            .sparkConfig(SparkPoolSparkConfigArgs.builder()
                .content("""
spark.shuffle.spill true
""")
                .filename("config.txt")
                .build())
            .tags(Map.of("ENV", "Production"))
            .build());
    }
}

YAML

resources:
  example:
    type: azure:core:ResourceGroup
    properties:
      name: example-resources
      location: West Europe
  exampleAccount:
    type: azure:storage:Account
    name: example
    properties:
      name: examplestorageacc
      resourceGroupName: ${example.name}
      location: ${example.location}
      accountTier: Standard
      accountReplicationType: LRS
      accountKind: StorageV2
      isHnsEnabled: true
  exampleDataLakeGen2Filesystem:
    type: azure:storage:DataLakeGen2Filesystem
    name: example
    properties:
      name: example
      storageAccountId: ${exampleAccount.id}
  exampleWorkspace:
    type: azure:synapse:Workspace
    name: example
    properties:
      name: example
      resourceGroupName: ${example.name}
      location: ${example.location}
      storageDataLakeGen2FilesystemId: ${exampleDataLakeGen2Filesystem.id}
      sqlAdministratorLogin: sqladminuser
      sqlAdministratorLoginPassword: H@Sh1CoR3!
      identity:
        type: SystemAssigned
  exampleSparkPool:
    type: azure:synapse:SparkPool
    name: example
    properties:
      name: example
      synapseWorkspaceId: ${exampleWorkspace.id}
      nodeSizeFamily: MemoryOptimized
      nodeSize: Small
      cacheSize: 100
      autoScale:
        maxNodeCount: 50
        minNodeCount: 3
      autoPause:
        delayInMinutes: 15
      libraryRequirement:
        content: |
          appnope==0.1.0
          beautifulsoup4==4.6.3
        filename: requirements.txt
      sparkConfig:
        content: |
          spark.shuffle.spill true
        filename: config.txt
      tags:
        ENV: Production
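
The examples above size the pool with an auto_scale block. Alternatively, a pool can have a fixed size via node_count; exactly one of the two may be specified (see Properties below). A minimal TypeScript sketch reusing exampleWorkspace from the example above (the resource name, pool name, and node count here are illustrative):

// Fixed-size variant: nodeCount replaces the autoScale block, since exactly
// one of node_count or auto_scale must be specified.
const fixedSparkPool = new azure.synapse.SparkPool("fixed", {
    name: "fixedpool",
    synapseWorkspaceId: exampleWorkspace.id,
    nodeSizeFamily: "MemoryOptimized",
    nodeSize: "Small",
    nodeCount: 3,
});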

Import

A Synapse Spark Pool can be imported using the resource ID, e.g.

$ pulumi import azure:synapse/sparkPool:SparkPool example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.Synapse/workspaces/workspace1/bigDataPools/sparkPool1
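
Note that pulumi import only adopts the existing pool into the stack's state; the program must also declare a matching resource so later updates manage it. A minimal TypeScript sketch matching the placeholder ID above (the node size, family, and count are illustrative and must mirror the live pool):

// Declaration for the imported pool; property values must agree with the live
// resource, otherwise the next "pulumi up" will propose changes.
const example = new azure.synapse.SparkPool("example", {
    name: "sparkPool1",
    synapseWorkspaceId: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.Synapse/workspaces/workspace1",
    nodeSizeFamily: "MemoryOptimized", // illustrative
    nodeSize: "Small",                 // illustrative
    nodeCount: 3,                      // illustrative
});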

Properties

val autoPause: Output<SparkPoolAutoPause>?

An auto_pause block as defined below.

val autoScale: Output<SparkPoolAutoScale>?

An auto_scale block as defined below. Exactly one of node_count or auto_scale must be specified.

val cacheSize: Output<Int>?

The cache size in the Spark Pool.

val computeIsolationEnabled: Output<Boolean>?

Indicates whether compute isolation is enabled or not. Defaults to false.

val id: Output<String>
val maxExecutors: Output<Int>?
val minExecutors: Output<Int>?
val name: Output<String>

The name which should be used for this Synapse Spark Pool. Changing this forces a new Synapse Spark Pool to be created.

val nodeCount: Output<Int>?

The number of nodes in the Spark Pool. Exactly one of node_count or auto_scale must be specified.

val nodeSize: Output<String>

The size of the nodes in the Spark Pool. Possible values are Small, Medium, Large, None, XLarge, XXLarge and XXXLarge.

val nodeSizeFamily: Output<String>

The kind of nodes that the Spark Pool provides. Possible values are HardwareAcceleratedFPGA, HardwareAcceleratedGPU, MemoryOptimized, and None.

val pulumiChildResources: Set<KotlinResource>
val sparkLogFolder: Output<String>?
val sparkVersion: Output<String>?
val synapseWorkspaceId: Output<String>

The ID of the Synapse Workspace where the Synapse Spark Pool should exist. Changing this forces a new Synapse Spark Pool to be created.

val tags: Output<Map<String, String>>?
val urn: Output<String>