BigDataPoolArgs

data class BigDataPoolArgs(val autoPause: Output<AutoPausePropertiesArgs>? = null, val autoScale: Output<AutoScalePropertiesArgs>? = null, val bigDataPoolName: Output<String>? = null, val cacheSize: Output<Int>? = null, val customLibraries: Output<List<LibraryInfoArgs>>? = null, val defaultSparkLogFolder: Output<String>? = null, val dynamicExecutorAllocation: Output<DynamicExecutorAllocationArgs>? = null, val force: Output<Boolean>? = null, val isAutotuneEnabled: Output<Boolean>? = null, val isComputeIsolationEnabled: Output<Boolean>? = null, val libraryRequirements: Output<LibraryRequirementsArgs>? = null, val location: Output<String>? = null, val nodeCount: Output<Int>? = null, val nodeSize: Output<Either<String, NodeSize>>? = null, val nodeSizeFamily: Output<Either<String, NodeSizeFamily>>? = null, val provisioningState: Output<String>? = null, val resourceGroupName: Output<String>? = null, val sessionLevelPackagesEnabled: Output<Boolean>? = null, val sparkConfigProperties: Output<SparkConfigPropertiesArgs>? = null, val sparkEventsFolder: Output<String>? = null, val sparkVersion: Output<String>? = null, val tags: Output<Map<String, String>>? = null, val workspaceName: Output<String>? = null) : ConvertibleToJava<BigDataPoolArgs>

A Big Data pool. Uses Azure REST API version 2021-06-01. In version 2.x of the Azure Native provider, it used API version 2021-06-01. Other available API versions: 2021-04-01-preview, 2021-05-01, 2021-06-01-preview. These can be accessed by generating a local SDK package using the CLI command pulumi package add azure-native synapse [ApiVersion]. See the version guide ("Accessing any API version via local packages") for details.

Example Usage

Create or update a Big Data pool

using System.Collections.Generic;
using System.Linq;
using Pulumi;
using AzureNative = Pulumi.AzureNative;
return await Deployment.RunAsync(() =>
{
var bigDataPool = new AzureNative.Synapse.BigDataPool("bigDataPool", new()
{
AutoPause = new AzureNative.Synapse.Inputs.AutoPausePropertiesArgs
{
DelayInMinutes = 15,
Enabled = true,
},
AutoScale = new AzureNative.Synapse.Inputs.AutoScalePropertiesArgs
{
Enabled = true,
MaxNodeCount = 50,
MinNodeCount = 3,
},
BigDataPoolName = "ExamplePool",
DefaultSparkLogFolder = "/logs",
IsAutotuneEnabled = false,
LibraryRequirements = new AzureNative.Synapse.Inputs.LibraryRequirementsArgs
{
Content = "",
Filename = "requirements.txt",
},
Location = "West US 2",
NodeCount = 4,
NodeSize = AzureNative.Synapse.NodeSize.Medium,
NodeSizeFamily = AzureNative.Synapse.NodeSizeFamily.MemoryOptimized,
ResourceGroupName = "ExampleResourceGroup",
SparkEventsFolder = "/events",
SparkVersion = "3.3",
Tags =
{
{ "key", "value" },
},
WorkspaceName = "ExampleWorkspace",
});
});
package main
import (
synapse "github.com/pulumi/pulumi-azure-native-sdk/synapse/v2"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := synapse.NewBigDataPool(ctx, "bigDataPool", &synapse.BigDataPoolArgs{
AutoPause: &synapse.AutoPausePropertiesArgs{
DelayInMinutes: pulumi.Int(15),
Enabled: pulumi.Bool(true),
},
AutoScale: &synapse.AutoScalePropertiesArgs{
Enabled: pulumi.Bool(true),
MaxNodeCount: pulumi.Int(50),
MinNodeCount: pulumi.Int(3),
},
BigDataPoolName: pulumi.String("ExamplePool"),
DefaultSparkLogFolder: pulumi.String("/logs"),
IsAutotuneEnabled: pulumi.Bool(false),
LibraryRequirements: &synapse.LibraryRequirementsArgs{
Content: pulumi.String(""),
Filename: pulumi.String("requirements.txt"),
},
Location: pulumi.String("West US 2"),
NodeCount: pulumi.Int(4),
NodeSize: pulumi.String(synapse.NodeSizeMedium),
NodeSizeFamily: pulumi.String(synapse.NodeSizeFamilyMemoryOptimized),
ResourceGroupName: pulumi.String("ExampleResourceGroup"),
SparkEventsFolder: pulumi.String("/events"),
SparkVersion: pulumi.String("3.3"),
Tags: pulumi.StringMap{
"key": pulumi.String("value"),
},
WorkspaceName: pulumi.String("ExampleWorkspace"),
})
if err != nil {
return err
}
return nil
})
}
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.azurenative.synapse.BigDataPool;
import com.pulumi.azurenative.synapse.BigDataPoolArgs;
import com.pulumi.azurenative.synapse.inputs.AutoPausePropertiesArgs;
import com.pulumi.azurenative.synapse.inputs.AutoScalePropertiesArgs;
import com.pulumi.azurenative.synapse.inputs.LibraryRequirementsArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var bigDataPool = new BigDataPool("bigDataPool", BigDataPoolArgs.builder()
.autoPause(AutoPausePropertiesArgs.builder()
.delayInMinutes(15)
.enabled(true)
.build())
.autoScale(AutoScalePropertiesArgs.builder()
.enabled(true)
.maxNodeCount(50)
.minNodeCount(3)
.build())
.bigDataPoolName("ExamplePool")
.defaultSparkLogFolder("/logs")
.isAutotuneEnabled(false)
.libraryRequirements(LibraryRequirementsArgs.builder()
.content("")
.filename("requirements.txt")
.build())
.location("West US 2")
.nodeCount(4)
.nodeSize("Medium")
.nodeSizeFamily("MemoryOptimized")
.resourceGroupName("ExampleResourceGroup")
.sparkEventsFolder("/events")
.sparkVersion("3.3")
.tags(Map.of("key", "value"))
.workspaceName("ExampleWorkspace")
.build());
}
}

Import

An existing resource can be imported using its type token, name, and identifier, e.g.

$ pulumi import azure-native:synapse:BigDataPool ExamplePool /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/bigDataPools/{bigDataPoolName}

Constructors

Link copied to clipboard
constructor(autoPause: Output<AutoPausePropertiesArgs>? = null, autoScale: Output<AutoScalePropertiesArgs>? = null, bigDataPoolName: Output<String>? = null, cacheSize: Output<Int>? = null, customLibraries: Output<List<LibraryInfoArgs>>? = null, defaultSparkLogFolder: Output<String>? = null, dynamicExecutorAllocation: Output<DynamicExecutorAllocationArgs>? = null, force: Output<Boolean>? = null, isAutotuneEnabled: Output<Boolean>? = null, isComputeIsolationEnabled: Output<Boolean>? = null, libraryRequirements: Output<LibraryRequirementsArgs>? = null, location: Output<String>? = null, nodeCount: Output<Int>? = null, nodeSize: Output<Either<String, NodeSize>>? = null, nodeSizeFamily: Output<Either<String, NodeSizeFamily>>? = null, provisioningState: Output<String>? = null, resourceGroupName: Output<String>? = null, sessionLevelPackagesEnabled: Output<Boolean>? = null, sparkConfigProperties: Output<SparkConfigPropertiesArgs>? = null, sparkEventsFolder: Output<String>? = null, sparkVersion: Output<String>? = null, tags: Output<Map<String, String>>? = null, workspaceName: Output<String>? = null)

Properties

Link copied to clipboard
val autoPause: Output<AutoPausePropertiesArgs>? = null

Auto-pausing properties

Link copied to clipboard
val autoScale: Output<AutoScalePropertiesArgs>? = null

Auto-scaling properties

Link copied to clipboard
val bigDataPoolName: Output<String>? = null

Big Data pool name

Link copied to clipboard
val cacheSize: Output<Int>? = null

The cache size

Link copied to clipboard
val customLibraries: Output<List<LibraryInfoArgs>>? = null

List of custom libraries/packages associated with the spark pool.

Link copied to clipboard
val defaultSparkLogFolder: Output<String>? = null

The default folder where Spark logs will be written.

Link copied to clipboard
val dynamicExecutorAllocation: Output<DynamicExecutorAllocationArgs>? = null

Dynamic Executor Allocation

Link copied to clipboard
val force: Output<Boolean>? = null

Whether to stop any running jobs in the Big Data pool

Link copied to clipboard
val isAutotuneEnabled: Output<Boolean>? = null

Whether autotune is required or not.

Link copied to clipboard
val isComputeIsolationEnabled: Output<Boolean>? = null

Whether compute isolation is required or not.

Link copied to clipboard
val libraryRequirements: Output<LibraryRequirementsArgs>? = null

Library version requirements

Link copied to clipboard
val location: Output<String>? = null

The geo-location where the resource lives

Link copied to clipboard
val nodeCount: Output<Int>? = null

The number of nodes in the Big Data pool.

Link copied to clipboard
val nodeSize: Output<Either<String, NodeSize>>? = null

The level of compute power that each node in the Big Data pool has.

Link copied to clipboard
val nodeSizeFamily: Output<Either<String, NodeSizeFamily>>? = null

The kind of nodes that the Big Data pool provides.

Link copied to clipboard
val provisioningState: Output<String>? = null

The state of the Big Data pool.

Link copied to clipboard
val resourceGroupName: Output<String>? = null

The name of the resource group. The name is case insensitive.

Link copied to clipboard
val sessionLevelPackagesEnabled: Output<Boolean>? = null

Whether session-level packages are enabled.

Link copied to clipboard
val sparkConfigProperties: Output<SparkConfigPropertiesArgs>? = null

Spark configuration file to specify additional properties

Link copied to clipboard
val sparkEventsFolder: Output<String>? = null

The Spark events folder

Link copied to clipboard
val sparkVersion: Output<String>? = null

The Apache Spark version.

Link copied to clipboard
val tags: Output<Map<String, String>>? = null

Resource tags.

Link copied to clipboard
val workspaceName: Output<String>? = null

The name of the workspace.

Functions

Link copied to clipboard
open override fun toJava(): BigDataPoolArgs