Big Data Pool
A Big Data pool. API Version: 2021-03-01.
Example Usage
Create or update a Big Data pool
C#
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using AzureNative = Pulumi.AzureNative;

return await Deployment.RunAsync(() =>
{
    var bigDataPool = new AzureNative.Synapse.BigDataPool("bigDataPool", new()
    {
        AutoPause = new AzureNative.Synapse.Inputs.AutoPausePropertiesArgs
        {
            DelayInMinutes = 15,
            Enabled = true,
        },
        AutoScale = new AzureNative.Synapse.Inputs.AutoScalePropertiesArgs
        {
            Enabled = true,
            MaxNodeCount = 50,
            MinNodeCount = 3,
        },
        BigDataPoolName = "ExamplePool",
        DefaultSparkLogFolder = "/logs",
        LibraryRequirements = new AzureNative.Synapse.Inputs.LibraryRequirementsArgs
        {
            Content = "",
            Filename = "requirements.txt",
        },
        Location = "West US 2",
        NodeCount = 4,
        NodeSize = "Medium",
        NodeSizeFamily = "MemoryOptimized",
        ResourceGroupName = "ExampleResourceGroup",
        SparkEventsFolder = "/events",
        SparkVersion = "3.3",
        Tags =
        {
            { "key", "value" },
        },
        WorkspaceName = "ExampleWorkspace",
    });
});
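As a usage note, the pool's resolved outputs can be exported as stack outputs and read back with pulumi stack output. A minimal C# sketch, assuming the abbreviated configuration below stands in for the full example above; the output names poolId and provisioningState are illustrative, while Id and ProvisioningState are outputs the resource exposes:

using System.Collections.Generic;
using Pulumi;
using AzureNative = Pulumi.AzureNative;

return await Deployment.RunAsync(() =>
{
    // A pared-down pool; in practice use the full configuration shown above.
    var bigDataPool = new AzureNative.Synapse.BigDataPool("bigDataPool", new()
    {
        ResourceGroupName = "ExampleResourceGroup",
        WorkspaceName = "ExampleWorkspace",
        NodeSize = "Medium",
        NodeSizeFamily = "MemoryOptimized",
        NodeCount = 4,
        SparkVersion = "3.3",
    });

    // Stack outputs become readable via `pulumi stack output` once the update completes.
    return new Dictionary<string, object?>
    {
        ["poolId"] = bigDataPool.Id,
        ["provisioningState"] = bigDataPool.ProvisioningState,
    };
});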
Go
package main

import (
	synapse "github.com/pulumi/pulumi-azure-native-sdk/synapse"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := synapse.NewBigDataPool(ctx, "bigDataPool", &synapse.BigDataPoolArgs{
			AutoPause: &synapse.AutoPausePropertiesArgs{
				DelayInMinutes: pulumi.Int(15),
				Enabled:        pulumi.Bool(true),
			},
			AutoScale: &synapse.AutoScalePropertiesArgs{
				Enabled:      pulumi.Bool(true),
				MaxNodeCount: pulumi.Int(50),
				MinNodeCount: pulumi.Int(3),
			},
			BigDataPoolName:       pulumi.String("ExamplePool"),
			DefaultSparkLogFolder: pulumi.String("/logs"),
			LibraryRequirements: &synapse.LibraryRequirementsArgs{
				Content:  pulumi.String(""),
				Filename: pulumi.String("requirements.txt"),
			},
			Location:          pulumi.String("West US 2"),
			NodeCount:         pulumi.Int(4),
			NodeSize:          pulumi.String("Medium"),
			NodeSizeFamily:    pulumi.String("MemoryOptimized"),
			ResourceGroupName: pulumi.String("ExampleResourceGroup"),
			SparkEventsFolder: pulumi.String("/events"),
			SparkVersion:      pulumi.String("3.3"),
			Tags: pulumi.StringMap{
				"key": pulumi.String("value"),
			},
			WorkspaceName: pulumi.String("ExampleWorkspace"),
		})
		if err != nil {
			return err
		}
		return nil
	})
}
Java
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.azurenative.synapse.BigDataPool;
import com.pulumi.azurenative.synapse.BigDataPoolArgs;
import com.pulumi.azurenative.synapse.inputs.AutoPausePropertiesArgs;
import com.pulumi.azurenative.synapse.inputs.AutoScalePropertiesArgs;
import com.pulumi.azurenative.synapse.inputs.LibraryRequirementsArgs;
import java.util.Map;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var bigDataPool = new BigDataPool("bigDataPool", BigDataPoolArgs.builder()
            .autoPause(AutoPausePropertiesArgs.builder()
                .delayInMinutes(15)
                .enabled(true)
                .build())
            .autoScale(AutoScalePropertiesArgs.builder()
                .enabled(true)
                .maxNodeCount(50)
                .minNodeCount(3)
                .build())
            .bigDataPoolName("ExamplePool")
            .defaultSparkLogFolder("/logs")
            .libraryRequirements(LibraryRequirementsArgs.builder()
                .content("")
                .filename("requirements.txt")
                .build())
            .location("West US 2")
            .nodeCount(4)
            .nodeSize("Medium")
            .nodeSizeFamily("MemoryOptimized")
            .resourceGroupName("ExampleResourceGroup")
            .sparkEventsFolder("/events")
            .sparkVersion("3.3")
            .tags(Map.of("key", "value"))
            .workspaceName("ExampleWorkspace")
            .build());
    }
}
Import
An existing resource can be imported using its type token, name, and identifier, e.g.
$ pulumi import azure-native:synapse:BigDataPool ExamplePool /subscriptions/01234567-89ab-4def-0123-456789abcdef/resourceGroups/ExampleResourceGroup/providers/Microsoft.Synapse/workspaces/ExampleWorkspace/bigDataPools/ExamplePool
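After a successful import, the program must also contain a resource definition whose properties match the live pool, or the next pulumi up will try to change it. A minimal C# sketch, assuming the names from the import command above; pulumi import typically generates equivalent code for you and marks the resource as protected by default:

using Pulumi;
using AzureNative = Pulumi.AzureNative;

return await Deployment.RunAsync(() =>
{
    var imported = new AzureNative.Synapse.BigDataPool("bigDataPool", new()
    {
        BigDataPoolName = "ExamplePool",
        ResourceGroupName = "ExampleResourceGroup",
        WorkspaceName = "ExampleWorkspace",
        // The remaining properties must mirror the live resource's actual state.
    }, new CustomResourceOptions
    {
        // Protect guards the adopted resource against accidental deletion.
        Protect = true,
    });
});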
Properties
autoPause: Auto-pausing properties.
autoScale: Auto-scaling properties.
creationDate: The time when the Big Data pool was created.
customLibraries: List of custom libraries/packages associated with the Spark pool.
defaultSparkLogFolder: The default folder where Spark logs will be written.
dynamicExecutorAllocation: Dynamic executor allocation properties (see the sketch after this list).
isComputeIsolationEnabled: Whether compute isolation is required or not.
lastSucceededTimestamp: The time when the Big Data pool was last updated successfully.
libraryRequirements: Library version requirements.
nodeSizeFamily: The kind of nodes that the Big Data pool provides.
provisioningState: The state of the Big Data pool.
sessionLevelPackagesEnabled: Whether session-level packages are enabled.
sparkConfigProperties: Spark configuration file to specify additional properties.
sparkEventsFolder: The Spark events folder.
sparkVersion: The Apache Spark version.
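The sketch referenced from the dynamicExecutorAllocation entry above: a minimal C# example of enabling dynamic executor allocation, compute isolation, and session-level packages on a pool. This assumes the 2021-03-01 shape of the allocation input, which appears to carry only an enabled flag (minimum/maximum executor counts show up in later API versions):

using Pulumi;
using AzureNative = Pulumi.AzureNative;

return await Deployment.RunAsync(() =>
{
    var pool = new AzureNative.Synapse.BigDataPool("examplePool", new()
    {
        ResourceGroupName = "ExampleResourceGroup",
        WorkspaceName = "ExampleWorkspace",
        NodeSize = "Medium",
        NodeSizeFamily = "MemoryOptimized",
        NodeCount = 4,
        SparkVersion = "3.3",
        // Let Spark scale executors within a session instead of pinning a fixed count.
        DynamicExecutorAllocation = new AzureNative.Synapse.Inputs.DynamicExecutorAllocationArgs
        {
            Enabled = true,
        },
        // Run the pool on isolated compute (subject to node-size and regional support).
        IsComputeIsolationEnabled = true,
        // Allow sessions to bring their own packages on top of the pool defaults.
        SessionLevelPackagesEnabled = true,
    });
});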