azure.synapse.SparkPool

We recommend using Azure Native.

Azure Classic v5.73.0 published on Monday, Apr 22, 2024 by Pulumi

    Manages a Synapse Spark Pool.

    Example Usage

    import * as pulumi from "@pulumi/pulumi";
    import * as azure from "@pulumi/azure";
    
    const example = new azure.core.ResourceGroup("example", {
        name: "example-resources",
        location: "West Europe",
    });
    const exampleAccount = new azure.storage.Account("example", {
        name: "examplestorageacc",
        resourceGroupName: example.name,
        location: example.location,
        accountTier: "Standard",
        accountReplicationType: "LRS",
        accountKind: "StorageV2",
        isHnsEnabled: true,
    });
    const exampleDataLakeGen2Filesystem = new azure.storage.DataLakeGen2Filesystem("example", {
        name: "example",
        storageAccountId: exampleAccount.id,
    });
    const exampleWorkspace = new azure.synapse.Workspace("example", {
        name: "example",
        resourceGroupName: example.name,
        location: example.location,
        storageDataLakeGen2FilesystemId: exampleDataLakeGen2Filesystem.id,
        sqlAdministratorLogin: "sqladminuser",
        sqlAdministratorLoginPassword: "H@Sh1CoR3!",
        identity: {
            type: "SystemAssigned",
        },
    });
    const exampleSparkPool = new azure.synapse.SparkPool("example", {
        name: "example",
        synapseWorkspaceId: exampleWorkspace.id,
        nodeSizeFamily: "MemoryOptimized",
        nodeSize: "Small",
        cacheSize: 100,
        autoScale: {
            maxNodeCount: 50,
            minNodeCount: 3,
        },
        autoPause: {
            delayInMinutes: 15,
        },
        libraryRequirement: {
            content: `appnope==0.1.0
    beautifulsoup4==4.6.3
    `,
            filename: "requirements.txt",
        },
        sparkConfig: {
            content: "spark.shuffle.spill                true\n",
            filename: "config.txt",
        },
        tags: {
            ENV: "Production",
        },
    });
    
    import pulumi
    import pulumi_azure as azure
    
    example = azure.core.ResourceGroup("example",
        name="example-resources",
        location="West Europe")
    example_account = azure.storage.Account("example",
        name="examplestorageacc",
        resource_group_name=example.name,
        location=example.location,
        account_tier="Standard",
        account_replication_type="LRS",
        account_kind="StorageV2",
        is_hns_enabled=True)
    example_data_lake_gen2_filesystem = azure.storage.DataLakeGen2Filesystem("example",
        name="example",
        storage_account_id=example_account.id)
    example_workspace = azure.synapse.Workspace("example",
        name="example",
        resource_group_name=example.name,
        location=example.location,
        storage_data_lake_gen2_filesystem_id=example_data_lake_gen2_filesystem.id,
        sql_administrator_login="sqladminuser",
        sql_administrator_login_password="H@Sh1CoR3!",
        identity=azure.synapse.WorkspaceIdentityArgs(
            type="SystemAssigned",
        ))
    example_spark_pool = azure.synapse.SparkPool("example",
        name="example",
        synapse_workspace_id=example_workspace.id,
        node_size_family="MemoryOptimized",
        node_size="Small",
        cache_size=100,
        auto_scale=azure.synapse.SparkPoolAutoScaleArgs(
            max_node_count=50,
            min_node_count=3,
        ),
        auto_pause=azure.synapse.SparkPoolAutoPauseArgs(
            delay_in_minutes=15,
        ),
        library_requirement=azure.synapse.SparkPoolLibraryRequirementArgs(
            content="""appnope==0.1.0
    beautifulsoup4==4.6.3
    """,
            filename="requirements.txt",
        ),
        spark_config=azure.synapse.SparkPoolSparkConfigArgs(
            content="spark.shuffle.spill                true\n",
            filename="config.txt",
        ),
        tags={
            "ENV": "Production",
        })
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-azure/sdk/v5/go/azure/core"
    	"github.com/pulumi/pulumi-azure/sdk/v5/go/azure/storage"
    	"github.com/pulumi/pulumi-azure/sdk/v5/go/azure/synapse"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		example, err := core.NewResourceGroup(ctx, "example", &core.ResourceGroupArgs{
    			Name:     pulumi.String("example-resources"),
    			Location: pulumi.String("West Europe"),
    		})
    		if err != nil {
    			return err
    		}
    		exampleAccount, err := storage.NewAccount(ctx, "example", &storage.AccountArgs{
    			Name:                   pulumi.String("examplestorageacc"),
    			ResourceGroupName:      example.Name,
    			Location:               example.Location,
    			AccountTier:            pulumi.String("Standard"),
    			AccountReplicationType: pulumi.String("LRS"),
    			AccountKind:            pulumi.String("StorageV2"),
    			IsHnsEnabled:           pulumi.Bool(true),
    		})
    		if err != nil {
    			return err
    		}
    		exampleDataLakeGen2Filesystem, err := storage.NewDataLakeGen2Filesystem(ctx, "example", &storage.DataLakeGen2FilesystemArgs{
    			Name:             pulumi.String("example"),
    			StorageAccountId: exampleAccount.ID(),
    		})
    		if err != nil {
    			return err
    		}
    		exampleWorkspace, err := synapse.NewWorkspace(ctx, "example", &synapse.WorkspaceArgs{
    			Name:                            pulumi.String("example"),
    			ResourceGroupName:               example.Name,
    			Location:                        example.Location,
    			StorageDataLakeGen2FilesystemId: exampleDataLakeGen2Filesystem.ID(),
    			SqlAdministratorLogin:           pulumi.String("sqladminuser"),
    			SqlAdministratorLoginPassword:   pulumi.String("H@Sh1CoR3!"),
    			Identity: &synapse.WorkspaceIdentityArgs{
    				Type: pulumi.String("SystemAssigned"),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		_, err = synapse.NewSparkPool(ctx, "example", &synapse.SparkPoolArgs{
    			Name:               pulumi.String("example"),
    			SynapseWorkspaceId: exampleWorkspace.ID(),
    			NodeSizeFamily:     pulumi.String("MemoryOptimized"),
    			NodeSize:           pulumi.String("Small"),
    			CacheSize:          pulumi.Int(100),
    			AutoScale: &synapse.SparkPoolAutoScaleArgs{
    				MaxNodeCount: pulumi.Int(50),
    				MinNodeCount: pulumi.Int(3),
    			},
    			AutoPause: &synapse.SparkPoolAutoPauseArgs{
    				DelayInMinutes: pulumi.Int(15),
    			},
    			LibraryRequirement: &synapse.SparkPoolLibraryRequirementArgs{
    				Content:  pulumi.String("appnope==0.1.0\nbeautifulsoup4==4.6.3\n"),
    				Filename: pulumi.String("requirements.txt"),
    			},
    			SparkConfig: &synapse.SparkPoolSparkConfigArgs{
    				Content:  pulumi.String("spark.shuffle.spill                true\n"),
    				Filename: pulumi.String("config.txt"),
    			},
    			Tags: pulumi.StringMap{
    				"ENV": pulumi.String("Production"),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Azure = Pulumi.Azure;
    
    return await Deployment.RunAsync(() => 
    {
        var example = new Azure.Core.ResourceGroup("example", new()
        {
            Name = "example-resources",
            Location = "West Europe",
        });
    
        var exampleAccount = new Azure.Storage.Account("example", new()
        {
            Name = "examplestorageacc",
            ResourceGroupName = example.Name,
            Location = example.Location,
            AccountTier = "Standard",
            AccountReplicationType = "LRS",
            AccountKind = "StorageV2",
            IsHnsEnabled = true,
        });
    
        var exampleDataLakeGen2Filesystem = new Azure.Storage.DataLakeGen2Filesystem("example", new()
        {
            Name = "example",
            StorageAccountId = exampleAccount.Id,
        });
    
        var exampleWorkspace = new Azure.Synapse.Workspace("example", new()
        {
            Name = "example",
            ResourceGroupName = example.Name,
            Location = example.Location,
            StorageDataLakeGen2FilesystemId = exampleDataLakeGen2Filesystem.Id,
            SqlAdministratorLogin = "sqladminuser",
            SqlAdministratorLoginPassword = "H@Sh1CoR3!",
            Identity = new Azure.Synapse.Inputs.WorkspaceIdentityArgs
            {
                Type = "SystemAssigned",
            },
        });
    
        var exampleSparkPool = new Azure.Synapse.SparkPool("example", new()
        {
            Name = "example",
            SynapseWorkspaceId = exampleWorkspace.Id,
            NodeSizeFamily = "MemoryOptimized",
            NodeSize = "Small",
            CacheSize = 100,
            AutoScale = new Azure.Synapse.Inputs.SparkPoolAutoScaleArgs
            {
                MaxNodeCount = 50,
                MinNodeCount = 3,
            },
            AutoPause = new Azure.Synapse.Inputs.SparkPoolAutoPauseArgs
            {
                DelayInMinutes = 15,
            },
            LibraryRequirement = new Azure.Synapse.Inputs.SparkPoolLibraryRequirementArgs
            {
                Content = @"appnope==0.1.0
    beautifulsoup4==4.6.3
    ",
                Filename = "requirements.txt",
            },
            SparkConfig = new Azure.Synapse.Inputs.SparkPoolSparkConfigArgs
            {
                Content = @"spark.shuffle.spill                true
    ",
                Filename = "config.txt",
            },
            Tags = 
            {
                { "ENV", "Production" },
            },
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.azure.core.ResourceGroup;
    import com.pulumi.azure.core.ResourceGroupArgs;
    import com.pulumi.azure.storage.Account;
    import com.pulumi.azure.storage.AccountArgs;
    import com.pulumi.azure.storage.DataLakeGen2Filesystem;
    import com.pulumi.azure.storage.DataLakeGen2FilesystemArgs;
    import com.pulumi.azure.synapse.Workspace;
    import com.pulumi.azure.synapse.WorkspaceArgs;
    import com.pulumi.azure.synapse.inputs.WorkspaceIdentityArgs;
    import com.pulumi.azure.synapse.SparkPool;
    import com.pulumi.azure.synapse.SparkPoolArgs;
    import com.pulumi.azure.synapse.inputs.SparkPoolAutoScaleArgs;
    import com.pulumi.azure.synapse.inputs.SparkPoolAutoPauseArgs;
    import com.pulumi.azure.synapse.inputs.SparkPoolLibraryRequirementArgs;
    import com.pulumi.azure.synapse.inputs.SparkPoolSparkConfigArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var example = new ResourceGroup("example", ResourceGroupArgs.builder()        
                .name("example-resources")
                .location("West Europe")
                .build());
    
            var exampleAccount = new Account("exampleAccount", AccountArgs.builder()        
                .name("examplestorageacc")
                .resourceGroupName(example.name())
                .location(example.location())
                .accountTier("Standard")
                .accountReplicationType("LRS")
                .accountKind("StorageV2")
                .isHnsEnabled("true")
                .build());
    
            var exampleDataLakeGen2Filesystem = new DataLakeGen2Filesystem("exampleDataLakeGen2Filesystem", DataLakeGen2FilesystemArgs.builder()        
                .name("example")
                .storageAccountId(exampleAccount.id())
                .build());
    
            var exampleWorkspace = new Workspace("exampleWorkspace", WorkspaceArgs.builder()        
                .name("example")
                .resourceGroupName(example.name())
                .location(example.location())
                .storageDataLakeGen2FilesystemId(exampleDataLakeGen2Filesystem.id())
                .sqlAdministratorLogin("sqladminuser")
                .sqlAdministratorLoginPassword("H@Sh1CoR3!")
                .identity(WorkspaceIdentityArgs.builder()
                    .type("SystemAssigned")
                    .build())
                .build());
    
            var exampleSparkPool = new SparkPool("exampleSparkPool", SparkPoolArgs.builder()        
                .name("example")
                .synapseWorkspaceId(exampleWorkspace.id())
                .nodeSizeFamily("MemoryOptimized")
                .nodeSize("Small")
                .cacheSize(100)
                .autoScale(SparkPoolAutoScaleArgs.builder()
                    .maxNodeCount(50)
                    .minNodeCount(3)
                    .build())
                .autoPause(SparkPoolAutoPauseArgs.builder()
                    .delayInMinutes(15)
                    .build())
                .libraryRequirement(SparkPoolLibraryRequirementArgs.builder()
                    .content("""
    appnope==0.1.0
    beautifulsoup4==4.6.3
                    """)
                    .filename("requirements.txt")
                    .build())
                .sparkConfig(SparkPoolSparkConfigArgs.builder()
                    .content("""
    spark.shuffle.spill                true
                    """)
                    .filename("config.txt")
                    .build())
                .tags(Map.of("ENV", "Production"))
                .build());
    
        }
    }
    
    resources:
      example:
        type: azure:core:ResourceGroup
        properties:
          name: example-resources
          location: West Europe
      exampleAccount:
        type: azure:storage:Account
        name: example
        properties:
          name: examplestorageacc
          resourceGroupName: ${example.name}
          location: ${example.location}
          accountTier: Standard
          accountReplicationType: LRS
          accountKind: StorageV2
          isHnsEnabled: true
      exampleDataLakeGen2Filesystem:
        type: azure:storage:DataLakeGen2Filesystem
        name: example
        properties:
          name: example
          storageAccountId: ${exampleAccount.id}
      exampleWorkspace:
        type: azure:synapse:Workspace
        name: example
        properties:
          name: example
          resourceGroupName: ${example.name}
          location: ${example.location}
          storageDataLakeGen2FilesystemId: ${exampleDataLakeGen2Filesystem.id}
          sqlAdministratorLogin: sqladminuser
          sqlAdministratorLoginPassword: H@Sh1CoR3!
          identity:
            type: SystemAssigned
      exampleSparkPool:
        type: azure:synapse:SparkPool
        name: example
        properties:
          name: example
          synapseWorkspaceId: ${exampleWorkspace.id}
          nodeSizeFamily: MemoryOptimized
          nodeSize: Small
          cacheSize: 100
          autoScale:
            maxNodeCount: 50
            minNodeCount: 3
          autoPause:
            delayInMinutes: 15
          libraryRequirement:
            content: |
              appnope==0.1.0
              beautifulsoup4==4.6.3          
            filename: requirements.txt
          sparkConfig:
            content: |
              spark.shuffle.spill                true          
            filename: config.txt
          tags:
            ENV: Production
    

    Create SparkPool Resource

    Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.

    Constructor syntax

    new SparkPool(name: string, args: SparkPoolArgs, opts?: CustomResourceOptions);
    @overload
    def SparkPool(resource_name: str,
                  args: SparkPoolArgs,
                  opts: Optional[ResourceOptions] = None)
    
    @overload
    def SparkPool(resource_name: str,
                  opts: Optional[ResourceOptions] = None,
                  node_size: Optional[str] = None,
                  synapse_workspace_id: Optional[str] = None,
                  node_size_family: Optional[str] = None,
                  name: Optional[str] = None,
                  compute_isolation_enabled: Optional[bool] = None,
                  library_requirement: Optional[SparkPoolLibraryRequirementArgs] = None,
                  max_executors: Optional[int] = None,
                  min_executors: Optional[int] = None,
                  auto_pause: Optional[SparkPoolAutoPauseArgs] = None,
                  node_count: Optional[int] = None,
                  dynamic_executor_allocation_enabled: Optional[bool] = None,
                  cache_size: Optional[int] = None,
                  session_level_packages_enabled: Optional[bool] = None,
                  spark_config: Optional[SparkPoolSparkConfigArgs] = None,
                  spark_events_folder: Optional[str] = None,
                  spark_log_folder: Optional[str] = None,
                  spark_version: Optional[str] = None,
                  auto_scale: Optional[SparkPoolAutoScaleArgs] = None,
                  tags: Optional[Mapping[str, str]] = None)
    func NewSparkPool(ctx *Context, name string, args SparkPoolArgs, opts ...ResourceOption) (*SparkPool, error)
    public SparkPool(string name, SparkPoolArgs args, CustomResourceOptions? opts = null)
    public SparkPool(String name, SparkPoolArgs args)
    public SparkPool(String name, SparkPoolArgs args, CustomResourceOptions options)
    
    type: azure:synapse:SparkPool
    properties: # The arguments to resource properties.
    options: # Bag of options to control resource's behavior.
    
    

    Parameters

    name string
    The unique name of the resource.
    args SparkPoolArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    resource_name str
    The unique name of the resource.
    args SparkPoolArgs
    The arguments to resource properties.
    opts ResourceOptions
    Bag of options to control resource's behavior.
    ctx Context
    Context object for the current deployment.
    name string
    The unique name of the resource.
    args SparkPoolArgs
    The arguments to resource properties.
    opts ResourceOption
    Bag of options to control resource's behavior.
    name string
    The unique name of the resource.
    args SparkPoolArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    name String
    The unique name of the resource.
    args SparkPoolArgs
    The arguments to resource properties.
    options CustomResourceOptions
    Bag of options to control resource's behavior.

    Example

    The following reference example uses placeholder values for all input properties.

    var sparkPoolResource = new Azure.Synapse.SparkPool("sparkPoolResource", new()
    {
        NodeSize = "string",
        SynapseWorkspaceId = "string",
        NodeSizeFamily = "string",
        Name = "string",
        ComputeIsolationEnabled = false,
        LibraryRequirement = new Azure.Synapse.Inputs.SparkPoolLibraryRequirementArgs
        {
            Content = "string",
            Filename = "string",
        },
        MaxExecutors = 0,
        MinExecutors = 0,
        AutoPause = new Azure.Synapse.Inputs.SparkPoolAutoPauseArgs
        {
            DelayInMinutes = 0,
        },
        NodeCount = 0,
        DynamicExecutorAllocationEnabled = false,
        CacheSize = 0,
        SessionLevelPackagesEnabled = false,
        SparkConfig = new Azure.Synapse.Inputs.SparkPoolSparkConfigArgs
        {
            Content = "string",
            Filename = "string",
        },
        SparkEventsFolder = "string",
        SparkLogFolder = "string",
        SparkVersion = "string",
        AutoScale = new Azure.Synapse.Inputs.SparkPoolAutoScaleArgs
        {
            MaxNodeCount = 0,
            MinNodeCount = 0,
        },
        Tags = 
        {
            { "string", "string" },
        },
    });
    
    example, err := synapse.NewSparkPool(ctx, "sparkPoolResource", &synapse.SparkPoolArgs{
    	NodeSize:                pulumi.String("string"),
    	SynapseWorkspaceId:      pulumi.String("string"),
    	NodeSizeFamily:          pulumi.String("string"),
    	Name:                    pulumi.String("string"),
    	ComputeIsolationEnabled: pulumi.Bool(false),
    	LibraryRequirement: &synapse.SparkPoolLibraryRequirementArgs{
    		Content:  pulumi.String("string"),
    		Filename: pulumi.String("string"),
    	},
    	MaxExecutors: pulumi.Int(0),
    	MinExecutors: pulumi.Int(0),
    	AutoPause: &synapse.SparkPoolAutoPauseArgs{
    		DelayInMinutes: pulumi.Int(0),
    	},
    	NodeCount:                        pulumi.Int(0),
    	DynamicExecutorAllocationEnabled: pulumi.Bool(false),
    	CacheSize:                        pulumi.Int(0),
    	SessionLevelPackagesEnabled:      pulumi.Bool(false),
    	SparkConfig: &synapse.SparkPoolSparkConfigArgs{
    		Content:  pulumi.String("string"),
    		Filename: pulumi.String("string"),
    	},
    	SparkEventsFolder: pulumi.String("string"),
    	SparkLogFolder:    pulumi.String("string"),
    	SparkVersion:      pulumi.String("string"),
    	AutoScale: &synapse.SparkPoolAutoScaleArgs{
    		MaxNodeCount: pulumi.Int(0),
    		MinNodeCount: pulumi.Int(0),
    	},
    	Tags: pulumi.StringMap{
    		"string": pulumi.String("string"),
    	},
    })
    
    var sparkPoolResource = new SparkPool("sparkPoolResource", SparkPoolArgs.builder()        
        .nodeSize("string")
        .synapseWorkspaceId("string")
        .nodeSizeFamily("string")
        .name("string")
        .computeIsolationEnabled(false)
        .libraryRequirement(SparkPoolLibraryRequirementArgs.builder()
            .content("string")
            .filename("string")
            .build())
        .maxExecutors(0)
        .minExecutors(0)
        .autoPause(SparkPoolAutoPauseArgs.builder()
            .delayInMinutes(0)
            .build())
        .nodeCount(0)
        .dynamicExecutorAllocationEnabled(false)
        .cacheSize(0)
        .sessionLevelPackagesEnabled(false)
        .sparkConfig(SparkPoolSparkConfigArgs.builder()
            .content("string")
            .filename("string")
            .build())
        .sparkEventsFolder("string")
        .sparkLogFolder("string")
        .sparkVersion("string")
        .autoScale(SparkPoolAutoScaleArgs.builder()
            .maxNodeCount(0)
            .minNodeCount(0)
            .build())
        .tags(Map.of("string", "string"))
        .build());
    
    spark_pool_resource = azure.synapse.SparkPool("sparkPoolResource",
        node_size="string",
        synapse_workspace_id="string",
        node_size_family="string",
        name="string",
        compute_isolation_enabled=False,
        library_requirement=azure.synapse.SparkPoolLibraryRequirementArgs(
            content="string",
            filename="string",
        ),
        max_executors=0,
        min_executors=0,
        auto_pause=azure.synapse.SparkPoolAutoPauseArgs(
            delay_in_minutes=0,
        ),
        node_count=0,
        dynamic_executor_allocation_enabled=False,
        cache_size=0,
        session_level_packages_enabled=False,
        spark_config=azure.synapse.SparkPoolSparkConfigArgs(
            content="string",
            filename="string",
        ),
        spark_events_folder="string",
        spark_log_folder="string",
        spark_version="string",
        auto_scale=azure.synapse.SparkPoolAutoScaleArgs(
            max_node_count=0,
            min_node_count=0,
        ),
        tags={
            "string": "string",
        })
    
    const sparkPoolResource = new azure.synapse.SparkPool("sparkPoolResource", {
        nodeSize: "string",
        synapseWorkspaceId: "string",
        nodeSizeFamily: "string",
        name: "string",
        computeIsolationEnabled: false,
        libraryRequirement: {
            content: "string",
            filename: "string",
        },
        maxExecutors: 0,
        minExecutors: 0,
        autoPause: {
            delayInMinutes: 0,
        },
        nodeCount: 0,
        dynamicExecutorAllocationEnabled: false,
        cacheSize: 0,
        sessionLevelPackagesEnabled: false,
        sparkConfig: {
            content: "string",
            filename: "string",
        },
        sparkEventsFolder: "string",
        sparkLogFolder: "string",
        sparkVersion: "string",
        autoScale: {
            maxNodeCount: 0,
            minNodeCount: 0,
        },
        tags: {
            string: "string",
        },
    });
    
    type: azure:synapse:SparkPool
    properties:
        autoPause:
            delayInMinutes: 0
        autoScale:
            maxNodeCount: 0
            minNodeCount: 0
        cacheSize: 0
        computeIsolationEnabled: false
        dynamicExecutorAllocationEnabled: false
        libraryRequirement:
            content: string
            filename: string
        maxExecutors: 0
        minExecutors: 0
        name: string
        nodeCount: 0
        nodeSize: string
        nodeSizeFamily: string
        sessionLevelPackagesEnabled: false
        sparkConfig:
            content: string
            filename: string
        sparkEventsFolder: string
        sparkLogFolder: string
        sparkVersion: string
        synapseWorkspaceId: string
        tags:
            string: string
    

    SparkPool Resource Properties

    To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

    Inputs

    The SparkPool resource accepts the following input properties:

    NodeSize string
    The level of node in the Spark Pool. Possible values are Small, Medium, Large, None, XLarge, XXLarge and XXXLarge.
    NodeSizeFamily string
    The kind of nodes that the Spark Pool provides. Possible values are HardwareAcceleratedFPGA, HardwareAcceleratedGPU, MemoryOptimized, and None.
    SynapseWorkspaceId string
    The ID of the Synapse Workspace where the Synapse Spark Pool should exist. Changing this forces a new Synapse Spark Pool to be created.
    AutoPause SparkPoolAutoPause
    An auto_pause block as defined below.
    AutoScale SparkPoolAutoScale
    An auto_scale block as defined below. Exactly one of node_count or auto_scale must be specified.
    CacheSize int
    The cache size in the Spark Pool.
    ComputeIsolationEnabled bool
    Indicates whether compute isolation is enabled or not. Defaults to false.
    DynamicExecutorAllocationEnabled bool
    Indicates whether Dynamic Executor Allocation is enabled or not. Defaults to false.
    LibraryRequirement SparkPoolLibraryRequirement
    A library_requirement block as defined below.
    MaxExecutors int
    The maximum number of executors allocated only when dynamic_executor_allocation_enabled is set to true.
    MinExecutors int
    The minimum number of executors allocated only when dynamic_executor_allocation_enabled is set to true.
    Name string
    The name which should be used for this Synapse Spark Pool. Changing this forces a new Synapse Spark Pool to be created.
    NodeCount int
    The number of nodes in the Spark Pool. Exactly one of node_count or auto_scale must be specified.
    SessionLevelPackagesEnabled bool
    Indicates whether session level packages are enabled or not. Defaults to false.
    SparkConfig SparkPoolSparkConfig
    A spark_config block as defined below.
    SparkEventsFolder string
    The Spark events folder. Defaults to /events.
    SparkLogFolder string
    The default folder where Spark logs will be written. Defaults to /logs.
    SparkVersion string
    The Apache Spark version. Possible values are 2.4, 3.1, 3.2, 3.3, and 3.4. Defaults to 2.4.
    Tags Dictionary<string, string>
    A mapping of tags which should be assigned to the Synapse Spark Pool.
    NodeSize string
    The level of node in the Spark Pool. Possible values are Small, Medium, Large, None, XLarge, XXLarge and XXXLarge.
    NodeSizeFamily string
    The kind of nodes that the Spark Pool provides. Possible values are HardwareAcceleratedFPGA, HardwareAcceleratedGPU, MemoryOptimized, and None.
    SynapseWorkspaceId string
    The ID of the Synapse Workspace where the Synapse Spark Pool should exist. Changing this forces a new Synapse Spark Pool to be created.
    AutoPause SparkPoolAutoPauseArgs
    An auto_pause block as defined below.
    AutoScale SparkPoolAutoScaleArgs
    An auto_scale block as defined below. Exactly one of node_count or auto_scale must be specified.
    CacheSize int
    The cache size in the Spark Pool.
    ComputeIsolationEnabled bool
    Indicates whether compute isolation is enabled or not. Defaults to false.
    DynamicExecutorAllocationEnabled bool
    Indicates whether Dynamic Executor Allocation is enabled or not. Defaults to false.
    LibraryRequirement SparkPoolLibraryRequirementArgs
    A library_requirement block as defined below.
    MaxExecutors int
    The maximum number of executors allocated only when dynamic_executor_allocation_enabled is set to true.
    MinExecutors int
    The minimum number of executors allocated only when dynamic_executor_allocation_enabled is set to true.
    Name string
    The name which should be used for this Synapse Spark Pool. Changing this forces a new Synapse Spark Pool to be created.
    NodeCount int
    The number of nodes in the Spark Pool. Exactly one of node_count or auto_scale must be specified.
    SessionLevelPackagesEnabled bool
    Indicates whether session level packages are enabled or not. Defaults to false.
    SparkConfig SparkPoolSparkConfigArgs
    A spark_config block as defined below.
    SparkEventsFolder string
    The Spark events folder. Defaults to /events.
    SparkLogFolder string
    The default folder where Spark logs will be written. Defaults to /logs.
    SparkVersion string
    The Apache Spark version. Possible values are 2.4, 3.1, 3.2, 3.3, and 3.4. Defaults to 2.4.
    Tags map[string]string
    A mapping of tags which should be assigned to the Synapse Spark Pool.
    nodeSize String
    The level of node in the Spark Pool. Possible values are Small, Medium, Large, None, XLarge, XXLarge and XXXLarge.
    nodeSizeFamily String
    The kind of nodes that the Spark Pool provides. Possible values are HardwareAcceleratedFPGA, HardwareAcceleratedGPU, MemoryOptimized, and None.
    synapseWorkspaceId String
    The ID of the Synapse Workspace where the Synapse Spark Pool should exist. Changing this forces a new Synapse Spark Pool to be created.
    autoPause SparkPoolAutoPause
    An auto_pause block as defined below.
    autoScale SparkPoolAutoScale
    An auto_scale block as defined below. Exactly one of node_count or auto_scale must be specified.
    cacheSize Integer
    The cache size in the Spark Pool.
    computeIsolationEnabled Boolean
    Indicates whether compute isolation is enabled or not. Defaults to false.
    dynamicExecutorAllocationEnabled Boolean
    Indicates whether Dynamic Executor Allocation is enabled or not. Defaults to false.
    libraryRequirement SparkPoolLibraryRequirement
    A library_requirement block as defined below.
    maxExecutors Integer
    The maximum number of executors allocated only when dynamic_executor_allocation_enabled is set to true.
    minExecutors Integer
    The minimum number of executors allocated only when dynamic_executor_allocation_enabled is set to true.
    name String
    The name which should be used for this Synapse Spark Pool. Changing this forces a new Synapse Spark Pool to be created.
    nodeCount Integer
    The number of nodes in the Spark Pool. Exactly one of node_count or auto_scale must be specified.
    sessionLevelPackagesEnabled Boolean
    Indicates whether session level packages are enabled or not. Defaults to false.
    sparkConfig SparkPoolSparkConfig
    A spark_config block as defined below.
    sparkEventsFolder String
    The Spark events folder. Defaults to /events.
    sparkLogFolder String
    The default folder where Spark logs will be written. Defaults to /logs.
    sparkVersion String
    The Apache Spark version. Possible values are 2.4, 3.1, 3.2, 3.3, and 3.4. Defaults to 2.4.
    tags Map<String,String>
    A mapping of tags which should be assigned to the Synapse Spark Pool.
    nodeSize string
    The level of node in the Spark Pool. Possible values are Small, Medium, Large, None, XLarge, XXLarge and XXXLarge.
    nodeSizeFamily string
    The kind of nodes that the Spark Pool provides. Possible values are HardwareAcceleratedFPGA, HardwareAcceleratedGPU, MemoryOptimized, and None.
    synapseWorkspaceId string
    The ID of the Synapse Workspace where the Synapse Spark Pool should exist. Changing this forces a new Synapse Spark Pool to be created.
    autoPause SparkPoolAutoPause
    An auto_pause block as defined below.
    autoScale SparkPoolAutoScale
    An auto_scale block as defined below. Exactly one of node_count or auto_scale must be specified.
    cacheSize number
    The cache size in the Spark Pool.
    computeIsolationEnabled boolean
    Indicates whether compute isolation is enabled or not. Defaults to false.
    dynamicExecutorAllocationEnabled boolean
    Indicates whether Dynamic Executor Allocation is enabled or not. Defaults to false.
    libraryRequirement SparkPoolLibraryRequirement
    A library_requirement block as defined below.
    maxExecutors number
    The maximum number of executors allocated only when dynamic_executor_allocation_enabled is set to true.
    minExecutors number
    The minimum number of executors allocated only when dynamic_executor_allocation_enabled is set to true.
    name string
    The name which should be used for this Synapse Spark Pool. Changing this forces a new Synapse Spark Pool to be created.
    nodeCount number
    The number of nodes in the Spark Pool. Exactly one of node_count or auto_scale must be specified.
    sessionLevelPackagesEnabled boolean
    Indicates whether session level packages are enabled or not. Defaults to false.
    sparkConfig SparkPoolSparkConfig
    A spark_config block as defined below.
    sparkEventsFolder string
    The Spark events folder. Defaults to /events.
    sparkLogFolder string
    The default folder where Spark logs will be written. Defaults to /logs.
    sparkVersion string
    The Apache Spark version. Possible values are 2.4, 3.1, 3.2, 3.3, and 3.4. Defaults to 2.4.
    tags {[key: string]: string}
    A mapping of tags which should be assigned to the Synapse Spark Pool.
    node_size str
    The level of node in the Spark Pool. Possible values are Small, Medium, Large, None, XLarge, XXLarge and XXXLarge.
    node_size_family str
    The kind of nodes that the Spark Pool provides. Possible values are HardwareAcceleratedFPGA, HardwareAcceleratedGPU, MemoryOptimized, and None.
    synapse_workspace_id str
    The ID of the Synapse Workspace where the Synapse Spark Pool should exist. Changing this forces a new Synapse Spark Pool to be created.
    auto_pause SparkPoolAutoPauseArgs
    An auto_pause block as defined below.
    auto_scale SparkPoolAutoScaleArgs
    An auto_scale block as defined below. Exactly one of node_count or auto_scale must be specified.
    cache_size int
    The cache size in the Spark Pool.
    compute_isolation_enabled bool
    Indicates whether compute isolation is enabled or not. Defaults to false.
    dynamic_executor_allocation_enabled bool
    Indicates whether Dynamic Executor Allocation is enabled or not. Defaults to false.
    library_requirement SparkPoolLibraryRequirementArgs
    A library_requirement block as defined below.
    max_executors int
    The maximum number of executors allocated only when dynamic_executor_allocation_enabled is set to true.
    min_executors int
    The minimum number of executors allocated only when dynamic_executor_allocation_enabled is set to true.
    name str
    The name which should be used for this Synapse Spark Pool. Changing this forces a new Synapse Spark Pool to be created.
    node_count int
    The number of nodes in the Spark Pool. Exactly one of node_count or auto_scale must be specified.
    session_level_packages_enabled bool
    Indicates whether session level packages are enabled or not. Defaults to false.
    spark_config SparkPoolSparkConfigArgs
    A spark_config block as defined below.
    spark_events_folder str
    The Spark events folder. Defaults to /events.
    spark_log_folder str
    The default folder where Spark logs will be written. Defaults to /logs.
    spark_version str
    The Apache Spark version. Possible values are 2.4, 3.1, 3.2, 3.3, and 3.4. Defaults to 2.4.
    tags Mapping[str, str]
    A mapping of tags which should be assigned to the Synapse Spark Pool.
    nodeSize String
    The level of node in the Spark Pool. Possible values are Small, Medium, Large, None, XLarge, XXLarge and XXXLarge.
    nodeSizeFamily String
    The kind of nodes that the Spark Pool provides. Possible values are HardwareAcceleratedFPGA, HardwareAcceleratedGPU, MemoryOptimized, and None.
    synapseWorkspaceId String
    The ID of the Synapse Workspace where the Synapse Spark Pool should exist. Changing this forces a new Synapse Spark Pool to be created.
    autoPause Property Map
    An auto_pause block as defined below.
    autoScale Property Map
    An auto_scale block as defined below. Exactly one of node_count or auto_scale must be specified.
    cacheSize Number
    The cache size in the Spark Pool.
    computeIsolationEnabled Boolean
    Indicates whether compute isolation is enabled or not. Defaults to false.
    dynamicExecutorAllocationEnabled Boolean
    Indicates whether Dynamic Executor Allocation is enabled or not. Defaults to false.
    libraryRequirement Property Map
    A library_requirement block as defined below.
    maxExecutors Number
    The maximum number of executors allocated only when dynamic_executor_allocation_enabled is set to true.
    minExecutors Number
    The minimum number of executors allocated only when dynamic_executor_allocation_enabled is set to true.
    name String
    The name which should be used for this Synapse Spark Pool. Changing this forces a new Synapse Spark Pool to be created.
    nodeCount Number
    The number of nodes in the Spark Pool. Exactly one of node_count or auto_scale must be specified.
    sessionLevelPackagesEnabled Boolean
    Indicates whether session level packages are enabled or not. Defaults to false.
    sparkConfig Property Map
    A spark_config block as defined below.
    sparkEventsFolder String
    The Spark events folder. Defaults to /events.
    sparkLogFolder String
    The default folder where Spark logs will be written. Defaults to /logs.
    sparkVersion String
    The Apache Spark version. Possible values are 2.4, 3.1, 3.2, 3.3, and 3.4. Defaults to 2.4.
    tags Map<String>
    A mapping of tags which should be assigned to the Synapse Spark Pool.

    Outputs

    All input properties are implicitly available as output properties. Additionally, the SparkPool resource produces the following output properties:

    Id string
    The provider-assigned unique ID for this managed resource.
    Id string
    The provider-assigned unique ID for this managed resource.
    id String
    The provider-assigned unique ID for this managed resource.
    id string
    The provider-assigned unique ID for this managed resource.
    id str
    The provider-assigned unique ID for this managed resource.
    id String
    The provider-assigned unique ID for this managed resource.
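
    Because inputs are echoed back as outputs, both the provider-assigned ID and any input can be exported as stack outputs. A minimal TypeScript sketch, assuming the exampleSparkPool resource from the usage example above:

    // Stack outputs derived from the resource declared earlier.
    export const sparkPoolId = exampleSparkPool.id;     // provider-assigned ID
    export const sparkPoolName = exampleSparkPool.name; // input echoed back as an output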

    Look up Existing SparkPool Resource

    Get an existing SparkPool resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

    public static get(name: string, id: Input<ID>, state?: SparkPoolState, opts?: CustomResourceOptions): SparkPool
    @staticmethod
    def get(resource_name: str,
            id: str,
            opts: Optional[ResourceOptions] = None,
            auto_pause: Optional[SparkPoolAutoPauseArgs] = None,
            auto_scale: Optional[SparkPoolAutoScaleArgs] = None,
            cache_size: Optional[int] = None,
            compute_isolation_enabled: Optional[bool] = None,
            dynamic_executor_allocation_enabled: Optional[bool] = None,
            library_requirement: Optional[SparkPoolLibraryRequirementArgs] = None,
            max_executors: Optional[int] = None,
            min_executors: Optional[int] = None,
            name: Optional[str] = None,
            node_count: Optional[int] = None,
            node_size: Optional[str] = None,
            node_size_family: Optional[str] = None,
            session_level_packages_enabled: Optional[bool] = None,
            spark_config: Optional[SparkPoolSparkConfigArgs] = None,
            spark_events_folder: Optional[str] = None,
            spark_log_folder: Optional[str] = None,
            spark_version: Optional[str] = None,
            synapse_workspace_id: Optional[str] = None,
            tags: Optional[Mapping[str, str]] = None) -> SparkPool
    func GetSparkPool(ctx *Context, name string, id IDInput, state *SparkPoolState, opts ...ResourceOption) (*SparkPool, error)
    public static SparkPool Get(string name, Input<string> id, SparkPoolState? state, CustomResourceOptions? opts = null)
    public static SparkPool get(String name, Output<String> id, SparkPoolState state, CustomResourceOptions options)
    Resource lookup is not supported in YAML
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    resource_name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    The following state arguments are supported:
    AutoPause SparkPoolAutoPause
    An auto_pause block as defined below.
    AutoScale SparkPoolAutoScale
    An auto_scale block as defined below. Exactly one of node_count or auto_scale must be specified.
    CacheSize int
    The cache size in the Spark Pool.
    ComputeIsolationEnabled bool
    Indicates whether compute isolation is enabled or not. Defaults to false.
    DynamicExecutorAllocationEnabled bool
    Indicates whether Dynamic Executor Allocation is enabled or not. Defaults to false.
    LibraryRequirement SparkPoolLibraryRequirement
    A library_requirement block as defined below.
    MaxExecutors int
    The maximum number of executors allocated only when dynamic_executor_allocation_enabled is set to true.
    MinExecutors int
    The minimum number of executors allocated only when dynamic_executor_allocation_enabled is set to true.
    Name string
    The name which should be used for this Synapse Spark Pool. Changing this forces a new Synapse Spark Pool to be created.
    NodeCount int
    The number of nodes in the Spark Pool. Exactly one of node_count or auto_scale must be specified.
    NodeSize string
    The level of node in the Spark Pool. Possible values are Small, Medium, Large, None, XLarge, XXLarge and XXXLarge.
    NodeSizeFamily string
    The kind of nodes that the Spark Pool provides. Possible values are HardwareAcceleratedFPGA, HardwareAcceleratedGPU, MemoryOptimized, and None.
    SessionLevelPackagesEnabled bool
    Indicates whether session level packages are enabled or not. Defaults to false.
    SparkConfig SparkPoolSparkConfig
    A spark_config block as defined below.
    SparkEventsFolder string
    The Spark events folder. Defaults to /events.
    SparkLogFolder string
    The default folder where Spark logs will be written. Defaults to /logs.
    SparkVersion string
    The Apache Spark version. Possible values are 2.4, 3.1, 3.2, 3.3, and 3.4. Defaults to 2.4.
    SynapseWorkspaceId string
    The ID of the Synapse Workspace where the Synapse Spark Pool should exist. Changing this forces a new Synapse Spark Pool to be created.
    Tags Dictionary<string, string>
    A mapping of tags which should be assigned to the Synapse Spark Pool.
    AutoPause SparkPoolAutoPauseArgs
    An auto_pause block as defined below.
    AutoScale SparkPoolAutoScaleArgs
    An auto_scale block as defined below. Exactly one of node_count or auto_scale must be specified.
    CacheSize int
    The cache size in the Spark Pool.
    ComputeIsolationEnabled bool
    Indicates whether compute isolation is enabled or not. Defaults to false.
    DynamicExecutorAllocationEnabled bool
    Indicates whether Dynamic Executor Allocation is enabled or not. Defaults to false.
    LibraryRequirement SparkPoolLibraryRequirementArgs
    A library_requirement block as defined below.
    MaxExecutors int
    The maximum number of executors allocated only when dynamic_executor_allocation_enabled is set to true.
    MinExecutors int
    The minimum number of executors allocated only when dynamic_executor_allocation_enabled is set to true.
    Name string
    The name which should be used for this Synapse Spark Pool. Changing this forces a new Synapse Spark Pool to be created.
    NodeCount int
    The number of nodes in the Spark Pool. Exactly one of node_count or auto_scale must be specified.
    NodeSize string
    The level of node in the Spark Pool. Possible values are Small, Medium, Large, None, XLarge, XXLarge and XXXLarge.
    NodeSizeFamily string
    The kind of nodes that the Spark Pool provides. Possible values are HardwareAcceleratedFPGA, HardwareAcceleratedGPU, MemoryOptimized, and None.
    SessionLevelPackagesEnabled bool
    Indicates whether session level packages are enabled or not. Defaults to false.
    SparkConfig SparkPoolSparkConfigArgs
    A spark_config block as defined below.
    SparkEventsFolder string
    The Spark events folder. Defaults to /events.
    SparkLogFolder string
    The default folder where Spark logs will be written. Defaults to /logs.
    SparkVersion string
    The Apache Spark version. Possible values are 2.4, 3.1, 3.2, 3.3, and 3.4. Defaults to 2.4.
    SynapseWorkspaceId string
    The ID of the Synapse Workspace where the Synapse Spark Pool should exist. Changing this forces a new Synapse Spark Pool to be created.
    Tags map[string]string
    A mapping of tags which should be assigned to the Synapse Spark Pool.
    autoPause SparkPoolAutoPause
    An auto_pause block as defined below.
    autoScale SparkPoolAutoScale
    An auto_scale block as defined below. Exactly one of node_count or auto_scale must be specified.
    cacheSize Integer
    The cache size in the Spark Pool.
    computeIsolationEnabled Boolean
    Indicates whether compute isolation is enabled or not. Defaults to false.
    dynamicExecutorAllocationEnabled Boolean
    Indicates whether Dynamic Executor Allocation is enabled or not. Defaults to false.
    libraryRequirement SparkPoolLibraryRequirement
    A library_requirement block as defined below.
    maxExecutors Integer
    The maximum number of executors allocated only when dynamic_executor_allocation_enabled is set to true.
    minExecutors Integer
    The minimum number of executors allocated only when dynamic_executor_allocation_enabled is set to true.
    name String
    The name which should be used for this Synapse Spark Pool. Changing this forces a new Synapse Spark Pool to be created.
    nodeCount Integer
    The number of nodes in the Spark Pool. Exactly one of node_count or auto_scale must be specified.
    nodeSize String
    The level of node in the Spark Pool. Possible values are Small, Medium, Large, None, XLarge, XXLarge and XXXLarge.
    nodeSizeFamily String
    The kind of nodes that the Spark Pool provides. Possible values are HardwareAcceleratedFPGA, HardwareAcceleratedGPU, MemoryOptimized, and None.
    sessionLevelPackagesEnabled Boolean
    Indicates whether session level packages are enabled or not. Defaults to false.
    sparkConfig SparkPoolSparkConfig
    A spark_config block as defined below.
    sparkEventsFolder String
    The Spark events folder. Defaults to /events.
    sparkLogFolder String
    The default folder where Spark logs will be written. Defaults to /logs.
    sparkVersion String
    The Apache Spark version. Possible values are 2.4, 3.1, 3.2, 3.3, and 3.4. Defaults to 2.4.
    synapseWorkspaceId String
    The ID of the Synapse Workspace where the Synapse Spark Pool should exist. Changing this forces a new Synapse Spark Pool to be created.
    tags Map<String,String>
    A mapping of tags which should be assigned to the Synapse Spark Pool.
    autoPause SparkPoolAutoPause
    An auto_pause block as defined below.
    autoScale SparkPoolAutoScale
    An auto_scale block as defined below. Exactly one of node_count or auto_scale must be specified.
    cacheSize number
    The cache size in the Spark Pool.
    computeIsolationEnabled boolean
    Indicates whether compute isolation is enabled or not. Defaults to false.
    dynamicExecutorAllocationEnabled boolean
    Indicates whether Dynamic Executor Allocation is enabled or not. Defaults to false.
    libraryRequirement SparkPoolLibraryRequirement
    A library_requirement block as defined below.
    maxExecutors number
    The maximum number of executors allocated only when dynamic_executor_allocation_enabled is set to true.
    minExecutors number
    The minimum number of executors allocated only when dynamic_executor_allocation_enabled is set to true.
    name string
    The name which should be used for this Synapse Spark Pool. Changing this forces a new Synapse Spark Pool to be created.
    nodeCount number
    The number of nodes in the Spark Pool. Exactly one of node_count or auto_scale must be specified.
    nodeSize string
    The level of node in the Spark Pool. Possible values are Small, Medium, Large, None, XLarge, XXLarge and XXXLarge.
    nodeSizeFamily string
    The kind of nodes that the Spark Pool provides. Possible values are HardwareAcceleratedFPGA, HardwareAcceleratedGPU, MemoryOptimized, and None.
    sessionLevelPackagesEnabled boolean
    Indicates whether session level packages are enabled or not. Defaults to false.
    sparkConfig SparkPoolSparkConfig
    A spark_config block as defined below.
    sparkEventsFolder string
    The Spark events folder. Defaults to /events.
    sparkLogFolder string
    The default folder where Spark logs will be written. Defaults to /logs.
    sparkVersion string
    The Apache Spark version. Possible values are 2.4, 3.1, 3.2, 3.3, and 3.4. Defaults to 2.4.
    synapseWorkspaceId string
    The ID of the Synapse Workspace where the Synapse Spark Pool should exist. Changing this forces a new Synapse Spark Pool to be created.
    tags {[key: string]: string}
    A mapping of tags which should be assigned to the Synapse Spark Pool.
    auto_pause SparkPoolAutoPauseArgs
    An auto_pause block as defined below.
    auto_scale SparkPoolAutoScaleArgs
    An auto_scale block as defined below. Exactly one of node_count or auto_scale must be specified.
    cache_size int
    The cache size in the Spark Pool.
    compute_isolation_enabled bool
    Indicates whether compute isolation is enabled or not. Defaults to false.
    dynamic_executor_allocation_enabled bool
    Indicates whether Dynamic Executor Allocation is enabled or not. Defaults to false.
    library_requirement SparkPoolLibraryRequirementArgs
    A library_requirement block as defined below.
    max_executors int
    The maximum number of executors allocated only when dynamic_executor_allocation_enabled is set to true.
    min_executors int
    The minimum number of executors allocated only when dynamic_executor_allocation_enabled is set to true.
    name str
    The name which should be used for this Synapse Spark Pool. Changing this forces a new Synapse Spark Pool to be created.
    node_count int
    The number of nodes in the Spark Pool. Exactly one of node_count or auto_scale must be specified.
    node_size str
    The level of node in the Spark Pool. Possible values are Small, Medium, Large, None, XLarge, XXLarge and XXXLarge.
    node_size_family str
    The kind of nodes that the Spark Pool provides. Possible values are HardwareAcceleratedFPGA, HardwareAcceleratedGPU, MemoryOptimized, and None.
    session_level_packages_enabled bool
    Indicates whether session level packages are enabled or not. Defaults to false.
    spark_config SparkPoolSparkConfigArgs
    A spark_config block as defined below.
    spark_events_folder str
    The Spark events folder. Defaults to /events.
    spark_log_folder str
    The default folder where Spark logs will be written. Defaults to /logs.
    spark_version str
    The Apache Spark version. Possible values are 2.4, 3.1, 3.2, 3.3, and 3.4. Defaults to 2.4.
    synapse_workspace_id str
    The ID of the Synapse Workspace where the Synapse Spark Pool should exist. Changing this forces a new Synapse Spark Pool to be created.
    tags Mapping[str, str]
    A mapping of tags which should be assigned to the Synapse Spark Pool.
    autoPause Property Map
    An auto_pause block as defined below.
    autoScale Property Map
    An auto_scale block as defined below. Exactly one of node_count or auto_scale must be specified.
    cacheSize Number
    The cache size in the Spark Pool.
    computeIsolationEnabled Boolean
    Indicates whether compute isolation is enabled or not. Defaults to false.
    dynamicExecutorAllocationEnabled Boolean
    Indicates whether Dynamic Executor Allocation is enabled or not. Defaults to false.
    libraryRequirement Property Map
    A library_requirement block as defined below.
    maxExecutors Number
    The maximum number of executors allocated only when dynamic_executor_allocation_enabled is set to true.
    minExecutors Number
    The minimum number of executors allocated only when dynamic_executor_allocation_enabled is set to true.
    name String
    The name which should be used for this Synapse Spark Pool. Changing this forces a new Synapse Spark Pool to be created.
    nodeCount Number
    The number of nodes in the Spark Pool. Exactly one of node_count or auto_scale must be specified.
    nodeSize String
    The level of node in the Spark Pool. Possible values are Small, Medium, Large, None, XLarge, XXLarge and XXXLarge.
    nodeSizeFamily String
    The kind of nodes that the Spark Pool provides. Possible values are HardwareAcceleratedFPGA, HardwareAcceleratedGPU, MemoryOptimized, and None.
    sessionLevelPackagesEnabled Boolean
    Indicates whether session level packages are enabled or not. Defaults to false.
    sparkConfig Property Map
    A spark_config block as defined below.
    sparkEventsFolder String
    The Spark events folder. Defaults to /events.
    sparkLogFolder String
    The default folder where Spark logs will be written. Defaults to /logs.
    sparkVersion String
    The Apache Spark version. Possible values are 2.4, 3.1, 3.2, 3.3, and 3.4. Defaults to 2.4.
    synapseWorkspaceId String
    The ID of the Synapse Workspace where the Synapse Spark Pool should exist. Changing this forces a new Synapse Spark Pool to be created.
    tags Map<String>
    A mapping of tags which should be assigned to the Synapse Spark Pool.
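
    A minimal TypeScript sketch of looking up an existing pool with get(); the resource ID below is a placeholder:

    import * as azure from "@pulumi/azure";
    
    // Placeholder ID of an existing Spark Pool.
    const poolId = "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.Synapse/workspaces/workspace1/bigDataPools/sparkPool1";
    
    // get() reads the existing resource's state; it does not create or manage it.
    const existing = azure.synapse.SparkPool.get("existing", poolId);
    
    export const existingNodeSize = existing.nodeSize;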

    Supporting Types

    SparkPoolAutoPause, SparkPoolAutoPauseArgs

    DelayInMinutes int
    Number of minutes of idle time before the Spark Pool is automatically paused. Must be between 5 and 10080.
    DelayInMinutes int
    Number of minutes of idle time before the Spark Pool is automatically paused. Must be between 5 and 10080.
    delayInMinutes Integer
    Number of minutes of idle time before the Spark Pool is automatically paused. Must be between 5 and 10080.
    delayInMinutes number
    Number of minutes of idle time before the Spark Pool is automatically paused. Must be between 5 and 10080.
    delay_in_minutes int
    Number of minutes of idle time before the Spark Pool is automatically paused. Must be between 5 and 10080.
    delayInMinutes Number
    Number of minutes of idle time before the Spark Pool is automatically paused. Must be between 5 and 10080.

    SparkPoolAutoScale, SparkPoolAutoScaleArgs

    MaxNodeCount int
    The maximum number of nodes the Spark Pool can support. Must be between 3 and 200.
    MinNodeCount int
    The minimum number of nodes the Spark Pool can support. Must be between 3 and 200.
    MaxNodeCount int
    The maximum number of nodes the Spark Pool can support. Must be between 3 and 200.
    MinNodeCount int
    The minimum number of nodes the Spark Pool can support. Must be between 3 and 200.
    maxNodeCount Integer
    The maximum number of nodes the Spark Pool can support. Must be between 3 and 200.
    minNodeCount Integer
    The minimum number of nodes the Spark Pool can support. Must be between 3 and 200.
    maxNodeCount number
    The maximum number of nodes the Spark Pool can support. Must be between 3 and 200.
    minNodeCount number
    The minimum number of nodes the Spark Pool can support. Must be between 3 and 200.
    max_node_count int
    The maximum number of nodes the Spark Pool can support. Must be between 3 and 200.
    min_node_count int
    The minimum number of nodes the Spark Pool can support. Must be between 3 and 200.
    maxNodeCount Number
    The maximum number of nodes the Spark Pool can support. Must be between 3 and 200.
    minNodeCount Number
    The minimum number of nodes the Spark Pool can support. Must be between 3 and 200.

    SparkPoolLibraryRequirement, SparkPoolLibraryRequirementArgs

    Content string
    The content of library requirements.
    Filename string
    The name of the library requirements file.
    Content string
    The content of library requirements.
    Filename string
    The name of the library requirements file.
    content String
    The content of library requirements.
    filename String
    The name of the library requirements file.
    content string
    The content of library requirements.
    filename string
    The name of the library requirements file.
    content str
    The content of library requirements.
    filename str
    The name of the library requirements file.
    content String
    The content of library requirements.
    filename String
    The name of the library requirements file.
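
    In practice the requirements content is often read from a file rather than inlined. A minimal TypeScript sketch, assuming a requirements.txt file alongside the program; the same pattern applies to the spark_config block below:

    import * as fs from "fs";
    
    // Load a local pip requirements file into the library_requirement block.
    const libraryRequirement = {
        content: fs.readFileSync("requirements.txt", "utf8"),
        filename: "requirements.txt",
    };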

    SparkPoolSparkConfig, SparkPoolSparkConfigArgs

    Content string
    The contents of a spark configuration.
    Filename string
    The name of the file where the spark configuration content will be stored.
    Content string
    The contents of a spark configuration.
    Filename string
    The name of the file where the spark configuration content will be stored.
    content String
    The contents of a spark configuration.
    filename String
    The name of the file where the spark configuration content will be stored.
    content string
    The contents of a spark configuration.
    filename string
    The name of the file where the spark configuration content will be stored.
    content str
    The contents of a spark configuration.
    filename str
    The name of the file where the spark configuration content will be stored.
    content String
    The contents of a spark configuration.
    filename String
    The name of the file where the spark configuration content will be stored.

    Import

    A Synapse Spark Pool can be imported using the resource id, e.g.

    $ pulumi import azure:synapse/sparkPool:SparkPool example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.Synapse/workspaces/workspace1/bigDataPools/sparkPool1
    

    To learn more about importing existing cloud resources, see Importing resources.
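
    Alternatively, an existing pool can be adopted from code with the import resource option. This is a sketch only: the declared property values are hypothetical and must match the live resource's actual state.

    import * as azure from "@pulumi/azure";
    
    const adopted = new azure.synapse.SparkPool("example", {
        name: "sparkPool1",
        synapseWorkspaceId: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.Synapse/workspaces/workspace1",
        nodeSizeFamily: "MemoryOptimized",
        nodeSize: "Small",
        nodeCount: 3, // hypothetical; must match the live pool
    }, {
        // Adopt the existing resource instead of creating a new one.
        import: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.Synapse/workspaces/workspace1/bigDataPools/sparkPool1",
    });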

    Package Details

    Repository
    Azure Classic pulumi/pulumi-azure
    License
    Apache-2.0
    Notes
    This Pulumi package is based on the azurerm Terraform Provider.