The azure-native:machinelearningservices:Compute resource, part of the Pulumi Azure Native provider, defines compute resources within an Azure Machine Learning workspace: development instances, training clusters, and attached infrastructure. This guide focuses on four capabilities: ComputeInstance for interactive development, AmlCompute clusters with autoscaling, cost optimization through scheduling, and custom Docker services.
Compute resources belong to an Azure Machine Learning workspace and may reference virtual network subnets, custom VM images, or container registries. The examples are intentionally small. Combine them with your own workspace, networking, and identity configuration.
Create a compute instance with minimal configuration
Most ML workflows start with a single-user development environment for data exploration and model prototyping. A ComputeInstance provides a managed Jupyter notebook server and terminal access.
import * as pulumi from "@pulumi/pulumi";
import * as azure_native from "@pulumi/azure-native";

// Minimal ComputeInstance: a single-user development VM created inside the
// existing Azure ML workspace "workspaces123" in resource group "testrg123".
const compute = new azure_native.machinelearningservices.Compute("compute", {
    computeName: "compute123",
    location: "eastus",
    properties: {
        // Discriminator: selects the ComputeInstance variant of the Compute resource.
        computeType: "ComputeInstance",
        properties: {
            vmSize: "STANDARD_NC6", // VM SKU; every other setting falls back to service defaults
        },
    },
    resourceGroupName: "testrg123",
    workspaceName: "workspaces123",
});
import pulumi
import pulumi_azure_native as azure_native

# Minimal ComputeInstance: a single-user development VM created inside the
# existing Azure ML workspace "workspaces123" in resource group "testrg123".
compute = azure_native.machinelearningservices.Compute("compute",
    compute_name="compute123",
    location="eastus",
    properties={
        # Discriminator: selects the ComputeInstance variant of the Compute resource.
        "compute_type": "ComputeInstance",
        "properties": {
            "vm_size": "STANDARD_NC6",  # VM SKU; every other setting falls back to service defaults
        },
    },
    resource_group_name="testrg123",
    workspace_name="workspaces123")
package main

import (
	machinelearningservices "github.com/pulumi/pulumi-azure-native-sdk/machinelearningservices/v3"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Minimal ComputeInstance: a single-user development VM created inside the
		// existing Azure ML workspace "workspaces123" in resource group "testrg123".
		_, err := machinelearningservices.NewCompute(ctx, "compute", &machinelearningservices.ComputeArgs{
			ComputeName: pulumi.String("compute123"),
			Location:    pulumi.String("eastus"),
			// ComputeInstanceArgs selects the ComputeInstance variant of the Compute resource.
			Properties: &machinelearningservices.ComputeInstanceArgs{
				ComputeType: pulumi.String("ComputeInstance"),
				Properties: &machinelearningservices.ComputeInstancePropertiesArgs{
					VmSize: pulumi.String("STANDARD_NC6"), // VM SKU; other settings use service defaults
				},
			},
			ResourceGroupName: pulumi.String("testrg123"),
			WorkspaceName:     pulumi.String("workspaces123"),
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using AzureNative = Pulumi.AzureNative;

return await Deployment.RunAsync(() =>
{
    // Minimal ComputeInstance: a single-user development VM created inside the
    // existing Azure ML workspace "workspaces123" in resource group "testrg123".
    var compute = new AzureNative.MachineLearningServices.Compute("compute", new()
    {
        ComputeName = "compute123",
        Location = "eastus",
        // ComputeInstanceArgs selects the ComputeInstance variant of the Compute resource.
        Properties = new AzureNative.MachineLearningServices.Inputs.ComputeInstanceArgs
        {
            ComputeType = "ComputeInstance",
            Properties = new AzureNative.MachineLearningServices.Inputs.ComputeInstancePropertiesArgs
            {
                VmSize = "STANDARD_NC6", // VM SKU; other settings use service defaults
            },
        },
        ResourceGroupName = "testrg123",
        WorkspaceName = "workspaces123",
    });
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.azurenative.machinelearningservices.Compute;
import com.pulumi.azurenative.machinelearningservices.ComputeArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var compute = new Compute("compute", ComputeArgs.builder()
.computeName("compute123")
.location("eastus")
.properties(ComputeInstanceArgs.builder()
.computeType("ComputeInstance")
.properties(ComputeInstancePropertiesArgs.builder()
.vmSize("STANDARD_NC6")
.build())
.build())
.resourceGroupName("testrg123")
.workspaceName("workspaces123")
.build());
}
}
resources:
  # Minimal ComputeInstance: a single-user development VM created inside the
  # existing Azure ML workspace "workspaces123" in resource group "testrg123".
  compute:
    type: azure-native:machinelearningservices:Compute
    properties:
      computeName: compute123
      location: eastus
      properties:
        # Discriminator: selects the ComputeInstance variant of the Compute resource.
        computeType: ComputeInstance
        properties:
          vmSize: STANDARD_NC6 # VM SKU; other settings use service defaults
      resourceGroupName: testrg123
      workspaceName: workspaces123
The computeType property specifies “ComputeInstance” to create a personal development environment. The vmSize property determines the VM SKU. Without additional configuration, the instance uses default settings: no SSH access, no custom services, and no automatic shutdown schedules.
Configure autoscaling clusters for training workloads
Training jobs often require clusters that scale from zero to multiple nodes based on demand, reducing costs when idle.
import * as pulumi from "@pulumi/pulumi";
import * as azure_native from "@pulumi/azure-native";

// AmlCompute training cluster that autoscales between 0 and 1 nodes and
// boots from a custom Azure Compute Gallery image.
const compute = new azure_native.machinelearningservices.Compute("compute", {
    computeName: "compute123",
    location: "eastus",
    properties: {
        // Discriminator: selects the AmlCompute (cluster) variant of the Compute resource.
        computeType: "AmlCompute",
        properties: {
            enableNodePublicIp: true,
            isolatedNetwork: false,
            osType: azure_native.machinelearningservices.OsType.Windows,
            remoteLoginPortPublicAccess: azure_native.machinelearningservices.RemoteLoginPortPublicAccess.NotSpecified,
            // Scale to zero when idle; deallocate nodes after 5 minutes (ISO-8601 duration).
            scaleSettings: {
                maxNodeCount: 1,
                minNodeCount: 0,
                nodeIdleTimeBeforeScaleDown: "PT5M",
            },
            // Custom VM image from an Azure Compute Gallery; omit to use default Azure ML images.
            virtualMachineImage: {
                id: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myResourceGroup/providers/Microsoft.Compute/galleries/myImageGallery/images/myImageDefinition/versions/0.0.1",
            },
            vmPriority: azure_native.machinelearningservices.VmPriority.Dedicated,
            vmSize: "STANDARD_NC6",
        },
    },
    resourceGroupName: "testrg123",
    workspaceName: "workspaces123",
});
import pulumi
import pulumi_azure_native as azure_native

# AmlCompute training cluster that autoscales between 0 and 1 nodes and
# boots from a custom Azure Compute Gallery image.
compute = azure_native.machinelearningservices.Compute("compute",
    compute_name="compute123",
    location="eastus",
    properties={
        # Discriminator: selects the AmlCompute (cluster) variant of the Compute resource.
        "compute_type": "AmlCompute",
        "properties": {
            "enable_node_public_ip": True,
            "isolated_network": False,
            "os_type": azure_native.machinelearningservices.OsType.WINDOWS,
            "remote_login_port_public_access": azure_native.machinelearningservices.RemoteLoginPortPublicAccess.NOT_SPECIFIED,
            # Scale to zero when idle; deallocate nodes after 5 minutes (ISO-8601 duration).
            "scale_settings": {
                "max_node_count": 1,
                "min_node_count": 0,
                "node_idle_time_before_scale_down": "PT5M",
            },
            # Custom VM image from an Azure Compute Gallery; omit to use default Azure ML images.
            "virtual_machine_image": {
                "id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myResourceGroup/providers/Microsoft.Compute/galleries/myImageGallery/images/myImageDefinition/versions/0.0.1",
            },
            "vm_priority": azure_native.machinelearningservices.VmPriority.DEDICATED,
            "vm_size": "STANDARD_NC6",
        },
    },
    resource_group_name="testrg123",
    workspace_name="workspaces123")
package main

import (
	machinelearningservices "github.com/pulumi/pulumi-azure-native-sdk/machinelearningservices/v3"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// AmlCompute training cluster that autoscales between 0 and 1 nodes and
		// boots from a custom Azure Compute Gallery image.
		_, err := machinelearningservices.NewCompute(ctx, "compute", &machinelearningservices.ComputeArgs{
			ComputeName: pulumi.String("compute123"),
			Location:    pulumi.String("eastus"),
			// AmlComputeArgs selects the AmlCompute (cluster) variant of the Compute resource.
			Properties: &machinelearningservices.AmlComputeArgs{
				ComputeType: pulumi.String("AmlCompute"),
				Properties: &machinelearningservices.AmlComputePropertiesArgs{
					EnableNodePublicIp:          pulumi.Bool(true),
					IsolatedNetwork:             pulumi.Bool(false),
					OsType:                      pulumi.String(machinelearningservices.OsTypeWindows),
					RemoteLoginPortPublicAccess: pulumi.String(machinelearningservices.RemoteLoginPortPublicAccessNotSpecified),
					// Scale to zero when idle; deallocate nodes after 5 minutes (ISO-8601 duration).
					ScaleSettings: &machinelearningservices.ScaleSettingsArgs{
						MaxNodeCount:                pulumi.Int(1),
						MinNodeCount:                pulumi.Int(0),
						NodeIdleTimeBeforeScaleDown: pulumi.String("PT5M"),
					},
					// Custom VM image from an Azure Compute Gallery; omit to use default Azure ML images.
					VirtualMachineImage: &machinelearningservices.VirtualMachineImageArgs{
						Id: pulumi.String("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myResourceGroup/providers/Microsoft.Compute/galleries/myImageGallery/images/myImageDefinition/versions/0.0.1"),
					},
					VmPriority: pulumi.String(machinelearningservices.VmPriorityDedicated),
					VmSize:     pulumi.String("STANDARD_NC6"),
				},
			},
			ResourceGroupName: pulumi.String("testrg123"),
			WorkspaceName:     pulumi.String("workspaces123"),
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using AzureNative = Pulumi.AzureNative;

return await Deployment.RunAsync(() =>
{
    // AmlCompute training cluster that autoscales between 0 and 1 nodes and
    // boots from a custom Azure Compute Gallery image.
    var compute = new AzureNative.MachineLearningServices.Compute("compute", new()
    {
        ComputeName = "compute123",
        Location = "eastus",
        // AmlComputeArgs selects the AmlCompute (cluster) variant of the Compute resource.
        Properties = new AzureNative.MachineLearningServices.Inputs.AmlComputeArgs
        {
            ComputeType = "AmlCompute",
            Properties = new AzureNative.MachineLearningServices.Inputs.AmlComputePropertiesArgs
            {
                EnableNodePublicIp = true,
                IsolatedNetwork = false,
                OsType = AzureNative.MachineLearningServices.OsType.Windows,
                RemoteLoginPortPublicAccess = AzureNative.MachineLearningServices.RemoteLoginPortPublicAccess.NotSpecified,
                // Scale to zero when idle; deallocate nodes after 5 minutes (ISO-8601 duration).
                ScaleSettings = new AzureNative.MachineLearningServices.Inputs.ScaleSettingsArgs
                {
                    MaxNodeCount = 1,
                    MinNodeCount = 0,
                    NodeIdleTimeBeforeScaleDown = "PT5M",
                },
                // Custom VM image from an Azure Compute Gallery; omit to use default Azure ML images.
                VirtualMachineImage = new AzureNative.MachineLearningServices.Inputs.VirtualMachineImageArgs
                {
                    Id = "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myResourceGroup/providers/Microsoft.Compute/galleries/myImageGallery/images/myImageDefinition/versions/0.0.1",
                },
                VmPriority = AzureNative.MachineLearningServices.VmPriority.Dedicated,
                VmSize = "STANDARD_NC6",
            },
        },
        ResourceGroupName = "testrg123",
        WorkspaceName = "workspaces123",
    });
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.azurenative.machinelearningservices.Compute;
import com.pulumi.azurenative.machinelearningservices.ComputeArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var compute = new Compute("compute", ComputeArgs.builder()
.computeName("compute123")
.location("eastus")
.properties(AmlComputeArgs.builder()
.computeType("AmlCompute")
.properties(AmlComputePropertiesArgs.builder()
.enableNodePublicIp(true)
.isolatedNetwork(false)
.osType("Windows")
.remoteLoginPortPublicAccess("NotSpecified")
.scaleSettings(ScaleSettingsArgs.builder()
.maxNodeCount(1)
.minNodeCount(0)
.nodeIdleTimeBeforeScaleDown("PT5M")
.build())
.virtualMachineImage(VirtualMachineImageArgs.builder()
.id("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myResourceGroup/providers/Microsoft.Compute/galleries/myImageGallery/images/myImageDefinition/versions/0.0.1")
.build())
.vmPriority("Dedicated")
.vmSize("STANDARD_NC6")
.build())
.build())
.resourceGroupName("testrg123")
.workspaceName("workspaces123")
.build());
}
}
resources:
  # AmlCompute training cluster that autoscales between 0 and 1 nodes and
  # boots from a custom Azure Compute Gallery image.
  compute:
    type: azure-native:machinelearningservices:Compute
    properties:
      computeName: compute123
      location: eastus
      properties:
        # Discriminator: selects the AmlCompute (cluster) variant of the Compute resource.
        computeType: AmlCompute
        properties:
          enableNodePublicIp: true
          isolatedNetwork: false
          osType: Windows
          remoteLoginPortPublicAccess: NotSpecified
          # Scale to zero when idle; deallocate nodes after 5 minutes (ISO-8601 duration).
          scaleSettings:
            maxNodeCount: 1
            minNodeCount: 0
            nodeIdleTimeBeforeScaleDown: PT5M
          # Custom VM image from an Azure Compute Gallery; omit to use default Azure ML images.
          virtualMachineImage:
            id: /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myResourceGroup/providers/Microsoft.Compute/galleries/myImageGallery/images/myImageDefinition/versions/0.0.1
          vmPriority: Dedicated
          vmSize: STANDARD_NC6
      resourceGroupName: testrg123
      workspaceName: workspaces123
The scaleSettings block controls cluster behavior: minNodeCount sets the idle cluster size (often zero), maxNodeCount caps the maximum nodes, and nodeIdleTimeBeforeScaleDown defines how long nodes wait before scaling down. The cluster automatically provisions nodes when jobs arrive and deallocates them after the idle timeout. The virtualMachineImage property references a custom image from Azure Compute Gallery; omit this to use default Azure ML images.
Schedule automatic start and stop times
Development instances can accumulate costs during nights and weekends. Schedules automate shutdown during off-hours.
import * as pulumi from "@pulumi/pulumi";
import * as azure_native from "@pulumi/azure-native";

// Personal ComputeInstance assigned to a single AAD user, with a cron
// schedule that stops the instance daily to cut off-hours cost.
const compute = new azure_native.machinelearningservices.Compute("compute", {
    computeName: "compute123",
    location: "eastus",
    properties: {
        computeType: "ComputeInstance",
        properties: {
            applicationSharingPolicy: azure_native.machinelearningservices.ApplicationSharingPolicy.Personal,
            computeInstanceAuthorizationType: azure_native.machinelearningservices.ComputeInstanceAuthorizationType.Personal,
            // Binds the instance to one AAD identity (object ID + tenant ID).
            personalComputeInstanceSettings: {
                assignedUser: {
                    objectId: "00000000-0000-0000-0000-000000000000",
                    tenantId: "00000000-0000-0000-0000-000000000000",
                },
            },
            schedules: {
                computeStartStop: [{
                    // Stop (not start) the instance when the cron fires.
                    action: azure_native.machinelearningservices.ComputePowerAction.Stop,
                    cron: {
                        expression: "0 18 * * *", // 6 PM daily
                        startTime: "2021-04-23T01:30:00",
                        timeZone: "Pacific Standard Time",
                    },
                    status: azure_native.machinelearningservices.ScheduleStatus.Enabled,
                    triggerType: azure_native.machinelearningservices.ComputeTriggerType.Cron,
                }],
            },
            sshSettings: {
                sshPublicAccess: azure_native.machinelearningservices.SshPublicAccess.Disabled,
            },
            vmSize: "STANDARD_NC6",
        },
    },
    resourceGroupName: "testrg123",
    workspaceName: "workspaces123",
});
import pulumi
import pulumi_azure_native as azure_native

# Personal ComputeInstance assigned to a single AAD user, with a cron
# schedule that stops the instance daily to cut off-hours cost.
compute = azure_native.machinelearningservices.Compute("compute",
    compute_name="compute123",
    location="eastus",
    properties={
        "compute_type": "ComputeInstance",
        "properties": {
            "application_sharing_policy": azure_native.machinelearningservices.ApplicationSharingPolicy.PERSONAL,
            "compute_instance_authorization_type": azure_native.machinelearningservices.ComputeInstanceAuthorizationType.PERSONAL,
            # Binds the instance to one AAD identity (object ID + tenant ID).
            "personal_compute_instance_settings": {
                "assigned_user": {
                    "object_id": "00000000-0000-0000-0000-000000000000",
                    "tenant_id": "00000000-0000-0000-0000-000000000000",
                },
            },
            "schedules": {
                "compute_start_stop": [{
                    # Stop (not start) the instance when the cron fires.
                    "action": azure_native.machinelearningservices.ComputePowerAction.STOP,
                    "cron": {
                        "expression": "0 18 * * *",  # 6 PM daily
                        "start_time": "2021-04-23T01:30:00",
                        "time_zone": "Pacific Standard Time",
                    },
                    "status": azure_native.machinelearningservices.ScheduleStatus.ENABLED,
                    "trigger_type": azure_native.machinelearningservices.ComputeTriggerType.CRON,
                }],
            },
            "ssh_settings": {
                "ssh_public_access": azure_native.machinelearningservices.SshPublicAccess.DISABLED,
            },
            "vm_size": "STANDARD_NC6",
        },
    },
    resource_group_name="testrg123",
    workspace_name="workspaces123")
package main

import (
	machinelearningservices "github.com/pulumi/pulumi-azure-native-sdk/machinelearningservices/v3"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Personal ComputeInstance assigned to a single AAD user, with a cron
		// schedule that stops the instance daily to cut off-hours cost.
		_, err := machinelearningservices.NewCompute(ctx, "compute", &machinelearningservices.ComputeArgs{
			ComputeName: pulumi.String("compute123"),
			Location:    pulumi.String("eastus"),
			Properties: &machinelearningservices.ComputeInstanceArgs{
				ComputeType: pulumi.String("ComputeInstance"),
				Properties: &machinelearningservices.ComputeInstancePropertiesArgs{
					ApplicationSharingPolicy:         pulumi.String(machinelearningservices.ApplicationSharingPolicyPersonal),
					ComputeInstanceAuthorizationType: pulumi.String(machinelearningservices.ComputeInstanceAuthorizationTypePersonal),
					// Binds the instance to one AAD identity (object ID + tenant ID).
					PersonalComputeInstanceSettings: &machinelearningservices.PersonalComputeInstanceSettingsArgs{
						AssignedUser: &machinelearningservices.AssignedUserArgs{
							ObjectId: pulumi.String("00000000-0000-0000-0000-000000000000"),
							TenantId: pulumi.String("00000000-0000-0000-0000-000000000000"),
						},
					},
					Schedules: &machinelearningservices.ComputeSchedulesArgs{
						ComputeStartStop: machinelearningservices.ComputeStartStopScheduleArray{
							&machinelearningservices.ComputeStartStopScheduleArgs{
								// Stop (not start) the instance when the cron fires.
								Action: pulumi.String(machinelearningservices.ComputePowerActionStop),
								Cron: &machinelearningservices.CronArgs{
									Expression: pulumi.String("0 18 * * *"), // 6 PM daily
									StartTime:  pulumi.String("2021-04-23T01:30:00"),
									TimeZone:   pulumi.String("Pacific Standard Time"),
								},
								Status:      pulumi.String(machinelearningservices.ScheduleStatusEnabled),
								TriggerType: pulumi.String(machinelearningservices.ComputeTriggerTypeCron),
							},
						},
					},
					SshSettings: &machinelearningservices.ComputeInstanceSshSettingsArgs{
						SshPublicAccess: pulumi.String(machinelearningservices.SshPublicAccessDisabled),
					},
					VmSize: pulumi.String("STANDARD_NC6"),
				},
			},
			ResourceGroupName: pulumi.String("testrg123"),
			WorkspaceName:     pulumi.String("workspaces123"),
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using AzureNative = Pulumi.AzureNative;

return await Deployment.RunAsync(() =>
{
    // Personal ComputeInstance assigned to a single AAD user, with a cron
    // schedule that stops the instance daily to cut off-hours cost.
    var compute = new AzureNative.MachineLearningServices.Compute("compute", new()
    {
        ComputeName = "compute123",
        Location = "eastus",
        Properties = new AzureNative.MachineLearningServices.Inputs.ComputeInstanceArgs
        {
            ComputeType = "ComputeInstance",
            Properties = new AzureNative.MachineLearningServices.Inputs.ComputeInstancePropertiesArgs
            {
                ApplicationSharingPolicy = AzureNative.MachineLearningServices.ApplicationSharingPolicy.Personal,
                ComputeInstanceAuthorizationType = AzureNative.MachineLearningServices.ComputeInstanceAuthorizationType.Personal,
                // Binds the instance to one AAD identity (object ID + tenant ID).
                PersonalComputeInstanceSettings = new AzureNative.MachineLearningServices.Inputs.PersonalComputeInstanceSettingsArgs
                {
                    AssignedUser = new AzureNative.MachineLearningServices.Inputs.AssignedUserArgs
                    {
                        ObjectId = "00000000-0000-0000-0000-000000000000",
                        TenantId = "00000000-0000-0000-0000-000000000000",
                    },
                },
                Schedules = new AzureNative.MachineLearningServices.Inputs.ComputeSchedulesArgs
                {
                    ComputeStartStop = new[]
                    {
                        new AzureNative.MachineLearningServices.Inputs.ComputeStartStopScheduleArgs
                        {
                            // Stop (not start) the instance when the cron fires.
                            Action = AzureNative.MachineLearningServices.ComputePowerAction.Stop,
                            Cron = new AzureNative.MachineLearningServices.Inputs.CronArgs
                            {
                                Expression = "0 18 * * *", // 6 PM daily
                                StartTime = "2021-04-23T01:30:00",
                                TimeZone = "Pacific Standard Time",
                            },
                            Status = AzureNative.MachineLearningServices.ScheduleStatus.Enabled,
                            TriggerType = AzureNative.MachineLearningServices.ComputeTriggerType.Cron,
                        },
                    },
                },
                SshSettings = new AzureNative.MachineLearningServices.Inputs.ComputeInstanceSshSettingsArgs
                {
                    SshPublicAccess = AzureNative.MachineLearningServices.SshPublicAccess.Disabled,
                },
                VmSize = "STANDARD_NC6",
            },
        },
        ResourceGroupName = "testrg123",
        WorkspaceName = "workspaces123",
    });
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.azurenative.machinelearningservices.Compute;
import com.pulumi.azurenative.machinelearningservices.ComputeArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var compute = new Compute("compute", ComputeArgs.builder()
.computeName("compute123")
.location("eastus")
.properties(ComputeInstanceArgs.builder()
.computeType("ComputeInstance")
.properties(ComputeInstancePropertiesArgs.builder()
.applicationSharingPolicy("Personal")
.computeInstanceAuthorizationType("personal")
.personalComputeInstanceSettings(PersonalComputeInstanceSettingsArgs.builder()
.assignedUser(AssignedUserArgs.builder()
.objectId("00000000-0000-0000-0000-000000000000")
.tenantId("00000000-0000-0000-0000-000000000000")
.build())
.build())
.schedules(ComputeSchedulesArgs.builder()
.computeStartStop(ComputeStartStopScheduleArgs.builder()
.action("Stop")
.cron(CronArgs.builder()
.expression("0 18 * * *")
.startTime("2021-04-23T01:30:00")
.timeZone("Pacific Standard Time")
.build())
.status("Enabled")
.triggerType("Cron")
.build())
.build())
.sshSettings(ComputeInstanceSshSettingsArgs.builder()
.sshPublicAccess("Disabled")
.build())
.vmSize("STANDARD_NC6")
.build())
.build())
.resourceGroupName("testrg123")
.workspaceName("workspaces123")
.build());
}
}
resources:
  # Personal ComputeInstance assigned to a single AAD user, with a cron
  # schedule that stops the instance daily to cut off-hours cost.
  compute:
    type: azure-native:machinelearningservices:Compute
    properties:
      computeName: compute123
      location: eastus
      properties:
        computeType: ComputeInstance
        properties:
          applicationSharingPolicy: Personal
          computeInstanceAuthorizationType: personal
          # Binds the instance to one AAD identity (object ID + tenant ID).
          personalComputeInstanceSettings:
            assignedUser:
              objectId: 00000000-0000-0000-0000-000000000000
              tenantId: 00000000-0000-0000-0000-000000000000
          schedules:
            computeStartStop:
              # Stop (not start) the instance when the cron fires (6 PM daily).
              - action: Stop
                cron:
                  expression: 0 18 * * *
                  startTime: 2021-04-23T01:30:00
                  timeZone: Pacific Standard Time
                status: Enabled
                triggerType: Cron
          sshSettings:
            sshPublicAccess: Disabled
          vmSize: STANDARD_NC6
      resourceGroupName: testrg123
      workspaceName: workspaces123
The schedules property can define start and stop actions using cron expressions; this example configures only a stop. The action property specifies “Stop” to shut down the instance; the cron block sets the schedule (here, 6 PM daily in Pacific time). This configuration extends the basic ComputeInstance with cost optimization, reducing runtime charges without manual intervention.
Run custom services with Docker containers
Data scientists sometimes need specialized tools like RStudio or custom web applications running alongside their notebooks.
import * as pulumi from "@pulumi/pulumi";
import * as azure_native from "@pulumi/azure-native";

// Personal ComputeInstance that runs an RStudio Workbench container as a
// custom service, attached to a virtual network subnet.
const compute = new azure_native.machinelearningservices.Compute("compute", {
    computeName: "compute123",
    location: "eastus",
    properties: {
        computeType: "ComputeInstance",
        properties: {
            applicationSharingPolicy: azure_native.machinelearningservices.ApplicationSharingPolicy.Personal,
            computeInstanceAuthorizationType: azure_native.machinelearningservices.ComputeInstanceAuthorizationType.Personal,
            customServices: [{
                docker: {
                    privileged: true, // container runs in privileged mode
                },
                // Expose container port 8787 on instance port 4444.
                endpoints: [{
                    name: "connect",
                    protocol: azure_native.machinelearningservices.Protocol.Http,
                    published: 4444,
                    target: 8787,
                }],
                environmentVariables: {
                    RSP_LICENSE: {
                        type: azure_native.machinelearningservices.EnvironmentVariableType.Local,
                        value: "XXXX-XXXX-XXXX-XXXX-XXXX-XXXX-XXXX", // placeholder license key
                    },
                },
                image: {
                    reference: "ghcr.io/azure/rstudio-workbench:latest",
                    type: azure_native.machinelearningservices.ImageType.Docker,
                },
                kernel: {
                    argv: [
                        "option1",
                        "option2",
                        "option3",
                    ],
                    displayName: "TestKernel",
                    language: "python",
                },
                name: "rstudio-workbench",
                // Bind-mount a host directory into the container, read-only.
                volumes: [{
                    readOnly: true,
                    source: "/mnt/azureuser/",
                    target: "/home/testuser/",
                    type: azure_native.machinelearningservices.VolumeDefinitionType.Bind,
                }],
            }],
            enableSSO: true,
            personalComputeInstanceSettings: {
                assignedUser: {
                    objectId: "00000000-0000-0000-0000-000000000000",
                    tenantId: "00000000-0000-0000-0000-000000000000",
                },
            },
            sshSettings: {
                sshPublicAccess: azure_native.machinelearningservices.SshPublicAccess.Disabled,
            },
            // Deploy the instance into an existing virtual network subnet.
            subnet: {
                id: "test-subnet-resource-id",
            },
            vmSize: "STANDARD_NC6",
        },
    },
    resourceGroupName: "testrg123",
    workspaceName: "workspaces123",
});
import pulumi
import pulumi_azure_native as azure_native

# Personal ComputeInstance that runs an RStudio Workbench container as a
# custom service, attached to a virtual network subnet.
compute = azure_native.machinelearningservices.Compute("compute",
    compute_name="compute123",
    location="eastus",
    properties={
        "compute_type": "ComputeInstance",
        "properties": {
            "application_sharing_policy": azure_native.machinelearningservices.ApplicationSharingPolicy.PERSONAL,
            "compute_instance_authorization_type": azure_native.machinelearningservices.ComputeInstanceAuthorizationType.PERSONAL,
            "custom_services": [{
                "docker": {
                    "privileged": True,  # container runs in privileged mode
                },
                # Expose container port 8787 on instance port 4444.
                "endpoints": [{
                    "name": "connect",
                    "protocol": azure_native.machinelearningservices.Protocol.HTTP,
                    "published": 4444,
                    "target": 8787,
                }],
                "environment_variables": {
                    "RSP_LICENSE": {
                        "type": azure_native.machinelearningservices.EnvironmentVariableType.LOCAL,
                        "value": "XXXX-XXXX-XXXX-XXXX-XXXX-XXXX-XXXX",  # placeholder license key
                    },
                },
                "image": {
                    "reference": "ghcr.io/azure/rstudio-workbench:latest",
                    "type": azure_native.machinelearningservices.ImageType.DOCKER,
                },
                "kernel": {
                    "argv": [
                        "option1",
                        "option2",
                        "option3",
                    ],
                    "display_name": "TestKernel",
                    "language": "python",
                },
                "name": "rstudio-workbench",
                # Bind-mount a host directory into the container, read-only.
                "volumes": [{
                    "read_only": True,
                    "source": "/mnt/azureuser/",
                    "target": "/home/testuser/",
                    "type": azure_native.machinelearningservices.VolumeDefinitionType.BIND,
                }],
            }],
            "enable_sso": True,
            "personal_compute_instance_settings": {
                "assigned_user": {
                    "object_id": "00000000-0000-0000-0000-000000000000",
                    "tenant_id": "00000000-0000-0000-0000-000000000000",
                },
            },
            "ssh_settings": {
                "ssh_public_access": azure_native.machinelearningservices.SshPublicAccess.DISABLED,
            },
            # Deploy the instance into an existing virtual network subnet.
            "subnet": {
                "id": "test-subnet-resource-id",
            },
            "vm_size": "STANDARD_NC6",
        },
    },
    resource_group_name="testrg123",
    workspace_name="workspaces123")
package main

import (
	machinelearningservices "github.com/pulumi/pulumi-azure-native-sdk/machinelearningservices/v3"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Personal ComputeInstance that runs an RStudio Workbench container as a
		// custom service, attached to a virtual network subnet.
		_, err := machinelearningservices.NewCompute(ctx, "compute", &machinelearningservices.ComputeArgs{
			ComputeName: pulumi.String("compute123"),
			Location:    pulumi.String("eastus"),
			Properties: &machinelearningservices.ComputeInstanceArgs{
				ComputeType: pulumi.String("ComputeInstance"),
				Properties: &machinelearningservices.ComputeInstancePropertiesArgs{
					ApplicationSharingPolicy:         pulumi.String(machinelearningservices.ApplicationSharingPolicyPersonal),
					ComputeInstanceAuthorizationType: pulumi.String(machinelearningservices.ComputeInstanceAuthorizationTypePersonal),
					CustomServices: machinelearningservices.CustomServiceArray{
						&machinelearningservices.CustomServiceArgs{
							Docker: &machinelearningservices.DockerArgs{
								Privileged: pulumi.Bool(true), // container runs in privileged mode
							},
							// Expose container port 8787 on instance port 4444.
							Endpoints: machinelearningservices.EndpointArray{
								&machinelearningservices.EndpointArgs{
									Name:      pulumi.String("connect"),
									Protocol:  pulumi.String(machinelearningservices.ProtocolHttp),
									Published: pulumi.Int(4444),
									Target:    pulumi.Int(8787),
								},
							},
							EnvironmentVariables: machinelearningservices.EnvironmentVariableMap{
								"RSP_LICENSE": &machinelearningservices.EnvironmentVariableArgs{
									Type:  pulumi.String(machinelearningservices.EnvironmentVariableTypeLocal),
									Value: pulumi.String("XXXX-XXXX-XXXX-XXXX-XXXX-XXXX-XXXX"), // placeholder license key
								},
							},
							Image: &machinelearningservices.ImageArgs{
								Reference: pulumi.String("ghcr.io/azure/rstudio-workbench:latest"),
								Type:      pulumi.String(machinelearningservices.ImageTypeDocker),
							},
							Kernel: &machinelearningservices.JupyterKernelConfigArgs{
								Argv: pulumi.StringArray{
									pulumi.String("option1"),
									pulumi.String("option2"),
									pulumi.String("option3"),
								},
								DisplayName: pulumi.String("TestKernel"),
								Language:    pulumi.String("python"),
							},
							Name: pulumi.String("rstudio-workbench"),
							// Bind-mount a host directory into the container, read-only.
							Volumes: machinelearningservices.VolumeDefinitionArray{
								&machinelearningservices.VolumeDefinitionArgs{
									ReadOnly: pulumi.Bool(true),
									Source:   pulumi.String("/mnt/azureuser/"),
									Target:   pulumi.String("/home/testuser/"),
									Type:     pulumi.String(machinelearningservices.VolumeDefinitionTypeBind),
								},
							},
						},
					},
					EnableSSO: pulumi.Bool(true),
					PersonalComputeInstanceSettings: &machinelearningservices.PersonalComputeInstanceSettingsArgs{
						AssignedUser: &machinelearningservices.AssignedUserArgs{
							ObjectId: pulumi.String("00000000-0000-0000-0000-000000000000"),
							TenantId: pulumi.String("00000000-0000-0000-0000-000000000000"),
						},
					},
					SshSettings: &machinelearningservices.ComputeInstanceSshSettingsArgs{
						SshPublicAccess: pulumi.String(machinelearningservices.SshPublicAccessDisabled),
					},
					// Deploy the instance into an existing virtual network subnet.
					Subnet: &machinelearningservices.ResourceIdArgs{
						Id: pulumi.String("test-subnet-resource-id"),
					},
					VmSize: pulumi.String("STANDARD_NC6"),
				},
			},
			ResourceGroupName: pulumi.String("testrg123"),
			WorkspaceName:     pulumi.String("workspaces123"),
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using AzureNative = Pulumi.AzureNative;

return await Deployment.RunAsync(() =>
{
    // Personal ComputeInstance that runs an RStudio Workbench container as a
    // custom service, attached to a virtual network subnet.
    var compute = new AzureNative.MachineLearningServices.Compute("compute", new()
    {
        ComputeName = "compute123",
        Location = "eastus",
        Properties = new AzureNative.MachineLearningServices.Inputs.ComputeInstanceArgs
        {
            ComputeType = "ComputeInstance",
            Properties = new AzureNative.MachineLearningServices.Inputs.ComputeInstancePropertiesArgs
            {
                ApplicationSharingPolicy = AzureNative.MachineLearningServices.ApplicationSharingPolicy.Personal,
                ComputeInstanceAuthorizationType = AzureNative.MachineLearningServices.ComputeInstanceAuthorizationType.Personal,
                CustomServices = new[]
                {
                    new AzureNative.MachineLearningServices.Inputs.CustomServiceArgs
                    {
                        Docker = new AzureNative.MachineLearningServices.Inputs.DockerArgs
                        {
                            Privileged = true, // container runs in privileged mode
                        },
                        // Expose container port 8787 on instance port 4444.
                        Endpoints = new[]
                        {
                            new AzureNative.MachineLearningServices.Inputs.EndpointArgs
                            {
                                Name = "connect",
                                Protocol = AzureNative.MachineLearningServices.Protocol.Http,
                                Published = 4444,
                                Target = 8787,
                            },
                        },
                        EnvironmentVariables =
                        {
                            { "RSP_LICENSE", new AzureNative.MachineLearningServices.Inputs.EnvironmentVariableArgs
                            {
                                Type = AzureNative.MachineLearningServices.EnvironmentVariableType.Local,
                                Value = "XXXX-XXXX-XXXX-XXXX-XXXX-XXXX-XXXX", // placeholder license key
                            } },
                        },
                        Image = new AzureNative.MachineLearningServices.Inputs.ImageArgs
                        {
                            Reference = "ghcr.io/azure/rstudio-workbench:latest",
                            Type = AzureNative.MachineLearningServices.ImageType.Docker,
                        },
                        Kernel = new AzureNative.MachineLearningServices.Inputs.JupyterKernelConfigArgs
                        {
                            Argv = new[]
                            {
                                "option1",
                                "option2",
                                "option3",
                            },
                            DisplayName = "TestKernel",
                            Language = "python",
                        },
                        Name = "rstudio-workbench",
                        // Bind-mount a host directory into the container, read-only.
                        Volumes = new[]
                        {
                            new AzureNative.MachineLearningServices.Inputs.VolumeDefinitionArgs
                            {
                                ReadOnly = true,
                                Source = "/mnt/azureuser/",
                                Target = "/home/testuser/",
                                Type = AzureNative.MachineLearningServices.VolumeDefinitionType.Bind,
                            },
                        },
                    },
                },
                EnableSSO = true,
                PersonalComputeInstanceSettings = new AzureNative.MachineLearningServices.Inputs.PersonalComputeInstanceSettingsArgs
                {
                    AssignedUser = new AzureNative.MachineLearningServices.Inputs.AssignedUserArgs
                    {
                        ObjectId = "00000000-0000-0000-0000-000000000000",
                        TenantId = "00000000-0000-0000-0000-000000000000",
                    },
                },
                SshSettings = new AzureNative.MachineLearningServices.Inputs.ComputeInstanceSshSettingsArgs
                {
                    SshPublicAccess = AzureNative.MachineLearningServices.SshPublicAccess.Disabled,
                },
                // Deploy the instance into an existing virtual network subnet.
                Subnet = new AzureNative.MachineLearningServices.Inputs.ResourceIdArgs
                {
                    Id = "test-subnet-resource-id",
                },
                VmSize = "STANDARD_NC6",
            },
        },
        ResourceGroupName = "testrg123",
        WorkspaceName = "workspaces123",
    });
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.azurenative.machinelearningservices.Compute;
import com.pulumi.azurenative.machinelearningservices.ComputeArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var compute = new Compute("compute", ComputeArgs.builder()
.computeName("compute123")
.location("eastus")
.properties(ComputeInstanceArgs.builder()
.computeType("ComputeInstance")
.properties(ComputeInstancePropertiesArgs.builder()
.applicationSharingPolicy("Personal")
.computeInstanceAuthorizationType("personal")
.customServices(CustomServiceArgs.builder()
.docker(DockerArgs.builder()
.privileged(true)
.build())
.endpoints(EndpointArgs.builder()
.name("connect")
.protocol("http")
.published(4444)
.target(8787)
.build())
.environmentVariables(Map.of("RSP_LICENSE", EnvironmentVariableArgs.builder()
.type("local")
.value("XXXX-XXXX-XXXX-XXXX-XXXX-XXXX-XXXX")
.build()))
.image(ImageArgs.builder()
.reference("ghcr.io/azure/rstudio-workbench:latest")
.type("docker")
.build())
.kernel(JupyterKernelConfigArgs.builder()
.argv(
"option1",
"option2",
"option3")
.displayName("TestKernel")
.language("python")
.build())
.name("rstudio-workbench")
.volumes(VolumeDefinitionArgs.builder()
.readOnly(true)
.source("/mnt/azureuser/")
.target("/home/testuser/")
.type("bind")
.build())
.build())
.enableSSO(true)
.personalComputeInstanceSettings(PersonalComputeInstanceSettingsArgs.builder()
.assignedUser(AssignedUserArgs.builder()
.objectId("00000000-0000-0000-0000-000000000000")
.tenantId("00000000-0000-0000-0000-000000000000")
.build())
.build())
.sshSettings(ComputeInstanceSshSettingsArgs.builder()
.sshPublicAccess("Disabled")
.build())
.subnet(ResourceIdArgs.builder()
.id("test-subnet-resource-id")
.build())
.vmSize("STANDARD_NC6")
.build())
.build())
.resourceGroupName("testrg123")
.workspaceName("workspaces123")
.build());
}
}
# Pulumi YAML: Azure ML ComputeInstance running a custom Docker service
# (RStudio Workbench). Indentation restored — the original listing had
# lost its structural indentation, which makes YAML invalid.
resources:
  compute:
    type: azure-native:machinelearningservices:Compute
    properties:
      computeName: compute123
      location: eastus
      properties:
        computeType: ComputeInstance
        properties:
          # Single-user instance, not shared with other workspace users.
          applicationSharingPolicy: Personal
          computeInstanceAuthorizationType: personal
          # Custom containerized services to run on the instance.
          customServices:
            - docker:
                privileged: true
              endpoints:
                # Container port 8787 exposed on published port 4444.
                - name: connect
                  protocol: http
                  published: 4444
                  target: 8787
              environmentVariables:
                RSP_LICENSE:
                  type: local
                  value: XXXX-XXXX-XXXX-XXXX-XXXX-XXXX-XXXX
              image:
                reference: ghcr.io/azure/rstudio-workbench:latest
                type: docker
              kernel:
                argv:
                  - option1
                  - option2
                  - option3
                displayName: TestKernel
                language: python
              name: rstudio-workbench
              # Bind-mount host data read-only into the container.
              volumes:
                - readOnly: true
                  source: /mnt/azureuser/
                  target: /home/testuser/
                  type: bind
          enableSSO: true
          # Restrict the instance to a single assigned AAD user.
          personalComputeInstanceSettings:
            assignedUser:
              objectId: 00000000-0000-0000-0000-000000000000
              tenantId: 00000000-0000-0000-0000-000000000000
          # SSH disabled; access via studio/browser endpoints only.
          sshSettings:
            sshPublicAccess: Disabled
          subnet:
            id: test-subnet-resource-id
          vmSize: STANDARD_NC6
      resourceGroupName: testrg123
      workspaceName: workspaces123
The customServices array defines Docker containers to run on the instance. Each service specifies an image reference, exposed endpoints (with port mappings), environment variables, and volume mounts. The endpoints array maps container ports to published ports accessible from the browser. This configuration extends the basic ComputeInstance with custom tooling, enabling workflows that require specialized development environments.
Beyond these examples
These snippets focus on specific compute resource features: compute instance creation and scheduling, AmlCompute autoscaling configuration, and custom Docker services and volume mounts. They’re intentionally minimal rather than full ML platform deployments.
The examples may reference pre-existing infrastructure such as Azure Machine Learning workspaces, resource groups and virtual network subnets, custom VM images from Azure Compute Gallery, and Docker container registries. They focus on configuring compute resources rather than provisioning the surrounding workspace infrastructure.
To keep things focused, common compute patterns are omitted, including:
- AKS and DataFactory compute types (shown but not detailed)
- SSH configuration and public key management
- User assignment and authorization policies
- Network isolation and private endpoints
- Identity and managed service identity configuration
These omissions are intentional: the goal is to illustrate how each compute feature is wired, not provide drop-in ML platform modules. See the Compute resource reference for all available configuration options.
Let's configure Azure Machine Learning Compute
Get started with Pulumi Cloud, then follow our quick setup guide to deploy this infrastructure.
Try Pulumi Cloud for FREE.
Frequently Asked Questions
Compute Types & Configuration
The supported compute types include AmlCompute (managed compute clusters), ComputeInstance (single-node development environments), AKS (Azure Kubernetes Service), and DataFactory. For a compute instance, vmSize is required: set computeType to ComputeInstance and specify a vmSize like STANDARD_NC6. The computeName, resourceGroupName, and workspaceName properties are immutable and cannot be modified after the resource is created.
Scaling & Resource Management
Use the scaleSettings property with maxNodeCount, minNodeCount, and nodeIdleTimeBeforeScaleDown to control automatic scaling behavior. Idle timeouts use ISO 8601 duration format: PT5M for 5 minutes, PT1H for 1 hour, etc. This applies to the nodeIdleTimeBeforeScaleDown property. You can update scaleSettings.maxNodeCount and scaleSettings.minNodeCount on existing clusters.
Scheduling & Automation
Configure schedules.computeStartStop with a cron expression, startTime, timeZone, and action (Stop or Start). For example, expression: "0 18 * * *" stops the instance at 6 PM daily.
Networking & Security
Use sshSettings.sshPublicAccess and set it to Disabled to block SSH access, or configure it with your SSH public key for access. Use the subnet property with a resource ID to connect AmlCompute or ComputeInstance to a specific virtual network subnet.
Advanced Features
Configure customServices with Docker image details, endpoints, volumes, and environment variables to run custom containerized services. To attach an existing cluster, set computeType to AKS and provide the resourceId of your existing Azure Kubernetes Service cluster. To use a custom image, set virtualMachineImage.id to the resource ID of an image from your Azure Compute Gallery.