published on Wednesday, Apr 29, 2026 by Pulumi
Provides a DigitalOcean Dedicated Inference Token resource. This can be used to create and revoke API tokens for dedicated inference endpoints.
Note: The `token` attribute is only available immediately after creation and cannot be retrieved afterwards. Make sure to store it securely.
Example Usage
import * as pulumi from "@pulumi/pulumi";
import * as digitalocean from "@pulumi/digitalocean";
// Provision a dedicated inference endpoint running one model deployment
// on a single H100 accelerator in the tor1 region.
const example = new digitalocean.DedicatedInference("example", {
name: "my-inference-endpoint",
region: "tor1",
modelDeployments: [{
modelSlug: "deepseek-r1-distill-qwen-14b",
modelProvider: "digitalocean",
accelerators: [{
acceleratorSlug: "gpu-h100x1-80gb",
scale: 1,
type: "nvidia_h100",
}],
}],
});
// Create an API token scoped to the endpoint above.
// The token value is only exposed immediately after creation — store it securely.
const exampleDedicatedInferenceToken = new digitalocean.DedicatedInferenceToken("example", {
dedicatedInferenceId: example.id,
name: "my-api-token",
});
import pulumi
import pulumi_digitalocean as digitalocean

# Provision a dedicated inference endpoint running one model deployment
# on a single H100 accelerator in the tor1 region.
example = digitalocean.DedicatedInference("example",
name="my-inference-endpoint",
region="tor1",
model_deployments=[{
"model_slug": "deepseek-r1-distill-qwen-14b",
"model_provider": "digitalocean",
"accelerators": [{
"accelerator_slug": "gpu-h100x1-80gb",
"scale": 1,
"type": "nvidia_h100",
}],
}])
# Create an API token scoped to the endpoint above.
# The token value is only exposed immediately after creation — store it securely.
example_dedicated_inference_token = digitalocean.DedicatedInferenceToken("example",
dedicated_inference_id=example.id,
name="my-api-token")
package main

import (
"github.com/pulumi/pulumi-digitalocean/sdk/v4/go/digitalocean"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
// Provision a dedicated inference endpoint running one model deployment
// on a single H100 accelerator in the tor1 region.
example, err := digitalocean.NewDedicatedInference(ctx, "example", &digitalocean.DedicatedInferenceArgs{
Name: pulumi.String("my-inference-endpoint"),
Region: pulumi.String("tor1"),
ModelDeployments: digitalocean.DedicatedInferenceModelDeploymentArray{
&digitalocean.DedicatedInferenceModelDeploymentArgs{
ModelSlug: pulumi.String("deepseek-r1-distill-qwen-14b"),
ModelProvider: pulumi.String("digitalocean"),
Accelerators: digitalocean.DedicatedInferenceModelDeploymentAcceleratorArray{
&digitalocean.DedicatedInferenceModelDeploymentAcceleratorArgs{
AcceleratorSlug: pulumi.String("gpu-h100x1-80gb"),
Scale: pulumi.Int(1),
Type: pulumi.String("nvidia_h100"),
},
},
},
},
})
if err != nil {
return err
}
// Create an API token scoped to the endpoint above.
// The token value is only exposed immediately after creation — store it securely.
_, err = digitalocean.NewDedicatedInferenceToken(ctx, "example", &digitalocean.DedicatedInferenceTokenArgs{
DedicatedInferenceId: example.ID(),
Name: pulumi.String("my-api-token"),
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using DigitalOcean = Pulumi.DigitalOcean;

return await Deployment.RunAsync(() =>
{
// Provision a dedicated inference endpoint running one model deployment
// on a single H100 accelerator in the tor1 region.
// (The resources live directly under the DigitalOcean namespace;
// "DigitalOcean.Index.*" was a doc-generation artifact.)
var example = new DigitalOcean.DedicatedInference("example", new()
{
Name = "my-inference-endpoint",
Region = "tor1",
ModelDeployments = new[]
{
new DigitalOcean.Inputs.DedicatedInferenceModelDeploymentArgs
{
ModelSlug = "deepseek-r1-distill-qwen-14b",
ModelProvider = "digitalocean",
Accelerators = new[]
{
new DigitalOcean.Inputs.DedicatedInferenceModelDeploymentAcceleratorArgs
{
AcceleratorSlug = "gpu-h100x1-80gb",
Scale = 1,
Type = "nvidia_h100",
},
},
},
},
});
// Create an API token scoped to the endpoint above.
// The token value is only exposed immediately after creation — store it securely.
var exampleDedicatedInferenceToken = new DigitalOcean.DedicatedInferenceToken("example", new()
{
DedicatedInferenceId = example.Id,
Name = "my-api-token",
});
});
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.digitalocean.DedicatedInference;
import com.pulumi.digitalocean.DedicatedInferenceArgs;
import com.pulumi.digitalocean.inputs.DedicatedInferenceModelDeploymentArgs;
// Required for the nested accelerator input below; was missing from the original example.
import com.pulumi.digitalocean.inputs.DedicatedInferenceModelDeploymentAcceleratorArgs;
import com.pulumi.digitalocean.DedicatedInferenceToken;
import com.pulumi.digitalocean.DedicatedInferenceTokenArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}

public static void stack(Context ctx) {
// Provision a dedicated inference endpoint running one model deployment
// on a single H100 accelerator in the tor1 region.
var example = new DedicatedInference("example", DedicatedInferenceArgs.builder()
.name("my-inference-endpoint")
.region("tor1")
.modelDeployments(DedicatedInferenceModelDeploymentArgs.builder()
.modelSlug("deepseek-r1-distill-qwen-14b")
.modelProvider("digitalocean")
.accelerators(DedicatedInferenceModelDeploymentAcceleratorArgs.builder()
.acceleratorSlug("gpu-h100x1-80gb")
.scale(1)
.type("nvidia_h100")
.build())
.build())
.build());

// Create an API token scoped to the endpoint above.
// The token value is only exposed immediately after creation — store it securely.
var exampleDedicatedInferenceToken = new DedicatedInferenceToken("exampleDedicatedInferenceToken", DedicatedInferenceTokenArgs.builder()
.dedicatedInferenceId(example.id())
.name("my-api-token")
.build());
}
}
resources:
# Dedicated inference endpoint running one model deployment
# on a single H100 accelerator in the tor1 region.
example:
type: digitalocean:DedicatedInference
properties:
name: my-inference-endpoint
region: tor1
modelDeployments:
- modelSlug: deepseek-r1-distill-qwen-14b
modelProvider: digitalocean
accelerators:
- acceleratorSlug: gpu-h100x1-80gb
scale: 1
type: nvidia_h100
# API token scoped to the endpoint above. The token value is only
# exposed immediately after creation — store it securely.
exampleDedicatedInferenceToken:
type: digitalocean:DedicatedInferenceToken
name: example
properties:
dedicatedInferenceId: ${example.id}
name: my-api-token
Create DedicatedInferenceToken Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new DedicatedInferenceToken(name: string, args: DedicatedInferenceTokenArgs, opts?: CustomResourceOptions);
@overload
def DedicatedInferenceToken(resource_name: str,
args: DedicatedInferenceTokenArgs,
opts: Optional[ResourceOptions] = None)
@overload
def DedicatedInferenceToken(resource_name: str,
opts: Optional[ResourceOptions] = None,
dedicated_inference_id: Optional[str] = None,
name: Optional[str] = None)
func NewDedicatedInferenceToken(ctx *Context, name string, args DedicatedInferenceTokenArgs, opts ...ResourceOption) (*DedicatedInferenceToken, error)
public DedicatedInferenceToken(string name, DedicatedInferenceTokenArgs args, CustomResourceOptions? opts = null)
public DedicatedInferenceToken(String name, DedicatedInferenceTokenArgs args)
public DedicatedInferenceToken(String name, DedicatedInferenceTokenArgs args, CustomResourceOptions options)
type: digitalocean:DedicatedInferenceToken
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args DedicatedInferenceTokenArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args DedicatedInferenceTokenArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args DedicatedInferenceTokenArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args DedicatedInferenceTokenArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args DedicatedInferenceTokenArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
Constructor example
The following reference example uses placeholder values for all input properties.
var dedicatedInferenceTokenResource = new DigitalOcean.DedicatedInferenceToken("dedicatedInferenceTokenResource", new()
{
DedicatedInferenceId = "string",
Name = "string",
});
example, err := digitalocean.NewDedicatedInferenceToken(ctx, "dedicatedInferenceTokenResource", &digitalocean.DedicatedInferenceTokenArgs{
DedicatedInferenceId: pulumi.String("string"),
Name: pulumi.String("string"),
})
var dedicatedInferenceTokenResource = new DedicatedInferenceToken("dedicatedInferenceTokenResource", DedicatedInferenceTokenArgs.builder()
.dedicatedInferenceId("string")
.name("string")
.build());
dedicated_inference_token_resource = digitalocean.DedicatedInferenceToken("dedicatedInferenceTokenResource",
dedicated_inference_id="string",
name="string")
const dedicatedInferenceTokenResource = new digitalocean.DedicatedInferenceToken("dedicatedInferenceTokenResource", {
dedicatedInferenceId: "string",
name: "string",
});
type: digitalocean:DedicatedInferenceToken
properties:
dedicatedInferenceId: string
name: string
DedicatedInferenceToken Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
The DedicatedInferenceToken resource accepts the following input properties:
- DedicatedInferenceId string
- The ID of the dedicated inference endpoint this token belongs to. Changing this forces a new resource.
- Name string
- A human-readable name for the token. Changing this forces a new resource.
- DedicatedInferenceId string
- The ID of the dedicated inference endpoint this token belongs to. Changing this forces a new resource.
- Name string
- A human-readable name for the token. Changing this forces a new resource.
- dedicatedInferenceId String
- The ID of the dedicated inference endpoint this token belongs to. Changing this forces a new resource.
- name String
- A human-readable name for the token. Changing this forces a new resource.
- dedicatedInferenceId string
- The ID of the dedicated inference endpoint this token belongs to. Changing this forces a new resource.
- name string
- A human-readable name for the token. Changing this forces a new resource.
- dedicated_inference_id str
- The ID of the dedicated inference endpoint this token belongs to. Changing this forces a new resource.
- name str
- A human-readable name for the token. Changing this forces a new resource.
- dedicatedInferenceId String
- The ID of the dedicated inference endpoint this token belongs to. Changing this forces a new resource.
- name String
- A human-readable name for the token. Changing this forces a new resource.
Outputs
All input properties are implicitly available as output properties. Additionally, the DedicatedInferenceToken resource produces the following output properties:
- created_at str
- The date and time when the token was created.
- id str
- The provider-assigned unique ID for this managed resource.
- token str
- (Sensitive) The token value. Only available immediately after creation and not retrievable afterwards.
Look up Existing DedicatedInferenceToken Resource
Get an existing DedicatedInferenceToken resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: DedicatedInferenceTokenState, opts?: CustomResourceOptions): DedicatedInferenceToken@staticmethod
def get(resource_name: str,
id: str,
opts: Optional[ResourceOptions] = None,
created_at: Optional[str] = None,
dedicated_inference_id: Optional[str] = None,
name: Optional[str] = None,
token: Optional[str] = None) -> DedicatedInferenceToken
func GetDedicatedInferenceToken(ctx *Context, name string, id IDInput, state *DedicatedInferenceTokenState, opts ...ResourceOption) (*DedicatedInferenceToken, error)
public static DedicatedInferenceToken Get(string name, Input<string> id, DedicatedInferenceTokenState? state, CustomResourceOptions? opts = null)
public static DedicatedInferenceToken get(String name, Output<String> id, DedicatedInferenceTokenState state, CustomResourceOptions options)
resources:
  _:
    type: digitalocean:DedicatedInferenceToken
    get:
      id: ${id}
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- CreatedAt string
- The date and time when the token was created.
- DedicatedInferenceId string
- The ID of the dedicated inference endpoint this token belongs to. Changing this forces a new resource.
- Name string
- A human-readable name for the token. Changing this forces a new resource.
- Token string
- (Sensitive) The token value. Only available immediately after creation and not retrievable afterwards.
- CreatedAt string
- The date and time when the token was created.
- DedicatedInferenceId string
- The ID of the dedicated inference endpoint this token belongs to. Changing this forces a new resource.
- Name string
- A human-readable name for the token. Changing this forces a new resource.
- Token string
- (Sensitive) The token value. Only available immediately after creation and not retrievable afterwards.
- createdAt String
- The date and time when the token was created.
- dedicatedInferenceId String
- The ID of the dedicated inference endpoint this token belongs to. Changing this forces a new resource.
- name String
- A human-readable name for the token. Changing this forces a new resource.
- token String
- (Sensitive) The token value. Only available immediately after creation and not retrievable afterwards.
- createdAt string
- The date and time when the token was created.
- dedicatedInferenceId string
- The ID of the dedicated inference endpoint this token belongs to. Changing this forces a new resource.
- name string
- A human-readable name for the token. Changing this forces a new resource.
- token string
- (Sensitive) The token value. Only available immediately after creation and not retrievable afterwards.
- created_at str
- The date and time when the token was created.
- dedicated_inference_id str
- The ID of the dedicated inference endpoint this token belongs to. Changing this forces a new resource.
- name str
- A human-readable name for the token. Changing this forces a new resource.
- token str
- (Sensitive) The token value. Only available immediately after creation and not retrievable afterwards.
- createdAt String
- The date and time when the token was created.
- dedicatedInferenceId String
- The ID of the dedicated inference endpoint this token belongs to. Changing this forces a new resource.
- name String
- A human-readable name for the token. Changing this forces a new resource.
- token String
- (Sensitive) The token value. Only available immediately after creation and not retrievable afterwards.
Import
Dedicated inference tokens can be imported using the composite ID
{dedicated_inference_id}:{token_id}, e.g.
$ pulumi import digitalocean:index/dedicatedInferenceToken:DedicatedInferenceToken example endpoint-id:token-id
To learn more about importing existing cloud resources, see Importing resources.
Package Details
- Repository
- DigitalOcean pulumi/pulumi-digitalocean
- License
- Apache-2.0
- Notes
- This Pulumi package is based on the `digitalocean` Terraform Provider.
published on Wednesday, Apr 29, 2026 by Pulumi
