Viewing docs for DigitalOcean v4.65.0
published on Wednesday, Apr 29, 2026 by Pulumi
Get information on a dedicated inference endpoint for use in other resources. This data source provides all of the endpoint’s properties as configured on your DigitalOcean account.
Example Usage
import * as pulumi from "@pulumi/pulumi";
import * as digitalocean from "@pulumi/digitalocean";

// Look up the dedicated inference endpoint by its ID.
const example = digitalocean.getDedicatedInference({ id: "endpoint-id" });

// Export the endpoint's current status as a stack output.
export const endpointStatus = example.then(result => result.status);
import pulumi
import pulumi_digitalocean as digitalocean

# Look up the dedicated inference endpoint by its ID.
example = digitalocean.get_dedicated_inference(id="endpoint-id")

# Export the endpoint's current status as a stack output.
pulumi.export("endpointStatus", example.status)
package main
import (
"github.com/pulumi/pulumi-digitalocean/sdk/v4/go/digitalocean"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
example, err := digitalocean.GetDedicatedInference(ctx, &digitalocean.LookupDedicatedInferenceArgs{
Id: "endpoint-id",
}, nil)
if err != nil {
return err
}
ctx.Export("endpointStatus", example.Status)
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using DigitalOcean = Pulumi.DigitalOcean;

return await Deployment.RunAsync(() =>
{
    // Look up the dedicated inference endpoint by its ID.
    var example = DigitalOcean.Index.GetDedicatedInference.Invoke(new()
    {
        Id = "endpoint-id",
    });

    // Export the endpoint's current status as a stack output.
    return new Dictionary<string, object?>
    {
        ["endpointStatus"] = example.Apply(result => result.Status),
    };
});
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.digitalocean.DigitaloceanFunctions;
import com.pulumi.digitalocean.inputs.GetDedicatedInferenceArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        // Look up the dedicated inference endpoint by its ID.
        final var example = DigitaloceanFunctions.getDedicatedInference(GetDedicatedInferenceArgs.builder()
            .id("endpoint-id")
            .build());

        // Export the endpoint's current status as a stack output.
        ctx.export("endpointStatus", example.status());
    }
}
variables:
  example:
    # Look up the dedicated inference endpoint by its ID.
    fn::invoke:
      function: digitalocean:getDedicatedInference
      arguments:
        id: endpoint-id
outputs:
  # The endpoint's current status.
  endpointStatus: ${example.status}
Using getDedicatedInference
Two invocation forms are available. The direct form accepts plain arguments and either blocks until the result value is available, or returns a Promise-wrapped result. The output form accepts Input-wrapped arguments and returns an Output-wrapped result.
function getDedicatedInference(args: GetDedicatedInferenceArgs, opts?: InvokeOptions): Promise<GetDedicatedInferenceResult>
function getDedicatedInferenceOutput(args: GetDedicatedInferenceOutputArgs, opts?: InvokeOptions): Output<GetDedicatedInferenceResult>

def get_dedicated_inference(id: Optional[str] = None,
                            opts: Optional[InvokeOptions] = None) -> GetDedicatedInferenceResult
def get_dedicated_inference_output(id: Optional[pulumi.Input[str]] = None,
                                   opts: Optional[InvokeOptions] = None) -> Output[GetDedicatedInferenceResult]

func LookupDedicatedInference(ctx *Context, args *LookupDedicatedInferenceArgs, opts ...InvokeOption) (*LookupDedicatedInferenceResult, error)
func LookupDedicatedInferenceOutput(ctx *Context, args *LookupDedicatedInferenceOutputArgs, opts ...InvokeOption) LookupDedicatedInferenceResultOutput

> Note: This function is named LookupDedicatedInference in the Go SDK.

public static class GetDedicatedInference
{
    public static Task<GetDedicatedInferenceResult> InvokeAsync(GetDedicatedInferenceArgs args, InvokeOptions? opts = null)
    public static Output<GetDedicatedInferenceResult> Invoke(GetDedicatedInferenceInvokeArgs args, InvokeOptions? opts = null)
}

public static CompletableFuture<GetDedicatedInferenceResult> getDedicatedInference(GetDedicatedInferenceArgs args, InvokeOptions options)
public static Output<GetDedicatedInferenceResult> getDedicatedInference(GetDedicatedInferenceArgs args, InvokeOptions options)
fn::invoke:
function: digitalocean:index/getDedicatedInference:getDedicatedInference
arguments:
        # arguments dictionary

The following arguments are supported:
- Id string
- The ID of the dedicated inference endpoint.
- Id string
- The ID of the dedicated inference endpoint.
- id String
- The ID of the dedicated inference endpoint.
- id string
- The ID of the dedicated inference endpoint.
- id str
- The ID of the dedicated inference endpoint.
- id String
- The ID of the dedicated inference endpoint.
getDedicatedInference Result
The following output properties are available:
- CreatedAt string
- The date and time when the dedicated inference endpoint was created.
- EnablePublicEndpoint bool
- Whether the public HTTPS endpoint is enabled.
- Id string
- ModelDeployments List<Pulumi.DigitalOcean.Outputs.GetDedicatedInferenceModelDeployment>
- The list of model deployments running on the endpoint. Each element contains:
- Name string
- The name of the dedicated inference endpoint.
- PrivateEndpointFqdn string
- The fully-qualified domain name of the private endpoint.
- PublicEndpointFqdn string
- The fully-qualified domain name of the public endpoint, if enabled.
- Region string
- The region where the dedicated inference endpoint is deployed.
- Status string
- The current status of the dedicated inference endpoint.
- UpdatedAt string
- The date and time when the dedicated inference endpoint was last updated.
- VpcUuid string
- The UUID of the VPC the dedicated inference endpoint is deployed in.
- CreatedAt string
- The date and time when the dedicated inference endpoint was created.
- EnablePublicEndpoint bool
- Whether the public HTTPS endpoint is enabled.
- Id string
- ModelDeployments []GetDedicatedInferenceModelDeployment
- The list of model deployments running on the endpoint. Each element contains:
- Name string
- The name of the dedicated inference endpoint.
- PrivateEndpointFqdn string
- The fully-qualified domain name of the private endpoint.
- PublicEndpointFqdn string
- The fully-qualified domain name of the public endpoint, if enabled.
- Region string
- The region where the dedicated inference endpoint is deployed.
- Status string
- The current status of the dedicated inference endpoint.
- UpdatedAt string
- The date and time when the dedicated inference endpoint was last updated.
- VpcUuid string
- The UUID of the VPC the dedicated inference endpoint is deployed in.
- createdAt String
- The date and time when the dedicated inference endpoint was created.
- enablePublicEndpoint Boolean
- Whether the public HTTPS endpoint is enabled.
- id String
- modelDeployments List<GetDedicatedInferenceModelDeployment>
- The list of model deployments running on the endpoint. Each element contains:
- name String
- The name of the dedicated inference endpoint.
- privateEndpointFqdn String
- The fully-qualified domain name of the private endpoint.
- publicEndpointFqdn String
- The fully-qualified domain name of the public endpoint, if enabled.
- region String
- The region where the dedicated inference endpoint is deployed.
- status String
- The current status of the dedicated inference endpoint.
- updatedAt String
- The date and time when the dedicated inference endpoint was last updated.
- vpcUuid String
- The UUID of the VPC the dedicated inference endpoint is deployed in.
- createdAt string
- The date and time when the dedicated inference endpoint was created.
- enablePublicEndpoint boolean
- Whether the public HTTPS endpoint is enabled.
- id string
- modelDeployments GetDedicatedInferenceModelDeployment[]
- The list of model deployments running on the endpoint. Each element contains:
- name string
- The name of the dedicated inference endpoint.
- privateEndpointFqdn string
- The fully-qualified domain name of the private endpoint.
- publicEndpointFqdn string
- The fully-qualified domain name of the public endpoint, if enabled.
- region string
- The region where the dedicated inference endpoint is deployed.
- status string
- The current status of the dedicated inference endpoint.
- updatedAt string
- The date and time when the dedicated inference endpoint was last updated.
- vpcUuid string
- The UUID of the VPC the dedicated inference endpoint is deployed in.
- created_at str
- The date and time when the dedicated inference endpoint was created.
- enable_public_endpoint bool
- Whether the public HTTPS endpoint is enabled.
- id str
- model_deployments Sequence[GetDedicatedInferenceModelDeployment]
- The list of model deployments running on the endpoint. Each element contains:
- name str
- The name of the dedicated inference endpoint.
- private_endpoint_fqdn str
- The fully-qualified domain name of the private endpoint.
- public_endpoint_fqdn str
- The fully-qualified domain name of the public endpoint, if enabled.
- region str
- The region where the dedicated inference endpoint is deployed.
- status str
- The current status of the dedicated inference endpoint.
- updated_at str
- The date and time when the dedicated inference endpoint was last updated.
- vpc_uuid str
- The UUID of the VPC the dedicated inference endpoint is deployed in.
- createdAt String
- The date and time when the dedicated inference endpoint was created.
- enablePublicEndpoint Boolean
- Whether the public HTTPS endpoint is enabled.
- id String
- modelDeployments List<Property Map>
- The list of model deployments running on the endpoint. Each element contains:
- name String
- The name of the dedicated inference endpoint.
- privateEndpointFqdn String
- The fully-qualified domain name of the private endpoint.
- publicEndpointFqdn String
- The fully-qualified domain name of the public endpoint, if enabled.
- region String
- The region where the dedicated inference endpoint is deployed.
- status String
- The current status of the dedicated inference endpoint.
- updatedAt String
- The date and time when the dedicated inference endpoint was last updated.
- vpcUuid String
- The UUID of the VPC the dedicated inference endpoint is deployed in.
Supporting Types
GetDedicatedInferenceModelDeployment
- Accelerators List<Pulumi.DigitalOcean.Inputs.GetDedicatedInferenceModelDeploymentAccelerator>
- The GPU accelerators allocated for this model deployment. Each element contains:
- ModelId string
- The unique ID of the model.
- ModelProvider string
- The provider of the model.
- ModelSlug string
- The slug identifier for the model.
- ProviderModelId string
- The provider-specific model ID.
- Accelerators []GetDedicatedInferenceModelDeploymentAccelerator
- The GPU accelerators allocated for this model deployment. Each element contains:
- ModelId string
- The unique ID of the model.
- ModelProvider string
- The provider of the model.
- ModelSlug string
- The slug identifier for the model.
- ProviderModelId string
- The provider-specific model ID.
- accelerators List<GetDedicatedInferenceModelDeploymentAccelerator>
- The GPU accelerators allocated for this model deployment. Each element contains:
- modelId String
- The unique ID of the model.
- modelProvider String
- The provider of the model.
- modelSlug String
- The slug identifier for the model.
- providerModelId String
- The provider-specific model ID.
- accelerators GetDedicatedInferenceModelDeploymentAccelerator[]
- The GPU accelerators allocated for this model deployment. Each element contains:
- modelId string
- The unique ID of the model.
- modelProvider string
- The provider of the model.
- modelSlug string
- The slug identifier for the model.
- providerModelId string
- The provider-specific model ID.
- accelerators Sequence[GetDedicatedInferenceModelDeploymentAccelerator]
- The GPU accelerators allocated for this model deployment. Each element contains:
- model_id str
- The unique ID of the model.
- model_provider str
- The provider of the model.
- model_slug str
- The slug identifier for the model.
- provider_model_id str
- The provider-specific model ID.
- accelerators List<Property Map>
- The GPU accelerators allocated for this model deployment. Each element contains:
- modelId String
- The unique ID of the model.
- modelProvider String
- The provider of the model.
- modelSlug String
- The slug identifier for the model.
- providerModelId String
- The provider-specific model ID.
GetDedicatedInferenceModelDeploymentAccelerator
- AcceleratorSlug string
- The slug identifier for the GPU accelerator type.
- Scale int
- The number of accelerator units allocated.
- Type string
- The accelerator type.
- AcceleratorSlug string
- The slug identifier for the GPU accelerator type.
- Scale int
- The number of accelerator units allocated.
- Type string
- The accelerator type.
- acceleratorSlug String
- The slug identifier for the GPU accelerator type.
- scale Integer
- The number of accelerator units allocated.
- type String
- The accelerator type.
- acceleratorSlug string
- The slug identifier for the GPU accelerator type.
- scale number
- The number of accelerator units allocated.
- type string
- The accelerator type.
- accelerator_slug str
- The slug identifier for the GPU accelerator type.
- scale int
- The number of accelerator units allocated.
- type str
- The accelerator type.
- acceleratorSlug String
- The slug identifier for the GPU accelerator type.
- scale Number
- The number of accelerator units allocated.
- type String
- The accelerator type.
Package Details
- Repository
- DigitalOcean pulumi/pulumi-digitalocean
- License
- Apache-2.0
- Notes
- This Pulumi package is based on the
digitaloceanTerraform Provider.
Viewing docs for DigitalOcean v4.65.0
published on Wednesday, Apr 29, 2026 by Pulumi
