1. Packages
  2. Packages
  3. DigitalOcean Provider
  4. API Docs
  5. getDedicatedInference
Viewing docs for DigitalOcean v4.65.0
published on Wednesday, Apr 29, 2026 by Pulumi
digitalocean logo
Viewing docs for DigitalOcean v4.65.0
published on Wednesday, Apr 29, 2026 by Pulumi

    Get information on a dedicated inference endpoint for use in other resources. This data source provides all of the endpoint’s properties as configured on your DigitalOcean account.

    Example Usage

    import * as pulumi from "@pulumi/pulumi";
    import * as digitalocean from "@pulumi/digitalocean";
    
    const example = digitalocean.getDedicatedInference({
        id: "endpoint-id",
    });
    export const endpointStatus = example.then(example => example.status);
    
    import pulumi
    import pulumi_digitalocean as digitalocean
    
    example = digitalocean.get_dedicated_inference(id="endpoint-id")
    pulumi.export("endpointStatus", example.status)
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-digitalocean/sdk/v4/go/digitalocean"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		example, err := digitalocean.LookupDedicatedInference(ctx, &digitalocean.LookupDedicatedInferenceArgs{
    			Id: "endpoint-id",
    		}, nil)
    		if err != nil {
    			return err
    		}
    		ctx.Export("endpointStatus", example.Status)
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using DigitalOcean = Pulumi.DigitalOcean;
    
    return await Deployment.RunAsync(() => 
    {
        var example = DigitalOcean.GetDedicatedInference.Invoke(new()
        {
            Id = "endpoint-id",
        });
    
        return new Dictionary<string, object?>
        {
            ["endpointStatus"] = example.Apply(getDedicatedInferenceResult => getDedicatedInferenceResult.Status),
        };
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.digitalocean.DigitaloceanFunctions;
    import com.pulumi.digitalocean.inputs.GetDedicatedInferenceArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            final var example = DigitaloceanFunctions.getDedicatedInference(GetDedicatedInferenceArgs.builder()
                .id("endpoint-id")
                .build());
    
            ctx.export("endpointStatus", example.status());
        }
    }
    
    variables:
      example:
        fn::invoke:
          function: digitalocean:getDedicatedInference
          arguments:
            id: endpoint-id
    outputs:
      endpointStatus: ${example.status}
    

    Using getDedicatedInference

    Two invocation forms are available. The direct form accepts plain arguments and either blocks until the result value is available, or returns a Promise-wrapped result. The output form accepts Input-wrapped arguments and returns an Output-wrapped result.

    function getDedicatedInference(args: GetDedicatedInferenceArgs, opts?: InvokeOptions): Promise<GetDedicatedInferenceResult>
    function getDedicatedInferenceOutput(args: GetDedicatedInferenceOutputArgs, opts?: InvokeOptions): Output<GetDedicatedInferenceResult>
    def get_dedicated_inference(id: Optional[str] = None,
                                opts: Optional[InvokeOptions] = None) -> GetDedicatedInferenceResult
    def get_dedicated_inference_output(id: Optional[pulumi.Input[str]] = None,
                                opts: Optional[InvokeOptions] = None) -> Output[GetDedicatedInferenceResult]
    func LookupDedicatedInference(ctx *Context, args *LookupDedicatedInferenceArgs, opts ...InvokeOption) (*LookupDedicatedInferenceResult, error)
    func LookupDedicatedInferenceOutput(ctx *Context, args *LookupDedicatedInferenceOutputArgs, opts ...InvokeOption) LookupDedicatedInferenceResultOutput

    > Note: This function is named LookupDedicatedInference in the Go SDK.

    public static class GetDedicatedInference 
    {
        public static Task<GetDedicatedInferenceResult> InvokeAsync(GetDedicatedInferenceArgs args, InvokeOptions? opts = null)
        public static Output<GetDedicatedInferenceResult> Invoke(GetDedicatedInferenceInvokeArgs args, InvokeOptions? opts = null)
    }
    public static CompletableFuture<GetDedicatedInferenceResult> getDedicatedInference(GetDedicatedInferenceArgs args, InvokeOptions options)
    public static Output<GetDedicatedInferenceResult> getDedicatedInference(GetDedicatedInferenceArgs args, InvokeOptions options)
    
    fn::invoke:
      function: digitalocean:index/getDedicatedInference:getDedicatedInference
      arguments:
        # arguments dictionary

    The following arguments are supported:

    Id string
    The ID of the dedicated inference endpoint.
    Id string
    The ID of the dedicated inference endpoint.
    id String
    The ID of the dedicated inference endpoint.
    id string
    The ID of the dedicated inference endpoint.
    id str
    The ID of the dedicated inference endpoint.
    id String
    The ID of the dedicated inference endpoint.

    getDedicatedInference Result

    The following output properties are available:

    CreatedAt string
    The date and time when the dedicated inference endpoint was created.
    EnablePublicEndpoint bool
    Whether the public HTTPS endpoint is enabled.
    Id string
    ModelDeployments List<Pulumi.DigitalOcean.Outputs.GetDedicatedInferenceModelDeployment>
    The list of model deployments running on the endpoint. Each element contains:
    Name string
    The name of the dedicated inference endpoint.
    PrivateEndpointFqdn string
    The fully-qualified domain name of the private endpoint.
    PublicEndpointFqdn string
    The fully-qualified domain name of the public endpoint, if enabled.
    Region string
    The region where the dedicated inference endpoint is deployed.
    Status string
    The current status of the dedicated inference endpoint.
    UpdatedAt string
    The date and time when the dedicated inference endpoint was last updated.
    VpcUuid string
    The UUID of the VPC the dedicated inference endpoint is deployed in.
    CreatedAt string
    The date and time when the dedicated inference endpoint was created.
    EnablePublicEndpoint bool
    Whether the public HTTPS endpoint is enabled.
    Id string
    ModelDeployments []GetDedicatedInferenceModelDeployment
    The list of model deployments running on the endpoint. Each element contains:
    Name string
    The name of the dedicated inference endpoint.
    PrivateEndpointFqdn string
    The fully-qualified domain name of the private endpoint.
    PublicEndpointFqdn string
    The fully-qualified domain name of the public endpoint, if enabled.
    Region string
    The region where the dedicated inference endpoint is deployed.
    Status string
    The current status of the dedicated inference endpoint.
    UpdatedAt string
    The date and time when the dedicated inference endpoint was last updated.
    VpcUuid string
    The UUID of the VPC the dedicated inference endpoint is deployed in.
    createdAt String
    The date and time when the dedicated inference endpoint was created.
    enablePublicEndpoint Boolean
    Whether the public HTTPS endpoint is enabled.
    id String
    modelDeployments List<GetDedicatedInferenceModelDeployment>
    The list of model deployments running on the endpoint. Each element contains:
    name String
    The name of the dedicated inference endpoint.
    privateEndpointFqdn String
    The fully-qualified domain name of the private endpoint.
    publicEndpointFqdn String
    The fully-qualified domain name of the public endpoint, if enabled.
    region String
    The region where the dedicated inference endpoint is deployed.
    status String
    The current status of the dedicated inference endpoint.
    updatedAt String
    The date and time when the dedicated inference endpoint was last updated.
    vpcUuid String
    The UUID of the VPC the dedicated inference endpoint is deployed in.
    createdAt string
    The date and time when the dedicated inference endpoint was created.
    enablePublicEndpoint boolean
    Whether the public HTTPS endpoint is enabled.
    id string
    modelDeployments GetDedicatedInferenceModelDeployment[]
    The list of model deployments running on the endpoint. Each element contains:
    name string
    The name of the dedicated inference endpoint.
    privateEndpointFqdn string
    The fully-qualified domain name of the private endpoint.
    publicEndpointFqdn string
    The fully-qualified domain name of the public endpoint, if enabled.
    region string
    The region where the dedicated inference endpoint is deployed.
    status string
    The current status of the dedicated inference endpoint.
    updatedAt string
    The date and time when the dedicated inference endpoint was last updated.
    vpcUuid string
    The UUID of the VPC the dedicated inference endpoint is deployed in.
    created_at str
    The date and time when the dedicated inference endpoint was created.
    enable_public_endpoint bool
    Whether the public HTTPS endpoint is enabled.
    id str
    model_deployments Sequence[GetDedicatedInferenceModelDeployment]
    The list of model deployments running on the endpoint. Each element contains:
    name str
    The name of the dedicated inference endpoint.
    private_endpoint_fqdn str
    The fully-qualified domain name of the private endpoint.
    public_endpoint_fqdn str
    The fully-qualified domain name of the public endpoint, if enabled.
    region str
    The region where the dedicated inference endpoint is deployed.
    status str
    The current status of the dedicated inference endpoint.
    updated_at str
    The date and time when the dedicated inference endpoint was last updated.
    vpc_uuid str
    The UUID of the VPC the dedicated inference endpoint is deployed in.
    createdAt String
    The date and time when the dedicated inference endpoint was created.
    enablePublicEndpoint Boolean
    Whether the public HTTPS endpoint is enabled.
    id String
    modelDeployments List<Property Map>
    The list of model deployments running on the endpoint. Each element contains:
    name String
    The name of the dedicated inference endpoint.
    privateEndpointFqdn String
    The fully-qualified domain name of the private endpoint.
    publicEndpointFqdn String
    The fully-qualified domain name of the public endpoint, if enabled.
    region String
    The region where the dedicated inference endpoint is deployed.
    status String
    The current status of the dedicated inference endpoint.
    updatedAt String
    The date and time when the dedicated inference endpoint was last updated.
    vpcUuid String
    The UUID of the VPC the dedicated inference endpoint is deployed in.

    Supporting Types

    GetDedicatedInferenceModelDeployment

    Accelerators List<Pulumi.DigitalOcean.Outputs.GetDedicatedInferenceModelDeploymentAccelerator>
    The GPU accelerators allocated for this model deployment. Each element contains:
    ModelId string
    The unique ID of the model.
    ModelProvider string
    The provider of the model.
    ModelSlug string
    The slug identifier for the model.
    ProviderModelId string
    The provider-specific model ID.
    Accelerators []GetDedicatedInferenceModelDeploymentAccelerator
    The GPU accelerators allocated for this model deployment. Each element contains:
    ModelId string
    The unique ID of the model.
    ModelProvider string
    The provider of the model.
    ModelSlug string
    The slug identifier for the model.
    ProviderModelId string
    The provider-specific model ID.
    accelerators List<GetDedicatedInferenceModelDeploymentAccelerator>
    The GPU accelerators allocated for this model deployment. Each element contains:
    modelId String
    The unique ID of the model.
    modelProvider String
    The provider of the model.
    modelSlug String
    The slug identifier for the model.
    providerModelId String
    The provider-specific model ID.
    accelerators GetDedicatedInferenceModelDeploymentAccelerator[]
    The GPU accelerators allocated for this model deployment. Each element contains:
    modelId string
    The unique ID of the model.
    modelProvider string
    The provider of the model.
    modelSlug string
    The slug identifier for the model.
    providerModelId string
    The provider-specific model ID.
    accelerators Sequence[GetDedicatedInferenceModelDeploymentAccelerator]
    The GPU accelerators allocated for this model deployment. Each element contains:
    model_id str
    The unique ID of the model.
    model_provider str
    The provider of the model.
    model_slug str
    The slug identifier for the model.
    provider_model_id str
    The provider-specific model ID.
    accelerators List<Property Map>
    The GPU accelerators allocated for this model deployment. Each element contains:
    modelId String
    The unique ID of the model.
    modelProvider String
    The provider of the model.
    modelSlug String
    The slug identifier for the model.
    providerModelId String
    The provider-specific model ID.

    GetDedicatedInferenceModelDeploymentAccelerator

    AcceleratorSlug string
    The slug identifier for the GPU accelerator type.
    Scale int
    The number of accelerator units allocated.
    Type string
    The accelerator type.
    AcceleratorSlug string
    The slug identifier for the GPU accelerator type.
    Scale int
    The number of accelerator units allocated.
    Type string
    The accelerator type.
    acceleratorSlug String
    The slug identifier for the GPU accelerator type.
    scale Integer
    The number of accelerator units allocated.
    type String
    The accelerator type.
    acceleratorSlug string
    The slug identifier for the GPU accelerator type.
    scale number
    The number of accelerator units allocated.
    type string
    The accelerator type.
    accelerator_slug str
    The slug identifier for the GPU accelerator type.
    scale int
    The number of accelerator units allocated.
    type str
    The accelerator type.
    acceleratorSlug String
    The slug identifier for the GPU accelerator type.
    scale Number
    The number of accelerator units allocated.
    type String
    The accelerator type.

    Package Details

    Repository
    DigitalOcean pulumi/pulumi-digitalocean
    License
    Apache-2.0
    Notes
    This Pulumi package is based on the digitalocean Terraform Provider.
    digitalocean logo
    Viewing docs for DigitalOcean v4.65.0
    published on Wednesday, Apr 29, 2026 by Pulumi
      Try Pulumi Cloud free. Your team will thank you.