1. Packages
  2. Packages
  3. DigitalOcean Provider
  4. API Docs
  5. getDedicatedInferenceGpuModelConfig
Viewing docs for DigitalOcean v4.65.0
published on Wednesday, Apr 29, 2026 by Pulumi
digitalocean logo
Viewing docs for DigitalOcean v4.65.0
published on Wednesday, Apr 29, 2026 by Pulumi

    Returns the supported GPU and model compatibility matrix for dedicated inference endpoints. Use this data source to discover which models can be deployed on which GPU types.

    Example Usage

    import * as pulumi from "@pulumi/pulumi";
    import * as digitalocean from "@pulumi/digitalocean";
    
    // Look up the GPU/model compatibility matrix for dedicated inference endpoints.
    const configLookup = digitalocean.getDedicatedInferenceGpuModelConfig({});
    
    // Export the supported GPU and model combinations as a stack output.
    export const gpuModelConfigs = configLookup.then(result => result.gpuModelConfigs);
    
    import pulumi
    import pulumi_digitalocean as digitalocean
    
    # Look up the GPU/model compatibility matrix for dedicated inference endpoints.
    config_matrix = digitalocean.get_dedicated_inference_gpu_model_config()
    
    # Export the supported GPU and model combinations as a stack output.
    pulumi.export("gpuModelConfigs", config_matrix.gpu_model_configs)
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-digitalocean/sdk/v4/go/digitalocean"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		available, err := digitalocean.GetDedicatedInferenceGpuModelConfig(ctx, map[string]interface{}{}, nil)
    		if err != nil {
    			return err
    		}
    		ctx.Export("gpuModelConfigs", available.GpuModelConfigs)
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using DigitalOcean = Pulumi.DigitalOcean;
    
    return await Deployment.RunAsync(() => 
    {
        // Look up the GPU/model compatibility matrix for dedicated inference endpoints.
        var matrix = DigitalOcean.Index.GetDedicatedInferenceGpuModelConfig.Invoke();
    
        // Export the supported GPU and model combinations as a stack output.
        return new Dictionary<string, object?>
        {
            ["gpuModelConfigs"] = matrix.Apply(result => result.GpuModelConfigs),
        };
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.digitalocean.DigitaloceanFunctions;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            // Look up the GPU/model compatibility matrix for dedicated
            // inference endpoints. This invoke takes no arguments, so the
            // no-argument overload is used.
            final var available = DigitaloceanFunctions.getDedicatedInferenceGpuModelConfig();
    
            // Export the supported GPU and model combinations as a stack output.
            ctx.export("gpuModelConfigs", available.gpuModelConfigs());
        }
    }
    
    # Invoke the data source that returns the GPU/model compatibility matrix
    # for dedicated inference endpoints. The invoke takes no arguments.
    variables:
      available:
        fn::invoke:
          function: digitalocean:getDedicatedInferenceGpuModelConfig
          arguments: {}
    # Export the supported GPU and model combinations as a stack output.
    outputs:
      gpuModelConfigs: ${available.gpuModelConfigs}
    

    Using getDedicatedInferenceGpuModelConfig

    Two invocation forms are available. The direct form accepts plain arguments and either blocks until the result value is available, or returns a Promise-wrapped result. The output form accepts Input-wrapped arguments and returns an Output-wrapped result.

    function getDedicatedInferenceGpuModelConfig(opts?: InvokeOptions): Promise<GetDedicatedInferenceGpuModelConfigResult>
    function getDedicatedInferenceGpuModelConfigOutput(opts?: InvokeOptions): Output<GetDedicatedInferenceGpuModelConfigResult>
    def get_dedicated_inference_gpu_model_config(opts: Optional[InvokeOptions] = None) -> GetDedicatedInferenceGpuModelConfigResult
    def get_dedicated_inference_gpu_model_config_output(opts: Optional[InvokeOptions] = None) -> Output[GetDedicatedInferenceGpuModelConfigResult]
    func GetDedicatedInferenceGpuModelConfig(ctx *Context, opts ...InvokeOption) (*GetDedicatedInferenceGpuModelConfigResult, error)
    func GetDedicatedInferenceGpuModelConfigOutput(ctx *Context, opts ...InvokeOption) GetDedicatedInferenceGpuModelConfigResultOutput

    > Note: This function is named GetDedicatedInferenceGpuModelConfig in the Go SDK.

    public static class GetDedicatedInferenceGpuModelConfig 
    {
        public static Task<GetDedicatedInferenceGpuModelConfigResult> InvokeAsync(InvokeOptions? opts = null)
        public static Output<GetDedicatedInferenceGpuModelConfigResult> Invoke(InvokeOptions? opts = null)
    }
    public static CompletableFuture<GetDedicatedInferenceGpuModelConfigResult> getDedicatedInferenceGpuModelConfig(InvokeOptions options)
    public static Output<GetDedicatedInferenceGpuModelConfigResult> getDedicatedInferenceGpuModelConfig(InvokeOptions options)
    
    fn::invoke:
      function: digitalocean:index/getDedicatedInferenceGpuModelConfig:getDedicatedInferenceGpuModelConfig
      arguments:
        # arguments dictionary

    getDedicatedInferenceGpuModelConfig Result

    The following output properties are available:

    GpuModelConfigs List<Pulumi.DigitalOcean.Outputs.GetDedicatedInferenceGpuModelConfigGpuModelConfig>
    The list of supported GPU and model combinations. Each element contains:
    Id string
    The provider-assigned unique ID for this managed resource.
    GpuModelConfigs []GetDedicatedInferenceGpuModelConfigGpuModelConfig
    The list of supported GPU and model combinations. Each element contains:
    Id string
    The provider-assigned unique ID for this managed resource.
    gpuModelConfigs List<GetDedicatedInferenceGpuModelConfigGpuModelConfig>
    The list of supported GPU and model combinations. Each element contains:
    id String
    The provider-assigned unique ID for this managed resource.
    gpuModelConfigs GetDedicatedInferenceGpuModelConfigGpuModelConfig[]
    The list of supported GPU and model combinations. Each element contains:
    id string
    The provider-assigned unique ID for this managed resource.
    gpu_model_configs Sequence[GetDedicatedInferenceGpuModelConfigGpuModelConfig]
    The list of supported GPU and model combinations. Each element contains:
    id str
    The provider-assigned unique ID for this managed resource.
    gpuModelConfigs List<Property Map>
    The list of supported GPU and model combinations. Each element contains:
    id String
    The provider-assigned unique ID for this managed resource.

    Supporting Types

    GetDedicatedInferenceGpuModelConfigGpuModelConfig

    GpuSlugs List<string>
    The GPU slugs that support this model.
    IsModelGated bool
    Whether the model requires gated access (e.g. a HuggingFace token).
    ModelName string
    The human-readable name of the model.
    ModelSlug string
    The slug identifier for the model.
    GpuSlugs []string
    The GPU slugs that support this model.
    IsModelGated bool
    Whether the model requires gated access (e.g. a HuggingFace token).
    ModelName string
    The human-readable name of the model.
    ModelSlug string
    The slug identifier for the model.
    gpuSlugs List<String>
    The GPU slugs that support this model.
    isModelGated Boolean
    Whether the model requires gated access (e.g. a HuggingFace token).
    modelName String
    The human-readable name of the model.
    modelSlug String
    The slug identifier for the model.
    gpuSlugs string[]
    The GPU slugs that support this model.
    isModelGated boolean
    Whether the model requires gated access (e.g. a HuggingFace token).
    modelName string
    The human-readable name of the model.
    modelSlug string
    The slug identifier for the model.
    gpu_slugs Sequence[str]
    The GPU slugs that support this model.
    is_model_gated bool
    Whether the model requires gated access (e.g. a HuggingFace token).
    model_name str
    The human-readable name of the model.
    model_slug str
    The slug identifier for the model.
    gpuSlugs List<String>
    The GPU slugs that support this model.
    isModelGated Boolean
    Whether the model requires gated access (e.g. a HuggingFace token).
    modelName String
    The human-readable name of the model.
    modelSlug String
    The slug identifier for the model.

    Package Details

    Repository
    DigitalOcean pulumi/pulumi-digitalocean
    License
    Apache-2.0
    Notes
    This Pulumi package is based on the digitalocean Terraform Provider.
    digitalocean logo
    Viewing docs for DigitalOcean v4.65.0
    published on Wednesday, Apr 29, 2026 by Pulumi
      Try Pulumi Cloud free. Your team will thank you.