1. Packages
  2. Gcore Provider
  3. API Docs
  4. getCloudInferenceFlavor
Viewing docs for gcore 2.0.0-alpha.3
published on Monday, Mar 30, 2026 by g-core

    Inference flavors define the GPU and CPU resource configurations available for inference deployments.

    Example Usage

    import * as pulumi from "@pulumi/pulumi";
    import * as gcore from "@pulumi/gcore";
    
    const exampleCloudInferenceFlavor = gcore.getCloudInferenceFlavor({
        flavorName: "inference-16vcpu-232gib-1xh100-80gb",
    });
    
    import pulumi
    import pulumi_gcore as gcore
    
    example_cloud_inference_flavor = gcore.get_cloud_inference_flavor(flavor_name="inference-16vcpu-232gib-1xh100-80gb")
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-terraform-provider/sdks/go/gcore/v2/gcore"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := gcore.GetCloudInferenceFlavor(ctx, &gcore.GetCloudInferenceFlavorArgs{
    			FlavorName: "inference-16vcpu-232gib-1xh100-80gb",
    		}, nil)
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Gcore = Pulumi.Gcore;
    
    return await Deployment.RunAsync(() => 
    {
        var exampleCloudInferenceFlavor = Gcore.GetCloudInferenceFlavor.Invoke(new()
        {
            FlavorName = "inference-16vcpu-232gib-1xh100-80gb",
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.gcore.GcoreFunctions;
    import com.pulumi.gcore.inputs.GetCloudInferenceFlavorArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            final var exampleCloudInferenceFlavor = GcoreFunctions.getCloudInferenceFlavor(GetCloudInferenceFlavorArgs.builder()
                .flavorName("inference-16vcpu-232gib-1xh100-80gb")
                .build());
    
        }
    }
    
    variables:
      exampleCloudInferenceFlavor:
        fn::invoke:
          function: gcore:getCloudInferenceFlavor
          arguments:
            flavorName: inference-16vcpu-232gib-1xh100-80gb
    

    Using getCloudInferenceFlavor

    Two invocation forms are available. The direct form accepts plain arguments and either blocks until the result value is available, or returns a Promise-wrapped result. The output form accepts Input-wrapped arguments and returns an Output-wrapped result.

    function getCloudInferenceFlavor(args: GetCloudInferenceFlavorArgs, opts?: InvokeOptions): Promise<GetCloudInferenceFlavorResult>
    function getCloudInferenceFlavorOutput(args: GetCloudInferenceFlavorOutputArgs, opts?: InvokeOptions): Output<GetCloudInferenceFlavorResult>
    def get_cloud_inference_flavor(flavor_name: Optional[str] = None,
                                   opts: Optional[InvokeOptions] = None) -> GetCloudInferenceFlavorResult
    def get_cloud_inference_flavor_output(flavor_name: Optional[pulumi.Input[str]] = None,
                                          opts: Optional[InvokeOptions] = None) -> Output[GetCloudInferenceFlavorResult]
    func GetCloudInferenceFlavor(ctx *Context, args *GetCloudInferenceFlavorArgs, opts ...InvokeOption) (*GetCloudInferenceFlavorResult, error)
    func GetCloudInferenceFlavorOutput(ctx *Context, args *GetCloudInferenceFlavorOutputArgs, opts ...InvokeOption) GetCloudInferenceFlavorResultOutput

    > Note: This function is named GetCloudInferenceFlavor in the Go SDK.

    public static class GetCloudInferenceFlavor 
    {
        public static Task<GetCloudInferenceFlavorResult> InvokeAsync(GetCloudInferenceFlavorArgs args, InvokeOptions? opts = null)
        public static Output<GetCloudInferenceFlavorResult> Invoke(GetCloudInferenceFlavorInvokeArgs args, InvokeOptions? opts = null)
    }
    public static CompletableFuture<GetCloudInferenceFlavorResult> getCloudInferenceFlavor(GetCloudInferenceFlavorArgs args, InvokeOptions options)
    public static Output<GetCloudInferenceFlavorResult> getCloudInferenceFlavor(GetCloudInferenceFlavorArgs args, InvokeOptions options)
    
    fn::invoke:
      function: gcore:index/getCloudInferenceFlavor:getCloudInferenceFlavor
      arguments:
        # arguments dictionary

    The following arguments are supported:

    FlavorName string
    Inference flavor name.
    FlavorName string
    Inference flavor name.
    flavorName String
    Inference flavor name.
    flavorName string
    Inference flavor name.
    flavor_name str
    Inference flavor name.
    flavorName String
    Inference flavor name.

    getCloudInferenceFlavor Result

    The following output properties are available:

    Cpu double
    Inference flavor cpu count.
    Description string
    Inference flavor description.
    FlavorName string
    Inference flavor name.
    Gpu double
    Inference flavor gpu count.
    GpuComputeCapability string
    Inference flavor gpu compute capability.
    GpuMemory double
    Inference flavor gpu memory in Gi.
    GpuModel string
    Inference flavor gpu model.
    Id string
    The provider-assigned unique ID for this managed resource.
    IsGpuShared bool
    Inference flavor is gpu shared.
    Memory double
    Inference flavor memory in Gi.
    Name string
    Inference flavor name.
    Cpu float64
    Inference flavor cpu count.
    Description string
    Inference flavor description.
    FlavorName string
    Inference flavor name.
    Gpu float64
    Inference flavor gpu count.
    GpuComputeCapability string
    Inference flavor gpu compute capability.
    GpuMemory float64
    Inference flavor gpu memory in Gi.
    GpuModel string
    Inference flavor gpu model.
    Id string
    The provider-assigned unique ID for this managed resource.
    IsGpuShared bool
    Inference flavor is gpu shared.
    Memory float64
    Inference flavor memory in Gi.
    Name string
    Inference flavor name.
    cpu Double
    Inference flavor cpu count.
    description String
    Inference flavor description.
    flavorName String
    Inference flavor name.
    gpu Double
    Inference flavor gpu count.
    gpuComputeCapability String
    Inference flavor gpu compute capability.
    gpuMemory Double
    Inference flavor gpu memory in Gi.
    gpuModel String
    Inference flavor gpu model.
    id String
    The provider-assigned unique ID for this managed resource.
    isGpuShared Boolean
    Inference flavor is gpu shared.
    memory Double
    Inference flavor memory in Gi.
    name String
    Inference flavor name.
    cpu number
    Inference flavor cpu count.
    description string
    Inference flavor description.
    flavorName string
    Inference flavor name.
    gpu number
    Inference flavor gpu count.
    gpuComputeCapability string
    Inference flavor gpu compute capability.
    gpuMemory number
    Inference flavor gpu memory in Gi.
    gpuModel string
    Inference flavor gpu model.
    id string
    The provider-assigned unique ID for this managed resource.
    isGpuShared boolean
    Inference flavor is gpu shared.
    memory number
    Inference flavor memory in Gi.
    name string
    Inference flavor name.
    cpu float
    Inference flavor cpu count.
    description str
    Inference flavor description.
    flavor_name str
    Inference flavor name.
    gpu float
    Inference flavor gpu count.
    gpu_compute_capability str
    Inference flavor gpu compute capability.
    gpu_memory float
    Inference flavor gpu memory in Gi.
    gpu_model str
    Inference flavor gpu model.
    id str
    The provider-assigned unique ID for this managed resource.
    is_gpu_shared bool
    Inference flavor is gpu shared.
    memory float
    Inference flavor memory in Gi.
    name str
    Inference flavor name.
    cpu Number
    Inference flavor cpu count.
    description String
    Inference flavor description.
    flavorName String
    Inference flavor name.
    gpu Number
    Inference flavor gpu count.
    gpuComputeCapability String
    Inference flavor gpu compute capability.
    gpuMemory Number
    Inference flavor gpu memory in Gi.
    gpuModel String
    Inference flavor gpu model.
    id String
    The provider-assigned unique ID for this managed resource.
    isGpuShared Boolean
    Inference flavor is gpu shared.
    memory Number
    Inference flavor memory in Gi.
    name String
    Inference flavor name.

    Package Details

    Repository
    gcore g-core/terraform-provider-gcore
    License
    Notes
    This Pulumi package is based on the gcore Terraform Provider.
    Viewing docs for gcore 2.0.0-alpha.3
    published on Monday, Mar 30, 2026 by g-core
      Try Pulumi Cloud free. Your team will thank you.