Viewing docs for gcore 2.0.0-alpha.3
published on Monday, Mar 30, 2026 by g-core
Inference flavors define the GPU and CPU resource configurations available for inference deployments.
Example Usage
import * as pulumi from "@pulumi/pulumi";
import * as gcore from "@pulumi/gcore";

// Look up an inference flavor (a GPU/CPU resource configuration) by its name.
const exampleCloudInferenceFlavor = gcore.getCloudInferenceFlavor({
    flavorName: "inference-16vcpu-232gib-1xh100-80gb",
});
import pulumi
import pulumi_gcore as gcore
# Look up an inference flavor (a GPU/CPU resource configuration) by its name.
example_cloud_inference_flavor = gcore.get_cloud_inference_flavor(flavor_name="inference-16vcpu-232gib-1xh100-80gb")
package main
import (
"github.com/pulumi/pulumi-terraform-provider/sdks/go/gcore/v2/gcore"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := gcore.GetCloudInferenceFlavor(ctx, &gcore.GetCloudInferenceFlavorArgs{
FlavorName: "inference-16vcpu-232gib-1xh100-80gb",
}, nil)
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcore = Pulumi.Gcore;

return await Deployment.RunAsync(() =>
{
    // Look up an inference flavor (a GPU/CPU resource configuration) by its name.
    var exampleCloudInferenceFlavor = Gcore.GetCloudInferenceFlavor.Invoke(new()
    {
        FlavorName = "inference-16vcpu-232gib-1xh100-80gb",
    });
});
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcore.GcoreFunctions;
import com.pulumi.gcore.inputs.GetCloudInferenceFlavorArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    // Look up an inference flavor (a GPU/CPU resource configuration) by its name.
    public static void stack(Context ctx) {
        final var exampleCloudInferenceFlavor = GcoreFunctions.getCloudInferenceFlavor(GetCloudInferenceFlavorArgs.builder()
            .flavorName("inference-16vcpu-232gib-1xh100-80gb")
            .build());
    }
}
# Look up an inference flavor (a GPU/CPU resource configuration) by its name.
variables:
  exampleCloudInferenceFlavor:
    fn::invoke:
      function: gcore:getCloudInferenceFlavor
      arguments:
        flavorName: inference-16vcpu-232gib-1xh100-80gb
Using getCloudInferenceFlavor
Two invocation forms are available. The direct form accepts plain arguments and either blocks until the result value is available, or returns a Promise-wrapped result. The output form accepts Input-wrapped arguments and returns an Output-wrapped result.
function getCloudInferenceFlavor(args: GetCloudInferenceFlavorArgs, opts?: InvokeOptions): Promise<GetCloudInferenceFlavorResult>
function getCloudInferenceFlavorOutput(args: GetCloudInferenceFlavorOutputArgs, opts?: InvokeOptions): Output<GetCloudInferenceFlavorResult>

def get_cloud_inference_flavor(flavor_name: Optional[str] = None,
opts: Optional[InvokeOptions] = None) -> GetCloudInferenceFlavorResult
def get_cloud_inference_flavor_output(flavor_name: Optional[pulumi.Input[str]] = None,
                               opts: Optional[InvokeOptions] = None) -> Output[GetCloudInferenceFlavorResult]

func GetCloudInferenceFlavor(ctx *Context, args *GetCloudInferenceFlavorArgs, opts ...InvokeOption) (*GetCloudInferenceFlavorResult, error)
func GetCloudInferenceFlavorOutput(ctx *Context, args *GetCloudInferenceFlavorOutputArgs, opts ...InvokeOption) GetCloudInferenceFlavorResultOutput

> Note: This function is named GetCloudInferenceFlavor in the Go SDK.
public static class GetCloudInferenceFlavor
{
public static Task<GetCloudInferenceFlavorResult> InvokeAsync(GetCloudInferenceFlavorArgs args, InvokeOptions? opts = null)
public static Output<GetCloudInferenceFlavorResult> Invoke(GetCloudInferenceFlavorInvokeArgs args, InvokeOptions? opts = null)
}

public static CompletableFuture<GetCloudInferenceFlavorResult> getCloudInferenceFlavor(GetCloudInferenceFlavorArgs args, InvokeOptions options)
public static Output<GetCloudInferenceFlavorResult> getCloudInferenceFlavor(GetCloudInferenceFlavorArgs args, InvokeOptions options)
fn::invoke:
function: gcore:index/getCloudInferenceFlavor:getCloudInferenceFlavor
arguments:
        # arguments dictionary

The following arguments are supported:
- FlavorName string - Inference flavor name.
- FlavorName string - Inference flavor name.
- flavorName String - Inference flavor name.
- flavorName string - Inference flavor name.
- flavor_name str - Inference flavor name.
- flavorName String - Inference flavor name.
getCloudInferenceFlavor Result
The following output properties are available:
- Cpu double - Inference flavor cpu count.
- Description string - Inference flavor description.
- FlavorName string - Inference flavor name.
- Gpu double - Inference flavor gpu count.
- GpuComputeCapability string - Inference flavor gpu compute capability.
- GpuMemory double - Inference flavor gpu memory in Gi.
- GpuModel string - Inference flavor gpu model.
- Id string - The provider-assigned unique ID for this managed resource.
- bool - Inference flavor is gpu shared.
- Memory double - Inference flavor memory in Gi.
- Name string - Inference flavor name.
- Cpu float64 - Inference flavor cpu count.
- Description string - Inference flavor description.
- FlavorName string - Inference flavor name.
- Gpu float64 - Inference flavor gpu count.
- GpuComputeCapability string - Inference flavor gpu compute capability.
- GpuMemory float64 - Inference flavor gpu memory in Gi.
- GpuModel string - Inference flavor gpu model.
- Id string - The provider-assigned unique ID for this managed resource.
- bool - Inference flavor is gpu shared.
- Memory float64 - Inference flavor memory in Gi.
- Name string - Inference flavor name.
- cpu Double - Inference flavor cpu count.
- description String - Inference flavor description.
- flavorName String - Inference flavor name.
- gpu Double - Inference flavor gpu count.
- gpuComputeCapability String - Inference flavor gpu compute capability.
- gpuMemory Double - Inference flavor gpu memory in Gi.
- gpuModel String - Inference flavor gpu model.
- id String - The provider-assigned unique ID for this managed resource.
- Boolean - Inference flavor is gpu shared.
- memory Double - Inference flavor memory in Gi.
- name String - Inference flavor name.
- cpu number - Inference flavor cpu count.
- description string - Inference flavor description.
- flavorName string - Inference flavor name.
- gpu number - Inference flavor gpu count.
- gpuComputeCapability string - Inference flavor gpu compute capability.
- gpuMemory number - Inference flavor gpu memory in Gi.
- gpuModel string - Inference flavor gpu model.
- id string - The provider-assigned unique ID for this managed resource.
- boolean - Inference flavor is gpu shared.
- memory number - Inference flavor memory in Gi.
- name string - Inference flavor name.
- cpu float - Inference flavor cpu count.
- description str - Inference flavor description.
- flavor_name str - Inference flavor name.
- gpu float - Inference flavor gpu count.
- gpu_compute_capability str - Inference flavor gpu compute capability.
- gpu_memory float - Inference flavor gpu memory in Gi.
- gpu_model str - Inference flavor gpu model.
- id str - The provider-assigned unique ID for this managed resource.
- bool - Inference flavor is gpu shared.
- memory float - Inference flavor memory in Gi.
- name str - Inference flavor name.
- cpu Number - Inference flavor cpu count.
- description String - Inference flavor description.
- flavorName String - Inference flavor name.
- gpu Number - Inference flavor gpu count.
- gpuComputeCapability String - Inference flavor gpu compute capability.
- gpuMemory Number - Inference flavor gpu memory in Gi.
- gpuModel String - Inference flavor gpu model.
- id String - The provider-assigned unique ID for this managed resource.
- Boolean - Inference flavor is gpu shared.
- memory Number - Inference flavor memory in Gi.
- name String - Inference flavor name.
Package Details
- Repository
- gcore g-core/terraform-provider-gcore
- License
- Notes
- This Pulumi package is based on the
  gcore Terraform Provider.
Viewing docs for gcore 2.0.0-alpha.3
published on Monday, Mar 30, 2026 by g-core
