1. Packages
  2. Azure Classic
  3. API Docs
  4. media
  5. Transform

We recommend using Azure Native.

Azure Classic v5.73.0 published on Monday, Apr 22, 2024 by Pulumi

azure.media.Transform

Explore with Pulumi AI

azure logo

We recommend using Azure Native.

Azure Classic v5.73.0 published on Monday, Apr 22, 2024 by Pulumi

    Manages a Media Services Transform.

    Example Usage

    import * as pulumi from "@pulumi/pulumi";
    import * as azure from "@pulumi/azure";
    
    // Resource group that holds every resource in this example.
    const resourceGroup = new azure.core.ResourceGroup("example", {
        name: "media-resources",
        location: "West Europe",
    });
    
    // Storage account backing the Media Services account.
    const storageAccount = new azure.storage.Account("example", {
        name: "examplestoracc",
        resourceGroupName: resourceGroup.name,
        location: resourceGroup.location,
        accountTier: "Standard",
        accountReplicationType: "GRS",
    });
    
    // Media Services account, using the storage account above as its primary store.
    const mediaServicesAccount = new azure.media.ServiceAccount("example", {
        name: "examplemediaacc",
        location: resourceGroup.location,
        resourceGroupName: resourceGroup.name,
        storageAccounts: [{
            id: storageAccount.id,
            isPrimary: true,
        }],
    });
    
    // Transform with a single output that encodes audio using the
    // built-in "AACGoodQualityAudio" preset.
    const transform = new azure.media.Transform("example", {
        name: "transform1",
        resourceGroupName: resourceGroup.name,
        mediaServicesAccountName: mediaServicesAccount.name,
        description: "My transform description",
        outputs: [{
            relativePriority: "Normal",
            onErrorAction: "ContinueJob",
            builtinPreset: {
                presetName: "AACGoodQualityAudio",
            },
        }],
    });
    
    import pulumi
    import pulumi_azure as azure
    
    # Resource group that holds every resource in this example.
    resource_group = azure.core.ResourceGroup("example",
        name="media-resources",
        location="West Europe")
    # Storage account backing the Media Services account.
    storage_account = azure.storage.Account("example",
        name="examplestoracc",
        resource_group_name=resource_group.name,
        location=resource_group.location,
        account_tier="Standard",
        account_replication_type="GRS")
    # Media Services account, using the storage account above as its primary store.
    media_services_account = azure.media.ServiceAccount("example",
        name="examplemediaacc",
        location=resource_group.location,
        resource_group_name=resource_group.name,
        storage_accounts=[azure.media.ServiceAccountStorageAccountArgs(
            id=storage_account.id,
            is_primary=True,
        )])
    # Transform with a single output that encodes audio using the
    # built-in "AACGoodQualityAudio" preset.
    transform = azure.media.Transform("example",
        name="transform1",
        resource_group_name=resource_group.name,
        media_services_account_name=media_services_account.name,
        description="My transform description",
        outputs=[azure.media.TransformOutputArgs(
            relative_priority="Normal",
            on_error_action="ContinueJob",
            builtin_preset=azure.media.TransformOutputBuiltinPresetArgs(
                preset_name="AACGoodQualityAudio",
            ),
        )])
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-azure/sdk/v5/go/azure/core"
    	"github.com/pulumi/pulumi-azure/sdk/v5/go/azure/media"
    	"github.com/pulumi/pulumi-azure/sdk/v5/go/azure/storage"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		// Resource group that holds every resource in this example.
    		example, err := core.NewResourceGroup(ctx, "example", &core.ResourceGroupArgs{
    			Name:     pulumi.String("media-resources"),
    			Location: pulumi.String("West Europe"),
    		})
    		if err != nil {
    			return err
    		}
    		// Storage account backing the Media Services account below.
    		exampleAccount, err := storage.NewAccount(ctx, "example", &storage.AccountArgs{
    			Name:                   pulumi.String("examplestoracc"),
    			ResourceGroupName:      example.Name,
    			Location:               example.Location,
    			AccountTier:            pulumi.String("Standard"),
    			AccountReplicationType: pulumi.String("GRS"),
    		})
    		if err != nil {
    			return err
    		}
    		// Media Services account, using the storage account above as its primary store.
    		exampleServiceAccount, err := media.NewServiceAccount(ctx, "example", &media.ServiceAccountArgs{
    			Name:              pulumi.String("examplemediaacc"),
    			Location:          example.Location,
    			ResourceGroupName: example.Name,
    			StorageAccounts: media.ServiceAccountStorageAccountArray{
    				&media.ServiceAccountStorageAccountArgs{
    					Id:        exampleAccount.ID(),
    					IsPrimary: pulumi.Bool(true),
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		// Transform with a single output that encodes audio using the
    		// built-in "AACGoodQualityAudio" preset.
    		_, err = media.NewTransform(ctx, "example", &media.TransformArgs{
    			Name:                     pulumi.String("transform1"),
    			ResourceGroupName:        example.Name,
    			MediaServicesAccountName: exampleServiceAccount.Name,
    			Description:              pulumi.String("My transform description"),
    			Outputs: media.TransformOutputTypeArray{
    				&media.TransformOutputTypeArgs{
    					RelativePriority: pulumi.String("Normal"),
    					OnErrorAction:    pulumi.String("ContinueJob"),
    					BuiltinPreset: &media.TransformOutputBuiltinPresetArgs{
    						PresetName: pulumi.String("AACGoodQualityAudio"),
    					},
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Azure = Pulumi.Azure;
    
    // NOTE(review): System.Collections.Generic and System.Linq are not used in
    // this example; they appear to be standard Pulumi template boilerplate.
    return await Deployment.RunAsync(() => 
    {
        // Resource group that holds every resource in this example.
        var example = new Azure.Core.ResourceGroup("example", new()
        {
            Name = "media-resources",
            Location = "West Europe",
        });
    
        // Storage account backing the Media Services account below.
        var exampleAccount = new Azure.Storage.Account("example", new()
        {
            Name = "examplestoracc",
            ResourceGroupName = example.Name,
            Location = example.Location,
            AccountTier = "Standard",
            AccountReplicationType = "GRS",
        });
    
        // Media Services account, using the storage account above as its primary store.
        var exampleServiceAccount = new Azure.Media.ServiceAccount("example", new()
        {
            Name = "examplemediaacc",
            Location = example.Location,
            ResourceGroupName = example.Name,
            StorageAccounts = new[]
            {
                new Azure.Media.Inputs.ServiceAccountStorageAccountArgs
                {
                    Id = exampleAccount.Id,
                    IsPrimary = true,
                },
            },
        });
    
        // Transform with a single output that encodes audio using the
        // built-in "AACGoodQualityAudio" preset.
        var exampleTransform = new Azure.Media.Transform("example", new()
        {
            Name = "transform1",
            ResourceGroupName = example.Name,
            MediaServicesAccountName = exampleServiceAccount.Name,
            Description = "My transform description",
            Outputs = new[]
            {
                new Azure.Media.Inputs.TransformOutputArgs
                {
                    RelativePriority = "Normal",
                    OnErrorAction = "ContinueJob",
                    BuiltinPreset = new Azure.Media.Inputs.TransformOutputBuiltinPresetArgs
                    {
                        PresetName = "AACGoodQualityAudio",
                    },
                },
            },
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.azure.core.ResourceGroup;
    import com.pulumi.azure.core.ResourceGroupArgs;
    import com.pulumi.azure.storage.Account;
    import com.pulumi.azure.storage.AccountArgs;
    import com.pulumi.azure.media.ServiceAccount;
    import com.pulumi.azure.media.ServiceAccountArgs;
    import com.pulumi.azure.media.inputs.ServiceAccountStorageAccountArgs;
    import com.pulumi.azure.media.Transform;
    import com.pulumi.azure.media.TransformArgs;
    import com.pulumi.azure.media.inputs.TransformOutputArgs;
    import com.pulumi.azure.media.inputs.TransformOutputBuiltinPresetArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    // NOTE(review): the java.util, java.io and java.nio.file imports above are
    // unused in this example; they appear to be standard Pulumi template boilerplate.
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            // Resource group that holds every resource in this example.
            var example = new ResourceGroup("example", ResourceGroupArgs.builder()        
                .name("media-resources")
                .location("West Europe")
                .build());
    
            // Storage account backing the Media Services account below.
            var exampleAccount = new Account("exampleAccount", AccountArgs.builder()        
                .name("examplestoracc")
                .resourceGroupName(example.name())
                .location(example.location())
                .accountTier("Standard")
                .accountReplicationType("GRS")
                .build());
    
            // Media Services account, using the storage account above as its primary store.
            var exampleServiceAccount = new ServiceAccount("exampleServiceAccount", ServiceAccountArgs.builder()        
                .name("examplemediaacc")
                .location(example.location())
                .resourceGroupName(example.name())
                .storageAccounts(ServiceAccountStorageAccountArgs.builder()
                    .id(exampleAccount.id())
                    .isPrimary(true)
                    .build())
                .build());
    
            // Transform with a single output that encodes audio using the
            // built-in "AACGoodQualityAudio" preset.
            var exampleTransform = new Transform("exampleTransform", TransformArgs.builder()        
                .name("transform1")
                .resourceGroupName(example.name())
                .mediaServicesAccountName(exampleServiceAccount.name())
                .description("My transform description")
                .outputs(TransformOutputArgs.builder()
                    .relativePriority("Normal")
                    .onErrorAction("ContinueJob")
                    .builtinPreset(TransformOutputBuiltinPresetArgs.builder()
                        .presetName("AACGoodQualityAudio")
                        .build())
                    .build())
                .build());
    
        }
    }
    
    resources:
      # Resource group that holds every resource in this example.
      example:
        type: azure:core:ResourceGroup
        properties:
          name: media-resources
          location: West Europe
      # Storage account backing the Media Services account.
      exampleAccount:
        type: azure:storage:Account
        name: example
        properties:
          name: examplestoracc
          resourceGroupName: ${example.name}
          location: ${example.location}
          accountTier: Standard
          accountReplicationType: GRS
      # Media Services account, using the storage account above as its primary store.
      exampleServiceAccount:
        type: azure:media:ServiceAccount
        name: example
        properties:
          name: examplemediaacc
          location: ${example.location}
          resourceGroupName: ${example.name}
          storageAccounts:
            - id: ${exampleAccount.id}
              isPrimary: true
      # Transform with a single output that encodes audio using the
      # built-in "AACGoodQualityAudio" preset.
      exampleTransform:
        type: azure:media:Transform
        name: example
        properties:
          name: transform1
          resourceGroupName: ${example.name}
          mediaServicesAccountName: ${exampleServiceAccount.name}
          description: My transform description
          outputs:
            - relativePriority: Normal
              onErrorAction: ContinueJob
              builtinPreset:
                presetName: AACGoodQualityAudio
    

    With Multiple Outputs

    import * as pulumi from "@pulumi/pulumi";
    import * as azure from "@pulumi/azure";
    
    // Resource group that holds every resource in this example.
    const example = new azure.core.ResourceGroup("example", {
        name: "media-resources",
        location: "West Europe",
    });
    // Storage account backing the Media Services account.
    const exampleAccount = new azure.storage.Account("example", {
        name: "examplestoracc",
        resourceGroupName: example.name,
        location: example.location,
        accountTier: "Standard",
        accountReplicationType: "GRS",
    });
    // Media Services account, using the storage account above as its primary store.
    const exampleServiceAccount = new azure.media.ServiceAccount("example", {
        name: "examplemediaacc",
        location: example.location,
        resourceGroupName: example.name,
        storageAccounts: [{
            id: exampleAccount.id,
            isPrimary: true,
        }],
    });
    // Transform demonstrating several kinds of outputs: a built-in preset with an
    // explicit configuration, audio/face/video analyzer presets, and a fully
    // custom preset (codecs + formats + filter).
    const exampleTransform = new azure.media.Transform("example", {
        name: "transform1",
        resourceGroupName: example.name,
        mediaServicesAccountName: exampleServiceAccount.name,
        description: "My transform description",
        outputs: [
            // 1. Built-in preset with a tuned preset configuration.
            {
                relativePriority: "Normal",
                onErrorAction: "ContinueJob",
                builtinPreset: {
                    presetName: "AACGoodQualityAudio",
                    presetConfiguration: {
                        complexity: "Balanced",
                        interleaveOutput: "NonInterleavedOutput",
                        keyFrameIntervalInSeconds: 123122.5,
                        maxBitrateBps: 300000,
                        maxHeight: 480,
                        maxLayers: 14,
                        minBitrateBps: 200000,
                        minHeight: 360,
                    },
                },
            },
            // 2. Audio analyzer preset.
            {
                relativePriority: "Low",
                onErrorAction: "ContinueJob",
                audioAnalyzerPreset: {
                    audioLanguage: "en-US",
                    audioAnalysisMode: "Basic",
                    experimentalOptions: {
                        env: "test",
                    },
                },
            },
            // 3. Face detector preset.
            {
                relativePriority: "Low",
                onErrorAction: "StopProcessingJob",
                faceDetectorPreset: {
                    analysisResolution: "StandardDefinition",
                    blurType: "Med",
                    faceRedactorMode: "Combined",
                    experimentalOptions: {
                        env: "test",
                    },
                },
            },
            // 4. Video analyzer preset.
            {
                relativePriority: "Normal",
                onErrorAction: "StopProcessingJob",
                videoAnalyzerPreset: {
                    audioLanguage: "en-US",
                    audioAnalysisMode: "Basic",
                    insightsType: "AllInsights",
                    experimentalOptions: {
                        env: "test",
                    },
                },
            },
            // 5. Custom preset: explicit codecs, output formats and a filter chain.
            {
                relativePriority: "Low",
                onErrorAction: "ContinueJob",
                customPreset: {
                    codecs: [
                        {
                            aacAudio: {
                                bitrate: 128000,
                                channels: 2,
                                samplingRate: 48000,
                                profile: "AacLc",
                            },
                        },
                        {
                            copyAudio: {
                                label: "test",
                            },
                        },
                        {
                            copyVideo: {
                                label: "test",
                            },
                        },
                        {
                            h264Video: {
                                keyFrameInterval: "PT1S",
                                stretchMode: "AutoSize",
                                syncMode: "Auto",
                                sceneChangeDetectionEnabled: false,
                                rateControlMode: "ABR",
                                complexity: "Quality",
                                layers: [
                                    {
                                        width: "64",
                                        height: "64",
                                        bitrate: 1045000,
                                        maxBitrate: 1045000,
                                        bFrames: 3,
                                        slices: 0,
                                        adaptiveBFrameEnabled: true,
                                        profile: "Auto",
                                        level: "auto",
                                        bufferWindow: "PT5S",
                                        referenceFrames: 4,
                                        crf: 23,
                                        entropyMode: "Cabac",
                                    },
                                    {
                                        width: "64",
                                        height: "64",
                                        bitrate: 1000,
                                        maxBitrate: 1000,
                                        bFrames: 3,
                                        frameRate: "32",
                                        slices: 1,
                                        adaptiveBFrameEnabled: true,
                                        profile: "High444",
                                        level: "auto",
                                        bufferWindow: "PT5S",
                                        referenceFrames: 4,
                                        crf: 23,
                                        entropyMode: "Cavlc",
                                    },
                                ],
                            },
                        },
                        {
                            h265Video: {
                                keyFrameInterval: "PT2S",
                                stretchMode: "AutoSize",
                                syncMode: "Auto",
                                sceneChangeDetectionEnabled: false,
                                complexity: "Speed",
                                layers: [{
                                    width: "64",
                                    height: "64",
                                    bitrate: 1045000,
                                    maxBitrate: 1045000,
                                    bFrames: 3,
                                    slices: 5,
                                    adaptiveBFrameEnabled: true,
                                    profile: "Auto",
                                    label: "test",
                                    level: "auto",
                                    bufferWindow: "PT5S",
                                    frameRate: "32",
                                    referenceFrames: 4,
                                    crf: 23,
                                }],
                            },
                        },
                        {
                            jpgImage: {
                                stretchMode: "AutoSize",
                                syncMode: "Auto",
                                start: "10",
                                // NOTE(review): the doubled "%" in "100%%" (and in
                                // "90%%"/"10%%" in the filter further down) looks
                                // like an escaping artifact from docs conversion;
                                // the service likely expects "100%" — confirm
                                // against the Azure Media Services API before use.
                                range: "100%%",
                                spriteColumn: 1,
                                step: "10",
                                layers: [{
                                    quality: 70,
                                    height: "180",
                                    label: "test",
                                    width: "120",
                                }],
                            },
                        },
                        {
                            pngImage: {
                                stretchMode: "AutoSize",
                                syncMode: "Auto",
                                start: "{Best}",
                                range: "80",
                                step: "10",
                                layers: [{
                                    height: "180",
                                    label: "test",
                                    width: "120",
                                }],
                            },
                        },
                    ],
                    formats: [
                        {
                            jpg: {
                                filenamePattern: "test{Basename}",
                            },
                        },
                        {
                            mp4: {
                                filenamePattern: "test{Bitrate}",
                                outputFiles: [{
                                    labels: [
                                        "test",
                                        "ppe",
                                    ],
                                }],
                            },
                        },
                        {
                            png: {
                                filenamePattern: "test{Basename}",
                            },
                        },
                        {
                            transportStream: {
                                filenamePattern: "test{Bitrate}",
                                outputFiles: [{
                                    labels: ["prod"],
                                }],
                            },
                        },
                    ],
                    filter: {
                        cropRectangle: {
                            height: "240",
                            left: "30",
                            top: "360",
                            width: "70",
                        },
                        deinterlace: {
                            parity: "TopFieldFirst",
                            mode: "AutoPixelAdaptive",
                        },
                        fadeIn: {
                            duration: "PT5S",
                            fadeColor: "0xFF0000",
                            start: "10",
                        },
                        fadeOut: {
                            // NOTE(review): "90%%" / "10%%" — see the "%%" note on
                            // the jpgImage codec above; likely should be single "%".
                            duration: "90%%",
                            fadeColor: "#FF0C7B",
                            start: "10%%",
                        },
                        rotation: "Auto",
                        overlays: [
                            {
                                audio: {
                                    inputLabel: "label.jpg",
                                    start: "PT5S",
                                    end: "PT30S",
                                    fadeInDuration: "PT1S",
                                    fadeOutDuration: "PT2S",
                                    audioGainLevel: 1,
                                },
                            },
                            {
                                video: {
                                    inputLabel: "label.jpg",
                                    start: "PT5S",
                                    end: "PT30S",
                                    fadeInDuration: "PT1S",
                                    fadeOutDuration: "PT2S",
                                    audioGainLevel: 1,
                                    opacity: 1,
                                    position: {
                                        height: "180",
                                        left: "20",
                                        top: "240",
                                        width: "140",
                                    },
                                    cropRectangle: {
                                        height: "240",
                                        left: "30",
                                        top: "360",
                                        width: "70",
                                    },
                                },
                            },
                        ],
                    },
                },
            },
        ],
    });
    
    import pulumi
    import pulumi_azure as azure
    
    example = azure.core.ResourceGroup("example",
        name="media-resources",
        location="West Europe")
    example_account = azure.storage.Account("example",
        name="examplestoracc",
        resource_group_name=example.name,
        location=example.location,
        account_tier="Standard",
        account_replication_type="GRS")
    example_service_account = azure.media.ServiceAccount("example",
        name="examplemediaacc",
        location=example.location,
        resource_group_name=example.name,
        storage_accounts=[azure.media.ServiceAccountStorageAccountArgs(
            id=example_account.id,
            is_primary=True,
        )])
    example_transform = azure.media.Transform("example",
        name="transform1",
        resource_group_name=example.name,
        media_services_account_name=example_service_account.name,
        description="My transform description",
        outputs=[
            azure.media.TransformOutputArgs(
                relative_priority="Normal",
                on_error_action="ContinueJob",
                builtin_preset=azure.media.TransformOutputBuiltinPresetArgs(
                    preset_name="AACGoodQualityAudio",
                    preset_configuration=azure.media.TransformOutputBuiltinPresetPresetConfigurationArgs(
                        complexity="Balanced",
                        interleave_output="NonInterleavedOutput",
                        key_frame_interval_in_seconds=123122.5,
                        max_bitrate_bps=300000,
                        max_height=480,
                        max_layers=14,
                        min_bitrate_bps=200000,
                        min_height=360,
                    ),
                ),
            ),
            azure.media.TransformOutputArgs(
                relative_priority="Low",
                on_error_action="ContinueJob",
                audio_analyzer_preset=azure.media.TransformOutputAudioAnalyzerPresetArgs(
                    audio_language="en-US",
                    audio_analysis_mode="Basic",
                    experimental_options={
                        "env": "test",
                    },
                ),
            ),
            azure.media.TransformOutputArgs(
                relative_priority="Low",
                on_error_action="StopProcessingJob",
                face_detector_preset=azure.media.TransformOutputFaceDetectorPresetArgs(
                    analysis_resolution="StandardDefinition",
                    blur_type="Med",
                    face_redactor_mode="Combined",
                    experimental_options={
                        "env": "test",
                    },
                ),
            ),
            azure.media.TransformOutputArgs(
                relative_priority="Normal",
                on_error_action="StopProcessingJob",
                video_analyzer_preset=azure.media.TransformOutputVideoAnalyzerPresetArgs(
                    audio_language="en-US",
                    audio_analysis_mode="Basic",
                    insights_type="AllInsights",
                    experimental_options={
                        "env": "test",
                    },
                ),
            ),
            azure.media.TransformOutputArgs(
                relative_priority="Low",
                on_error_action="ContinueJob",
                custom_preset=azure.media.TransformOutputCustomPresetArgs(
                    codecs=[
                        azure.media.TransformOutputCustomPresetCodecArgs(
                            aac_audio=azure.media.TransformOutputCustomPresetCodecAacAudioArgs(
                                bitrate=128000,
                                channels=2,
                                sampling_rate=48000,
                                profile="AacLc",
                            ),
                        ),
                        azure.media.TransformOutputCustomPresetCodecArgs(
                            copy_audio=azure.media.TransformOutputCustomPresetCodecCopyAudioArgs(
                                label="test",
                            ),
                        ),
                        azure.media.TransformOutputCustomPresetCodecArgs(
                            copy_video=azure.media.TransformOutputCustomPresetCodecCopyVideoArgs(
                                label="test",
                            ),
                        ),
                        azure.media.TransformOutputCustomPresetCodecArgs(
                            h264_video=azure.media.TransformOutputCustomPresetCodecH264VideoArgs(
                                key_frame_interval="PT1S",
                                stretch_mode="AutoSize",
                                sync_mode="Auto",
                                scene_change_detection_enabled=False,
                                rate_control_mode="ABR",
                                complexity="Quality",
                                layers=[
                                    azure.media.TransformOutputCustomPresetCodecH264VideoLayerArgs(
                                        width="64",
                                        height="64",
                                        bitrate=1045000,
                                        max_bitrate=1045000,
                                        b_frames=3,
                                        slices=0,
                                        adaptive_b_frame_enabled=True,
                                        profile="Auto",
                                        level="auto",
                                        buffer_window="PT5S",
                                        reference_frames=4,
                                        crf=23,
                                        entropy_mode="Cabac",
                                    ),
                                    azure.media.TransformOutputCustomPresetCodecH264VideoLayerArgs(
                                        width="64",
                                        height="64",
                                        bitrate=1000,
                                        max_bitrate=1000,
                                        b_frames=3,
                                        frame_rate="32",
                                        slices=1,
                                        adaptive_b_frame_enabled=True,
                                        profile="High444",
                                        level="auto",
                                        buffer_window="PT5S",
                                        reference_frames=4,
                                        crf=23,
                                        entropy_mode="Cavlc",
                                    ),
                                ],
                            ),
                        ),
                        azure.media.TransformOutputCustomPresetCodecArgs(
                            h265_video=azure.media.TransformOutputCustomPresetCodecH265VideoArgs(
                                key_frame_interval="PT2S",
                                stretch_mode="AutoSize",
                                sync_mode="Auto",
                                scene_change_detection_enabled=False,
                                complexity="Speed",
                                layers=[azure.media.TransformOutputCustomPresetCodecH265VideoLayerArgs(
                                    width="64",
                                    height="64",
                                    bitrate=1045000,
                                    max_bitrate=1045000,
                                    b_frames=3,
                                    slices=5,
                                    adaptive_b_frame_enabled=True,
                                    profile="Auto",
                                    label="test",
                                    level="auto",
                                    buffer_window="PT5S",
                                    frame_rate="32",
                                    reference_frames=4,
                                    crf=23,
                                )],
                            ),
                        ),
                        azure.media.TransformOutputCustomPresetCodecArgs(
                            jpg_image=azure.media.TransformOutputCustomPresetCodecJpgImageArgs(
                                stretch_mode="AutoSize",
                                sync_mode="Auto",
                                start="10",
                                range="100%%",
                                sprite_column=1,
                                step="10",
                                layers=[azure.media.TransformOutputCustomPresetCodecJpgImageLayerArgs(
                                    quality=70,
                                    height="180",
                                    label="test",
                                    width="120",
                                )],
                            ),
                        ),
                        azure.media.TransformOutputCustomPresetCodecArgs(
                            png_image=azure.media.TransformOutputCustomPresetCodecPngImageArgs(
                                stretch_mode="AutoSize",
                                sync_mode="Auto",
                                start="{Best}",
                                range="80",
                                step="10",
                                layers=[azure.media.TransformOutputCustomPresetCodecPngImageLayerArgs(
                                    height="180",
                                    label="test",
                                    width="120",
                                )],
                            ),
                        ),
                    ],
                    formats=[
                        azure.media.TransformOutputCustomPresetFormatArgs(
                            jpg=azure.media.TransformOutputCustomPresetFormatJpgArgs(
                                filename_pattern="test{Basename}",
                            ),
                        ),
                        azure.media.TransformOutputCustomPresetFormatArgs(
                            mp4=azure.media.TransformOutputCustomPresetFormatMp4Args(
                                filename_pattern="test{Bitrate}",
                                output_files=[azure.media.TransformOutputCustomPresetFormatMp4OutputFileArgs(
                                    labels=[
                                        "test",
                                        "ppe",
                                    ],
                                )],
                            ),
                        ),
                        azure.media.TransformOutputCustomPresetFormatArgs(
                            png=azure.media.TransformOutputCustomPresetFormatPngArgs(
                                filename_pattern="test{Basename}",
                            ),
                        ),
                        azure.media.TransformOutputCustomPresetFormatArgs(
                            transport_stream=azure.media.TransformOutputCustomPresetFormatTransportStreamArgs(
                                filename_pattern="test{Bitrate}",
                                output_files=[azure.media.TransformOutputCustomPresetFormatTransportStreamOutputFileArgs(
                                    labels=["prod"],
                                )],
                            ),
                        ),
                    ],
                    filter=azure.media.TransformOutputCustomPresetFilterArgs(
                        crop_rectangle=azure.media.TransformOutputCustomPresetFilterCropRectangleArgs(
                            height="240",
                            left="30",
                            top="360",
                            width="70",
                        ),
                        deinterlace=azure.media.TransformOutputCustomPresetFilterDeinterlaceArgs(
                            parity="TopFieldFirst",
                            mode="AutoPixelAdaptive",
                        ),
                        fade_in=azure.media.TransformOutputCustomPresetFilterFadeInArgs(
                            duration="PT5S",
                            fade_color="0xFF0000",
                            start="10",
                        ),
                        fade_out=azure.media.TransformOutputCustomPresetFilterFadeOutArgs(
                            duration="90%%",
                            fade_color="#FF0C7B",
                            start="10%%",
                        ),
                        rotation="Auto",
                        overlays=[
                            azure.media.TransformOutputCustomPresetFilterOverlayArgs(
                                audio=azure.media.TransformOutputCustomPresetFilterOverlayAudioArgs(
                                    input_label="label.jpg",
                                    start="PT5S",
                                    end="PT30S",
                                    fade_in_duration="PT1S",
                                    fade_out_duration="PT2S",
                                    audio_gain_level=1,
                                ),
                            ),
                            azure.media.TransformOutputCustomPresetFilterOverlayArgs(
                                video=azure.media.TransformOutputCustomPresetFilterOverlayVideoArgs(
                                    input_label="label.jpg",
                                    start="PT5S",
                                    end="PT30S",
                                    fade_in_duration="PT1S",
                                    fade_out_duration="PT2S",
                                    audio_gain_level=1,
                                    opacity=1,
                                    position=azure.media.TransformOutputCustomPresetFilterOverlayVideoPositionArgs(
                                        height="180",
                                        left="20",
                                        top="240",
                                        width="140",
                                    ),
                                    crop_rectangle=azure.media.TransformOutputCustomPresetFilterOverlayVideoCropRectangleArgs(
                                        height="240",
                                        left="30",
                                        top="360",
                                        width="70",
                                    ),
                                ),
                            ),
                        ],
                    ),
                ),
            ),
        ])
    
    // Go example for azure.media.Transform: provisions a resource group, a
    // storage account, a Media Services account, and a Transform with five
    // outputs, each demonstrating a different preset type (builtin, audio
    // analyzer, face detector, video analyzer, and a fully custom preset).
    package main
    
    import (
    	"github.com/pulumi/pulumi-azure/sdk/v5/go/azure/core"
    	"github.com/pulumi/pulumi-azure/sdk/v5/go/azure/media"
    	"github.com/pulumi/pulumi-azure/sdk/v5/go/azure/storage"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		// Resource group that holds every resource created below.
    		example, err := core.NewResourceGroup(ctx, "example", &core.ResourceGroupArgs{
    			Name:     pulumi.String("media-resources"),
    			Location: pulumi.String("West Europe"),
    		})
    		if err != nil {
    			return err
    		}
    		// GRS storage account that backs the Media Services account.
    		exampleAccount, err := storage.NewAccount(ctx, "example", &storage.AccountArgs{
    			Name:                   pulumi.String("examplestoracc"),
    			ResourceGroupName:      example.Name,
    			Location:               example.Location,
    			AccountTier:            pulumi.String("Standard"),
    			AccountReplicationType: pulumi.String("GRS"),
    		})
    		if err != nil {
    			return err
    		}
    		// Media Services account using the storage account above as its
    		// primary storage.
    		exampleServiceAccount, err := media.NewServiceAccount(ctx, "example", &media.ServiceAccountArgs{
    			Name:              pulumi.String("examplemediaacc"),
    			Location:          example.Location,
    			ResourceGroupName: example.Name,
    			StorageAccounts: media.ServiceAccountStorageAccountArray{
    				&media.ServiceAccountStorageAccountArgs{
    					Id:        exampleAccount.ID(),
    					IsPrimary: pulumi.Bool(true),
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		// The Transform itself; its Outputs list shows one entry per
    		// supported preset kind.
    		_, err = media.NewTransform(ctx, "example", &media.TransformArgs{
    			Name:                     pulumi.String("transform1"),
    			ResourceGroupName:        example.Name,
    			MediaServicesAccountName: exampleServiceAccount.Name,
    			Description:              pulumi.String("My transform description"),
    			Outputs: media.TransformOutputTypeArray{
    				// Output 1: builtin preset with an explicit preset configuration.
    				&media.TransformOutputTypeArgs{
    					RelativePriority: pulumi.String("Normal"),
    					OnErrorAction:    pulumi.String("ContinueJob"),
    					BuiltinPreset: &media.TransformOutputBuiltinPresetArgs{
    						PresetName: pulumi.String("AACGoodQualityAudio"),
    						PresetConfiguration: &media.TransformOutputBuiltinPresetPresetConfigurationArgs{
    							Complexity:                pulumi.String("Balanced"),
    							InterleaveOutput:          pulumi.String("NonInterleavedOutput"),
    							KeyFrameIntervalInSeconds: pulumi.Float64(123122.5),
    							MaxBitrateBps:             pulumi.Int(300000),
    							MaxHeight:                 pulumi.Int(480),
    							MaxLayers:                 pulumi.Int(14),
    							MinBitrateBps:             pulumi.Int(200000),
    							MinHeight:                 pulumi.Int(360),
    						},
    					},
    				},
    				// Output 2: audio analyzer preset.
    				&media.TransformOutputTypeArgs{
    					RelativePriority: pulumi.String("Low"),
    					OnErrorAction:    pulumi.String("ContinueJob"),
    					AudioAnalyzerPreset: &media.TransformOutputAudioAnalyzerPresetArgs{
    						AudioLanguage:     pulumi.String("en-US"),
    						AudioAnalysisMode: pulumi.String("Basic"),
    						ExperimentalOptions: pulumi.StringMap{
    							"env": pulumi.String("test"),
    						},
    					},
    				},
    				// Output 3: face detector preset (face redaction settings).
    				&media.TransformOutputTypeArgs{
    					RelativePriority: pulumi.String("Low"),
    					OnErrorAction:    pulumi.String("StopProcessingJob"),
    					FaceDetectorPreset: &media.TransformOutputFaceDetectorPresetArgs{
    						AnalysisResolution: pulumi.String("StandardDefinition"),
    						BlurType:           pulumi.String("Med"),
    						FaceRedactorMode:   pulumi.String("Combined"),
    						ExperimentalOptions: pulumi.StringMap{
    							"env": pulumi.String("test"),
    						},
    					},
    				},
    				// Output 4: video analyzer preset.
    				&media.TransformOutputTypeArgs{
    					RelativePriority: pulumi.String("Normal"),
    					OnErrorAction:    pulumi.String("StopProcessingJob"),
    					VideoAnalyzerPreset: &media.TransformOutputVideoAnalyzerPresetArgs{
    						AudioLanguage:     pulumi.String("en-US"),
    						AudioAnalysisMode: pulumi.String("Basic"),
    						InsightsType:      pulumi.String("AllInsights"),
    						ExperimentalOptions: pulumi.StringMap{
    							"env": pulumi.String("test"),
    						},
    					},
    				},
    				// Output 5: custom preset combining codecs, output formats and
    				// a filter.
    				&media.TransformOutputTypeArgs{
    					RelativePriority: pulumi.String("Low"),
    					OnErrorAction:    pulumi.String("ContinueJob"),
    					CustomPreset: &media.TransformOutputCustomPresetArgs{
    						// Codecs: AAC audio, audio/video passthrough copies,
    						// H.264 and H.265 video, and JPG/PNG thumbnails.
    						Codecs: media.TransformOutputCustomPresetCodecArray{
    							&media.TransformOutputCustomPresetCodecArgs{
    								AacAudio: &media.TransformOutputCustomPresetCodecAacAudioArgs{
    									Bitrate:      pulumi.Int(128000),
    									Channels:     pulumi.Int(2),
    									SamplingRate: pulumi.Int(48000),
    									Profile:      pulumi.String("AacLc"),
    								},
    							},
    							&media.TransformOutputCustomPresetCodecArgs{
    								CopyAudio: &media.TransformOutputCustomPresetCodecCopyAudioArgs{
    									Label: pulumi.String("test"),
    								},
    							},
    							&media.TransformOutputCustomPresetCodecArgs{
    								CopyVideo: &media.TransformOutputCustomPresetCodecCopyVideoArgs{
    									Label: pulumi.String("test"),
    								},
    							},
    							// H.264 with two bitrate layers (CABAC and CAVLC entropy modes).
    							&media.TransformOutputCustomPresetCodecArgs{
    								H264Video: &media.TransformOutputCustomPresetCodecH264VideoArgs{
    									KeyFrameInterval:            pulumi.String("PT1S"),
    									StretchMode:                 pulumi.String("AutoSize"),
    									SyncMode:                    pulumi.String("Auto"),
    									SceneChangeDetectionEnabled: pulumi.Bool(false),
    									RateControlMode:             pulumi.String("ABR"),
    									Complexity:                  pulumi.String("Quality"),
    									Layers: media.TransformOutputCustomPresetCodecH264VideoLayerArray{
    										&media.TransformOutputCustomPresetCodecH264VideoLayerArgs{
    											Width:                 pulumi.String("64"),
    											Height:                pulumi.String("64"),
    											Bitrate:               pulumi.Int(1045000),
    											MaxBitrate:            pulumi.Int(1045000),
    											BFrames:               pulumi.Int(3),
    											Slices:                pulumi.Int(0),
    											AdaptiveBFrameEnabled: pulumi.Bool(true),
    											Profile:               pulumi.String("Auto"),
    											Level:                 pulumi.String("auto"),
    											BufferWindow:          pulumi.String("PT5S"),
    											ReferenceFrames:       pulumi.Int(4),
    											Crf:                   pulumi.Float64(23),
    											EntropyMode:           pulumi.String("Cabac"),
    										},
    										&media.TransformOutputCustomPresetCodecH264VideoLayerArgs{
    											Width:                 pulumi.String("64"),
    											Height:                pulumi.String("64"),
    											Bitrate:               pulumi.Int(1000),
    											MaxBitrate:            pulumi.Int(1000),
    											BFrames:               pulumi.Int(3),
    											FrameRate:             pulumi.String("32"),
    											Slices:                pulumi.Int(1),
    											AdaptiveBFrameEnabled: pulumi.Bool(true),
    											Profile:               pulumi.String("High444"),
    											Level:                 pulumi.String("auto"),
    											BufferWindow:          pulumi.String("PT5S"),
    											ReferenceFrames:       pulumi.Int(4),
    											Crf:                   pulumi.Float64(23),
    											EntropyMode:           pulumi.String("Cavlc"),
    										},
    									},
    								},
    							},
    							&media.TransformOutputCustomPresetCodecArgs{
    								H265Video: &media.TransformOutputCustomPresetCodecH265VideoArgs{
    									KeyFrameInterval:            pulumi.String("PT2S"),
    									StretchMode:                 pulumi.String("AutoSize"),
    									SyncMode:                    pulumi.String("Auto"),
    									SceneChangeDetectionEnabled: pulumi.Bool(false),
    									Complexity:                  pulumi.String("Speed"),
    									Layers: media.TransformOutputCustomPresetCodecH265VideoLayerArray{
    										&media.TransformOutputCustomPresetCodecH265VideoLayerArgs{
    											Width:                 pulumi.String("64"),
    											Height:                pulumi.String("64"),
    											Bitrate:               pulumi.Int(1045000),
    											MaxBitrate:            pulumi.Int(1045000),
    											BFrames:               pulumi.Int(3),
    											Slices:                pulumi.Int(5),
    											AdaptiveBFrameEnabled: pulumi.Bool(true),
    											Profile:               pulumi.String("Auto"),
    											Label:                 pulumi.String("test"),
    											Level:                 pulumi.String("auto"),
    											BufferWindow:          pulumi.String("PT5S"),
    											FrameRate:             pulumi.String("32"),
    											ReferenceFrames:       pulumi.Int(4),
    											Crf:                   pulumi.Float64(23),
    										},
    									},
    								},
    							},
    							&media.TransformOutputCustomPresetCodecArgs{
    								JpgImage: &media.TransformOutputCustomPresetCodecJpgImageArgs{
    									StretchMode:  pulumi.String("AutoSize"),
    									SyncMode:     pulumi.String("Auto"),
    									Start:        pulumi.String("10"),
    									// NOTE(review): the doubled "%%" is carried verbatim from
    									// the upstream example (it appears identically in every
    									// language sample); presumably a literal "100%" is
    									// intended — confirm the expected escaping before reuse.
    									Range:        pulumi.String("100%%"),
    									SpriteColumn: pulumi.Int(1),
    									Step:         pulumi.String("10"),
    									Layers: media.TransformOutputCustomPresetCodecJpgImageLayerArray{
    										&media.TransformOutputCustomPresetCodecJpgImageLayerArgs{
    											Quality: pulumi.Int(70),
    											Height:  pulumi.String("180"),
    											Label:   pulumi.String("test"),
    											Width:   pulumi.String("120"),
    										},
    									},
    								},
    							},
    							&media.TransformOutputCustomPresetCodecArgs{
    								PngImage: &media.TransformOutputCustomPresetCodecPngImageArgs{
    									StretchMode: pulumi.String("AutoSize"),
    									SyncMode:    pulumi.String("Auto"),
    									Start:       pulumi.String("{Best}"),
    									Range:       pulumi.String("80"),
    									Step:        pulumi.String("10"),
    									Layers: media.TransformOutputCustomPresetCodecPngImageLayerArray{
    										&media.TransformOutputCustomPresetCodecPngImageLayerArgs{
    											Height: pulumi.String("180"),
    											Label:  pulumi.String("test"),
    											Width:  pulumi.String("120"),
    										},
    									},
    								},
    							},
    						},
    						// Output formats: JPG/PNG image outputs plus MP4 and
    						// transport-stream containers selecting codec layers by label.
    						Formats: media.TransformOutputCustomPresetFormatArray{
    							&media.TransformOutputCustomPresetFormatArgs{
    								Jpg: &media.TransformOutputCustomPresetFormatJpgArgs{
    									FilenamePattern: pulumi.String("test{Basename}"),
    								},
    							},
    							&media.TransformOutputCustomPresetFormatArgs{
    								Mp4: &media.TransformOutputCustomPresetFormatMp4Args{
    									FilenamePattern: pulumi.String("test{Bitrate}"),
    									OutputFiles: media.TransformOutputCustomPresetFormatMp4OutputFileArray{
    										&media.TransformOutputCustomPresetFormatMp4OutputFileArgs{
    											Labels: pulumi.StringArray{
    												pulumi.String("test"),
    												pulumi.String("ppe"),
    											},
    										},
    									},
    								},
    							},
    							&media.TransformOutputCustomPresetFormatArgs{
    								Png: &media.TransformOutputCustomPresetFormatPngArgs{
    									FilenamePattern: pulumi.String("test{Basename}"),
    								},
    							},
    							&media.TransformOutputCustomPresetFormatArgs{
    								TransportStream: &media.TransformOutputCustomPresetFormatTransportStreamArgs{
    									FilenamePattern: pulumi.String("test{Bitrate}"),
    									OutputFiles: media.TransformOutputCustomPresetFormatTransportStreamOutputFileArray{
    										&media.TransformOutputCustomPresetFormatTransportStreamOutputFileArgs{
    											Labels: pulumi.StringArray{
    												pulumi.String("prod"),
    											},
    										},
    									},
    								},
    							},
    						},
    						// Filter: crop, deinterlace, fade in/out, rotation and
    						// audio/video overlays applied to the input video.
    						Filter: &media.TransformOutputCustomPresetFilterArgs{
    							CropRectangle: &media.TransformOutputCustomPresetFilterCropRectangleArgs{
    								Height: pulumi.String("240"),
    								Left:   pulumi.String("30"),
    								Top:    pulumi.String("360"),
    								Width:  pulumi.String("70"),
    							},
    							Deinterlace: &media.TransformOutputCustomPresetFilterDeinterlaceArgs{
    								Parity: pulumi.String("TopFieldFirst"),
    								Mode:   pulumi.String("AutoPixelAdaptive"),
    							},
    							FadeIn: &media.TransformOutputCustomPresetFilterFadeInArgs{
    								Duration:  pulumi.String("PT5S"),
    								FadeColor: pulumi.String("0xFF0000"),
    								Start:     pulumi.String("10"),
    							},
    							// NOTE(review): "90%%"/"10%%" keep the doubled "%" exactly as
    							// in the upstream example — verify, as with Range above.
    							FadeOut: &media.TransformOutputCustomPresetFilterFadeOutArgs{
    								Duration:  pulumi.String("90%%"),
    								FadeColor: pulumi.String("#FF0C7B"),
    								Start:     pulumi.String("10%%"),
    							},
    							Rotation: pulumi.String("Auto"),
    							Overlays: media.TransformOutputCustomPresetFilterOverlayArray{
    								&media.TransformOutputCustomPresetFilterOverlayArgs{
    									Audio: &media.TransformOutputCustomPresetFilterOverlayAudioArgs{
    										InputLabel:      pulumi.String("label.jpg"),
    										Start:           pulumi.String("PT5S"),
    										End:             pulumi.String("PT30S"),
    										FadeInDuration:  pulumi.String("PT1S"),
    										FadeOutDuration: pulumi.String("PT2S"),
    										AudioGainLevel:  pulumi.Float64(1),
    									},
    								},
    								&media.TransformOutputCustomPresetFilterOverlayArgs{
    									Video: &media.TransformOutputCustomPresetFilterOverlayVideoArgs{
    										InputLabel:      pulumi.String("label.jpg"),
    										Start:           pulumi.String("PT5S"),
    										End:             pulumi.String("PT30S"),
    										FadeInDuration:  pulumi.String("PT1S"),
    										FadeOutDuration: pulumi.String("PT2S"),
    										AudioGainLevel:  pulumi.Float64(1),
    										Opacity:         pulumi.Float64(1),
    										Position: &media.TransformOutputCustomPresetFilterOverlayVideoPositionArgs{
    											Height: pulumi.String("180"),
    											Left:   pulumi.String("20"),
    											Top:    pulumi.String("240"),
    											Width:  pulumi.String("140"),
    										},
    										CropRectangle: &media.TransformOutputCustomPresetFilterOverlayVideoCropRectangleArgs{
    											Height: pulumi.String("240"),
    											Left:   pulumi.String("30"),
    											Top:    pulumi.String("360"),
    											Width:  pulumi.String("70"),
    										},
    									},
    								},
    							},
    						},
    					},
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Azure = Pulumi.Azure;
    
    return await Deployment.RunAsync(() => 
    {
        var example = new Azure.Core.ResourceGroup("example", new()
        {
            Name = "media-resources",
            Location = "West Europe",
        });
    
        var exampleAccount = new Azure.Storage.Account("example", new()
        {
            Name = "examplestoracc",
            ResourceGroupName = example.Name,
            Location = example.Location,
            AccountTier = "Standard",
            AccountReplicationType = "GRS",
        });
    
        var exampleServiceAccount = new Azure.Media.ServiceAccount("example", new()
        {
            Name = "examplemediaacc",
            Location = example.Location,
            ResourceGroupName = example.Name,
            StorageAccounts = new[]
            {
                new Azure.Media.Inputs.ServiceAccountStorageAccountArgs
                {
                    Id = exampleAccount.Id,
                    IsPrimary = true,
                },
            },
        });
    
        var exampleTransform = new Azure.Media.Transform("example", new()
        {
            Name = "transform1",
            ResourceGroupName = example.Name,
            MediaServicesAccountName = exampleServiceAccount.Name,
            Description = "My transform description",
            Outputs = new[]
            {
                new Azure.Media.Inputs.TransformOutputArgs
                {
                    RelativePriority = "Normal",
                    OnErrorAction = "ContinueJob",
                    BuiltinPreset = new Azure.Media.Inputs.TransformOutputBuiltinPresetArgs
                    {
                        PresetName = "AACGoodQualityAudio",
                        PresetConfiguration = new Azure.Media.Inputs.TransformOutputBuiltinPresetPresetConfigurationArgs
                        {
                            Complexity = "Balanced",
                            InterleaveOutput = "NonInterleavedOutput",
                            KeyFrameIntervalInSeconds = 123122.5,
                            MaxBitrateBps = 300000,
                            MaxHeight = 480,
                            MaxLayers = 14,
                            MinBitrateBps = 200000,
                            MinHeight = 360,
                        },
                    },
                },
                new Azure.Media.Inputs.TransformOutputArgs
                {
                    RelativePriority = "Low",
                    OnErrorAction = "ContinueJob",
                    AudioAnalyzerPreset = new Azure.Media.Inputs.TransformOutputAudioAnalyzerPresetArgs
                    {
                        AudioLanguage = "en-US",
                        AudioAnalysisMode = "Basic",
                        ExperimentalOptions = 
                        {
                            { "env", "test" },
                        },
                    },
                },
                new Azure.Media.Inputs.TransformOutputArgs
                {
                    RelativePriority = "Low",
                    OnErrorAction = "StopProcessingJob",
                    FaceDetectorPreset = new Azure.Media.Inputs.TransformOutputFaceDetectorPresetArgs
                    {
                        AnalysisResolution = "StandardDefinition",
                        BlurType = "Med",
                        FaceRedactorMode = "Combined",
                        ExperimentalOptions = 
                        {
                            { "env", "test" },
                        },
                    },
                },
                new Azure.Media.Inputs.TransformOutputArgs
                {
                    RelativePriority = "Normal",
                    OnErrorAction = "StopProcessingJob",
                    VideoAnalyzerPreset = new Azure.Media.Inputs.TransformOutputVideoAnalyzerPresetArgs
                    {
                        AudioLanguage = "en-US",
                        AudioAnalysisMode = "Basic",
                        InsightsType = "AllInsights",
                        ExperimentalOptions = 
                        {
                            { "env", "test" },
                        },
                    },
                },
                new Azure.Media.Inputs.TransformOutputArgs
                {
                    RelativePriority = "Low",
                    OnErrorAction = "ContinueJob",
                    CustomPreset = new Azure.Media.Inputs.TransformOutputCustomPresetArgs
                    {
                        Codecs = new[]
                        {
                            new Azure.Media.Inputs.TransformOutputCustomPresetCodecArgs
                            {
                                AacAudio = new Azure.Media.Inputs.TransformOutputCustomPresetCodecAacAudioArgs
                                {
                                    Bitrate = 128000,
                                    Channels = 2,
                                    SamplingRate = 48000,
                                    Profile = "AacLc",
                                },
                            },
                            new Azure.Media.Inputs.TransformOutputCustomPresetCodecArgs
                            {
                                CopyAudio = new Azure.Media.Inputs.TransformOutputCustomPresetCodecCopyAudioArgs
                                {
                                    Label = "test",
                                },
                            },
                            new Azure.Media.Inputs.TransformOutputCustomPresetCodecArgs
                            {
                                CopyVideo = new Azure.Media.Inputs.TransformOutputCustomPresetCodecCopyVideoArgs
                                {
                                    Label = "test",
                                },
                            },
                            new Azure.Media.Inputs.TransformOutputCustomPresetCodecArgs
                            {
                                H264Video = new Azure.Media.Inputs.TransformOutputCustomPresetCodecH264VideoArgs
                                {
                                    KeyFrameInterval = "PT1S",
                                    StretchMode = "AutoSize",
                                    SyncMode = "Auto",
                                    SceneChangeDetectionEnabled = false,
                                    RateControlMode = "ABR",
                                    Complexity = "Quality",
                                    Layers = new[]
                                    {
                                        new Azure.Media.Inputs.TransformOutputCustomPresetCodecH264VideoLayerArgs
                                        {
                                            Width = "64",
                                            Height = "64",
                                            Bitrate = 1045000,
                                            MaxBitrate = 1045000,
                                            BFrames = 3,
                                            Slices = 0,
                                            AdaptiveBFrameEnabled = true,
                                            Profile = "Auto",
                                            Level = "auto",
                                            BufferWindow = "PT5S",
                                            ReferenceFrames = 4,
                                            Crf = 23,
                                            EntropyMode = "Cabac",
                                        },
                                        new Azure.Media.Inputs.TransformOutputCustomPresetCodecH264VideoLayerArgs
                                        {
                                            Width = "64",
                                            Height = "64",
                                            Bitrate = 1000,
                                            MaxBitrate = 1000,
                                            BFrames = 3,
                                            FrameRate = "32",
                                            Slices = 1,
                                            AdaptiveBFrameEnabled = true,
                                            Profile = "High444",
                                            Level = "auto",
                                            BufferWindow = "PT5S",
                                            ReferenceFrames = 4,
                                            Crf = 23,
                                            EntropyMode = "Cavlc",
                                        },
                                    },
                                },
                            },
                            new Azure.Media.Inputs.TransformOutputCustomPresetCodecArgs
                            {
                                H265Video = new Azure.Media.Inputs.TransformOutputCustomPresetCodecH265VideoArgs
                                {
                                    KeyFrameInterval = "PT2S",
                                    StretchMode = "AutoSize",
                                    SyncMode = "Auto",
                                    SceneChangeDetectionEnabled = false,
                                    Complexity = "Speed",
                                    Layers = new[]
                                    {
                                        new Azure.Media.Inputs.TransformOutputCustomPresetCodecH265VideoLayerArgs
                                        {
                                            Width = "64",
                                            Height = "64",
                                            Bitrate = 1045000,
                                            MaxBitrate = 1045000,
                                            BFrames = 3,
                                            Slices = 5,
                                            AdaptiveBFrameEnabled = true,
                                            Profile = "Auto",
                                            Label = "test",
                                            Level = "auto",
                                            BufferWindow = "PT5S",
                                            FrameRate = "32",
                                            ReferenceFrames = 4,
                                            Crf = 23,
                                        },
                                    },
                                },
                            },
                            new Azure.Media.Inputs.TransformOutputCustomPresetCodecArgs
                            {
                                JpgImage = new Azure.Media.Inputs.TransformOutputCustomPresetCodecJpgImageArgs
                                {
                                    StretchMode = "AutoSize",
                                    SyncMode = "Auto",
                                    Start = "10",
                                    Range = "100%%",
                                    SpriteColumn = 1,
                                    Step = "10",
                                    Layers = new[]
                                    {
                                        new Azure.Media.Inputs.TransformOutputCustomPresetCodecJpgImageLayerArgs
                                        {
                                            Quality = 70,
                                            Height = "180",
                                            Label = "test",
                                            Width = "120",
                                        },
                                    },
                                },
                            },
                            new Azure.Media.Inputs.TransformOutputCustomPresetCodecArgs
                            {
                                PngImage = new Azure.Media.Inputs.TransformOutputCustomPresetCodecPngImageArgs
                                {
                                    StretchMode = "AutoSize",
                                    SyncMode = "Auto",
                                    Start = "{Best}",
                                    Range = "80",
                                    Step = "10",
                                    Layers = new[]
                                    {
                                        new Azure.Media.Inputs.TransformOutputCustomPresetCodecPngImageLayerArgs
                                        {
                                            Height = "180",
                                            Label = "test",
                                            Width = "120",
                                        },
                                    },
                                },
                            },
                        },
                        Formats = new[]
                        {
                            new Azure.Media.Inputs.TransformOutputCustomPresetFormatArgs
                            {
                                Jpg = new Azure.Media.Inputs.TransformOutputCustomPresetFormatJpgArgs
                                {
                                    FilenamePattern = "test{Basename}",
                                },
                            },
                            new Azure.Media.Inputs.TransformOutputCustomPresetFormatArgs
                            {
                                Mp4 = new Azure.Media.Inputs.TransformOutputCustomPresetFormatMp4Args
                                {
                                    FilenamePattern = "test{Bitrate}",
                                    OutputFiles = new[]
                                    {
                                        new Azure.Media.Inputs.TransformOutputCustomPresetFormatMp4OutputFileArgs
                                        {
                                            Labels = new[]
                                            {
                                                "test",
                                                "ppe",
                                            },
                                        },
                                    },
                                },
                            },
                            new Azure.Media.Inputs.TransformOutputCustomPresetFormatArgs
                            {
                                Png = new Azure.Media.Inputs.TransformOutputCustomPresetFormatPngArgs
                                {
                                    FilenamePattern = "test{Basename}",
                                },
                            },
                            new Azure.Media.Inputs.TransformOutputCustomPresetFormatArgs
                            {
                                TransportStream = new Azure.Media.Inputs.TransformOutputCustomPresetFormatTransportStreamArgs
                                {
                                    FilenamePattern = "test{Bitrate}",
                                    OutputFiles = new[]
                                    {
                                        new Azure.Media.Inputs.TransformOutputCustomPresetFormatTransportStreamOutputFileArgs
                                        {
                                            Labels = new[]
                                            {
                                                "prod",
                                            },
                                        },
                                    },
                                },
                            },
                        },
                        Filter = new Azure.Media.Inputs.TransformOutputCustomPresetFilterArgs
                        {
                            CropRectangle = new Azure.Media.Inputs.TransformOutputCustomPresetFilterCropRectangleArgs
                            {
                                Height = "240",
                                Left = "30",
                                Top = "360",
                                Width = "70",
                            },
                            Deinterlace = new Azure.Media.Inputs.TransformOutputCustomPresetFilterDeinterlaceArgs
                            {
                                Parity = "TopFieldFirst",
                                Mode = "AutoPixelAdaptive",
                            },
                            FadeIn = new Azure.Media.Inputs.TransformOutputCustomPresetFilterFadeInArgs
                            {
                                Duration = "PT5S",
                                FadeColor = "0xFF0000",
                                Start = "10",
                            },
                            FadeOut = new Azure.Media.Inputs.TransformOutputCustomPresetFilterFadeOutArgs
                            {
                                Duration = "90%%",
                                FadeColor = "#FF0C7B",
                                Start = "10%%",
                            },
                            Rotation = "Auto",
                            Overlays = new[]
                            {
                                new Azure.Media.Inputs.TransformOutputCustomPresetFilterOverlayArgs
                                {
                                    Audio = new Azure.Media.Inputs.TransformOutputCustomPresetFilterOverlayAudioArgs
                                    {
                                        InputLabel = "label.jpg",
                                        Start = "PT5S",
                                        End = "PT30S",
                                        FadeInDuration = "PT1S",
                                        FadeOutDuration = "PT2S",
                                        AudioGainLevel = 1,
                                    },
                                },
                                new Azure.Media.Inputs.TransformOutputCustomPresetFilterOverlayArgs
                                {
                                    Video = new Azure.Media.Inputs.TransformOutputCustomPresetFilterOverlayVideoArgs
                                    {
                                        InputLabel = "label.jpg",
                                        Start = "PT5S",
                                        End = "PT30S",
                                        FadeInDuration = "PT1S",
                                        FadeOutDuration = "PT2S",
                                        AudioGainLevel = 1,
                                        Opacity = 1,
                                        Position = new Azure.Media.Inputs.TransformOutputCustomPresetFilterOverlayVideoPositionArgs
                                        {
                                            Height = "180",
                                            Left = "20",
                                            Top = "240",
                                            Width = "140",
                                        },
                                        CropRectangle = new Azure.Media.Inputs.TransformOutputCustomPresetFilterOverlayVideoCropRectangleArgs
                                        {
                                            Height = "240",
                                            Left = "30",
                                            Top = "360",
                                            Width = "70",
                                        },
                                    },
                                },
                            },
                        },
                    },
                },
            },
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.azure.core.ResourceGroup;
    import com.pulumi.azure.core.ResourceGroupArgs;
    import com.pulumi.azure.storage.Account;
    import com.pulumi.azure.storage.AccountArgs;
    import com.pulumi.azure.media.ServiceAccount;
    import com.pulumi.azure.media.ServiceAccountArgs;
    import com.pulumi.azure.media.inputs.ServiceAccountStorageAccountArgs;
    import com.pulumi.azure.media.Transform;
    import com.pulumi.azure.media.TransformArgs;
    import com.pulumi.azure.media.inputs.TransformOutputArgs;
    import com.pulumi.azure.media.inputs.TransformOutputBuiltinPresetArgs;
    import com.pulumi.azure.media.inputs.TransformOutputBuiltinPresetPresetConfigurationArgs;
    import com.pulumi.azure.media.inputs.TransformOutputAudioAnalyzerPresetArgs;
    import com.pulumi.azure.media.inputs.TransformOutputFaceDetectorPresetArgs;
    import com.pulumi.azure.media.inputs.TransformOutputVideoAnalyzerPresetArgs;
    import com.pulumi.azure.media.inputs.TransformOutputCustomPresetArgs;
    // Codec argument types used by the custom preset below (previously missing,
    // which made this example fail to compile).
    import com.pulumi.azure.media.inputs.TransformOutputCustomPresetCodecArgs;
    import com.pulumi.azure.media.inputs.TransformOutputCustomPresetCodecAacAudioArgs;
    import com.pulumi.azure.media.inputs.TransformOutputCustomPresetCodecCopyAudioArgs;
    import com.pulumi.azure.media.inputs.TransformOutputCustomPresetCodecCopyVideoArgs;
    import com.pulumi.azure.media.inputs.TransformOutputCustomPresetCodecH264VideoArgs;
    import com.pulumi.azure.media.inputs.TransformOutputCustomPresetCodecH264VideoLayerArgs;
    import com.pulumi.azure.media.inputs.TransformOutputCustomPresetCodecH265VideoArgs;
    import com.pulumi.azure.media.inputs.TransformOutputCustomPresetCodecH265VideoLayerArgs;
    import com.pulumi.azure.media.inputs.TransformOutputCustomPresetCodecJpgImageArgs;
    import com.pulumi.azure.media.inputs.TransformOutputCustomPresetCodecJpgImageLayerArgs;
    import com.pulumi.azure.media.inputs.TransformOutputCustomPresetCodecPngImageArgs;
    import com.pulumi.azure.media.inputs.TransformOutputCustomPresetCodecPngImageLayerArgs;
    // Output-format argument types used by the custom preset below (previously missing).
    import com.pulumi.azure.media.inputs.TransformOutputCustomPresetFormatArgs;
    import com.pulumi.azure.media.inputs.TransformOutputCustomPresetFormatJpgArgs;
    import com.pulumi.azure.media.inputs.TransformOutputCustomPresetFormatMp4Args;
    import com.pulumi.azure.media.inputs.TransformOutputCustomPresetFormatMp4OutputFileArgs;
    import com.pulumi.azure.media.inputs.TransformOutputCustomPresetFormatPngArgs;
    import com.pulumi.azure.media.inputs.TransformOutputCustomPresetFormatTransportStreamArgs;
    import com.pulumi.azure.media.inputs.TransformOutputCustomPresetFormatTransportStreamOutputFileArgs;
    import com.pulumi.azure.media.inputs.TransformOutputCustomPresetFilterArgs;
    import com.pulumi.azure.media.inputs.TransformOutputCustomPresetFilterCropRectangleArgs;
    import com.pulumi.azure.media.inputs.TransformOutputCustomPresetFilterDeinterlaceArgs;
    import com.pulumi.azure.media.inputs.TransformOutputCustomPresetFilterFadeInArgs;
    import com.pulumi.azure.media.inputs.TransformOutputCustomPresetFilterFadeOutArgs;
    // Overlay argument types used by the custom preset's filter (previously missing).
    import com.pulumi.azure.media.inputs.TransformOutputCustomPresetFilterOverlayArgs;
    import com.pulumi.azure.media.inputs.TransformOutputCustomPresetFilterOverlayAudioArgs;
    import com.pulumi.azure.media.inputs.TransformOutputCustomPresetFilterOverlayVideoArgs;
    import com.pulumi.azure.media.inputs.TransformOutputCustomPresetFilterOverlayVideoPositionArgs;
    import com.pulumi.azure.media.inputs.TransformOutputCustomPresetFilterOverlayVideoCropRectangleArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    /**
     * Example: provisions a resource group, storage account, and Media Services
     * account, then creates a Transform with one output per supported preset
     * kind (builtin, audio analyzer, face detector, video analyzer, custom).
     */
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var example = new ResourceGroup("example", ResourceGroupArgs.builder()        
                .name("media-resources")
                .location("West Europe")
                .build());
    
            var exampleAccount = new Account("exampleAccount", AccountArgs.builder()        
                .name("examplestoracc")
                .resourceGroupName(example.name())
                .location(example.location())
                .accountTier("Standard")
                .accountReplicationType("GRS")
                .build());
    
            var exampleServiceAccount = new ServiceAccount("exampleServiceAccount", ServiceAccountArgs.builder()        
                .name("examplemediaacc")
                .location(example.location())
                .resourceGroupName(example.name())
                .storageAccounts(ServiceAccountStorageAccountArgs.builder()
                    .id(exampleAccount.id())
                    .isPrimary(true)
                    .build())
                .build());
    
            var exampleTransform = new Transform("exampleTransform", TransformArgs.builder()        
                .name("transform1")
                .resourceGroupName(example.name())
                .mediaServicesAccountName(exampleServiceAccount.name())
                .description("My transform description")
                .outputs(            
                    TransformOutputArgs.builder()
                        .relativePriority("Normal")
                        .onErrorAction("ContinueJob")
                        .builtinPreset(TransformOutputBuiltinPresetArgs.builder()
                            .presetName("AACGoodQualityAudio")
                            .presetConfiguration(TransformOutputBuiltinPresetPresetConfigurationArgs.builder()
                                .complexity("Balanced")
                                .interleaveOutput("NonInterleavedOutput")
                                .keyFrameIntervalInSeconds(123122.5)
                                .maxBitrateBps(300000)
                                .maxHeight(480)
                                .maxLayers(14)
                                .minBitrateBps(200000)
                                .minHeight(360)
                                .build())
                            .build())
                        .build(),
                    TransformOutputArgs.builder()
                        .relativePriority("Low")
                        .onErrorAction("ContinueJob")
                        .audioAnalyzerPreset(TransformOutputAudioAnalyzerPresetArgs.builder()
                            .audioLanguage("en-US")
                            .audioAnalysisMode("Basic")
                            .experimentalOptions(Map.of("env", "test"))
                            .build())
                        .build(),
                    TransformOutputArgs.builder()
                        .relativePriority("Low")
                        .onErrorAction("StopProcessingJob")
                        .faceDetectorPreset(TransformOutputFaceDetectorPresetArgs.builder()
                            .analysisResolution("StandardDefinition")
                            .blurType("Med")
                            .faceRedactorMode("Combined")
                            .experimentalOptions(Map.of("env", "test"))
                            .build())
                        .build(),
                    TransformOutputArgs.builder()
                        .relativePriority("Normal")
                        .onErrorAction("StopProcessingJob")
                        .videoAnalyzerPreset(TransformOutputVideoAnalyzerPresetArgs.builder()
                            .audioLanguage("en-US")
                            .audioAnalysisMode("Basic")
                            .insightsType("AllInsights")
                            .experimentalOptions(Map.of("env", "test"))
                            .build())
                        .build(),
                    TransformOutputArgs.builder()
                        .relativePriority("Low")
                        .onErrorAction("ContinueJob")
                        .customPreset(TransformOutputCustomPresetArgs.builder()
                            .codecs(                        
                                TransformOutputCustomPresetCodecArgs.builder()
                                    .aacAudio(TransformOutputCustomPresetCodecAacAudioArgs.builder()
                                        .bitrate(128000)
                                        .channels(2)
                                        .samplingRate(48000)
                                        .profile("AacLc")
                                        .build())
                                    .build(),
                                TransformOutputCustomPresetCodecArgs.builder()
                                    .copyAudio(TransformOutputCustomPresetCodecCopyAudioArgs.builder()
                                        .label("test")
                                        .build())
                                    .build(),
                                TransformOutputCustomPresetCodecArgs.builder()
                                    .copyVideo(TransformOutputCustomPresetCodecCopyVideoArgs.builder()
                                        .label("test")
                                        .build())
                                    .build(),
                                TransformOutputCustomPresetCodecArgs.builder()
                                    .h264Video(TransformOutputCustomPresetCodecH264VideoArgs.builder()
                                        .keyFrameInterval("PT1S")
                                        .stretchMode("AutoSize")
                                        .syncMode("Auto")
                                        .sceneChangeDetectionEnabled(false)
                                        .rateControlMode("ABR")
                                        .complexity("Quality")
                                        .layers(                                    
                                            TransformOutputCustomPresetCodecH264VideoLayerArgs.builder()
                                                .width("64")
                                                .height("64")
                                                .bitrate(1045000)
                                                .maxBitrate(1045000)
                                                .bFrames(3)
                                                .slices(0)
                                                .adaptiveBFrameEnabled(true)
                                                .profile("Auto")
                                                .level("auto")
                                                .bufferWindow("PT5S")
                                                .referenceFrames(4)
                                                .crf(23)
                                                .entropyMode("Cabac")
                                                .build(),
                                            TransformOutputCustomPresetCodecH264VideoLayerArgs.builder()
                                                .width("64")
                                                .height("64")
                                                .bitrate(1000)
                                                .maxBitrate(1000)
                                                .bFrames(3)
                                                .frameRate("32")
                                                .slices(1)
                                                .adaptiveBFrameEnabled(true)
                                                .profile("High444")
                                                .level("auto")
                                                .bufferWindow("PT5S")
                                                .referenceFrames(4)
                                                .crf(23)
                                                .entropyMode("Cavlc")
                                                .build())
                                        .build())
                                    .build(),
                                TransformOutputCustomPresetCodecArgs.builder()
                                    .h265Video(TransformOutputCustomPresetCodecH265VideoArgs.builder()
                                        .keyFrameInterval("PT2S")
                                        .stretchMode("AutoSize")
                                        .syncMode("Auto")
                                        .sceneChangeDetectionEnabled(false)
                                        .complexity("Speed")
                                        .layers(TransformOutputCustomPresetCodecH265VideoLayerArgs.builder()
                                            .width("64")
                                            .height("64")
                                            .bitrate(1045000)
                                            .maxBitrate(1045000)
                                            .bFrames(3)
                                            .slices(5)
                                            .adaptiveBFrameEnabled(true)
                                            .profile("Auto")
                                            .label("test")
                                            .level("auto")
                                            .bufferWindow("PT5S")
                                            .frameRate("32")
                                            .referenceFrames(4)
                                            .crf(23)
                                            .build())
                                        .build())
                                    .build(),
                                TransformOutputCustomPresetCodecArgs.builder()
                                    .jpgImage(TransformOutputCustomPresetCodecJpgImageArgs.builder()
                                        .stretchMode("AutoSize")
                                        .syncMode("Auto")
                                        .start("10")
                                        // Percentage of stream duration; the doubled "%%" in the
                                        // upstream docs is a format-escape artifact, not a valid value.
                                        .range("100%")
                                        .spriteColumn(1)
                                        .step("10")
                                        .layers(TransformOutputCustomPresetCodecJpgImageLayerArgs.builder()
                                            .quality(70)
                                            .height("180")
                                            .label("test")
                                            .width("120")
                                            .build())
                                        .build())
                                    .build(),
                                TransformOutputCustomPresetCodecArgs.builder()
                                    .pngImage(TransformOutputCustomPresetCodecPngImageArgs.builder()
                                        .stretchMode("AutoSize")
                                        .syncMode("Auto")
                                        .start("{Best}")
                                        .range("80")
                                        .step("10")
                                        .layers(TransformOutputCustomPresetCodecPngImageLayerArgs.builder()
                                            .height("180")
                                            .label("test")
                                            .width("120")
                                            .build())
                                        .build())
                                    .build())
                            .formats(                        
                                TransformOutputCustomPresetFormatArgs.builder()
                                    .jpg(TransformOutputCustomPresetFormatJpgArgs.builder()
                                        .filenamePattern("test{Basename}")
                                        .build())
                                    .build(),
                                TransformOutputCustomPresetFormatArgs.builder()
                                    .mp4(TransformOutputCustomPresetFormatMp4Args.builder()
                                        .filenamePattern("test{Bitrate}")
                                        .outputFiles(TransformOutputCustomPresetFormatMp4OutputFileArgs.builder()
                                            .labels(                                        
                                                "test",
                                                "ppe")
                                            .build())
                                        .build())
                                    .build(),
                                TransformOutputCustomPresetFormatArgs.builder()
                                    .png(TransformOutputCustomPresetFormatPngArgs.builder()
                                        .filenamePattern("test{Basename}")
                                        .build())
                                    .build(),
                                TransformOutputCustomPresetFormatArgs.builder()
                                    .transportStream(TransformOutputCustomPresetFormatTransportStreamArgs.builder()
                                        .filenamePattern("test{Bitrate}")
                                        .outputFiles(TransformOutputCustomPresetFormatTransportStreamOutputFileArgs.builder()
                                            .labels("prod")
                                            .build())
                                        .build())
                                    .build())
                            .filter(TransformOutputCustomPresetFilterArgs.builder()
                                .cropRectangle(TransformOutputCustomPresetFilterCropRectangleArgs.builder()
                                    .height("240")
                                    .left("30")
                                    .top("360")
                                    .width("70")
                                    .build())
                                .deinterlace(TransformOutputCustomPresetFilterDeinterlaceArgs.builder()
                                    .parity("TopFieldFirst")
                                    .mode("AutoPixelAdaptive")
                                    .build())
                                .fadeIn(TransformOutputCustomPresetFilterFadeInArgs.builder()
                                    .duration("PT5S")
                                    .fadeColor("0xFF0000")
                                    .start("10")
                                    .build())
                                .fadeOut(TransformOutputCustomPresetFilterFadeOutArgs.builder()
                                    // Percent-of-duration values; single "%" is the valid form
                                    // (the upstream docs' "%%" is a format-escape artifact).
                                    .duration("90%")
                                    .fadeColor("#FF0C7B")
                                    .start("10%")
                                    .build())
                                .rotation("Auto")
                                .overlays(                            
                                    TransformOutputCustomPresetFilterOverlayArgs.builder()
                                        .audio(TransformOutputCustomPresetFilterOverlayAudioArgs.builder()
                                            .inputLabel("label.jpg")
                                            .start("PT5S")
                                            .end("PT30S")
                                            .fadeInDuration("PT1S")
                                            .fadeOutDuration("PT2S")
                                            .audioGainLevel(1)
                                            .build())
                                        .build(),
                                    TransformOutputCustomPresetFilterOverlayArgs.builder()
                                        .video(TransformOutputCustomPresetFilterOverlayVideoArgs.builder()
                                            .inputLabel("label.jpg")
                                            .start("PT5S")
                                            .end("PT30S")
                                            .fadeInDuration("PT1S")
                                            .fadeOutDuration("PT2S")
                                            .audioGainLevel(1)
                                            .opacity(1)
                                            .position(TransformOutputCustomPresetFilterOverlayVideoPositionArgs.builder()
                                                .height("180")
                                                .left("20")
                                                .top("240")
                                                .width("140")
                                                .build())
                                            .cropRectangle(TransformOutputCustomPresetFilterOverlayVideoCropRectangleArgs.builder()
                                                .height("240")
                                                .left("30")
                                                .top("360")
                                                .width("70")
                                                .build())
                                            .build())
                                        .build())
                                .build())
                            .build())
                        .build())
                .build());
    
        }
    }
    
    resources:
      example:
        type: azure:core:ResourceGroup
        properties:
          name: media-resources
          location: West Europe
      exampleAccount:
        type: azure:storage:Account
        name: example
        properties:
          name: examplestoracc
          resourceGroupName: ${example.name}
          location: ${example.location}
          accountTier: Standard
          accountReplicationType: GRS
      exampleServiceAccount:
        type: azure:media:ServiceAccount
        name: example
        properties:
          name: examplemediaacc
          location: ${example.location}
          resourceGroupName: ${example.name}
          storageAccounts:
            - id: ${exampleAccount.id}
              isPrimary: true
      exampleTransform:
        type: azure:media:Transform
        name: example
        properties:
          name: transform1
          resourceGroupName: ${example.name}
          mediaServicesAccountName: ${exampleServiceAccount.name}
          description: My transform description
          outputs:
            - relativePriority: Normal
              onErrorAction: ContinueJob
              builtinPreset:
                presetName: AACGoodQualityAudio
                presetConfiguration:
                  complexity: Balanced
                  interleaveOutput: NonInterleavedOutput
                  keyFrameIntervalInSeconds: 123122.5
                  maxBitrateBps: 300000
                  maxHeight: 480
                  maxLayers: 14
                  minBitrateBps: 200000
                  minHeight: 360
            - relativePriority: Low
              onErrorAction: ContinueJob
              audioAnalyzerPreset:
                audioLanguage: en-US
                audioAnalysisMode: Basic
                experimentalOptions:
                  env: test
            - relativePriority: Low
              onErrorAction: StopProcessingJob
              faceDetectorPreset:
                analysisResolution: StandardDefinition
                blurType: Med
                faceRedactorMode: Combined
                experimentalOptions:
                  env: test
            - relativePriority: Normal
              onErrorAction: StopProcessingJob
              videoAnalyzerPreset:
                audioLanguage: en-US
                audioAnalysisMode: Basic
                insightsType: AllInsights
                experimentalOptions:
                  env: test
            - relativePriority: Low
              onErrorAction: ContinueJob
              customPreset:
                codecs:
                  - aacAudio:
                      bitrate: 128000
                      channels: 2
                      samplingRate: 48000
                      profile: AacLc
                  - copyAudio:
                      label: test
                  - copyVideo:
                      label: test
                  - h264Video:
                      keyFrameInterval: PT1S
                      stretchMode: AutoSize
                      syncMode: Auto
                      sceneChangeDetectionEnabled: false
                      rateControlMode: ABR
                      complexity: Quality
                      layers:
                        - width: '64'
                          height: '64'
                          bitrate: 1.045e+06
                          maxBitrate: 1.045e+06
                          bFrames: 3
                          slices: 0
                          adaptiveBFrameEnabled: true
                          profile: Auto
                          level: auto
                          bufferWindow: PT5S
                          referenceFrames: 4
                          crf: 23
                          entropyMode: Cabac
                        - width: '64'
                          height: '64'
                          bitrate: 1000
                          maxBitrate: 1000
                          bFrames: 3
                          frameRate: '32'
                          slices: 1
                          adaptiveBFrameEnabled: true
                          profile: High444
                          level: auto
                          bufferWindow: PT5S
                          referenceFrames: 4
                          crf: 23
                          entropyMode: Cavlc
                  - h265Video:
                      keyFrameInterval: PT2S
                      stretchMode: AutoSize
                      syncMode: Auto
                      sceneChangeDetectionEnabled: false
                      complexity: Speed
                      layers:
                        - width: '64'
                          height: '64'
                          bitrate: 1.045e+06
                          maxBitrate: 1.045e+06
                          bFrames: 3
                          slices: 5
                          adaptiveBFrameEnabled: true
                          profile: Auto
                          label: test
                          level: auto
                          bufferWindow: PT5S
                          frameRate: '32'
                          referenceFrames: 4
                          crf: 23
                  - jpgImage:
                      stretchMode: AutoSize
                      syncMode: Auto
                      start: '10'
                      range: 100%
                      spriteColumn: 1
                      step: '10'
                      layers:
                        - quality: 70
                          height: '180'
                          label: test
                          width: '120'
                  - pngImage:
                      stretchMode: AutoSize
                      syncMode: Auto
                      start: '{Best}'
                      range: '80'
                      step: '10'
                      layers:
                        - height: '180'
                          label: test
                          width: '120'
                formats:
                  - jpg:
                      filenamePattern: test{Basename}
                  - mp4:
                      filenamePattern: test{Bitrate}
                      outputFiles:
                        - labels:
                            - test
                            - ppe
                  - png:
                      filenamePattern: test{Basename}
                  - transportStream:
                      filenamePattern: test{Bitrate}
                      outputFiles:
                        - labels:
                            - prod
                filter:
                  cropRectangle:
                    height: '240'
                    left: '30'
                    top: '360'
                    width: '70'
                  deinterlace:
                    parity: TopFieldFirst
                    mode: AutoPixelAdaptive
                  fadeIn:
                    duration: PT5S
                    fadeColor: 0xFF0000
                    start: '10'
                  fadeOut:
                    duration: 90%
                    fadeColor: '#FF0C7B'
                    start: 10%
                  rotation: Auto
                  overlays:
                    - audio:
                        inputLabel: label.jpg
                        start: PT5S
                        end: PT30S
                        fadeInDuration: PT1S
                        fadeOutDuration: PT2S
                        audioGainLevel: 1
                    - video:
                        inputLabel: label.jpg
                        start: PT5S
                        end: PT30S
                        fadeInDuration: PT1S
                        fadeOutDuration: PT2S
                        audioGainLevel: 1
                        opacity: 1
                        position:
                          height: '180'
                          left: '20'
                          top: '240'
                          width: '140'
                        cropRectangle:
                          height: '240'
                          left: '30'
                          top: '360'
                          width: '70'
    

    Create Transform Resource

    Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.

    Constructor syntax

    new Transform(name: string, args: TransformArgs, opts?: CustomResourceOptions);
    @overload
    def Transform(resource_name: str,
                  args: TransformArgs,
                  opts: Optional[ResourceOptions] = None)
    
    @overload
    def Transform(resource_name: str,
                  opts: Optional[ResourceOptions] = None,
                  media_services_account_name: Optional[str] = None,
                  resource_group_name: Optional[str] = None,
                  description: Optional[str] = None,
                  name: Optional[str] = None,
                  outputs: Optional[Sequence[TransformOutputArgs]] = None)
    func NewTransform(ctx *Context, name string, args TransformArgs, opts ...ResourceOption) (*Transform, error)
    public Transform(string name, TransformArgs args, CustomResourceOptions? opts = null)
    public Transform(String name, TransformArgs args)
    public Transform(String name, TransformArgs args, CustomResourceOptions options)
    
    type: azure:media:Transform
    properties: # The arguments to resource properties.
    options: # Bag of options to control resource's behavior.
    
    

    Parameters

    name string
    The unique name of the resource.
    args TransformArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    resource_name str
    The unique name of the resource.
    args TransformArgs
    The arguments to resource properties.
    opts ResourceOptions
    Bag of options to control resource's behavior.
    ctx Context
    Context object for the current deployment.
    name string
    The unique name of the resource.
    args TransformArgs
    The arguments to resource properties.
    opts ResourceOption
    Bag of options to control resource's behavior.
    name string
    The unique name of the resource.
    args TransformArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    name String
    The unique name of the resource.
    args TransformArgs
    The arguments to resource properties.
    options CustomResourceOptions
    Bag of options to control resource's behavior.

    Example

    The following reference example uses placeholder values for all input properties.

    var transformResource = new Azure.Media.Transform("transformResource", new()
    {
        MediaServicesAccountName = "string",
        ResourceGroupName = "string",
        Description = "string",
        Name = "string",
        Outputs = new[]
        {
            new Azure.Media.Inputs.TransformOutputArgs
            {
                AudioAnalyzerPreset = new Azure.Media.Inputs.TransformOutputAudioAnalyzerPresetArgs
                {
                    AudioAnalysisMode = "string",
                    AudioLanguage = "string",
                    ExperimentalOptions = 
                    {
                        { "string", "string" },
                    },
                },
                BuiltinPreset = new Azure.Media.Inputs.TransformOutputBuiltinPresetArgs
                {
                    PresetName = "string",
                    PresetConfiguration = new Azure.Media.Inputs.TransformOutputBuiltinPresetPresetConfigurationArgs
                    {
                        Complexity = "string",
                        InterleaveOutput = "string",
                        KeyFrameIntervalInSeconds = 0,
                        MaxBitrateBps = 0,
                        MaxHeight = 0,
                        MaxLayers = 0,
                        MinBitrateBps = 0,
                        MinHeight = 0,
                    },
                },
                CustomPreset = new Azure.Media.Inputs.TransformOutputCustomPresetArgs
                {
                    Codecs = new[]
                    {
                        new Azure.Media.Inputs.TransformOutputCustomPresetCodecArgs
                        {
                            AacAudio = new Azure.Media.Inputs.TransformOutputCustomPresetCodecAacAudioArgs
                            {
                                Bitrate = 0,
                                Channels = 0,
                                Label = "string",
                                Profile = "string",
                                SamplingRate = 0,
                            },
                            CopyAudio = new Azure.Media.Inputs.TransformOutputCustomPresetCodecCopyAudioArgs
                            {
                                Label = "string",
                            },
                            CopyVideo = new Azure.Media.Inputs.TransformOutputCustomPresetCodecCopyVideoArgs
                            {
                                Label = "string",
                            },
                            DdAudio = new Azure.Media.Inputs.TransformOutputCustomPresetCodecDdAudioArgs
                            {
                                Bitrate = 0,
                                Channels = 0,
                                Label = "string",
                                SamplingRate = 0,
                            },
                            H264Video = new Azure.Media.Inputs.TransformOutputCustomPresetCodecH264VideoArgs
                            {
                                Complexity = "string",
                                KeyFrameInterval = "string",
                                Label = "string",
                                Layers = new[]
                                {
                                    new Azure.Media.Inputs.TransformOutputCustomPresetCodecH264VideoLayerArgs
                                    {
                                        Bitrate = 0,
                                        FrameRate = "string",
                                        Label = "string",
                                        BufferWindow = "string",
                                        Crf = 0,
                                        EntropyMode = "string",
                                        AdaptiveBFrameEnabled = false,
                                        Height = "string",
                                        BFrames = 0,
                                        Level = "string",
                                        MaxBitrate = 0,
                                        Profile = "string",
                                        ReferenceFrames = 0,
                                        Slices = 0,
                                        Width = "string",
                                    },
                                },
                                RateControlMode = "string",
                                SceneChangeDetectionEnabled = false,
                                StretchMode = "string",
                                SyncMode = "string",
                            },
                            H265Video = new Azure.Media.Inputs.TransformOutputCustomPresetCodecH265VideoArgs
                            {
                                Complexity = "string",
                                KeyFrameInterval = "string",
                                Label = "string",
                                Layers = new[]
                                {
                                    new Azure.Media.Inputs.TransformOutputCustomPresetCodecH265VideoLayerArgs
                                    {
                                        Bitrate = 0,
                                        Height = "string",
                                        BFrames = 0,
                                        BufferWindow = "string",
                                        Crf = 0,
                                        FrameRate = "string",
                                        AdaptiveBFrameEnabled = false,
                                        Label = "string",
                                        Level = "string",
                                        MaxBitrate = 0,
                                        Profile = "string",
                                        ReferenceFrames = 0,
                                        Slices = 0,
                                        Width = "string",
                                    },
                                },
                                SceneChangeDetectionEnabled = false,
                                StretchMode = "string",
                                SyncMode = "string",
                            },
                            JpgImage = new Azure.Media.Inputs.TransformOutputCustomPresetCodecJpgImageArgs
                            {
                                Start = "string",
                                KeyFrameInterval = "string",
                                Label = "string",
                                Layers = new[]
                                {
                                    new Azure.Media.Inputs.TransformOutputCustomPresetCodecJpgImageLayerArgs
                                    {
                                        Height = "string",
                                        Label = "string",
                                        Quality = 0,
                                        Width = "string",
                                    },
                                },
                                Range = "string",
                                SpriteColumn = 0,
                                Step = "string",
                                StretchMode = "string",
                                SyncMode = "string",
                            },
                            PngImage = new Azure.Media.Inputs.TransformOutputCustomPresetCodecPngImageArgs
                            {
                                Start = "string",
                                KeyFrameInterval = "string",
                                Label = "string",
                                Layers = new[]
                                {
                                    new Azure.Media.Inputs.TransformOutputCustomPresetCodecPngImageLayerArgs
                                    {
                                        Height = "string",
                                        Label = "string",
                                        Width = "string",
                                    },
                                },
                                Range = "string",
                                Step = "string",
                                StretchMode = "string",
                                SyncMode = "string",
                            },
                        },
                    },
                    Formats = new[]
                    {
                        new Azure.Media.Inputs.TransformOutputCustomPresetFormatArgs
                        {
                            Jpg = new Azure.Media.Inputs.TransformOutputCustomPresetFormatJpgArgs
                            {
                                FilenamePattern = "string",
                            },
                            Mp4 = new Azure.Media.Inputs.TransformOutputCustomPresetFormatMp4Args
                            {
                                FilenamePattern = "string",
                                OutputFiles = new[]
                                {
                                    new Azure.Media.Inputs.TransformOutputCustomPresetFormatMp4OutputFileArgs
                                    {
                                        Labels = new[]
                                        {
                                            "string",
                                        },
                                    },
                                },
                            },
                            Png = new Azure.Media.Inputs.TransformOutputCustomPresetFormatPngArgs
                            {
                                FilenamePattern = "string",
                            },
                            TransportStream = new Azure.Media.Inputs.TransformOutputCustomPresetFormatTransportStreamArgs
                            {
                                FilenamePattern = "string",
                                OutputFiles = new[]
                                {
                                    new Azure.Media.Inputs.TransformOutputCustomPresetFormatTransportStreamOutputFileArgs
                                    {
                                        Labels = new[]
                                        {
                                            "string",
                                        },
                                    },
                                },
                            },
                        },
                    },
                    ExperimentalOptions = 
                    {
                        { "string", "string" },
                    },
                    Filter = new Azure.Media.Inputs.TransformOutputCustomPresetFilterArgs
                    {
                        CropRectangle = new Azure.Media.Inputs.TransformOutputCustomPresetFilterCropRectangleArgs
                        {
                            Height = "string",
                            Left = "string",
                            Top = "string",
                            Width = "string",
                        },
                        Deinterlace = new Azure.Media.Inputs.TransformOutputCustomPresetFilterDeinterlaceArgs
                        {
                            Mode = "string",
                            Parity = "string",
                        },
                        FadeIn = new Azure.Media.Inputs.TransformOutputCustomPresetFilterFadeInArgs
                        {
                            Duration = "string",
                            FadeColor = "string",
                            Start = "string",
                        },
                        FadeOut = new Azure.Media.Inputs.TransformOutputCustomPresetFilterFadeOutArgs
                        {
                            Duration = "string",
                            FadeColor = "string",
                            Start = "string",
                        },
                        Overlays = new[]
                        {
                            new Azure.Media.Inputs.TransformOutputCustomPresetFilterOverlayArgs
                            {
                                Audio = new Azure.Media.Inputs.TransformOutputCustomPresetFilterOverlayAudioArgs
                                {
                                    InputLabel = "string",
                                    AudioGainLevel = 0,
                                    End = "string",
                                    FadeInDuration = "string",
                                    FadeOutDuration = "string",
                                    Start = "string",
                                },
                                Video = new Azure.Media.Inputs.TransformOutputCustomPresetFilterOverlayVideoArgs
                                {
                                    InputLabel = "string",
                                    AudioGainLevel = 0,
                                    CropRectangle = new Azure.Media.Inputs.TransformOutputCustomPresetFilterOverlayVideoCropRectangleArgs
                                    {
                                        Height = "string",
                                        Left = "string",
                                        Top = "string",
                                        Width = "string",
                                    },
                                    End = "string",
                                    FadeInDuration = "string",
                                    FadeOutDuration = "string",
                                    Opacity = 0,
                                    Position = new Azure.Media.Inputs.TransformOutputCustomPresetFilterOverlayVideoPositionArgs
                                    {
                                        Height = "string",
                                        Left = "string",
                                        Top = "string",
                                        Width = "string",
                                    },
                                    Start = "string",
                                },
                            },
                        },
                        Rotation = "string",
                    },
                },
                OnErrorAction = "string",
                RelativePriority = "string",
            },
        },
    });
    
    example, err := media.NewTransform(ctx, "transformResource", &media.TransformArgs{
    	MediaServicesAccountName: pulumi.String("string"),
    	ResourceGroupName:        pulumi.String("string"),
    	Description:              pulumi.String("string"),
    	Name:                     pulumi.String("string"),
    	Outputs: media.TransformOutputTypeArray{
    		&media.TransformOutputTypeArgs{
    			AudioAnalyzerPreset: &media.TransformOutputAudioAnalyzerPresetArgs{
    				AudioAnalysisMode: pulumi.String("string"),
    				AudioLanguage:     pulumi.String("string"),
    				ExperimentalOptions: pulumi.StringMap{
    					"string": pulumi.String("string"),
    				},
    			},
    			BuiltinPreset: &media.TransformOutputBuiltinPresetArgs{
    				PresetName: pulumi.String("string"),
    				PresetConfiguration: &media.TransformOutputBuiltinPresetPresetConfigurationArgs{
    					Complexity:                pulumi.String("string"),
    					InterleaveOutput:          pulumi.String("string"),
    					KeyFrameIntervalInSeconds: pulumi.Float64(0),
    					MaxBitrateBps:             pulumi.Int(0),
    					MaxHeight:                 pulumi.Int(0),
    					MaxLayers:                 pulumi.Int(0),
    					MinBitrateBps:             pulumi.Int(0),
    					MinHeight:                 pulumi.Int(0),
    				},
    			},
    			CustomPreset: &media.TransformOutputCustomPresetArgs{
    				Codecs: media.TransformOutputCustomPresetCodecArray{
    					&media.TransformOutputCustomPresetCodecArgs{
    						AacAudio: &media.TransformOutputCustomPresetCodecAacAudioArgs{
    							Bitrate:      pulumi.Int(0),
    							Channels:     pulumi.Int(0),
    							Label:        pulumi.String("string"),
    							Profile:      pulumi.String("string"),
    							SamplingRate: pulumi.Int(0),
    						},
    						CopyAudio: &media.TransformOutputCustomPresetCodecCopyAudioArgs{
    							Label: pulumi.String("string"),
    						},
    						CopyVideo: &media.TransformOutputCustomPresetCodecCopyVideoArgs{
    							Label: pulumi.String("string"),
    						},
    						DdAudio: &media.TransformOutputCustomPresetCodecDdAudioArgs{
    							Bitrate:      pulumi.Int(0),
    							Channels:     pulumi.Int(0),
    							Label:        pulumi.String("string"),
    							SamplingRate: pulumi.Int(0),
    						},
    						H264Video: &media.TransformOutputCustomPresetCodecH264VideoArgs{
    							Complexity:       pulumi.String("string"),
    							KeyFrameInterval: pulumi.String("string"),
    							Label:            pulumi.String("string"),
    							Layers: media.TransformOutputCustomPresetCodecH264VideoLayerArray{
    								&media.TransformOutputCustomPresetCodecH264VideoLayerArgs{
    									Bitrate:               pulumi.Int(0),
    									FrameRate:             pulumi.String("string"),
    									Label:                 pulumi.String("string"),
    									BufferWindow:          pulumi.String("string"),
    									Crf:                   pulumi.Float64(0),
    									EntropyMode:           pulumi.String("string"),
    									AdaptiveBFrameEnabled: pulumi.Bool(false),
    									Height:                pulumi.String("string"),
    									BFrames:               pulumi.Int(0),
    									Level:                 pulumi.String("string"),
    									MaxBitrate:            pulumi.Int(0),
    									Profile:               pulumi.String("string"),
    									ReferenceFrames:       pulumi.Int(0),
    									Slices:                pulumi.Int(0),
    									Width:                 pulumi.String("string"),
    								},
    							},
    							RateControlMode:             pulumi.String("string"),
    							SceneChangeDetectionEnabled: pulumi.Bool(false),
    							StretchMode:                 pulumi.String("string"),
    							SyncMode:                    pulumi.String("string"),
    						},
    						H265Video: &media.TransformOutputCustomPresetCodecH265VideoArgs{
    							Complexity:       pulumi.String("string"),
    							KeyFrameInterval: pulumi.String("string"),
    							Label:            pulumi.String("string"),
    							Layers: media.TransformOutputCustomPresetCodecH265VideoLayerArray{
    								&media.TransformOutputCustomPresetCodecH265VideoLayerArgs{
    									Bitrate:               pulumi.Int(0),
    									Height:                pulumi.String("string"),
    									BFrames:               pulumi.Int(0),
    									BufferWindow:          pulumi.String("string"),
    									Crf:                   pulumi.Float64(0),
    									FrameRate:             pulumi.String("string"),
    									AdaptiveBFrameEnabled: pulumi.Bool(false),
    									Label:                 pulumi.String("string"),
    									Level:                 pulumi.String("string"),
    									MaxBitrate:            pulumi.Int(0),
    									Profile:               pulumi.String("string"),
    									ReferenceFrames:       pulumi.Int(0),
    									Slices:                pulumi.Int(0),
    									Width:                 pulumi.String("string"),
    								},
    							},
    							SceneChangeDetectionEnabled: pulumi.Bool(false),
    							StretchMode:                 pulumi.String("string"),
    							SyncMode:                    pulumi.String("string"),
    						},
    						JpgImage: &media.TransformOutputCustomPresetCodecJpgImageArgs{
    							Start:            pulumi.String("string"),
    							KeyFrameInterval: pulumi.String("string"),
    							Label:            pulumi.String("string"),
    							Layers: media.TransformOutputCustomPresetCodecJpgImageLayerArray{
    								&media.TransformOutputCustomPresetCodecJpgImageLayerArgs{
    									Height:  pulumi.String("string"),
    									Label:   pulumi.String("string"),
    									Quality: pulumi.Int(0),
    									Width:   pulumi.String("string"),
    								},
    							},
    							Range:        pulumi.String("string"),
    							SpriteColumn: pulumi.Int(0),
    							Step:         pulumi.String("string"),
    							StretchMode:  pulumi.String("string"),
    							SyncMode:     pulumi.String("string"),
    						},
    						PngImage: &media.TransformOutputCustomPresetCodecPngImageArgs{
    							Start:            pulumi.String("string"),
    							KeyFrameInterval: pulumi.String("string"),
    							Label:            pulumi.String("string"),
    							Layers: media.TransformOutputCustomPresetCodecPngImageLayerArray{
    								&media.TransformOutputCustomPresetCodecPngImageLayerArgs{
    									Height: pulumi.String("string"),
    									Label:  pulumi.String("string"),
    									Width:  pulumi.String("string"),
    								},
    							},
    							Range:       pulumi.String("string"),
    							Step:        pulumi.String("string"),
    							StretchMode: pulumi.String("string"),
    							SyncMode:    pulumi.String("string"),
    						},
    					},
    				},
    				Formats: media.TransformOutputCustomPresetFormatArray{
    					&media.TransformOutputCustomPresetFormatArgs{
    						Jpg: &media.TransformOutputCustomPresetFormatJpgArgs{
    							FilenamePattern: pulumi.String("string"),
    						},
    						Mp4: &media.TransformOutputCustomPresetFormatMp4Args{
    							FilenamePattern: pulumi.String("string"),
    							OutputFiles: media.TransformOutputCustomPresetFormatMp4OutputFileArray{
    								&media.TransformOutputCustomPresetFormatMp4OutputFileArgs{
    									Labels: pulumi.StringArray{
    										pulumi.String("string"),
    									},
    								},
    							},
    						},
    						Png: &media.TransformOutputCustomPresetFormatPngArgs{
    							FilenamePattern: pulumi.String("string"),
    						},
    						TransportStream: &media.TransformOutputCustomPresetFormatTransportStreamArgs{
    							FilenamePattern: pulumi.String("string"),
    							OutputFiles: media.TransformOutputCustomPresetFormatTransportStreamOutputFileArray{
    								&media.TransformOutputCustomPresetFormatTransportStreamOutputFileArgs{
    									Labels: pulumi.StringArray{
    										pulumi.String("string"),
    									},
    								},
    							},
    						},
    					},
    				},
    				ExperimentalOptions: pulumi.StringMap{
    					"string": pulumi.String("string"),
    				},
    				Filter: &media.TransformOutputCustomPresetFilterArgs{
    					CropRectangle: &media.TransformOutputCustomPresetFilterCropRectangleArgs{
    						Height: pulumi.String("string"),
    						Left:   pulumi.String("string"),
    						Top:    pulumi.String("string"),
    						Width:  pulumi.String("string"),
    					},
    					Deinterlace: &media.TransformOutputCustomPresetFilterDeinterlaceArgs{
    						Mode:   pulumi.String("string"),
    						Parity: pulumi.String("string"),
    					},
    					FadeIn: &media.TransformOutputCustomPresetFilterFadeInArgs{
    						Duration:  pulumi.String("string"),
    						FadeColor: pulumi.String("string"),
    						Start:     pulumi.String("string"),
    					},
    					FadeOut: &media.TransformOutputCustomPresetFilterFadeOutArgs{
    						Duration:  pulumi.String("string"),
    						FadeColor: pulumi.String("string"),
    						Start:     pulumi.String("string"),
    					},
    					Overlays: media.TransformOutputCustomPresetFilterOverlayArray{
    						&media.TransformOutputCustomPresetFilterOverlayArgs{
    							Audio: &media.TransformOutputCustomPresetFilterOverlayAudioArgs{
    								InputLabel:      pulumi.String("string"),
    								AudioGainLevel:  pulumi.Float64(0),
    								End:             pulumi.String("string"),
    								FadeInDuration:  pulumi.String("string"),
    								FadeOutDuration: pulumi.String("string"),
    								Start:           pulumi.String("string"),
    							},
    							Video: &media.TransformOutputCustomPresetFilterOverlayVideoArgs{
    								InputLabel:     pulumi.String("string"),
    								AudioGainLevel: pulumi.Float64(0),
    								CropRectangle: &media.TransformOutputCustomPresetFilterOverlayVideoCropRectangleArgs{
    									Height: pulumi.String("string"),
    									Left:   pulumi.String("string"),
    									Top:    pulumi.String("string"),
    									Width:  pulumi.String("string"),
    								},
    								End:             pulumi.String("string"),
    								FadeInDuration:  pulumi.String("string"),
    								FadeOutDuration: pulumi.String("string"),
    								Opacity:         pulumi.Float64(0),
    								Position: &media.TransformOutputCustomPresetFilterOverlayVideoPositionArgs{
    									Height: pulumi.String("string"),
    									Left:   pulumi.String("string"),
    									Top:    pulumi.String("string"),
    									Width:  pulumi.String("string"),
    								},
    								Start: pulumi.String("string"),
    							},
    						},
    					},
    					Rotation: pulumi.String("string"),
    				},
    			},
    			OnErrorAction:    pulumi.String("string"),
    			RelativePriority: pulumi.String("string"),
    		},
    	},
    })
    
    // Example: construct an azure.media.Transform named "transformResource".
    // All values shown ("string", 0, false) are placeholders illustrating the
    // full input shape; replace them with real settings before use.
    var transformResource = new Transform("transformResource", TransformArgs.builder()        
        .mediaServicesAccountName("string")
        .resourceGroupName("string")
        .description("string")
        .name("string")
        // One Transform output: a preset (audioAnalyzerPreset, builtinPreset,
        // or customPreset) plus error-handling and priority settings.
        .outputs(TransformOutputArgs.builder()
            // Preset variant: audio analysis.
            .audioAnalyzerPreset(TransformOutputAudioAnalyzerPresetArgs.builder()
                .audioAnalysisMode("string")
                .audioLanguage("string")
                .experimentalOptions(Map.of("string", "string"))
                .build())
            // Preset variant: a built-in preset selected by name, optionally
            // tuned through presetConfiguration.
            .builtinPreset(TransformOutputBuiltinPresetArgs.builder()
                .presetName("string")
                .presetConfiguration(TransformOutputBuiltinPresetPresetConfigurationArgs.builder()
                    .complexity("string")
                    .interleaveOutput("string")
                    .keyFrameIntervalInSeconds(0)
                    .maxBitrateBps(0)
                    .maxHeight(0)
                    .maxLayers(0)
                    .minBitrateBps(0)
                    .minHeight(0)
                    .build())
                .build())
            // Preset variant: a fully custom preset composed of codecs,
            // output formats, experimental options, and an optional filter.
            .customPreset(TransformOutputCustomPresetArgs.builder()
                // Codec settings: audio (AAC, DD, copy), video (H.264, H.265,
                // copy), and still-image codecs (JPG, PNG).
                .codecs(TransformOutputCustomPresetCodecArgs.builder()
                    .aacAudio(TransformOutputCustomPresetCodecAacAudioArgs.builder()
                        .bitrate(0)
                        .channels(0)
                        .label("string")
                        .profile("string")
                        .samplingRate(0)
                        .build())
                    .copyAudio(TransformOutputCustomPresetCodecCopyAudioArgs.builder()
                        .label("string")
                        .build())
                    .copyVideo(TransformOutputCustomPresetCodecCopyVideoArgs.builder()
                        .label("string")
                        .build())
                    .ddAudio(TransformOutputCustomPresetCodecDdAudioArgs.builder()
                        .bitrate(0)
                        .channels(0)
                        .label("string")
                        .samplingRate(0)
                        .build())
                    .h264Video(TransformOutputCustomPresetCodecH264VideoArgs.builder()
                        .complexity("string")
                        .keyFrameInterval("string")
                        .label("string")
                        // Per-layer encoding parameters for the H.264 output.
                        .layers(TransformOutputCustomPresetCodecH264VideoLayerArgs.builder()
                            .bitrate(0)
                            .frameRate("string")
                            .label("string")
                            .bufferWindow("string")
                            .crf(0)
                            .entropyMode("string")
                            .adaptiveBFrameEnabled(false)
                            .height("string")
                            .bFrames(0)
                            .level("string")
                            .maxBitrate(0)
                            .profile("string")
                            .referenceFrames(0)
                            .slices(0)
                            .width("string")
                            .build())
                        .rateControlMode("string")
                        .sceneChangeDetectionEnabled(false)
                        .stretchMode("string")
                        .syncMode("string")
                        .build())
                    .h265Video(TransformOutputCustomPresetCodecH265VideoArgs.builder()
                        .complexity("string")
                        .keyFrameInterval("string")
                        .label("string")
                        // Per-layer encoding parameters for the H.265 output.
                        .layers(TransformOutputCustomPresetCodecH265VideoLayerArgs.builder()
                            .bitrate(0)
                            .height("string")
                            .bFrames(0)
                            .bufferWindow("string")
                            .crf(0)
                            .frameRate("string")
                            .adaptiveBFrameEnabled(false)
                            .label("string")
                            .level("string")
                            .maxBitrate(0)
                            .profile("string")
                            .referenceFrames(0)
                            .slices(0)
                            .width("string")
                            .build())
                        .sceneChangeDetectionEnabled(false)
                        .stretchMode("string")
                        .syncMode("string")
                        .build())
                    .jpgImage(TransformOutputCustomPresetCodecJpgImageArgs.builder()
                        .start("string")
                        .keyFrameInterval("string")
                        .label("string")
                        .layers(TransformOutputCustomPresetCodecJpgImageLayerArgs.builder()
                            .height("string")
                            .label("string")
                            .quality(0)
                            .width("string")
                            .build())
                        .range("string")
                        .spriteColumn(0)
                        .step("string")
                        .stretchMode("string")
                        .syncMode("string")
                        .build())
                    .pngImage(TransformOutputCustomPresetCodecPngImageArgs.builder()
                        .start("string")
                        .keyFrameInterval("string")
                        .label("string")
                        .layers(TransformOutputCustomPresetCodecPngImageLayerArgs.builder()
                            .height("string")
                            .label("string")
                            .width("string")
                            .build())
                        .range("string")
                        .step("string")
                        .stretchMode("string")
                        .syncMode("string")
                        .build())
                    .build())
                // Container/format settings: JPG, MP4, PNG, transport stream.
                .formats(TransformOutputCustomPresetFormatArgs.builder()
                    .jpg(TransformOutputCustomPresetFormatJpgArgs.builder()
                        .filenamePattern("string")
                        .build())
                    .mp4(TransformOutputCustomPresetFormatMp4Args.builder()
                        .filenamePattern("string")
                        .outputFiles(TransformOutputCustomPresetFormatMp4OutputFileArgs.builder()
                            .labels("string")
                            .build())
                        .build())
                    .png(TransformOutputCustomPresetFormatPngArgs.builder()
                        .filenamePattern("string")
                        .build())
                    .transportStream(TransformOutputCustomPresetFormatTransportStreamArgs.builder()
                        .filenamePattern("string")
                        .outputFiles(TransformOutputCustomPresetFormatTransportStreamOutputFileArgs.builder()
                            .labels("string")
                            .build())
                        .build())
                    .build())
                .experimentalOptions(Map.of("string", "string"))
                // Optional filter: crop, deinterlace, fades, overlays, rotation.
                .filter(TransformOutputCustomPresetFilterArgs.builder()
                    .cropRectangle(TransformOutputCustomPresetFilterCropRectangleArgs.builder()
                        .height("string")
                        .left("string")
                        .top("string")
                        .width("string")
                        .build())
                    .deinterlace(TransformOutputCustomPresetFilterDeinterlaceArgs.builder()
                        .mode("string")
                        .parity("string")
                        .build())
                    .fadeIn(TransformOutputCustomPresetFilterFadeInArgs.builder()
                        .duration("string")
                        .fadeColor("string")
                        .start("string")
                        .build())
                    .fadeOut(TransformOutputCustomPresetFilterFadeOutArgs.builder()
                        .duration("string")
                        .fadeColor("string")
                        .start("string")
                        .build())
                    // Each overlay carries audio and/or video overlay settings.
                    .overlays(TransformOutputCustomPresetFilterOverlayArgs.builder()
                        .audio(TransformOutputCustomPresetFilterOverlayAudioArgs.builder()
                            .inputLabel("string")
                            .audioGainLevel(0)
                            .end("string")
                            .fadeInDuration("string")
                            .fadeOutDuration("string")
                            .start("string")
                            .build())
                        .video(TransformOutputCustomPresetFilterOverlayVideoArgs.builder()
                            .inputLabel("string")
                            .audioGainLevel(0)
                            .cropRectangle(TransformOutputCustomPresetFilterOverlayVideoCropRectangleArgs.builder()
                                .height("string")
                                .left("string")
                                .top("string")
                                .width("string")
                                .build())
                            .end("string")
                            .fadeInDuration("string")
                            .fadeOutDuration("string")
                            .opacity(0)
                            .position(TransformOutputCustomPresetFilterOverlayVideoPositionArgs.builder()
                                .height("string")
                                .left("string")
                                .top("string")
                                .width("string")
                                .build())
                            .start("string")
                            .build())
                        .build())
                    .rotation("string")
                    .build())
                .build())
            // Per-output behavior: error handling and scheduling priority.
            .onErrorAction("string")
            .relativePriority("string")
            .build())
        .build());
    
    # Example: construct an azure.media.Transform named "transformResource".
    # All values shown ("string", 0, False) are placeholders illustrating the
    # full input shape; replace them with real settings before use.
    transform_resource = azure.media.Transform("transformResource",
        media_services_account_name="string",
        resource_group_name="string",
        description="string",
        name="string",
        # One Transform output: a preset (audio_analyzer_preset, builtin_preset,
        # or custom_preset) plus error-handling and priority settings.
        outputs=[azure.media.TransformOutputArgs(
            # Preset variant: audio analysis.
            audio_analyzer_preset=azure.media.TransformOutputAudioAnalyzerPresetArgs(
                audio_analysis_mode="string",
                audio_language="string",
                experimental_options={
                    "string": "string",
                },
            ),
            # Preset variant: a built-in preset selected by name, optionally
            # tuned through preset_configuration.
            builtin_preset=azure.media.TransformOutputBuiltinPresetArgs(
                preset_name="string",
                preset_configuration=azure.media.TransformOutputBuiltinPresetPresetConfigurationArgs(
                    complexity="string",
                    interleave_output="string",
                    key_frame_interval_in_seconds=0,
                    max_bitrate_bps=0,
                    max_height=0,
                    max_layers=0,
                    min_bitrate_bps=0,
                    min_height=0,
                ),
            ),
            # Preset variant: a fully custom preset composed of codecs,
            # output formats, experimental options, and an optional filter.
            custom_preset=azure.media.TransformOutputCustomPresetArgs(
                # Codec settings: audio (AAC, DD, copy), video (H.264, H.265,
                # copy), and still-image codecs (JPG, PNG).
                codecs=[azure.media.TransformOutputCustomPresetCodecArgs(
                    aac_audio=azure.media.TransformOutputCustomPresetCodecAacAudioArgs(
                        bitrate=0,
                        channels=0,
                        label="string",
                        profile="string",
                        sampling_rate=0,
                    ),
                    copy_audio=azure.media.TransformOutputCustomPresetCodecCopyAudioArgs(
                        label="string",
                    ),
                    copy_video=azure.media.TransformOutputCustomPresetCodecCopyVideoArgs(
                        label="string",
                    ),
                    dd_audio=azure.media.TransformOutputCustomPresetCodecDdAudioArgs(
                        bitrate=0,
                        channels=0,
                        label="string",
                        sampling_rate=0,
                    ),
                    h264_video=azure.media.TransformOutputCustomPresetCodecH264VideoArgs(
                        complexity="string",
                        key_frame_interval="string",
                        label="string",
                        # Per-layer encoding parameters for the H.264 output.
                        layers=[azure.media.TransformOutputCustomPresetCodecH264VideoLayerArgs(
                            bitrate=0,
                            frame_rate="string",
                            label="string",
                            buffer_window="string",
                            crf=0,
                            entropy_mode="string",
                            adaptive_b_frame_enabled=False,
                            height="string",
                            b_frames=0,
                            level="string",
                            max_bitrate=0,
                            profile="string",
                            reference_frames=0,
                            slices=0,
                            width="string",
                        )],
                        rate_control_mode="string",
                        scene_change_detection_enabled=False,
                        stretch_mode="string",
                        sync_mode="string",
                    ),
                    h265_video=azure.media.TransformOutputCustomPresetCodecH265VideoArgs(
                        complexity="string",
                        key_frame_interval="string",
                        label="string",
                        # Per-layer encoding parameters for the H.265 output.
                        layers=[azure.media.TransformOutputCustomPresetCodecH265VideoLayerArgs(
                            bitrate=0,
                            height="string",
                            b_frames=0,
                            buffer_window="string",
                            crf=0,
                            frame_rate="string",
                            adaptive_b_frame_enabled=False,
                            label="string",
                            level="string",
                            max_bitrate=0,
                            profile="string",
                            reference_frames=0,
                            slices=0,
                            width="string",
                        )],
                        scene_change_detection_enabled=False,
                        stretch_mode="string",
                        sync_mode="string",
                    ),
                    jpg_image=azure.media.TransformOutputCustomPresetCodecJpgImageArgs(
                        start="string",
                        key_frame_interval="string",
                        label="string",
                        layers=[azure.media.TransformOutputCustomPresetCodecJpgImageLayerArgs(
                            height="string",
                            label="string",
                            quality=0,
                            width="string",
                        )],
                        range="string",
                        sprite_column=0,
                        step="string",
                        stretch_mode="string",
                        sync_mode="string",
                    ),
                    png_image=azure.media.TransformOutputCustomPresetCodecPngImageArgs(
                        start="string",
                        key_frame_interval="string",
                        label="string",
                        layers=[azure.media.TransformOutputCustomPresetCodecPngImageLayerArgs(
                            height="string",
                            label="string",
                            width="string",
                        )],
                        range="string",
                        step="string",
                        stretch_mode="string",
                        sync_mode="string",
                    ),
                )],
                # Container/format settings: JPG, MP4, PNG, transport stream.
                formats=[azure.media.TransformOutputCustomPresetFormatArgs(
                    jpg=azure.media.TransformOutputCustomPresetFormatJpgArgs(
                        filename_pattern="string",
                    ),
                    mp4=azure.media.TransformOutputCustomPresetFormatMp4Args(
                        filename_pattern="string",
                        output_files=[azure.media.TransformOutputCustomPresetFormatMp4OutputFileArgs(
                            labels=["string"],
                        )],
                    ),
                    png=azure.media.TransformOutputCustomPresetFormatPngArgs(
                        filename_pattern="string",
                    ),
                    transport_stream=azure.media.TransformOutputCustomPresetFormatTransportStreamArgs(
                        filename_pattern="string",
                        output_files=[azure.media.TransformOutputCustomPresetFormatTransportStreamOutputFileArgs(
                            labels=["string"],
                        )],
                    ),
                )],
                experimental_options={
                    "string": "string",
                },
                # Optional filter: crop, deinterlace, fades, overlays, rotation.
                filter=azure.media.TransformOutputCustomPresetFilterArgs(
                    crop_rectangle=azure.media.TransformOutputCustomPresetFilterCropRectangleArgs(
                        height="string",
                        left="string",
                        top="string",
                        width="string",
                    ),
                    deinterlace=azure.media.TransformOutputCustomPresetFilterDeinterlaceArgs(
                        mode="string",
                        parity="string",
                    ),
                    fade_in=azure.media.TransformOutputCustomPresetFilterFadeInArgs(
                        duration="string",
                        fade_color="string",
                        start="string",
                    ),
                    fade_out=azure.media.TransformOutputCustomPresetFilterFadeOutArgs(
                        duration="string",
                        fade_color="string",
                        start="string",
                    ),
                    # Each overlay carries audio and/or video overlay settings.
                    overlays=[azure.media.TransformOutputCustomPresetFilterOverlayArgs(
                        audio=azure.media.TransformOutputCustomPresetFilterOverlayAudioArgs(
                            input_label="string",
                            audio_gain_level=0,
                            end="string",
                            fade_in_duration="string",
                            fade_out_duration="string",
                            start="string",
                        ),
                        video=azure.media.TransformOutputCustomPresetFilterOverlayVideoArgs(
                            input_label="string",
                            audio_gain_level=0,
                            crop_rectangle=azure.media.TransformOutputCustomPresetFilterOverlayVideoCropRectangleArgs(
                                height="string",
                                left="string",
                                top="string",
                                width="string",
                            ),
                            end="string",
                            fade_in_duration="string",
                            fade_out_duration="string",
                            opacity=0,
                            position=azure.media.TransformOutputCustomPresetFilterOverlayVideoPositionArgs(
                                height="string",
                                left="string",
                                top="string",
                                width="string",
                            ),
                            start="string",
                        ),
                    )],
                    rotation="string",
                ),
            ),
            # Per-output behavior: error handling and scheduling priority.
            on_error_action="string",
            relative_priority="string",
        )])
    
    // Example: construct an azure.media.Transform named "transformResource".
    // All values shown ("string", 0, false) are placeholders illustrating the
    // full input shape; replace them with real settings before use.
    const transformResource = new azure.media.Transform("transformResource", {
        mediaServicesAccountName: "string",
        resourceGroupName: "string",
        description: "string",
        name: "string",
        // One Transform output: a preset (audioAnalyzerPreset, builtinPreset,
        // or customPreset) plus error-handling and priority settings.
        outputs: [{
            // Preset variant: audio analysis.
            audioAnalyzerPreset: {
                audioAnalysisMode: "string",
                audioLanguage: "string",
                experimentalOptions: {
                    string: "string",
                },
            },
            // Preset variant: a built-in preset selected by name, optionally
            // tuned through presetConfiguration.
            builtinPreset: {
                presetName: "string",
                presetConfiguration: {
                    complexity: "string",
                    interleaveOutput: "string",
                    keyFrameIntervalInSeconds: 0,
                    maxBitrateBps: 0,
                    maxHeight: 0,
                    maxLayers: 0,
                    minBitrateBps: 0,
                    minHeight: 0,
                },
            },
            // Preset variant: a fully custom preset composed of codecs,
            // output formats, experimental options, and an optional filter.
            customPreset: {
                // Codec settings: audio (AAC, DD, copy), video (H.264, H.265,
                // copy), and still-image codecs (JPG, PNG).
                codecs: [{
                    aacAudio: {
                        bitrate: 0,
                        channels: 0,
                        label: "string",
                        profile: "string",
                        samplingRate: 0,
                    },
                    copyAudio: {
                        label: "string",
                    },
                    copyVideo: {
                        label: "string",
                    },
                    ddAudio: {
                        bitrate: 0,
                        channels: 0,
                        label: "string",
                        samplingRate: 0,
                    },
                    h264Video: {
                        complexity: "string",
                        keyFrameInterval: "string",
                        label: "string",
                        // Per-layer encoding parameters for the H.264 output.
                        layers: [{
                            bitrate: 0,
                            frameRate: "string",
                            label: "string",
                            bufferWindow: "string",
                            crf: 0,
                            entropyMode: "string",
                            adaptiveBFrameEnabled: false,
                            height: "string",
                            bFrames: 0,
                            level: "string",
                            maxBitrate: 0,
                            profile: "string",
                            referenceFrames: 0,
                            slices: 0,
                            width: "string",
                        }],
                        rateControlMode: "string",
                        sceneChangeDetectionEnabled: false,
                        stretchMode: "string",
                        syncMode: "string",
                    },
                    h265Video: {
                        complexity: "string",
                        keyFrameInterval: "string",
                        label: "string",
                        // Per-layer encoding parameters for the H.265 output.
                        layers: [{
                            bitrate: 0,
                            height: "string",
                            bFrames: 0,
                            bufferWindow: "string",
                            crf: 0,
                            frameRate: "string",
                            adaptiveBFrameEnabled: false,
                            label: "string",
                            level: "string",
                            maxBitrate: 0,
                            profile: "string",
                            referenceFrames: 0,
                            slices: 0,
                            width: "string",
                        }],
                        sceneChangeDetectionEnabled: false,
                        stretchMode: "string",
                        syncMode: "string",
                    },
                    jpgImage: {
                        start: "string",
                        keyFrameInterval: "string",
                        label: "string",
                        layers: [{
                            height: "string",
                            label: "string",
                            quality: 0,
                            width: "string",
                        }],
                        range: "string",
                        spriteColumn: 0,
                        step: "string",
                        stretchMode: "string",
                        syncMode: "string",
                    },
                    pngImage: {
                        start: "string",
                        keyFrameInterval: "string",
                        label: "string",
                        layers: [{
                            height: "string",
                            label: "string",
                            width: "string",
                        }],
                        range: "string",
                        step: "string",
                        stretchMode: "string",
                        syncMode: "string",
                    },
                }],
                // Container/format settings: JPG, MP4, PNG, transport stream.
                formats: [{
                    jpg: {
                        filenamePattern: "string",
                    },
                    mp4: {
                        filenamePattern: "string",
                        outputFiles: [{
                            labels: ["string"],
                        }],
                    },
                    png: {
                        filenamePattern: "string",
                    },
                    transportStream: {
                        filenamePattern: "string",
                        outputFiles: [{
                            labels: ["string"],
                        }],
                    },
                }],
                experimentalOptions: {
                    string: "string",
                },
                // Optional filter: crop, deinterlace, fades, overlays, rotation.
                filter: {
                    cropRectangle: {
                        height: "string",
                        left: "string",
                        top: "string",
                        width: "string",
                    },
                    deinterlace: {
                        mode: "string",
                        parity: "string",
                    },
                    fadeIn: {
                        duration: "string",
                        fadeColor: "string",
                        start: "string",
                    },
                    fadeOut: {
                        duration: "string",
                        fadeColor: "string",
                        start: "string",
                    },
                    // Each overlay carries audio and/or video overlay settings.
                    overlays: [{
                        audio: {
                            inputLabel: "string",
                            audioGainLevel: 0,
                            end: "string",
                            fadeInDuration: "string",
                            fadeOutDuration: "string",
                            start: "string",
                        },
                        video: {
                            inputLabel: "string",
                            audioGainLevel: 0,
                            cropRectangle: {
                                height: "string",
                                left: "string",
                                top: "string",
                                width: "string",
                            },
                            end: "string",
                            fadeInDuration: "string",
                            fadeOutDuration: "string",
                            opacity: 0,
                            position: {
                                height: "string",
                                left: "string",
                                top: "string",
                                width: "string",
                            },
                            start: "string",
                        },
                    }],
                    rotation: "string",
                },
            },
            // Per-output behavior: error handling and scheduling priority.
            onErrorAction: "string",
            relativePriority: "string",
        }],
    });
    
    type: azure:media:Transform
    properties:
        description: string
        mediaServicesAccountName: string
        name: string
        outputs:
            - audioAnalyzerPreset:
                audioAnalysisMode: string
                audioLanguage: string
                experimentalOptions:
                    string: string
              builtinPreset:
                presetConfiguration:
                    complexity: string
                    interleaveOutput: string
                    keyFrameIntervalInSeconds: 0
                    maxBitrateBps: 0
                    maxHeight: 0
                    maxLayers: 0
                    minBitrateBps: 0
                    minHeight: 0
                presetName: string
              customPreset:
                codecs:
                    - aacAudio:
                        bitrate: 0
                        channels: 0
                        label: string
                        profile: string
                        samplingRate: 0
                      copyAudio:
                        label: string
                      copyVideo:
                        label: string
                      ddAudio:
                        bitrate: 0
                        channels: 0
                        label: string
                        samplingRate: 0
                      h264Video:
                        complexity: string
                        keyFrameInterval: string
                        label: string
                        layers:
                            - adaptiveBFrameEnabled: false
                              bFrames: 0
                              bitrate: 0
                              bufferWindow: string
                              crf: 0
                              entropyMode: string
                              frameRate: string
                              height: string
                              label: string
                              level: string
                              maxBitrate: 0
                              profile: string
                              referenceFrames: 0
                              slices: 0
                              width: string
                        rateControlMode: string
                        sceneChangeDetectionEnabled: false
                        stretchMode: string
                        syncMode: string
                      h265Video:
                        complexity: string
                        keyFrameInterval: string
                        label: string
                        layers:
                            - adaptiveBFrameEnabled: false
                              bFrames: 0
                              bitrate: 0
                              bufferWindow: string
                              crf: 0
                              frameRate: string
                              height: string
                              label: string
                              level: string
                              maxBitrate: 0
                              profile: string
                              referenceFrames: 0
                              slices: 0
                              width: string
                        sceneChangeDetectionEnabled: false
                        stretchMode: string
                        syncMode: string
                      jpgImage:
                        keyFrameInterval: string
                        label: string
                        layers:
                            - height: string
                              label: string
                              quality: 0
                              width: string
                        range: string
                        spriteColumn: 0
                        start: string
                        step: string
                        stretchMode: string
                        syncMode: string
                      pngImage:
                        keyFrameInterval: string
                        label: string
                        layers:
                            - height: string
                              label: string
                              width: string
                        range: string
                        start: string
                        step: string
                        stretchMode: string
                        syncMode: string
                experimentalOptions:
                    string: string
                filter:
                    cropRectangle:
                        height: string
                        left: string
                        top: string
                        width: string
                    deinterlace:
                        mode: string
                        parity: string
                    fadeIn:
                        duration: string
                        fadeColor: string
                        start: string
                    fadeOut:
                        duration: string
                        fadeColor: string
                        start: string
                    overlays:
                        - audio:
                            audioGainLevel: 0
                            end: string
                            fadeInDuration: string
                            fadeOutDuration: string
                            inputLabel: string
                            start: string
                          video:
                            audioGainLevel: 0
                            cropRectangle:
                                height: string
                                left: string
                                top: string
                                width: string
                            end: string
                            fadeInDuration: string
                            fadeOutDuration: string
                            inputLabel: string
                            opacity: 0
                            position:
                                height: string
                                left: string
                                top: string
                                width: string
                            start: string
                    rotation: string
                formats:
                    - jpg:
                        filenamePattern: string
                      mp4:
                        filenamePattern: string
                        outputFiles:
                            - labels:
                                - string
                      png:
                        filenamePattern: string
                      transportStream:
                        filenamePattern: string
                        outputFiles:
                            - labels:
                                - string
              onErrorAction: string
              relativePriority: string
        resourceGroupName: string
    

    Transform Resource Properties

    To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

    Inputs

    The Transform resource accepts the following input properties:

    MediaServicesAccountName string
    The Media Services account name. Changing this forces a new Transform to be created.
    ResourceGroupName string
    The name of the Resource Group where the Transform should exist. Changing this forces a new Transform to be created.
    Description string
    An optional verbose description of the Transform.
    Name string
    The name which should be used for this Transform. Changing this forces a new Transform to be created.
    Outputs List<TransformOutput>
    One or more output blocks as defined below. At least one output must be defined.
    MediaServicesAccountName string
    The Media Services account name. Changing this forces a new Transform to be created.
    ResourceGroupName string
    The name of the Resource Group where the Transform should exist. Changing this forces a new Transform to be created.
    Description string
    An optional verbose description of the Transform.
    Name string
    The name which should be used for this Transform. Changing this forces a new Transform to be created.
    Outputs []TransformOutputTypeArgs
    One or more output blocks as defined below. At least one output must be defined.
    mediaServicesAccountName String
    The Media Services account name. Changing this forces a new Transform to be created.
    resourceGroupName String
    The name of the Resource Group where the Transform should exist. Changing this forces a new Transform to be created.
    description String
    An optional verbose description of the Transform.
    name String
    The name which should be used for this Transform. Changing this forces a new Transform to be created.
    outputs List<TransformOutput>
    One or more output blocks as defined below. At least one output must be defined.
    mediaServicesAccountName string
    The Media Services account name. Changing this forces a new Transform to be created.
    resourceGroupName string
    The name of the Resource Group where the Transform should exist. Changing this forces a new Transform to be created.
    description string
    An optional verbose description of the Transform.
    name string
    The name which should be used for this Transform. Changing this forces a new Transform to be created.
    outputs TransformOutput[]
    One or more output blocks as defined below. At least one output must be defined.
    media_services_account_name str
    The Media Services account name. Changing this forces a new Transform to be created.
    resource_group_name str
    The name of the Resource Group where the Transform should exist. Changing this forces a new Transform to be created.
    description str
    An optional verbose description of the Transform.
    name str
    The name which should be used for this Transform. Changing this forces a new Transform to be created.
    outputs Sequence[TransformOutputArgs]
    One or more output blocks as defined below. At least one output must be defined.
    mediaServicesAccountName String
    The Media Services account name. Changing this forces a new Transform to be created.
    resourceGroupName String
    The name of the Resource Group where the Transform should exist. Changing this forces a new Transform to be created.
    description String
    An optional verbose description of the Transform.
    name String
    The name which should be used for this Transform. Changing this forces a new Transform to be created.
    outputs List<Property Map>
    One or more output blocks as defined below. At least one output must be defined.

    Outputs

    All input properties are implicitly available as output properties. Additionally, the Transform resource produces the following output properties:

    Id string
    The provider-assigned unique ID for this managed resource.
    Id string
    The provider-assigned unique ID for this managed resource.
    id String
    The provider-assigned unique ID for this managed resource.
    id string
    The provider-assigned unique ID for this managed resource.
    id str
    The provider-assigned unique ID for this managed resource.
    id String
    The provider-assigned unique ID for this managed resource.

    Look up Existing Transform Resource

    Get an existing Transform resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

    public static get(name: string, id: Input<ID>, state?: TransformState, opts?: CustomResourceOptions): Transform
    @staticmethod
    def get(resource_name: str,
            id: str,
            opts: Optional[ResourceOptions] = None,
            description: Optional[str] = None,
            media_services_account_name: Optional[str] = None,
            name: Optional[str] = None,
            outputs: Optional[Sequence[TransformOutputArgs]] = None,
            resource_group_name: Optional[str] = None) -> Transform
    func GetTransform(ctx *Context, name string, id IDInput, state *TransformState, opts ...ResourceOption) (*Transform, error)
    public static Transform Get(string name, Input<string> id, TransformState? state, CustomResourceOptions? opts = null)
    public static Transform get(String name, Output<String> id, TransformState state, CustomResourceOptions options)
    Resource lookup is not supported in YAML
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    resource_name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    The following state arguments are supported:
    Description string
    An optional verbose description of the Transform.
    MediaServicesAccountName string
    The Media Services account name. Changing this forces a new Transform to be created.
    Name string
    The name which should be used for this Transform. Changing this forces a new Transform to be created.
    Outputs List<TransformOutput>
    One or more output blocks as defined below. At least one output must be defined.
    ResourceGroupName string
    The name of the Resource Group where the Transform should exist. Changing this forces a new Transform to be created.
    Description string
    An optional verbose description of the Transform.
    MediaServicesAccountName string
    The Media Services account name. Changing this forces a new Transform to be created.
    Name string
    The name which should be used for this Transform. Changing this forces a new Transform to be created.
    Outputs []TransformOutputTypeArgs
    One or more output blocks as defined below. At least one output must be defined.
    ResourceGroupName string
    The name of the Resource Group where the Transform should exist. Changing this forces a new Transform to be created.
    description String
    An optional verbose description of the Transform.
    mediaServicesAccountName String
    The Media Services account name. Changing this forces a new Transform to be created.
    name String
    The name which should be used for this Transform. Changing this forces a new Transform to be created.
    outputs List<TransformOutput>
    One or more output blocks as defined below. At least one output must be defined.
    resourceGroupName String
    The name of the Resource Group where the Transform should exist. Changing this forces a new Transform to be created.
    description string
    An optional verbose description of the Transform.
    mediaServicesAccountName string
    The Media Services account name. Changing this forces a new Transform to be created.
    name string
    The name which should be used for this Transform. Changing this forces a new Transform to be created.
    outputs TransformOutput[]
    One or more output blocks as defined below. At least one output must be defined.
    resourceGroupName string
    The name of the Resource Group where the Transform should exist. Changing this forces a new Transform to be created.
    description str
    An optional verbose description of the Transform.
    media_services_account_name str
    The Media Services account name. Changing this forces a new Transform to be created.
    name str
    The name which should be used for this Transform. Changing this forces a new Transform to be created.
    outputs Sequence[TransformOutputArgs]
    One or more output blocks as defined below. At least one output must be defined.
    resource_group_name str
    The name of the Resource Group where the Transform should exist. Changing this forces a new Transform to be created.
    description String
    An optional verbose description of the Transform.
    mediaServicesAccountName String
    The Media Services account name. Changing this forces a new Transform to be created.
    name String
    The name which should be used for this Transform. Changing this forces a new Transform to be created.
    outputs List<Property Map>
    One or more output blocks as defined below. At least one output must be defined.
    resourceGroupName String
    The name of the Resource Group where the Transform should exist. Changing this forces a new Transform to be created.

    Supporting Types

    TransformOutput, TransformOutputArgs

    AudioAnalyzerPreset TransformOutputAudioAnalyzerPreset
    An audio_analyzer_preset block as defined above.
    BuiltinPreset TransformOutputBuiltinPreset
    A builtin_preset block as defined above.
    CustomPreset TransformOutputCustomPreset
    A custom_preset block as defined above.
    FaceDetectorPreset TransformOutputFaceDetectorPreset
    A face_detector_preset block as defined above.

    Deprecated: face_detector_preset will be removed in version 4.0 of the AzureRM Provider as it has been retired.

    OnErrorAction string
    A Transform can define more than one output. This property defines what the service should do when one output fails - either continue to produce other outputs, or stop the other outputs. The overall Job state will not reflect failures of outputs that are specified with ContinueJob. Possible values are StopProcessingJob or ContinueJob. Defaults to StopProcessingJob.
    RelativePriority string
    Sets the relative priority of the TransformOutputs within a Transform. This sets the priority that the service uses for processing Transform Outputs. Possible values are High, Normal or Low. Defaults to Normal.
    VideoAnalyzerPreset TransformOutputVideoAnalyzerPreset

    A video_analyzer_preset block as defined below.

    NOTE: Each output can only have one type of preset: builtin_preset, audio_analyzer_preset, custom_preset, face_detector_preset or video_analyzer_preset. If you need to apply different presets you must create one output for each one.

    Deprecated: video_analyzer_preset will be removed in version 4.0 of the AzureRM Provider as it has been retired.

    AudioAnalyzerPreset TransformOutputAudioAnalyzerPreset
    An audio_analyzer_preset block as defined above.
    BuiltinPreset TransformOutputBuiltinPreset
    A builtin_preset block as defined above.
    CustomPreset TransformOutputCustomPreset
    A custom_preset block as defined above.
    FaceDetectorPreset TransformOutputFaceDetectorPreset
    A face_detector_preset block as defined above.

    Deprecated: face_detector_preset will be removed in version 4.0 of the AzureRM Provider as it has been retired.

    OnErrorAction string
    A Transform can define more than one output. This property defines what the service should do when one output fails - either continue to produce other outputs, or stop the other outputs. The overall Job state will not reflect failures of outputs that are specified with ContinueJob. Possible values are StopProcessingJob or ContinueJob. Defaults to StopProcessingJob.
    RelativePriority string
    Sets the relative priority of the TransformOutputs within a Transform. This sets the priority that the service uses for processing Transform Outputs. Possible values are High, Normal or Low. Defaults to Normal.
    VideoAnalyzerPreset TransformOutputVideoAnalyzerPreset

    A video_analyzer_preset block as defined below.

    NOTE: Each output can only have one type of preset: builtin_preset, audio_analyzer_preset, custom_preset, face_detector_preset or video_analyzer_preset. If you need to apply different presets you must create one output for each one.

    Deprecated: video_analyzer_preset will be removed in version 4.0 of the AzureRM Provider as it has been retired.

    audioAnalyzerPreset TransformOutputAudioAnalyzerPreset
    An audio_analyzer_preset block as defined above.
    builtinPreset TransformOutputBuiltinPreset
    A builtin_preset block as defined above.
    customPreset TransformOutputCustomPreset
    A custom_preset block as defined above.
    faceDetectorPreset TransformOutputFaceDetectorPreset
    A face_detector_preset block as defined above.

    Deprecated: face_detector_preset will be removed in version 4.0 of the AzureRM Provider as it has been retired.

    onErrorAction String
    A Transform can define more than one output. This property defines what the service should do when one output fails - either continue to produce other outputs, or stop the other outputs. The overall Job state will not reflect failures of outputs that are specified with ContinueJob. Possible values are StopProcessingJob or ContinueJob. Defaults to StopProcessingJob.
    relativePriority String
    Sets the relative priority of the TransformOutputs within a Transform. This sets the priority that the service uses for processing Transform Outputs. Possible values are High, Normal or Low. Defaults to Normal.
    videoAnalyzerPreset TransformOutputVideoAnalyzerPreset

    A video_analyzer_preset block as defined below.

    NOTE: Each output can only have one type of preset: builtin_preset, audio_analyzer_preset, custom_preset, face_detector_preset or video_analyzer_preset. If you need to apply different presets you must create one output for each one.

    Deprecated: video_analyzer_preset will be removed in version 4.0 of the AzureRM Provider as it has been retired.

    audioAnalyzerPreset TransformOutputAudioAnalyzerPreset
    An audio_analyzer_preset block as defined above.
    builtinPreset TransformOutputBuiltinPreset
    A builtin_preset block as defined above.
    customPreset TransformOutputCustomPreset
    A custom_preset block as defined above.
    faceDetectorPreset TransformOutputFaceDetectorPreset
    A face_detector_preset block as defined above.

    Deprecated: face_detector_preset will be removed in version 4.0 of the AzureRM Provider as it has been retired.

    onErrorAction string
    A Transform can define more than one output. This property defines what the service should do when one output fails - either continue to produce other outputs, or stop the other outputs. The overall Job state will not reflect failures of outputs that are specified with ContinueJob. Possible values are StopProcessingJob or ContinueJob. Defaults to StopProcessingJob.
    relativePriority string
    Sets the relative priority of the TransformOutputs within a Transform. This sets the priority that the service uses for processing Transform Outputs. Possible values are High, Normal or Low. Defaults to Normal.
    videoAnalyzerPreset TransformOutputVideoAnalyzerPreset

    A video_analyzer_preset block as defined below.

    NOTE: Each output can only have one type of preset: builtin_preset, audio_analyzer_preset, custom_preset, face_detector_preset or video_analyzer_preset. If you need to apply different presets you must create one output for each one.

    Deprecated: video_analyzer_preset will be removed in version 4.0 of the AzureRM Provider as it has been retired.

    audio_analyzer_preset TransformOutputAudioAnalyzerPreset
    An audio_analyzer_preset block as defined above.
    builtin_preset TransformOutputBuiltinPreset
    A builtin_preset block as defined above.
    custom_preset TransformOutputCustomPreset
    A custom_preset block as defined above.
    face_detector_preset TransformOutputFaceDetectorPreset
    A face_detector_preset block as defined above.

    Deprecated: face_detector_preset will be removed in version 4.0 of the AzureRM Provider as it has been retired.

    on_error_action str
    A Transform can define more than one output. This property defines what the service should do when one output fails - either continue to produce other outputs, or stop the other outputs. The overall Job state will not reflect failures of outputs that are specified with ContinueJob. Possible values are StopProcessingJob or ContinueJob. Defaults to StopProcessingJob.
    relative_priority str
    Sets the relative priority of the TransformOutputs within a Transform. This sets the priority that the service uses for processing Transform Outputs. Possible values are High, Normal or Low. Defaults to Normal.
    video_analyzer_preset TransformOutputVideoAnalyzerPreset

    A video_analyzer_preset block as defined below.

    NOTE: Each output can only have one type of preset: builtin_preset, audio_analyzer_preset, custom_preset, face_detector_preset or video_analyzer_preset. If you need to apply different presets you must create one output for each one.

    Deprecated: video_analyzer_preset will be removed in version 4.0 of the AzureRM Provider as it has been retired.

    audioAnalyzerPreset Property Map
    An audio_analyzer_preset block as defined above.
    builtinPreset Property Map
    A builtin_preset block as defined above.
    customPreset Property Map
    A custom_preset block as defined above.
    faceDetectorPreset Property Map
    A face_detector_preset block as defined above.

    Deprecated: face_detector_preset will be removed in version 4.0 of the AzureRM Provider as it has been retired.

    onErrorAction String
    A Transform can define more than one output. This property defines what the service should do when one output fails - either continue to produce other outputs, or stop the other outputs. The overall Job state will not reflect failures of outputs that are specified with ContinueJob. Possible values are StopProcessingJob or ContinueJob. Defaults to StopProcessingJob.
    relativePriority String
    Sets the relative priority of the TransformOutputs within a Transform. This sets the priority that the service uses for processing Transform Outputs. Possible values are High, Normal or Low. Defaults to Normal.
    videoAnalyzerPreset Property Map

    A video_analyzer_preset block as defined below.

    NOTE: Each output can only have one type of preset: builtin_preset, audio_analyzer_preset, custom_preset, face_detector_preset or video_analyzer_preset. If you need to apply different presets you must create one output for each one.

    Deprecated: video_analyzer_preset will be removed in version 4.0 of the AzureRM Provider as it has been retired.

    TransformOutputAudioAnalyzerPreset, TransformOutputAudioAnalyzerPresetArgs

    AudioAnalysisMode string
    Possible values are Basic or Standard. Determines the set of audio analysis operations to be performed. Defaults to Standard.
    AudioLanguage string
    The language for the audio payload in the input using the BCP-47 format of 'language tag-region' (e.g: 'en-US'). If you know the language of your content, it is recommended that you specify it. The language must be specified explicitly for AudioAnalysisMode:Basic, since automatic language detection is not included in basic mode. If the language isn't specified, automatic language detection will choose the first language detected and process with the selected language for the duration of the file. It does not currently support dynamically switching between languages after the first language is detected. The automatic detection works best with audio recordings with clearly discernible speech. If automatic detection fails to find the language, transcription would fall back to en-US. The list of supported languages is available here: https://go.microsoft.com/fwlink/?linkid=2109463.
    ExperimentalOptions Dictionary<string, string>
    Dictionary containing key value pairs for parameters not exposed in the preset itself.
    AudioAnalysisMode string
    Possible values are Basic or Standard. Determines the set of audio analysis operations to be performed. Defaults to Standard.
    AudioLanguage string
    The language for the audio payload in the input using the BCP-47 format of 'language tag-region' (e.g: 'en-US'). If you know the language of your content, it is recommended that you specify it. The language must be specified explicitly for AudioAnalysisMode:Basic, since automatic language detection is not included in basic mode. If the language isn't specified, automatic language detection will choose the first language detected and process with the selected language for the duration of the file. It does not currently support dynamically switching between languages after the first language is detected. The automatic detection works best with audio recordings with clearly discernible speech. If automatic detection fails to find the language, transcription would fall back to en-US. The list of supported languages is available here: https://go.microsoft.com/fwlink/?linkid=2109463.
    ExperimentalOptions map[string]string
    Dictionary containing key value pairs for parameters not exposed in the preset itself.
    audioAnalysisMode String
    Possible values are Basic or Standard. Determines the set of audio analysis operations to be performed. Defaults to Standard.
    audioLanguage String
    The language for the audio payload in the input using the BCP-47 format of 'language tag-region' (e.g: 'en-US'). If you know the language of your content, it is recommended that you specify it. The language must be specified explicitly for AudioAnalysisMode:Basic, since automatic language detection is not included in basic mode. If the language isn't specified, automatic language detection will choose the first language detected and process with the selected language for the duration of the file. It does not currently support dynamically switching between languages after the first language is detected. The automatic detection works best with audio recordings with clearly discernible speech. If automatic detection fails to find the language, transcription would fall back to en-US. The list of supported languages is available here: https://go.microsoft.com/fwlink/?linkid=2109463.
    experimentalOptions Map<String,String>
    Dictionary containing key value pairs for parameters not exposed in the preset itself.
    audioAnalysisMode string
    Possible values are Basic or Standard. Determines the set of audio analysis operations to be performed. Defaults to Standard.
    audioLanguage string
    The language for the audio payload in the input using the BCP-47 format of 'language tag-region' (e.g: 'en-US'). If you know the language of your content, it is recommended that you specify it. The language must be specified explicitly for AudioAnalysisMode:Basic, since automatic language detection is not included in basic mode. If the language isn't specified, automatic language detection will choose the first language detected and process with the selected language for the duration of the file. It does not currently support dynamically switching between languages after the first language is detected. The automatic detection works best with audio recordings with clearly discernible speech. If automatic detection fails to find the language, transcription would fall back to en-US. The list of supported languages is available here: https://go.microsoft.com/fwlink/?linkid=2109463.
    experimentalOptions {[key: string]: string}
    Dictionary containing key value pairs for parameters not exposed in the preset itself.
    audio_analysis_mode str
    Possible values are Basic or Standard. Determines the set of audio analysis operations to be performed. Defaults to Standard.
    audio_language str
    The language for the audio payload in the input using the BCP-47 format of 'language tag-region' (e.g: 'en-US'). If you know the language of your content, it is recommended that you specify it. The language must be specified explicitly for AudioAnalysisMode:Basic, since automatic language detection is not included in basic mode. If the language isn't specified, automatic language detection will choose the first language detected and process with the selected language for the duration of the file. It does not currently support dynamically switching between languages after the first language is detected. The automatic detection works best with audio recordings with clearly discernible speech. If automatic detection fails to find the language, transcription would fall back to en-US. The list of supported languages is available here: https://go.microsoft.com/fwlink/?linkid=2109463.
    experimental_options Mapping[str, str]
    Dictionary containing key value pairs for parameters not exposed in the preset itself.
    audioAnalysisMode String
    Possible values are Basic or Standard. Determines the set of audio analysis operations to be performed. Defaults to Standard.
    audioLanguage String
    The language for the audio payload in the input using the BCP-47 format of 'language tag-region' (e.g: 'en-US'). If you know the language of your content, it is recommended that you specify it. The language must be specified explicitly for AudioAnalysisMode:Basic, since automatic language detection is not included in basic mode. If the language isn't specified, automatic language detection will choose the first language detected and process with the selected language for the duration of the file. It does not currently support dynamically switching between languages after the first language is detected. The automatic detection works best with audio recordings with clearly discernible speech. If automatic detection fails to find the language, transcription would fall back to en-US. The list of supported languages is available here: https://go.microsoft.com/fwlink/?linkid=2109463.
    experimentalOptions Map<String>
    Dictionary containing key value pairs for parameters not exposed in the preset itself.

    TransformOutputBuiltinPreset, TransformOutputBuiltinPresetArgs

    PresetName string
    The built-in preset to be used for encoding videos. Possible values are AACGoodQualityAudio, AdaptiveStreaming, ContentAwareEncoding, ContentAwareEncodingExperimental, CopyAllBitrateNonInterleaved, DDGoodQualityAudio, H265AdaptiveStreaming, H265ContentAwareEncoding, H265SingleBitrate4K, H265SingleBitrate1080p, H265SingleBitrate720p, H264MultipleBitrate1080p, H264MultipleBitrateSD, H264MultipleBitrate720p, H264SingleBitrate1080p, H264SingleBitrateSD and H264SingleBitrate720p.
    PresetConfiguration TransformOutputBuiltinPresetPresetConfiguration
    A preset_configuration block as defined below.
    PresetName string
    The built-in preset to be used for encoding videos. Possible values are AACGoodQualityAudio, AdaptiveStreaming, ContentAwareEncoding, ContentAwareEncodingExperimental, CopyAllBitrateNonInterleaved, DDGoodQualityAudio, H265AdaptiveStreaming, H265ContentAwareEncoding, H265SingleBitrate4K, H265SingleBitrate1080p, H265SingleBitrate720p, H264MultipleBitrate1080p, H264MultipleBitrateSD, H264MultipleBitrate720p, H264SingleBitrate1080p, H264SingleBitrateSD and H264SingleBitrate720p.
    PresetConfiguration TransformOutputBuiltinPresetPresetConfiguration
    A preset_configuration block as defined below.
    presetName String
    The built-in preset to be used for encoding videos. Possible values are AACGoodQualityAudio, AdaptiveStreaming, ContentAwareEncoding, ContentAwareEncodingExperimental, CopyAllBitrateNonInterleaved, DDGoodQualityAudio, H265AdaptiveStreaming, H265ContentAwareEncoding, H265SingleBitrate4K, H265SingleBitrate1080p, H265SingleBitrate720p, H264MultipleBitrate1080p, H264MultipleBitrateSD, H264MultipleBitrate720p, H264SingleBitrate1080p, H264SingleBitrateSD and H264SingleBitrate720p.
    presetConfiguration TransformOutputBuiltinPresetPresetConfiguration
    A preset_configuration block as defined below.
    presetName string
    The built-in preset to be used for encoding videos. Possible values are AACGoodQualityAudio, AdaptiveStreaming, ContentAwareEncoding, ContentAwareEncodingExperimental, CopyAllBitrateNonInterleaved, DDGoodQualityAudio, H265AdaptiveStreaming, H265ContentAwareEncoding, H265SingleBitrate4K, H265SingleBitrate1080p, H265SingleBitrate720p, H264MultipleBitrate1080p, H264MultipleBitrateSD, H264MultipleBitrate720p, H264SingleBitrate1080p, H264SingleBitrateSD and H264SingleBitrate720p.
    presetConfiguration TransformOutputBuiltinPresetPresetConfiguration
    A preset_configuration block as defined below.
    preset_name str
    The built-in preset to be used for encoding videos. Possible values are AACGoodQualityAudio, AdaptiveStreaming, ContentAwareEncoding, ContentAwareEncodingExperimental, CopyAllBitrateNonInterleaved, DDGoodQualityAudio, H265AdaptiveStreaming, H265ContentAwareEncoding, H265SingleBitrate4K, H265SingleBitrate1080p, H265SingleBitrate720p, H264MultipleBitrate1080p, H264MultipleBitrateSD, H264MultipleBitrate720p, H264SingleBitrate1080p, H264SingleBitrateSD and H264SingleBitrate720p.
    preset_configuration TransformOutputBuiltinPresetPresetConfiguration
    A preset_configuration block as defined below.
    presetName String
    The built-in preset to be used for encoding videos. Possible values are AACGoodQualityAudio, AdaptiveStreaming, ContentAwareEncoding, ContentAwareEncodingExperimental, CopyAllBitrateNonInterleaved, DDGoodQualityAudio, H265AdaptiveStreaming, H265ContentAwareEncoding, H265SingleBitrate4K, H265SingleBitrate1080p, H265SingleBitrate720p, H264MultipleBitrate1080p, H264MultipleBitrateSD, H264MultipleBitrate720p, H264SingleBitrate1080p, H264SingleBitrateSD and H264SingleBitrate720p.
    presetConfiguration Property Map
    A preset_configuration block as defined below.

    TransformOutputBuiltinPresetPresetConfiguration, TransformOutputBuiltinPresetPresetConfigurationArgs

    Complexity string
    The complexity of the encoding. Possible values are Balanced, Speed or Quality.
    InterleaveOutput string
    Specifies the interleave mode of the output to control how audio is stored in the container format. Possible values are InterleavedOutput and NonInterleavedOutput.
    KeyFrameIntervalInSeconds double
    The key frame interval in seconds. Possible value is a positive float. For example, set as 2.0 to reduce the playback buffering for some players.
    MaxBitrateBps int
    The maximum bitrate in bits per second (threshold for the top video layer). For example, set as 6000000 to avoid producing very high bitrate outputs for contents with high complexity.
    MaxHeight int
    The maximum height of output video layers. For example, set as 720 to produce output layers up to 720P even if the input is 4K.
    MaxLayers int
    The maximum number of output video layers. For example, set as 4 to make sure at most 4 output layers are produced to control the overall cost of the encoding job.
    MinBitrateBps int
    The minimum bitrate in bits per second (threshold for the bottom video layer). For example, set as 200000 to have a bottom layer that covers users with low network bandwidth.
    MinHeight int
    The minimum height of output video layers. For example, set as 360 to avoid output layers of smaller resolutions like 180P.
    Complexity string
    The complexity of the encoding. Possible values are Balanced, Speed or Quality.
    InterleaveOutput string
    Specifies the interleave mode of the output to control how audio is stored in the container format. Possible values are InterleavedOutput and NonInterleavedOutput.
    KeyFrameIntervalInSeconds float64
    The key frame interval in seconds. Possible value is a positive float. For example, set as 2.0 to reduce the playback buffering for some players.
    MaxBitrateBps int
    The maximum bitrate in bits per second (threshold for the top video layer). For example, set as 6000000 to avoid producing very high bitrate outputs for contents with high complexity.
    MaxHeight int
    The maximum height of output video layers. For example, set as 720 to produce output layers up to 720P even if the input is 4K.
    MaxLayers int
    The maximum number of output video layers. For example, set as 4 to make sure at most 4 output layers are produced to control the overall cost of the encoding job.
    MinBitrateBps int
    The minimum bitrate in bits per second (threshold for the bottom video layer). For example, set as 200000 to have a bottom layer that covers users with low network bandwidth.
    MinHeight int
    The minimum height of output video layers. For example, set as 360 to avoid output layers of smaller resolutions like 180P.
    complexity String
    The complexity of the encoding. Possible values are Balanced, Speed or Quality.
    interleaveOutput String
    Specifies the interleave mode of the output to control how audio is stored in the container format. Possible values are InterleavedOutput and NonInterleavedOutput.
    keyFrameIntervalInSeconds Double
    The key frame interval in seconds. Possible value is a positive float. For example, set as 2.0 to reduce the playback buffering for some players.
    maxBitrateBps Integer
    The maximum bitrate in bits per second (threshold for the top video layer). For example, set as 6000000 to avoid producing very high bitrate outputs for contents with high complexity.
    maxHeight Integer
    The maximum height of output video layers. For example, set as 720 to produce output layers up to 720P even if the input is 4K.
    maxLayers Integer
    The maximum number of output video layers. For example, set as 4 to make sure at most 4 output layers are produced to control the overall cost of the encoding job.
    minBitrateBps Integer
    The minimum bitrate in bits per second (threshold for the bottom video layer). For example, set as 200000 to have a bottom layer that covers users with low network bandwidth.
    minHeight Integer
    The minimum height of output video layers. For example, set as 360 to avoid output layers of smaller resolutions like 180P.
    complexity string
    The complexity of the encoding. Possible values are Balanced, Speed or Quality.
    interleaveOutput string
    Specifies the interleave mode of the output to control how audio is stored in the container format. Possible values are InterleavedOutput and NonInterleavedOutput.
    keyFrameIntervalInSeconds number
    The key frame interval in seconds. Possible value is a positive float. For example, set as 2.0 to reduce the playback buffering for some players.
    maxBitrateBps number
    The maximum bitrate in bits per second (threshold for the top video layer). For example, set as 6000000 to avoid producing very high bitrate outputs for contents with high complexity.
    maxHeight number
    The maximum height of output video layers. For example, set as 720 to produce output layers up to 720P even if the input is 4K.
    maxLayers number
    The maximum number of output video layers. For example, set as 4 to make sure at most 4 output layers are produced to control the overall cost of the encoding job.
    minBitrateBps number
    The minimum bitrate in bits per second (threshold for the bottom video layer). For example, set as 200000 to have a bottom layer that covers users with low network bandwidth.
    minHeight number
    The minimum height of output video layers. For example, set as 360 to avoid output layers of smaller resolutions like 180P.
    complexity str
    The complexity of the encoding. Possible values are Balanced, Speed or Quality.
    interleave_output str
    Specifies the interleave mode of the output to control how audio is stored in the container format. Possible values are InterleavedOutput and NonInterleavedOutput.
    key_frame_interval_in_seconds float
    The key frame interval in seconds. Possible value is a positive float. For example, set as 2.0 to reduce the playback buffering for some players.
    max_bitrate_bps int
    The maximum bitrate in bits per second (threshold for the top video layer). For example, set as 6000000 to avoid producing very high bitrate outputs for contents with high complexity.
    max_height int
    The maximum height of output video layers. For example, set as 720 to produce output layers up to 720P even if the input is 4K.
    max_layers int
    The maximum number of output video layers. For example, set as 4 to make sure at most 4 output layers are produced to control the overall cost of the encoding job.
    min_bitrate_bps int
    The minimum bitrate in bits per second (threshold for the bottom video layer). For example, set as 200000 to have a bottom layer that covers users with low network bandwidth.
    min_height int
    The minimum height of output video layers. For example, set as 360 to avoid output layers of smaller resolutions like 180P.
    complexity String
    The complexity of the encoding. Possible values are Balanced, Speed or Quality.
    interleaveOutput String
    Specifies the interleave mode of the output to control how audio is stored in the container format. Possible values are InterleavedOutput and NonInterleavedOutput.
    keyFrameIntervalInSeconds Number
    The key frame interval in seconds. Possible value is a positive float. For example, set as 2.0 to reduce the playback buffering for some players.
    maxBitrateBps Number
    The maximum bitrate in bits per second (threshold for the top video layer). For example, set as 6000000 to avoid producing very high bitrate outputs for contents with high complexity.
    maxHeight Number
    The maximum height of output video layers. For example, set as 720 to produce output layers up to 720P even if the input is 4K.
    maxLayers Number
    The maximum number of output video layers. For example, set as 4 to make sure at most 4 output layers are produced to control the overall cost of the encoding job.
    minBitrateBps Number
    The minimum bitrate in bits per second (threshold for the bottom video layer). For example, set as 200000 to have a bottom layer that covers users with low network bandwidth.
    minHeight Number
    The minimum height of output video layers. For example, set as 360 to avoid output layers of smaller resolutions like 180P.

    TransformOutputCustomPreset, TransformOutputCustomPresetArgs

    Codecs List<TransformOutputCustomPresetCodec>
    One or more codec blocks as defined above.
    Formats List<TransformOutputCustomPresetFormat>
    One or more format blocks as defined below.
    ExperimentalOptions Dictionary<string, string>
    Dictionary containing key value pairs for parameters not exposed in the preset itself.
    Filter TransformOutputCustomPresetFilter
    A filter block as defined below.
    Codecs []TransformOutputCustomPresetCodec
    One or more codec blocks as defined above.
    Formats []TransformOutputCustomPresetFormat
    One or more format blocks as defined below.
    ExperimentalOptions map[string]string
    Dictionary containing key value pairs for parameters not exposed in the preset itself.
    Filter TransformOutputCustomPresetFilter
    A filter block as defined below.
    codecs List<TransformOutputCustomPresetCodec>
    One or more codec blocks as defined above.
    formats List<TransformOutputCustomPresetFormat>
    One or more format blocks as defined below.
    experimentalOptions Map<String,String>
    Dictionary containing key value pairs for parameters not exposed in the preset itself.
    filter TransformOutputCustomPresetFilter
    A filter block as defined below.
    codecs TransformOutputCustomPresetCodec[]
    One or more codec blocks as defined above.
    formats TransformOutputCustomPresetFormat[]
    One or more format blocks as defined below.
    experimentalOptions {[key: string]: string}
    Dictionary containing key value pairs for parameters not exposed in the preset itself.
    filter TransformOutputCustomPresetFilter
    A filter block as defined below.
    codecs Sequence[TransformOutputCustomPresetCodec]
    One or more codec blocks as defined above.
    formats Sequence[TransformOutputCustomPresetFormat]
    One or more format blocks as defined below.
    experimental_options Mapping[str, str]
    Dictionary containing key value pairs for parameters not exposed in the preset itself.
    filter TransformOutputCustomPresetFilter
    A filter block as defined below.
    codecs List<Property Map>
    One or more codec blocks as defined above.
    formats List<Property Map>
    One or more format blocks as defined below.
    experimentalOptions Map<String>
    Dictionary containing key value pairs for parameters not exposed in the preset itself.
    filter Property Map
    A filter block as defined below.

    TransformOutputCustomPresetCodec, TransformOutputCustomPresetCodecArgs

    AacAudio TransformOutputCustomPresetCodecAacAudio
    A aac_audio block as defined above.
    CopyAudio TransformOutputCustomPresetCodecCopyAudio
    A copy_audio block as defined below.
    CopyVideo TransformOutputCustomPresetCodecCopyVideo
    A copy_video block as defined below.
    DdAudio TransformOutputCustomPresetCodecDdAudio
    A dd_audio block as defined below.
    H264Video TransformOutputCustomPresetCodecH264Video
    A h264_video block as defined below.
    H265Video TransformOutputCustomPresetCodecH265Video
    A h265_video block as defined below.
    JpgImage TransformOutputCustomPresetCodecJpgImage
    A jpg_image block as defined below.
    PngImage TransformOutputCustomPresetCodecPngImage

    A png_image block as defined below.

    NOTE: Each codec can only have one type: aac_audio, copy_audio, copy_video, dd_audio, h264_video, h265_video, jpg_image or png_image. If you need to apply a different codec, you must create a separate codec block for each one.

    AacAudio TransformOutputCustomPresetCodecAacAudio
    A aac_audio block as defined above.
    CopyAudio TransformOutputCustomPresetCodecCopyAudio
    A copy_audio block as defined below.
    CopyVideo TransformOutputCustomPresetCodecCopyVideo
    A copy_video block as defined below.
    DdAudio TransformOutputCustomPresetCodecDdAudio
    A dd_audio block as defined below.
    H264Video TransformOutputCustomPresetCodecH264Video
    A h264_video block as defined below.
    H265Video TransformOutputCustomPresetCodecH265Video
    A h265_video block as defined below.
    JpgImage TransformOutputCustomPresetCodecJpgImage
    A jpg_image block as defined below.
    PngImage TransformOutputCustomPresetCodecPngImage

    A png_image block as defined below.

    NOTE: Each codec can only have one type: aac_audio, copy_audio, copy_video, dd_audio, h264_video, h265_video, jpg_image or png_image. If you need to apply a different codec, you must create a separate codec block for each one.

    aacAudio TransformOutputCustomPresetCodecAacAudio
    A aac_audio block as defined above.
    copyAudio TransformOutputCustomPresetCodecCopyAudio
    A copy_audio block as defined below.
    copyVideo TransformOutputCustomPresetCodecCopyVideo
    A copy_video block as defined below.
    ddAudio TransformOutputCustomPresetCodecDdAudio
    A dd_audio block as defined below.
    h264Video TransformOutputCustomPresetCodecH264Video
    A h264_video block as defined below.
    h265Video TransformOutputCustomPresetCodecH265Video
    A h265_video block as defined below.
    jpgImage TransformOutputCustomPresetCodecJpgImage
    A jpg_image block as defined below.
    pngImage TransformOutputCustomPresetCodecPngImage

    A png_image block as defined below.

    NOTE: Each codec can only have one type: aac_audio, copy_audio, copy_video, dd_audio, h264_video, h265_video, jpg_image or png_image. If you need to apply a different codec, you must create a separate codec block for each one.

    aacAudio TransformOutputCustomPresetCodecAacAudio
    A aac_audio block as defined above.
    copyAudio TransformOutputCustomPresetCodecCopyAudio
    A copy_audio block as defined below.
    copyVideo TransformOutputCustomPresetCodecCopyVideo
    A copy_video block as defined below.
    ddAudio TransformOutputCustomPresetCodecDdAudio
    A dd_audio block as defined below.
    h264Video TransformOutputCustomPresetCodecH264Video
    A h264_video block as defined below.
    h265Video TransformOutputCustomPresetCodecH265Video
    A h265_video block as defined below.
    jpgImage TransformOutputCustomPresetCodecJpgImage
    A jpg_image block as defined below.
    pngImage TransformOutputCustomPresetCodecPngImage

    A png_image block as defined below.

    NOTE: Each codec can only have one type: aac_audio, copy_audio, copy_video, dd_audio, h264_video, h265_video, jpg_image or png_image. If you need to apply a different codec, you must create a separate codec block for each one.

    aac_audio TransformOutputCustomPresetCodecAacAudio
    A aac_audio block as defined above.
    copy_audio TransformOutputCustomPresetCodecCopyAudio
    A copy_audio block as defined below.
    copy_video TransformOutputCustomPresetCodecCopyVideo
    A copy_video block as defined below.
    dd_audio TransformOutputCustomPresetCodecDdAudio
    A dd_audio block as defined below.
    h264_video TransformOutputCustomPresetCodecH264Video
    A h264_video block as defined below.
    h265_video TransformOutputCustomPresetCodecH265Video
    A h265_video block as defined below.
    jpg_image TransformOutputCustomPresetCodecJpgImage
    A jpg_image block as defined below.
    png_image TransformOutputCustomPresetCodecPngImage

    A png_image block as defined below.

    NOTE: Each codec can only have one type: aac_audio, copy_audio, copy_video, dd_audio, h264_video, h265_video, jpg_image or png_image. If you need to apply a different codec, you must create a separate codec block for each one.

    aacAudio Property Map
    A aac_audio block as defined above.
    copyAudio Property Map
    A copy_audio block as defined below.
    copyVideo Property Map
    A copy_video block as defined below.
    ddAudio Property Map
    A dd_audio block as defined below.
    h264Video Property Map
    A h264_video block as defined below.
    h265Video Property Map
    A h265_video block as defined below.
    jpgImage Property Map
    A jpg_image block as defined below.
    pngImage Property Map

    A png_image block as defined below.

    NOTE: Each codec can only have one type: aac_audio, copy_audio, copy_video, dd_audio, h264_video, h265_video, jpg_image or png_image. If you need to apply a different codec, you must create a separate codec block for each one.

    TransformOutputCustomPresetCodecAacAudio, TransformOutputCustomPresetCodecAacAudioArgs

    Bitrate int
    The bitrate of the audio in bits per second. Default to 128000.
    Channels int
    The number of audio channels. Default to 2.
    Label string
    Specifies the label for the codec. The label can be used to control muxing behavior.
    Profile string
    The encoding profile to be used when encoding audio with AAC. Possible values are AacLc, HeAacV1, and HeAacV2. Defaults to AacLc.
    SamplingRate int
    The sampling rate to use for encoding in Hertz. Default to 48000.
    Bitrate int
    The bitrate of the audio in bits per second. Default to 128000.
    Channels int
    The number of audio channels. Default to 2.
    Label string
    Specifies the label for the codec. The label can be used to control muxing behavior.
    Profile string
    The encoding profile to be used when encoding audio with AAC. Possible values are AacLc, HeAacV1, and HeAacV2. Defaults to AacLc.
    SamplingRate int
    The sampling rate to use for encoding in Hertz. Default to 48000.
    bitrate Integer
    The bitrate of the audio in bits per second. Default to 128000.
    channels Integer
    The number of audio channels. Default to 2.
    label String
    Specifies the label for the codec. The label can be used to control muxing behavior.
    profile String
    The encoding profile to be used when encoding audio with AAC. Possible values are AacLc, HeAacV1, and HeAacV2. Defaults to AacLc.
    samplingRate Integer
    The sampling rate to use for encoding in Hertz. Default to 48000.
    bitrate number
    The bitrate of the audio in bits per second. Default to 128000.
    channels number
    The number of audio channels. Default to 2.
    label string
    Specifies the label for the codec. The label can be used to control muxing behavior.
    profile string
    The encoding profile to be used when encoding audio with AAC. Possible values are AacLc, HeAacV1, and HeAacV2. Defaults to AacLc.
    samplingRate number
    The sampling rate to use for encoding in Hertz. Default to 48000.
    bitrate int
    The bitrate of the audio in bits per second. Default to 128000.
    channels int
    The number of audio channels. Default to 2.
    label str
    Specifies the label for the codec. The label can be used to control muxing behavior.
    profile str
    The encoding profile to be used when encoding audio with AAC. Possible values are AacLc, HeAacV1, and HeAacV2. Defaults to AacLc.
    sampling_rate int
    The sampling rate to use for encoding in Hertz. Default to 48000.
    bitrate Number
    The bitrate of the audio in bits per second. Default to 128000.
    channels Number
    The number of audio channels. Default to 2.
    label String
    Specifies the label for the codec. The label can be used to control muxing behavior.
    profile String
    The encoding profile to be used when encoding audio with AAC. Possible values are AacLc, HeAacV1, and HeAacV2. Defaults to AacLc.
    samplingRate Number
    The sampling rate to use for encoding in Hertz. Default to 48000.

    TransformOutputCustomPresetCodecCopyAudio, TransformOutputCustomPresetCodecCopyAudioArgs

    Label string
    Specifies the label for the codec. The label can be used to control muxing behavior.
    Label string
    Specifies the label for the codec. The label can be used to control muxing behavior.
    label String
    Specifies the label for the codec. The label can be used to control muxing behavior.
    label string
    Specifies the label for the codec. The label can be used to control muxing behavior.
    label str
    Specifies the label for the codec. The label can be used to control muxing behavior.
    label String
    Specifies the label for the codec. The label can be used to control muxing behavior.

    TransformOutputCustomPresetCodecCopyVideo, TransformOutputCustomPresetCodecCopyVideoArgs

    Label string
    Specifies the label for the codec. The label can be used to control muxing behavior.
    Label string
    Specifies the label for the codec. The label can be used to control muxing behavior.
    label String
    Specifies the label for the codec. The label can be used to control muxing behavior.
    label string
    Specifies the label for the codec. The label can be used to control muxing behavior.
    label str
    Specifies the label for the codec. The label can be used to control muxing behavior.
    label String
    Specifies the label for the codec. The label can be used to control muxing behavior.

    TransformOutputCustomPresetCodecDdAudio, TransformOutputCustomPresetCodecDdAudioArgs

    Bitrate int
    The bitrate of the audio in bits per second. Default to 192000.
    Channels int
    The number of audio channels. Default to 2.
    Label string
    Specifies the label for the codec. The label can be used to control muxing behavior.
    SamplingRate int
    The sampling rate to use for encoding in Hertz. Default to 48000.
    Bitrate int
    The bitrate of the audio in bits per second. Default to 192000.
    Channels int
    The number of audio channels. Default to 2.
    Label string
    Specifies the label for the codec. The label can be used to control muxing behavior.
    SamplingRate int
    The sampling rate to use for encoding in Hertz. Default to 48000.
    bitrate Integer
    The bitrate of the audio in bits per second. Default to 192000.
    channels Integer
    The number of audio channels. Default to 2.
    label String
    Specifies the label for the codec. The label can be used to control muxing behavior.
    samplingRate Integer
    The sampling rate to use for encoding in Hertz. Default to 48000.
    bitrate number
    The bitrate of the audio in bits per second. Default to 192000.
    channels number
    The number of audio channels. Default to 2.
    label string
    Specifies the label for the codec. The label can be used to control muxing behavior.
    samplingRate number
    The sampling rate to use for encoding in Hertz. Default to 48000.
    bitrate int
    The bitrate of the audio in bits per second. Default to 192000.
    channels int
    The number of audio channels. Default to 2.
    label str
    Specifies the label for the codec. The label can be used to control muxing behavior.
    sampling_rate int
    The sampling rate to use for encoding in Hertz. Default to 48000.
    bitrate Number
    The bitrate of the audio in bits per second. Default to 192000.
    channels Number
    The number of audio channels. Default to 2.
    label String
    Specifies the label for the codec. The label can be used to control muxing behavior.
    samplingRate Number
    The sampling rate to use for encoding in Hertz. Default to 48000.

    TransformOutputCustomPresetCodecH264Video, TransformOutputCustomPresetCodecH264VideoArgs

    Complexity string
    The complexity of the encoding. Possible values are Balanced, Speed or Quality. Default to Balanced.
    KeyFrameInterval string
    The distance between two key frames. The value should be non-zero in the range 0.5 to 20 seconds, specified in ISO 8601 format. Note that this setting is ignored if sync_mode is set to Passthrough, where the KeyFrameInterval value will follow the input source setting. Defaults to PT2S.
    Label string
    Specifies the label for the codec. The label can be used to control muxing behavior.
    Layers List<TransformOutputCustomPresetCodecH264VideoLayer>
    One or more layer blocks as defined below.
    RateControlMode string
    The rate control mode. Possible values are ABR, CBR or CRF. Default to ABR.
    SceneChangeDetectionEnabled bool
    Whether the encoder should insert key frames at scene changes. This flag should be set to true only when the encoder is being configured to produce a single output video. Default to false.
    StretchMode string
    Specifies the resizing mode - how the input video will be resized to fit the desired output resolution(s). Possible values are AutoFit, AutoSize or None. Default to AutoSize.
    SyncMode string
    Specifies the synchronization mode for the video. Possible values are Auto, Cfr, Passthrough or Vfr. Default to Auto.
    Complexity string
    The complexity of the encoding. Possible values are Balanced, Speed or Quality. Default to Balanced.
    KeyFrameInterval string
    The distance between two key frames. The value should be non-zero in the range 0.5 to 20 seconds, specified in ISO 8601 format. Note that this setting is ignored if sync_mode is set to Passthrough, where the KeyFrameInterval value will follow the input source setting. Defaults to PT2S.
    Label string
    Specifies the label for the codec. The label can be used to control muxing behavior.
    Layers []TransformOutputCustomPresetCodecH264VideoLayer
    One or more layer blocks as defined below.
    RateControlMode string
    The rate control mode. Possible values are ABR, CBR or CRF. Default to ABR.
    SceneChangeDetectionEnabled bool
    Whether the encoder should insert key frames at scene changes. This flag should be set to true only when the encoder is being configured to produce a single output video. Default to false.
    StretchMode string
    Specifies the resizing mode - how the input video will be resized to fit the desired output resolution(s). Possible values are AutoFit, AutoSize or None. Default to AutoSize.
    SyncMode string
    Specifies the synchronization mode for the video. Possible values are Auto, Cfr, Passthrough or Vfr. Default to Auto.
    complexity String
    The complexity of the encoding. Possible values are Balanced, Speed or Quality. Default to Balanced.
    keyFrameInterval String
    The distance between two key frames. The value should be non-zero in the range 0.5 to 20 seconds, specified in ISO 8601 format. Note that this setting is ignored if sync_mode is set to Passthrough, where the KeyFrameInterval value will follow the input source setting. Defaults to PT2S.
    label String
    Specifies the label for the codec. The label can be used to control muxing behavior.
    layers List<TransformOutputCustomPresetCodecH264VideoLayer>
    One or more layer blocks as defined below.
    rateControlMode String
    The rate control mode. Possible values are ABR, CBR or CRF. Default to ABR.
    sceneChangeDetectionEnabled Boolean
    Whether the encoder should insert key frames at scene changes. This flag should be set to true only when the encoder is being configured to produce a single output video. Default to false.
    stretchMode String
    Specifies the resizing mode - how the input video will be resized to fit the desired output resolution(s). Possible values are AutoFit, AutoSize or None. Default to AutoSize.
    syncMode String
    Specifies the synchronization mode for the video. Possible values are Auto, Cfr, Passthrough or Vfr. Default to Auto.
    complexity string
    The complexity of the encoding. Possible values are Balanced, Speed or Quality. Default to Balanced.
    keyFrameInterval string
    The distance between two key frames. The value should be non-zero in the range 0.5 to 20 seconds, specified in ISO 8601 format. Note that this setting is ignored if sync_mode is set to Passthrough, where the KeyFrameInterval value will follow the input source setting. Defaults to PT2S.
    label string
    Specifies the label for the codec. The label can be used to control muxing behavior.
    layers TransformOutputCustomPresetCodecH264VideoLayer[]
    One or more layer blocks as defined below.
    rateControlMode string
    The rate control mode. Possible values are ABR, CBR or CRF. Default to ABR.
    sceneChangeDetectionEnabled boolean
    Whether the encoder should insert key frames at scene changes. This flag should be set to true only when the encoder is being configured to produce a single output video. Default to false.
    stretchMode string
    Specifies the resizing mode - how the input video will be resized to fit the desired output resolution(s). Possible values are AutoFit, AutoSize or None. Default to AutoSize.
    syncMode string
    Specifies the synchronization mode for the video. Possible values are Auto, Cfr, Passthrough or Vfr. Default to Auto.
    complexity str
    The complexity of the encoding. Possible values are Balanced, Speed or Quality. Default to Balanced.
    key_frame_interval str
    The distance between two key frames. The value should be non-zero in the range 0.5 to 20 seconds, specified in ISO 8601 format. Note that this setting is ignored if sync_mode is set to Passthrough, where the KeyFrameInterval value will follow the input source setting. Defaults to PT2S.
    label str
    Specifies the label for the codec. The label can be used to control muxing behavior.
    layers Sequence[TransformOutputCustomPresetCodecH264VideoLayer]
    One or more layer blocks as defined below.
    rate_control_mode str
    The rate control mode. Possible values are ABR, CBR or CRF. Default to ABR.
    scene_change_detection_enabled bool
    Whether the encoder should insert key frames at scene changes. This flag should be set to true only when the encoder is being configured to produce a single output video. Default to false.
    stretch_mode str
    Specifies the resizing mode - how the input video will be resized to fit the desired output resolution(s). Possible values are AutoFit, AutoSize or None. Default to AutoSize.
    sync_mode str
    Specifies the synchronization mode for the video. Possible values are Auto, Cfr, Passthrough or Vfr. Default to Auto.
    complexity String
    The complexity of the encoding. Possible values are Balanced, Speed or Quality. Default to Balanced.
    keyFrameInterval String
    The distance between two key frames. The value should be non-zero in the range 0.5 to 20 seconds, specified in ISO 8601 format. Note that this setting is ignored if sync_mode is set to Passthrough, where the KeyFrameInterval value will follow the input source setting. Defaults to PT2S.
    label String
    Specifies the label for the codec. The label can be used to control muxing behavior.
    layers List<Property Map>
    One or more layer blocks as defined below.
    rateControlMode String
    The rate control mode. Possible values are ABR, CBR or CRF. Default to ABR.
    sceneChangeDetectionEnabled Boolean
    Whether the encoder should insert key frames at scene changes. This flag should be set to true only when the encoder is being configured to produce a single output video. Default to false.
    stretchMode String
    Specifies the resizing mode - how the input video will be resized to fit the desired output resolution(s). Possible values are AutoFit, AutoSize or None. Default to AutoSize.
    syncMode String
    Specifies the synchronization mode for the video. Possible values are Auto, Cfr, Passthrough or Vfr. Default to Auto.

    TransformOutputCustomPresetCodecH264VideoLayer, TransformOutputCustomPresetCodecH264VideoLayerArgs

    Bitrate int
    The average bitrate in bits per second at which to encode the input video when generating this layer.
    AdaptiveBFrameEnabled bool
    Whether adaptive B-frames are used when encoding this layer. If not specified, the encoder will turn it on whenever the video profile permits its use. Default to true.
    BFrames int
    The number of B-frames to use when encoding this layer. If not specified, the encoder chooses an appropriate number based on the video profile and level.
    BufferWindow string
    Specifies the maximum amount of time that the encoder should buffer frames before encoding. The value should be in ISO 8601 format. The value should be in the range 0.1 to 100 seconds. Defaults to PT5S.
    Crf double
    The value of CRF to be used when encoding this layer. This setting takes effect when rate_control_mode is set to CRF. The range of CRF value is between 0 and 51, where lower values would result in better quality, at the expense of higher file sizes. Higher values mean more compression, but at some point quality degradation will be noticed. Defaults to 28.
    EntropyMode string
    The entropy mode to be used for this layer. Possible values are Cabac or Cavlc. If not specified, the encoder chooses the mode that is appropriate for the profile and level.
    FrameRate string
    The frame rate (in frames per second) at which to encode this layer. The value can be in the form of M/N where M and N are integers (For example, 30000/1001), or in the form of a number (For example, 30, or 29.97). The encoder enforces constraints on allowed frame rates based on the profile and level. If it is not specified, the encoder will use the same frame rate as the input video.
    Height string
    The height of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 50% means the output video has half as many pixels in height as the input.
    Label string
    The alphanumeric label for this layer, which can be used in multiplexing different video and audio layers, or in naming the output file.
    Level string
    The H.264 levels. Currently, the resource supports Levels up to 6.2. The value can be auto, or a number that matches the H.264 profile. If not specified, the default is auto, which lets the encoder choose the Level that is appropriate for this layer.
    MaxBitrate int
    The maximum bitrate (in bits per second), at which the VBV buffer should be assumed to refill. If not specified, defaults to the same value as bitrate.
    Profile string
    The H.264 profile. Possible values are Auto, Main and Main10. Default to Auto.
    ReferenceFrames int
    The number of reference frames to be used when encoding this layer. If not specified, the encoder determines an appropriate number based on the encoder complexity setting.
    Slices int
    The number of slices to be used when encoding this layer. If not specified, default is 1, which means that encoder will use a single slice for each frame.
    Width string
    The width of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 50% means the output video has half as many pixels in width as the input.
    Bitrate int
    The average bitrate in bits per second at which to encode the input video when generating this layer.
    AdaptiveBFrameEnabled bool
    Whether adaptive B-frames are used when encoding this layer. If not specified, the encoder will turn it on whenever the video profile permits its use. Default to true.
    BFrames int
    The number of B-frames to use when encoding this layer. If not specified, the encoder chooses an appropriate number based on the video profile and level.
    BufferWindow string
    Specifies the maximum amount of time that the encoder should buffer frames before encoding. The value should be in ISO 8601 format. The value should be in the range 0.1 to 100 seconds. Defaults to PT5S.
    Crf float64
    The value of CRF to be used when encoding this layer. This setting takes effect when rate_control_mode is set to CRF. The range of CRF value is between 0 and 51, where lower values would result in better quality, at the expense of higher file sizes. Higher values mean more compression, but at some point quality degradation will be noticed. Defaults to 28.
    EntropyMode string
    The entropy mode to be used for this layer. Possible values are Cabac or Cavlc. If not specified, the encoder chooses the mode that is appropriate for the profile and level.
    FrameRate string
    The frame rate (in frames per second) at which to encode this layer. The value can be in the form of M/N where M and N are integers (For example, 30000/1001), or in the form of a number (For example, 30, or 29.97). The encoder enforces constraints on allowed frame rates based on the profile and level. If it is not specified, the encoder will use the same frame rate as the input video.
    Height string
    The height of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 50% means the output video has half as many pixels in height as the input.
    Label string
    The alphanumeric label for this layer, which can be used in multiplexing different video and audio layers, or in naming the output file.
    Level string
    The H.264 levels. Currently, the resource supports Levels up to 6.2. The value can be auto, or a number that matches the H.264 profile. If not specified, the default is auto, which lets the encoder choose the Level that is appropriate for this layer.
    MaxBitrate int
    The maximum bitrate (in bits per second), at which the VBV buffer should be assumed to refill. If not specified, defaults to the same value as bitrate.
    Profile string
    The H.264 profile. Possible values are Auto, Main and Main10. Default to Auto.
    ReferenceFrames int
    The number of reference frames to be used when encoding this layer. If not specified, the encoder determines an appropriate number based on the encoder complexity setting.
    Slices int
    The number of slices to be used when encoding this layer. If not specified, default is 1, which means that encoder will use a single slice for each frame.
    Width string
    The width of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 50% means the output video has half as many pixels in width as the input.
    bitrate Integer
    The average bitrate in bits per second at which to encode the input video when generating this layer.
    adaptiveBFrameEnabled Boolean
    Whether adaptive B-frames are used when encoding this layer. If not specified, the encoder will turn it on whenever the video profile permits its use. Default to true.
    bFrames Integer
    The number of B-frames to use when encoding this layer. If not specified, the encoder chooses an appropriate number based on the video profile and level.
    bufferWindow String
    Specifies the maximum amount of time that the encoder should buffer frames before encoding. The value should be in ISO 8601 format. The value should be in the range 0.1 to 100 seconds. Defaults to PT5S.
    crf Double
    The value of CRF to be used when encoding this layer. This setting takes effect when rate_control_mode is set to CRF. The range of CRF value is between 0 and 51, where lower values would result in better quality, at the expense of higher file sizes. Higher values mean more compression, but at some point quality degradation will be noticed. Defaults to 28.
    entropyMode String
    The entropy mode to be used for this layer. Possible values are Cabac or Cavlc. If not specified, the encoder chooses the mode that is appropriate for the profile and level.
    frameRate String
    The frame rate (in frames per second) at which to encode this layer. The value can be in the form of M/N where M and N are integers (For example, 30000/1001), or in the form of a number (For example, 30, or 29.97). The encoder enforces constraints on allowed frame rates based on the profile and level. If it is not specified, the encoder will use the same frame rate as the input video.
    height String
    The height of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 50% means the output video has half as many pixels in height as the input.
    label String
    The alphanumeric label for this layer, which can be used in multiplexing different video and audio layers, or in naming the output file.
    level String
    The H.264 levels. Currently, the resource supports Levels up to 6.2. The value can be auto, or a number that matches the H.264 profile. If not specified, the default is auto, which lets the encoder choose the Level that is appropriate for this layer.
    maxBitrate Integer
    The maximum bitrate (in bits per second), at which the VBV buffer should be assumed to refill. If not specified, defaults to the same value as bitrate.
    profile String
    The H.264 profile. Possible values are Auto, Main and Main10. Default to Auto.
    referenceFrames Integer
    The number of reference frames to be used when encoding this layer. If not specified, the encoder determines an appropriate number based on the encoder complexity setting.
    slices Integer
    The number of slices to be used when encoding this layer. If not specified, default is 1, which means that encoder will use a single slice for each frame.
    width String
    The width of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 50% means the output video has half as many pixels in width as the input.
    bitrate number
    The average bitrate in bits per second at which to encode the input video when generating this layer.
    adaptiveBFrameEnabled boolean
    Whether adaptive B-frames are used when encoding this layer. If not specified, the encoder will turn it on whenever the video profile permits its use. Default to true.
    bFrames number
    The number of B-frames to use when encoding this layer. If not specified, the encoder chooses an appropriate number based on the video profile and level.
    bufferWindow string
    Specifies the maximum amount of time that the encoder should buffer frames before encoding. The value should be in ISO 8601 format. The value should be in the range 0.1 to 100 seconds. Defaults to PT5S.
    crf number
    The value of CRF to be used when encoding this layer. This setting takes effect when rate_control_mode is set to CRF. The range of CRF value is between 0 and 51, where lower values would result in better quality, at the expense of higher file sizes. Higher values mean more compression, but at some point quality degradation will be noticed. Defaults to 28.
    entropyMode string
    The entropy mode to be used for this layer. Possible values are Cabac or Cavlc. If not specified, the encoder chooses the mode that is appropriate for the profile and level.
    frameRate string
    The frame rate (in frames per second) at which to encode this layer. The value can be in the form of M/N where M and N are integers (For example, 30000/1001), or in the form of a number (For example, 30, or 29.97). The encoder enforces constraints on allowed frame rates based on the profile and level. If it is not specified, the encoder will use the same frame rate as the input video.
    height string
    The height of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 50% means the output video has half as many pixels in height as the input.
    label string
    The alphanumeric label for this layer, which can be used in multiplexing different video and audio layers, or in naming the output file.
    level string
    The H.264 levels. Currently, the resource supports Levels up to 6.2. The value can be auto, or a number that matches the H.264 profile. If not specified, the default is auto, which lets the encoder choose the Level that is appropriate for this layer.
    maxBitrate number
    The maximum bitrate (in bits per second), at which the VBV buffer should be assumed to refill. If not specified, defaults to the same value as bitrate.
    profile string
    The H.264 profile. Possible values are Auto, Main and Main10. Default to Auto.
    referenceFrames number
    The number of reference frames to be used when encoding this layer. If not specified, the encoder determines an appropriate number based on the encoder complexity setting.
    slices number
    The number of slices to be used when encoding this layer. If not specified, default is 1, which means that encoder will use a single slice for each frame.
    width string
    The width of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 50% means the output video has half as many pixels in width as the input.
    bitrate int
    The average bitrate in bits per second at which to encode the input video when generating this layer.
    adaptive_b_frame_enabled bool
    Whether adaptive B-frames are used when encoding this layer. If not specified, the encoder will turn it on whenever the video profile permits its use. Default to true.
    b_frames int
    The number of B-frames to use when encoding this layer. If not specified, the encoder chooses an appropriate number based on the video profile and level.
    buffer_window str
    Specifies the maximum amount of time that the encoder should buffer frames before encoding. The value should be in ISO 8601 format. The value should be in the range 0.1 to 100 seconds. Defaults to PT5S.
    crf float
    The value of CRF to be used when encoding this layer. This setting takes effect when rate_control_mode is set to CRF. The range of CRF value is between 0 and 51, where lower values would result in better quality, at the expense of higher file sizes. Higher values mean more compression, but at some point quality degradation will be noticed. Defaults to 28.
    entropy_mode str
    The entropy mode to be used for this layer. Possible values are Cabac or Cavlc. If not specified, the encoder chooses the mode that is appropriate for the profile and level.
    frame_rate str
    The frame rate (in frames per second) at which to encode this layer. The value can be in the form of M/N where M and N are integers (For example, 30000/1001), or in the form of a number (For example, 30, or 29.97). The encoder enforces constraints on allowed frame rates based on the profile and level. If it is not specified, the encoder will use the same frame rate as the input video.
    height str
    The height of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 50% means the output video has half as many pixels in height as the input.
    label str
    The alphanumeric label for this layer, which can be used in multiplexing different video and audio layers, or in naming the output file.
    level str
    The H.264 levels. Currently, the resource supports Levels up to 6.2. The value can be auto, or a number that matches the H.264 profile. If not specified, the default is auto, which lets the encoder choose the Level that is appropriate for this layer.
    max_bitrate int
    The maximum bitrate (in bits per second), at which the VBV buffer should be assumed to refill. If not specified, defaults to the same value as bitrate.
    profile str
    The H.264 profile. Possible values are Auto, Main and Main10. Default to Auto.
    reference_frames int
    The number of reference frames to be used when encoding this layer. If not specified, the encoder determines an appropriate number based on the encoder complexity setting.
    slices int
    The number of slices to be used when encoding this layer. If not specified, default is 1, which means that encoder will use a single slice for each frame.
    width str
    The width of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 50% means the output video has half as many pixels in width as the input.
    bitrate Number
    The average bitrate in bits per second at which to encode the input video when generating this layer.
    adaptiveBFrameEnabled Boolean
    Whether adaptive B-frames are used when encoding this layer. If not specified, the encoder will turn it on whenever the video profile permits its use. Default to true.
    bFrames Number
    The number of B-frames to use when encoding this layer. If not specified, the encoder chooses an appropriate number based on the video profile and level.
    bufferWindow String
    Specifies the maximum amount of time that the encoder should buffer frames before encoding. The value should be in ISO 8601 format. The value should be in the range 0.1 to 100 seconds. Defaults to PT5S.
    crf Number
    The value of CRF to be used when encoding this layer. This setting takes effect when rate_control_mode is set to CRF. The range of CRF value is between 0 and 51, where lower values would result in better quality, at the expense of higher file sizes. Higher values mean more compression, but at some point quality degradation will be noticed. Defaults to 28.
    entropyMode String
    The entropy mode to be used for this layer. Possible values are Cabac or Cavlc. If not specified, the encoder chooses the mode that is appropriate for the profile and level.
    frameRate String
    The frame rate (in frames per second) at which to encode this layer. The value can be in the form of M/N where M and N are integers (For example, 30000/1001), or in the form of a number (For example, 30, or 29.97). The encoder enforces constraints on allowed frame rates based on the profile and level. If it is not specified, the encoder will use the same frame rate as the input video.
    height String
    The height of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 50% means the output video has half as many pixels in height as the input.
    label String
    The alphanumeric label for this layer, which can be used in multiplexing different video and audio layers, or in naming the output file.
    level String
    The H.264 levels. Currently, the resource supports Levels up to 6.2. The value can be auto, or a number that matches the H.264 profile. If not specified, the default is auto, which lets the encoder choose the Level that is appropriate for this layer.
    maxBitrate Number
    The maximum bitrate (in bits per second), at which the VBV buffer should be assumed to refill. If not specified, defaults to the same value as bitrate.
    profile String
    The H.264 profile. Possible values are Auto, Main and Main10. Default to Auto.
    referenceFrames Number
    The number of reference frames to be used when encoding this layer. If not specified, the encoder determines an appropriate number based on the encoder complexity setting.
    slices Number
    The number of slices to be used when encoding this layer. If not specified, default is 1, which means that encoder will use a single slice for each frame.
    width String
    The width of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 50% means the output video has half as many pixels in width as the input.

    TransformOutputCustomPresetCodecH265Video, TransformOutputCustomPresetCodecH265VideoArgs

    Complexity string
    The complexity of the encoding. Possible values are Balanced, Speed or Quality. Default to Balanced.
    KeyFrameInterval string
    The distance between two key frames. The value should be non-zero in the range 0.5 to 20 seconds, specified in ISO 8601 format. Note that this setting is ignored if sync_mode is set to Passthrough, where the KeyFrameInterval value will follow the input source setting. Defaults to PT2S.
    Label string
    Specifies the label for the codec. The label can be used to control muxing behavior.
    Layers List<TransformOutputCustomPresetCodecH265VideoLayer>
    One or more layer blocks as defined below.
    SceneChangeDetectionEnabled bool
    Whether the encoder should insert key frames at scene changes. This flag should be set to true only when the encoder is being configured to produce a single output video. Default to false.
    StretchMode string
    Specifies the resizing mode - how the input video will be resized to fit the desired output resolution(s). Possible values are AutoFit, AutoSize or None. Default to AutoSize.
    SyncMode string
    Specifies the synchronization mode for the video. Possible values are Auto, Cfr, Passthrough or Vfr. Default to Auto.
    Complexity string
    The complexity of the encoding. Possible values are Balanced, Speed or Quality. Default to Balanced.
    KeyFrameInterval string
    The distance between two key frames. The value should be non-zero in the range 0.5 to 20 seconds, specified in ISO 8601 format. Note that this setting is ignored if sync_mode is set to Passthrough, where the KeyFrameInterval value will follow the input source setting. Defaults to PT2S.
    Label string
    Specifies the label for the codec. The label can be used to control muxing behavior.
    Layers []TransformOutputCustomPresetCodecH265VideoLayer
    One or more layer blocks as defined below.
    SceneChangeDetectionEnabled bool
    Whether the encoder should insert key frames at scene changes. This flag should be set to true only when the encoder is being configured to produce a single output video. Default to false.
    StretchMode string
    Specifies the resizing mode - how the input video will be resized to fit the desired output resolution(s). Possible values are AutoFit, AutoSize or None. Default to AutoSize.
    SyncMode string
    Specifies the synchronization mode for the video. Possible values are Auto, Cfr, Passthrough or Vfr. Default to Auto.
    complexity String
    The complexity of the encoding. Possible values are Balanced, Speed or Quality. Default to Balanced.
    keyFrameInterval String
    The distance between two key frames. The value should be non-zero in the range 0.5 to 20 seconds, specified in ISO 8601 format. Note that this setting is ignored if sync_mode is set to Passthrough, where the KeyFrameInterval value will follow the input source setting. Defaults to PT2S.
    label String
    Specifies the label for the codec. The label can be used to control muxing behavior.
    layers List<TransformOutputCustomPresetCodecH265VideoLayer>
    One or more layer blocks as defined below.
    sceneChangeDetectionEnabled Boolean
    Whether the encoder should insert key frames at scene changes. This flag should be set to true only when the encoder is being configured to produce a single output video. Default to false.
    stretchMode String
    Specifies the resizing mode - how the input video will be resized to fit the desired output resolution(s). Possible values are AutoFit, AutoSize or None. Default to AutoSize.
    syncMode String
    Specifies the synchronization mode for the video. Possible values are Auto, Cfr, Passthrough or Vfr. Default to Auto.
    complexity string
    The complexity of the encoding. Possible values are Balanced, Speed or Quality. Default to Balanced.
    keyFrameInterval string
    The distance between two key frames. The value should be non-zero in the range 0.5 to 20 seconds, specified in ISO 8601 format. Note that this setting is ignored if sync_mode is set to Passthrough, where the KeyFrameInterval value will follow the input source setting. Defaults to PT2S.
    label string
    Specifies the label for the codec. The label can be used to control muxing behavior.
    layers TransformOutputCustomPresetCodecH265VideoLayer[]
    One or more layer blocks as defined below.
    sceneChangeDetectionEnabled boolean
    Whether the encoder should insert key frames at scene changes. This flag should be set to true only when the encoder is being configured to produce a single output video. Default to false.
    stretchMode string
    Specifies the resizing mode - how the input video will be resized to fit the desired output resolution(s). Possible values are AutoFit, AutoSize or None. Default to AutoSize.
    syncMode string
    Specifies the synchronization mode for the video. Possible values are Auto, Cfr, Passthrough or Vfr. Default to Auto.
    complexity str
    The complexity of the encoding. Possible values are Balanced, Speed or Quality. Default to Balanced.
    key_frame_interval str
    The distance between two key frames. The value should be non-zero in the range 0.5 to 20 seconds, specified in ISO 8601 format. Note that this setting is ignored if sync_mode is set to Passthrough, where the KeyFrameInterval value will follow the input source setting. Defaults to PT2S.
    label str
    Specifies the label for the codec. The label can be used to control muxing behavior.
    layers Sequence[TransformOutputCustomPresetCodecH265VideoLayer]
    One or more layer blocks as defined below.
    scene_change_detection_enabled bool
    Whether the encoder should insert key frames at scene changes. This flag should be set to true only when the encoder is being configured to produce a single output video. Default to false.
    stretch_mode str
    Specifies the resizing mode - how the input video will be resized to fit the desired output resolution(s). Possible values are AutoFit, AutoSize or None. Default to AutoSize.
    sync_mode str
    Specifies the synchronization mode for the video. Possible values are Auto, Cfr, Passthrough or Vfr. Default to Auto.
    complexity String
    The complexity of the encoding. Possible values are Balanced, Speed or Quality. Default to Balanced.
    keyFrameInterval String
    The distance between two key frames. The value should be non-zero in the range 0.5 to 20 seconds, specified in ISO 8601 format. Note that this setting is ignored if sync_mode is set to Passthrough, where the KeyFrameInterval value will follow the input source setting. Defaults to PT2S.
    label String
    Specifies the label for the codec. The label can be used to control muxing behavior.
    layers List<Property Map>
    One or more layer blocks as defined below.
    sceneChangeDetectionEnabled Boolean
    Whether the encoder should insert key frames at scene changes. This flag should be set to true only when the encoder is being configured to produce a single output video. Default to false.
    stretchMode String
    Specifies the resizing mode - how the input video will be resized to fit the desired output resolution(s). Possible values are AutoFit, AutoSize or None. Default to AutoSize.
    syncMode String
    Specifies the synchronization mode for the video. Possible values are Auto, Cfr, Passthrough or Vfr. Default to Auto.

    TransformOutputCustomPresetCodecH265VideoLayer, TransformOutputCustomPresetCodecH265VideoLayerArgs

    Bitrate int
    The average bitrate in bits per second at which to encode the input video when generating this layer.
    AdaptiveBFrameEnabled bool
    Whether adaptive B-frames are used when encoding this layer. If not specified, the encoder will turn it on whenever the video profile permits its use. Default to true.
    BFrames int
    The number of B-frames to use when encoding this layer. If not specified, the encoder chooses an appropriate number based on the video profile and level.
    BufferWindow string
    Specifies the maximum amount of time that the encoder should buffer frames before encoding. The value should be in ISO 8601 format. The value should be in the range 0.1 to 100 seconds. Defaults to PT5S.
    Crf double
    The value of CRF to be used when encoding this layer. This setting takes effect when rate_control_mode is set to CRF. The range of CRF value is between 0 and 51, where lower values would result in better quality, at the expense of higher file sizes. Higher values mean more compression, but at some point quality degradation will be noticed. Defaults to 28.
    FrameRate string
    The frame rate (in frames per second) at which to encode this layer. The value can be in the form of M/N where M and N are integers (For example, 30000/1001), or in the form of a number (For example, 30, or 29.97). The encoder enforces constraints on allowed frame rates based on the profile and level. If it is not specified, the encoder will use the same frame rate as the input video.
    Height string
    The height of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 50% means the output video has half as many pixels in height as the input.
    Label string
    The alphanumeric label for this layer, which can be used in multiplexing different video and audio layers, or in naming the output file.
    Level string
    The H.265 levels. Currently, the resource supports Level up to 6.2. The value can be auto, or a number that matches the H.265 profile. If not specified, the default is auto, which lets the encoder choose the Level that is appropriate for this layer.
    MaxBitrate int
    The maximum bitrate (in bits per second), at which the VBV buffer should be assumed to refill. If not specified, defaults to the same value as bitrate.
    Profile string
    The H.265 profile. Possible values are Auto, Main and Main10. Defaults to Auto.
    ReferenceFrames int
    The number of reference frames to be used when encoding this layer. If not specified, the encoder determines an appropriate number based on the encoder complexity setting.
    Slices int
    The number of slices to be used when encoding this layer. If not specified, default is 1, which means that encoder will use a single slice for each frame.
    Width string
    The width of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 50% means the output video has half as many pixels in width as the input.
    Bitrate int
    The average bitrate in bits per second at which to encode the input video when generating this layer.
    AdaptiveBFrameEnabled bool
    Whether adaptive B-frames are used when encoding this layer. If not specified, the encoder will turn it on whenever the video profile permits its use. Default to true.
    BFrames int
    The number of B-frames to use when encoding this layer. If not specified, the encoder chooses an appropriate number based on the video profile and level.
    BufferWindow string
    Specifies the maximum amount of time that the encoder should buffer frames before encoding. The value should be in ISO 8601 format. The value should be in the range 0.1 to 100 seconds. Defaults to PT5S.
    Crf float64
    The value of CRF to be used when encoding this layer. This setting takes effect when rate_control_mode is set to CRF. The range of CRF value is between 0 and 51, where lower values would result in better quality, at the expense of higher file sizes. Higher values mean more compression, but at some point quality degradation will be noticed. Defaults to 28.
    FrameRate string
    The frame rate (in frames per second) at which to encode this layer. The value can be in the form of M/N where M and N are integers (For example, 30000/1001), or in the form of a number (For example, 30, or 29.97). The encoder enforces constraints on allowed frame rates based on the profile and level. If it is not specified, the encoder will use the same frame rate as the input video.
    Height string
    The height of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 50% means the output video has half as many pixels in height as the input.
    Label string
    The alphanumeric label for this layer, which can be used in multiplexing different video and audio layers, or in naming the output file.
    Level string
    The H.265 levels. Currently, the resource supports Level up to 6.2. The value can be auto, or a number that matches the H.265 profile. If not specified, the default is auto, which lets the encoder choose the Level that is appropriate for this layer.
    MaxBitrate int
    The maximum bitrate (in bits per second), at which the VBV buffer should be assumed to refill. If not specified, defaults to the same value as bitrate.
    Profile string
    The H.265 profile. Possible values are Auto, Main and Main10. Defaults to Auto.
    ReferenceFrames int
    The number of reference frames to be used when encoding this layer. If not specified, the encoder determines an appropriate number based on the encoder complexity setting.
    Slices int
    The number of slices to be used when encoding this layer. If not specified, default is 1, which means that encoder will use a single slice for each frame.
    Width string
    The width of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 50% means the output video has half as many pixels in width as the input.
    bitrate Integer
    The average bitrate in bits per second at which to encode the input video when generating this layer.
    adaptiveBFrameEnabled Boolean
    Whether adaptive B-frames are used when encoding this layer. If not specified, the encoder will turn it on whenever the video profile permits its use. Default to true.
    bFrames Integer
    The number of B-frames to use when encoding this layer. If not specified, the encoder chooses an appropriate number based on the video profile and level.
    bufferWindow String
    Specifies the maximum amount of time that the encoder should buffer frames before encoding. The value should be in ISO 8601 format. The value should be in the range 0.1 to 100 seconds. Defaults to PT5S.
    crf Double
    The value of CRF to be used when encoding this layer. This setting takes effect when rate_control_mode is set to CRF. The range of CRF value is between 0 and 51, where lower values would result in better quality, at the expense of higher file sizes. Higher values mean more compression, but at some point quality degradation will be noticed. Defaults to 28.
    frameRate String
    The frame rate (in frames per second) at which to encode this layer. The value can be in the form of M/N where M and N are integers (For example, 30000/1001), or in the form of a number (For example, 30, or 29.97). The encoder enforces constraints on allowed frame rates based on the profile and level. If it is not specified, the encoder will use the same frame rate as the input video.
    height String
    The height of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 50% means the output video has half as many pixels in height as the input.
    label String
    The alphanumeric label for this layer, which can be used in multiplexing different video and audio layers, or in naming the output file.
    level String
    The H.265 levels. Currently, the resource supports Level up to 6.2. The value can be auto, or a number that matches the H.265 profile. If not specified, the default is auto, which lets the encoder choose the Level that is appropriate for this layer.
    maxBitrate Integer
    The maximum bitrate (in bits per second), at which the VBV buffer should be assumed to refill. If not specified, defaults to the same value as bitrate.
    profile String
    The H.265 profile. Possible values are Auto, Main and Main10. Defaults to Auto.
    referenceFrames Integer
    The number of reference frames to be used when encoding this layer. If not specified, the encoder determines an appropriate number based on the encoder complexity setting.
    slices Integer
    The number of slices to be used when encoding this layer. If not specified, default is 1, which means that encoder will use a single slice for each frame.
    width String
    The width of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 50% means the output video has half as many pixels in width as the input.
    bitrate number
    The average bitrate in bits per second at which to encode the input video when generating this layer.
    adaptiveBFrameEnabled boolean
    Whether adaptive B-frames are used when encoding this layer. If not specified, the encoder will turn it on whenever the video profile permits its use. Default to true.
    bFrames number
    The number of B-frames to use when encoding this layer. If not specified, the encoder chooses an appropriate number based on the video profile and level.
    bufferWindow string
    Specifies the maximum amount of time that the encoder should buffer frames before encoding. The value should be in ISO 8601 format. The value should be in the range 0.1 to 100 seconds. Defaults to PT5S.
    crf number
    The value of CRF to be used when encoding this layer. This setting takes effect when rate_control_mode is set to CRF. The range of CRF value is between 0 and 51, where lower values would result in better quality, at the expense of higher file sizes. Higher values mean more compression, but at some point quality degradation will be noticed. Defaults to 28.
    frameRate string
    The frame rate (in frames per second) at which to encode this layer. The value can be in the form of M/N where M and N are integers (For example, 30000/1001), or in the form of a number (For example, 30, or 29.97). The encoder enforces constraints on allowed frame rates based on the profile and level. If it is not specified, the encoder will use the same frame rate as the input video.
    height string
    The height of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 50% means the output video has half as many pixels in height as the input.
    label string
    The alphanumeric label for this layer, which can be used in multiplexing different video and audio layers, or in naming the output file.
    level string
    The H.265 levels. Currently, the resource supports Level up to 6.2. The value can be auto, or a number that matches the H.265 profile. If not specified, the default is auto, which lets the encoder choose the Level that is appropriate for this layer.
    maxBitrate number
    The maximum bitrate (in bits per second), at which the VBV buffer should be assumed to refill. If not specified, defaults to the same value as bitrate.
    profile string
    The H.265 profile. Possible values are Auto, Main and Main10. Defaults to Auto.
    referenceFrames number
    The number of reference frames to be used when encoding this layer. If not specified, the encoder determines an appropriate number based on the encoder complexity setting.
    slices number
    The number of slices to be used when encoding this layer. If not specified, default is 1, which means that encoder will use a single slice for each frame.
    width string
    The width of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 50% means the output video has half as many pixels in width as the input.
    bitrate int
    The average bitrate in bits per second at which to encode the input video when generating this layer.
    adaptive_b_frame_enabled bool
    Whether adaptive B-frames are used when encoding this layer. If not specified, the encoder will turn it on whenever the video profile permits its use. Default to true.
    b_frames int
    The number of B-frames to use when encoding this layer. If not specified, the encoder chooses an appropriate number based on the video profile and level.
    buffer_window str
    Specifies the maximum amount of time that the encoder should buffer frames before encoding. The value should be in ISO 8601 format. The value should be in the range 0.1 to 100 seconds. Defaults to PT5S.
    crf float
    The value of CRF to be used when encoding this layer. This setting takes effect when rate_control_mode is set to CRF. The range of CRF value is between 0 and 51, where lower values would result in better quality, at the expense of higher file sizes. Higher values mean more compression, but at some point quality degradation will be noticed. Defaults to 28.
    frame_rate str
    The frame rate (in frames per second) at which to encode this layer. The value can be in the form of M/N where M and N are integers (For example, 30000/1001), or in the form of a number (For example, 30, or 29.97). The encoder enforces constraints on allowed frame rates based on the profile and level. If it is not specified, the encoder will use the same frame rate as the input video.
    height str
    The height of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 50% means the output video has half as many pixels in height as the input.
    label str
    The alphanumeric label for this layer, which can be used in multiplexing different video and audio layers, or in naming the output file.
    level str
    The H.265 levels. Currently, the resource supports Level up to 6.2. The value can be auto, or a number that matches the H.265 profile. If not specified, the default is auto, which lets the encoder choose the Level that is appropriate for this layer.
    max_bitrate int
    The maximum bitrate (in bits per second), at which the VBV buffer should be assumed to refill. If not specified, defaults to the same value as bitrate.
    profile str
    The H.265 profile. Possible values are Auto, Main and Main10. Defaults to Auto.
    reference_frames int
    The number of reference frames to be used when encoding this layer. If not specified, the encoder determines an appropriate number based on the encoder complexity setting.
    slices int
    The number of slices to be used when encoding this layer. If not specified, default is 1, which means that encoder will use a single slice for each frame.
    width str
    The width of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 50% means the output video has half as many pixels in width as the input.
    bitrate Number
    The average bitrate in bits per second at which to encode the input video when generating this layer.
    adaptiveBFrameEnabled Boolean
    Whether adaptive B-frames are used when encoding this layer. If not specified, the encoder will turn it on whenever the video profile permits its use. Default to true.
    bFrames Number
    The number of B-frames to use when encoding this layer. If not specified, the encoder chooses an appropriate number based on the video profile and level.
    bufferWindow String
    Specifies the maximum amount of time that the encoder should buffer frames before encoding. The value should be in ISO 8601 format. The value should be in the range 0.1 to 100 seconds. Defaults to PT5S.
    crf Number
    The value of CRF to be used when encoding this layer. This setting takes effect when rate_control_mode is set to CRF. The range of CRF value is between 0 and 51, where lower values would result in better quality, at the expense of higher file sizes. Higher values mean more compression, but at some point quality degradation will be noticed. Defaults to 28.
    frameRate String
    The frame rate (in frames per second) at which to encode this layer. The value can be in the form of M/N where M and N are integers (For example, 30000/1001), or in the form of a number (For example, 30, or 29.97). The encoder enforces constraints on allowed frame rates based on the profile and level. If it is not specified, the encoder will use the same frame rate as the input video.
    height String
    The height of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 50% means the output video has half as many pixels in height as the input.
    label String
    The alphanumeric label for this layer, which can be used in multiplexing different video and audio layers, or in naming the output file.
    level String
    The H.265 levels. Currently, the resource supports Level up to 6.2. The value can be auto, or a number that matches the H.265 profile. If not specified, the default is auto, which lets the encoder choose the Level that is appropriate for this layer.
    maxBitrate Number
    The maximum bitrate (in bits per second), at which the VBV buffer should be assumed to refill. If not specified, defaults to the same value as bitrate.
    profile String
    The H.265 profile. Possible values are Auto, Main and Main10. Defaults to Auto.
    referenceFrames Number
    The number of reference frames to be used when encoding this layer. If not specified, the encoder determines an appropriate number based on the encoder complexity setting.
    slices Number
    The number of slices to be used when encoding this layer. If not specified, default is 1, which means that encoder will use a single slice for each frame.
    width String
    The width of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 50% means the output video has half as many pixels in width as the input.

    TransformOutputCustomPresetCodecJpgImage, TransformOutputCustomPresetCodecJpgImageArgs

    Start string
    The position in the input video from where to start generating thumbnails. The value can be in ISO 8601 format (For example, PT05S to start at 5 seconds), or a frame count (For example, 10 to start at the 10th frame), or a relative value to stream duration (For example, 10% to start at 10% of stream duration). Also supports a macro {Best}, which tells the encoder to select the best thumbnail from the first few seconds of the video and will only produce one thumbnail, no matter what other settings are for step and range.
    KeyFrameInterval string
    The distance between two key frames. The value should be non-zero in the range 0.5 to 20 seconds, specified in ISO 8601 format. Note that this setting is ignored if sync_mode is set to Passthrough, where the KeyFrameInterval value will follow the input source setting. Defaults to PT2S.
    Label string
    Specifies the label for the codec. The label can be used to control muxing behavior.
    Layers List<TransformOutputCustomPresetCodecJpgImageLayer>
    One or more layer blocks as defined below.
    Range string
    The position relative to transform preset start time in the input video at which to stop generating thumbnails. The value can be in ISO 8601 format (For example, PT5M30S to stop at 5 minutes and 30 seconds from start time), or a frame count (For example, 300 to stop at the 300th frame from the frame at start time. If this value is 1, it means only producing one thumbnail at start time), or a relative value to the stream duration (For example, 50% to stop at half of stream duration from start time). The default value is 100%, which means to stop at the end of the stream.
    SpriteColumn int
    Sets the number of columns used in thumbnail sprite image. The number of rows are automatically calculated and a VTT file is generated with the coordinate mappings for each thumbnail in the sprite. Note: this value should be a positive integer and a proper value is recommended so that the output image resolution will not go beyond JPEG maximum pixel resolution limit 65535x65535.
    Step string
    The intervals at which thumbnails are generated. The value can be in ISO 8601 format (For example, PT05S for one image every 5 seconds), or a frame count (For example, 30 for one image every 30 frames), or a relative value to stream duration (For example, 10% for one image every 10% of stream duration). Note: Step value will affect the first generated thumbnail, which may not be exactly the one specified at transform preset start time. This is due to the encoder, which tries to select the best thumbnail between start time and Step position from start time as the first output. As the default value is 10%, it means if stream has long duration, the first generated thumbnail might be far away from the one specified at start time. Try to select reasonable value for Step if the first thumbnail is expected close to start time, or set Range value at 1 if only one thumbnail is needed at start time.
    StretchMode string
    The resizing mode, which indicates how the input video will be resized to fit the desired output resolution(s). Possible values are AutoFit, AutoSize or None. Default to AutoSize.
    SyncMode string
    Specifies the synchronization mode for the video. Possible values are Auto, Cfr, Passthrough or Vfr. Default to Auto.
    Start string
    The position in the input video from where to start generating thumbnails. The value can be in ISO 8601 format (For example, PT05S to start at 5 seconds), or a frame count (For example, 10 to start at the 10th frame), or a relative value to stream duration (For example, 10% to start at 10% of stream duration). Also supports a macro {Best}, which tells the encoder to select the best thumbnail from the first few seconds of the video and will only produce one thumbnail, no matter what other settings are for step and range.
    KeyFrameInterval string
    The distance between two key frames. The value should be non-zero in the range 0.5 to 20 seconds, specified in ISO 8601 format. Note that this setting is ignored if sync_mode is set to Passthrough, where the KeyFrameInterval value will follow the input source setting. Defaults to PT2S.
    Label string
    Specifies the label for the codec. The label can be used to control muxing behavior.
    Layers []TransformOutputCustomPresetCodecJpgImageLayer
    One or more layer blocks as defined below.
    Range string
    The position relative to transform preset start time in the input video at which to stop generating thumbnails. The value can be in ISO 8601 format (For example, PT5M30S to stop at 5 minutes and 30 seconds from start time), or a frame count (For example, 300 to stop at the 300th frame from the frame at start time. If this value is 1, it means only producing one thumbnail at start time), or a relative value to the stream duration (For example, 50% to stop at half of stream duration from start time). The default value is 100%, which means to stop at the end of the stream.
    SpriteColumn int
    Sets the number of columns used in thumbnail sprite image. The number of rows are automatically calculated and a VTT file is generated with the coordinate mappings for each thumbnail in the sprite. Note: this value should be a positive integer and a proper value is recommended so that the output image resolution will not go beyond JPEG maximum pixel resolution limit 65535x65535.
    Step string
    The intervals at which thumbnails are generated. The value can be in ISO 8601 format (For example, PT05S for one image every 5 seconds), or a frame count (For example, 30 for one image every 30 frames), or a relative value to stream duration (For example, 10% for one image every 10% of stream duration). Note: Step value will affect the first generated thumbnail, which may not be exactly the one specified at transform preset start time. This is due to the encoder, which tries to select the best thumbnail between start time and Step position from start time as the first output. As the default value is 10%, it means if stream has long duration, the first generated thumbnail might be far away from the one specified at start time. Try to select reasonable value for Step if the first thumbnail is expected close to start time, or set Range value at 1 if only one thumbnail is needed at start time.
    StretchMode string
    The resizing mode, which indicates how the input video will be resized to fit the desired output resolution(s). Possible values are AutoFit, AutoSize or None. Default to AutoSize.
    SyncMode string
    Specifies the synchronization mode for the video. Possible values are Auto, Cfr, Passthrough or Vfr. Default to Auto.
    start String
    The position in the input video from where to start generating thumbnails. The value can be in ISO 8601 format (For example, PT05S to start at 5 seconds), or a frame count (For example, 10 to start at the 10th frame), or a relative value to stream duration (For example, 10% to start at 10% of stream duration). Also supports a macro {Best}, which tells the encoder to select the best thumbnail from the first few seconds of the video and will only produce one thumbnail, no matter what other settings are for step and range.
    keyFrameInterval String
    The distance between two key frames. The value should be non-zero in the range 0.5 to 20 seconds, specified in ISO 8601 format. Note that this setting is ignored if sync_mode is set to Passthrough, where the KeyFrameInterval value will follow the input source setting. Defaults to PT2S.
    label String
    Specifies the label for the codec. The label can be used to control muxing behavior.
    layers List<TransformOutputCustomPresetCodecJpgImageLayer>
    One or more layer blocks as defined below.
    range String
    The position relative to transform preset start time in the input video at which to stop generating thumbnails. The value can be in ISO 8601 format (For example, PT5M30S to stop at 5 minutes and 30 seconds from start time), or a frame count (For example, 300 to stop at the 300th frame from the frame at start time. If this value is 1, it means only producing one thumbnail at start time), or a relative value to the stream duration (For example, 50% to stop at half of stream duration from start time). The default value is 100%, which means to stop at the end of the stream.
    spriteColumn Integer
    Sets the number of columns used in thumbnail sprite image. The number of rows are automatically calculated and a VTT file is generated with the coordinate mappings for each thumbnail in the sprite. Note: this value should be a positive integer and a proper value is recommended so that the output image resolution will not go beyond JPEG maximum pixel resolution limit 65535x65535.
    step String
    The intervals at which thumbnails are generated. The value can be in ISO 8601 format (For example, PT05S for one image every 5 seconds), or a frame count (For example, 30 for one image every 30 frames), or a relative value to stream duration (For example, 10% for one image every 10% of stream duration). Note: Step value will affect the first generated thumbnail, which may not be exactly the one specified at transform preset start time. This is due to the encoder, which tries to select the best thumbnail between start time and Step position from start time as the first output. As the default value is 10%, it means if stream has long duration, the first generated thumbnail might be far away from the one specified at start time. Try to select reasonable value for Step if the first thumbnail is expected close to start time, or set Range value at 1 if only one thumbnail is needed at start time.
    stretchMode String
    The resizing mode, which indicates how the input video will be resized to fit the desired output resolution(s). Possible values are AutoFit, AutoSize or None. Default to AutoSize.
    syncMode String
    Specifies the synchronization mode for the video. Possible values are Auto, Cfr, Passthrough or Vfr. Default to Auto.
    start string
    The position in the input video from where to start generating thumbnails. The value can be in ISO 8601 format (For example, PT05S to start at 5 seconds), or a frame count (For example, 10 to start at the 10th frame), or a relative value to stream duration (For example, 10% to start at 10% of stream duration). Also supports a macro {Best}, which tells the encoder to select the best thumbnail from the first few seconds of the video and will only produce one thumbnail, no matter what other settings are for step and range.
    keyFrameInterval string
    The distance between two key frames. The value should be non-zero in the range 0.5 to 20 seconds, specified in ISO 8601 format. Note that this setting is ignored if sync_mode is set to Passthrough, where the KeyFrameInterval value will follow the input source setting. Defaults to PT2S.
    label string
    Specifies the label for the codec. The label can be used to control muxing behavior.
    layers TransformOutputCustomPresetCodecJpgImageLayer[]
    One or more layer blocks as defined below.
    range string
    The position relative to transform preset start time in the input video at which to stop generating thumbnails. The value can be in ISO 8601 format (For example, PT5M30S to stop at 5 minutes and 30 seconds from start time), or a frame count (For example, 300 to stop at the 300th frame from the frame at start time. If this value is 1, it means only producing one thumbnail at start time), or a relative value to the stream duration (For example, 50% to stop at half of stream duration from start time). The default value is 100%, which means to stop at the end of the stream.
    spriteColumn number
    Sets the number of columns used in thumbnail sprite image. The number of rows are automatically calculated and a VTT file is generated with the coordinate mappings for each thumbnail in the sprite. Note: this value should be a positive integer and a proper value is recommended so that the output image resolution will not go beyond JPEG maximum pixel resolution limit 65535x65535.
    step string
    The intervals at which thumbnails are generated. The value can be in ISO 8601 format (For example, PT05S for one image every 5 seconds), or a frame count (For example, 30 for one image every 30 frames), or a relative value to stream duration (For example, 10% for one image every 10% of stream duration). Note: Step value will affect the first generated thumbnail, which may not be exactly the one specified at transform preset start time. This is due to the encoder, which tries to select the best thumbnail between start time and Step position from start time as the first output. As the default value is 10%, it means if stream has long duration, the first generated thumbnail might be far away from the one specified at start time. Try to select reasonable value for Step if the first thumbnail is expected close to start time, or set Range value at 1 if only one thumbnail is needed at start time.
    stretchMode string
    The resizing mode, which indicates how the input video will be resized to fit the desired output resolution(s). Possible values are AutoFit, AutoSize or None. Default to AutoSize.
    syncMode string
    Specifies the synchronization mode for the video. Possible values are Auto, Cfr, Passthrough or Vfr. Default to Auto.
    start str
    The position in the input video from where to start generating thumbnails. The value can be in ISO 8601 format (For example, PT05S to start at 5 seconds), or a frame count (For example, 10 to start at the 10th frame), or a relative value to stream duration (For example, 10% to start at 10% of stream duration). Also supports a macro {Best}, which tells the encoder to select the best thumbnail from the first few seconds of the video and will only produce one thumbnail, no matter what other settings are for step and range.
    key_frame_interval str
    The distance between two key frames. The value should be non-zero in the range 0.5 to 20 seconds, specified in ISO 8601 format. Note that this setting is ignored if sync_mode is set to Passthrough, where the KeyFrameInterval value will follow the input source setting. Defaults to PT2S.
    label str
    Specifies the label for the codec. The label can be used to control muxing behavior.
    layers Sequence[TransformOutputCustomPresetCodecJpgImageLayer]
    One or more layer blocks as defined below.
    range str
    The position relative to transform preset start time in the input video at which to stop generating thumbnails. The value can be in ISO 8601 format (For example, PT5M30S to stop at 5 minutes and 30 seconds from start time), or a frame count (For example, 300 to stop at the 300th frame from the frame at start time. If this value is 1, it means only producing one thumbnail at start time), or a relative value to the stream duration (For example, 50% to stop at half of stream duration from start time). The default value is 100%, which means to stop at the end of the stream.
    sprite_column int
    Sets the number of columns used in thumbnail sprite image. The number of rows are automatically calculated and a VTT file is generated with the coordinate mappings for each thumbnail in the sprite. Note: this value should be a positive integer and a proper value is recommended so that the output image resolution will not go beyond JPEG maximum pixel resolution limit 65535x65535.
    step str
    The intervals at which thumbnails are generated. The value can be in ISO 8601 format (For example, PT05S for one image every 5 seconds), or a frame count (For example, 30 for one image every 30 frames), or a relative value to stream duration (For example, 10% for one image every 10% of stream duration). Note: Step value will affect the first generated thumbnail, which may not be exactly the one specified at transform preset start time. This is due to the encoder, which tries to select the best thumbnail between start time and Step position from start time as the first output. As the default value is 10%, it means if stream has long duration, the first generated thumbnail might be far away from the one specified at start time. Try to select reasonable value for Step if the first thumbnail is expected close to start time, or set Range value at 1 if only one thumbnail is needed at start time.
    stretch_mode str
    The resizing mode, which indicates how the input video will be resized to fit the desired output resolution(s). Possible values are AutoFit, AutoSize or None. Defaults to AutoSize.
    sync_mode str
    Specifies the synchronization mode for the video. Possible values are Auto, Cfr, Passthrough or Vfr. Defaults to Auto.
    start String
    The position in the input video from where to start generating thumbnails. The value can be in ISO 8601 format (For example, PT05S to start at 5 seconds), or a frame count (For example, 10 to start at the 10th frame), or a relative value to stream duration (For example, 10% to start at 10% of stream duration). Also supports a macro {Best}, which tells the encoder to select the best thumbnail from the first few seconds of the video and will only produce one thumbnail, no matter what other settings are for step and range.
    keyFrameInterval String
    The distance between two key frames. The value should be non-zero in the range 0.5 to 20 seconds, specified in ISO 8601 format. Note that this setting is ignored if sync_mode is set to Passthrough, where the KeyFrameInterval value will follow the input source setting. Defaults to PT2S.
    label String
    Specifies the label for the codec. The label can be used to control muxing behavior.
    layers List<Property Map>
    One or more layer blocks as defined below.
    range String
    The position relative to transform preset start time in the input video at which to stop generating thumbnails. The value can be in ISO 8601 format (For example, PT5M30S to stop at 5 minutes and 30 seconds from start time), or a frame count (For example, 300 to stop at the 300th frame from the frame at start time. If this value is 1, it means only producing one thumbnail at start time), or a relative value to the stream duration (For example, 50% to stop at half of stream duration from start time). The default value is 100%, which means to stop at the end of the stream.
    spriteColumn Number
    Sets the number of columns used in thumbnail sprite image. The number of rows are automatically calculated and a VTT file is generated with the coordinate mappings for each thumbnail in the sprite. Note: this value should be a positive integer and a proper value is recommended so that the output image resolution will not go beyond JPEG maximum pixel resolution limit 65535x65535.
    step String
    The intervals at which thumbnails are generated. The value can be in ISO 8601 format (For example, PT05S for one image every 5 seconds), or a frame count (For example, 30 for one image every 30 frames), or a relative value to stream duration (For example, 10% for one image every 10% of stream duration). Note: Step value will affect the first generated thumbnail, which may not be exactly the one specified at transform preset start time. This is due to the encoder, which tries to select the best thumbnail between start time and Step position from start time as the first output. As the default value is 10%, it means if stream has long duration, the first generated thumbnail might be far away from the one specified at start time. Try to select reasonable value for Step if the first thumbnail is expected close to start time, or set Range value at 1 if only one thumbnail is needed at start time.
    stretchMode String
    The resizing mode, which indicates how the input video will be resized to fit the desired output resolution(s). Possible values are AutoFit, AutoSize or None. Defaults to AutoSize.
    syncMode String
    Specifies the synchronization mode for the video. Possible values are Auto, Cfr, Passthrough or Vfr. Defaults to Auto.

    TransformOutputCustomPresetCodecJpgImageLayer, TransformOutputCustomPresetCodecJpgImageLayerArgs

    Height string
    The height of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 50% means the output video has half as many pixels in height as the input.
    Label string
    The alphanumeric label for this layer, which can be used in multiplexing different video and audio layers, or in naming the output file.
    Quality int
    The compression quality of the JPEG output. Range is from 0 to 100 and the default is 70.
    Width string
    The width of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 50% means the output video has half as many pixels in width as the input.
    Height string
    The height of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 50% means the output video has half as many pixels in height as the input.
    Label string
    The alphanumeric label for this layer, which can be used in multiplexing different video and audio layers, or in naming the output file.
    Quality int
    The compression quality of the JPEG output. Range is from 0 to 100 and the default is 70.
    Width string
    The width of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 50% means the output video has half as many pixels in width as the input.
    height String
    The height of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 50% means the output video has half as many pixels in height as the input.
    label String
    The alphanumeric label for this layer, which can be used in multiplexing different video and audio layers, or in naming the output file.
    quality Integer
    The compression quality of the JPEG output. Range is from 0 to 100 and the default is 70.
    width String
    The width of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 50% means the output video has half as many pixels in width as the input.
    height string
    The height of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 50% means the output video has half as many pixels in height as the input.
    label string
    The alphanumeric label for this layer, which can be used in multiplexing different video and audio layers, or in naming the output file.
    quality number
    The compression quality of the JPEG output. Range is from 0 to 100 and the default is 70.
    width string
    The width of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 50% means the output video has half as many pixels in width as the input.
    height str
    The height of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 50% means the output video has half as many pixels in height as the input.
    label str
    The alphanumeric label for this layer, which can be used in multiplexing different video and audio layers, or in naming the output file.
    quality int
    The compression quality of the JPEG output. Range is from 0 to 100 and the default is 70.
    width str
    The width of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 50% means the output video has half as many pixels in width as the input.
    height String
    The height of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 50% means the output video has half as many pixels in height as the input.
    label String
    The alphanumeric label for this layer, which can be used in multiplexing different video and audio layers, or in naming the output file.
    quality Number
    The compression quality of the JPEG output. Range is from 0 to 100 and the default is 70.
    width String
    The width of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 50% means the output video has half as many pixels in width as the input.

    TransformOutputCustomPresetCodecPngImage, TransformOutputCustomPresetCodecPngImageArgs

    Start string
    The position in the input video from where to start generating thumbnails. The value can be in ISO 8601 format (For example, PT05S to start at 5 seconds), or a frame count (For example, 10 to start at the 10th frame), or a relative value to stream duration (For example, 10% to start at 10% of stream duration). Also supports a macro {Best}, which tells the encoder to select the best thumbnail from the first few seconds of the video and will only produce one thumbnail, no matter what other settings are for step and range.
    KeyFrameInterval string
    The distance between two key frames. The value should be non-zero in the range 0.5 to 20 seconds, specified in ISO 8601 format. Note that this setting is ignored if sync_mode is set to Passthrough, where the KeyFrameInterval value will follow the input source setting. Defaults to PT2S.
    Label string
    Specifies the label for the codec. The label can be used to control muxing behavior.
    Layers List<TransformOutputCustomPresetCodecPngImageLayer>
    One or more layer blocks as defined below.
    Range string
    The position relative to transform preset start time in the input video at which to stop generating thumbnails. The value can be in ISO 8601 format (For example, PT5M30S to stop at 5 minutes and 30 seconds from start time), or a frame count (For example, 300 to stop at the 300th frame from the frame at start time. If this value is 1, it means only producing one thumbnail at start time), or a relative value to the stream duration (For example, 50% to stop at half of stream duration from start time). The default value is 100%, which means to stop at the end of the stream.
    Step string
    The intervals at which thumbnails are generated. The value can be in ISO 8601 format (For example, PT05S for one image every 5 seconds), or a frame count (For example, 30 for one image every 30 frames), or a relative value to stream duration (For example, 10% for one image every 10% of stream duration). Note: Step value will affect the first generated thumbnail, which may not be exactly the one specified at transform preset start time. This is due to the encoder, which tries to select the best thumbnail between start time and Step position from start time as the first output. As the default value is 10%, it means if stream has long duration, the first generated thumbnail might be far away from the one specified at start time. Try to select reasonable value for Step if the first thumbnail is expected close to start time, or set Range value at 1 if only one thumbnail is needed at start time.
    StretchMode string
    The resizing mode, which indicates how the input video will be resized to fit the desired output resolution(s). Possible values are AutoFit, AutoSize or None. Defaults to AutoSize.
    SyncMode string
    Specifies the synchronization mode for the video. Possible values are Auto, Cfr, Passthrough or Vfr. Defaults to Auto.
    Start string
    The position in the input video from where to start generating thumbnails. The value can be in ISO 8601 format (For example, PT05S to start at 5 seconds), or a frame count (For example, 10 to start at the 10th frame), or a relative value to stream duration (For example, 10% to start at 10% of stream duration). Also supports a macro {Best}, which tells the encoder to select the best thumbnail from the first few seconds of the video and will only produce one thumbnail, no matter what other settings are for step and range.
    KeyFrameInterval string
    The distance between two key frames. The value should be non-zero in the range 0.5 to 20 seconds, specified in ISO 8601 format. Note that this setting is ignored if sync_mode is set to Passthrough, where the KeyFrameInterval value will follow the input source setting. Defaults to PT2S.
    Label string
    Specifies the label for the codec. The label can be used to control muxing behavior.
    Layers []TransformOutputCustomPresetCodecPngImageLayer
    One or more layer blocks as defined below.
    Range string
    The position relative to transform preset start time in the input video at which to stop generating thumbnails. The value can be in ISO 8601 format (For example, PT5M30S to stop at 5 minutes and 30 seconds from start time), or a frame count (For example, 300 to stop at the 300th frame from the frame at start time. If this value is 1, it means only producing one thumbnail at start time), or a relative value to the stream duration (For example, 50% to stop at half of stream duration from start time). The default value is 100%, which means to stop at the end of the stream.
    Step string
    The intervals at which thumbnails are generated. The value can be in ISO 8601 format (For example, PT05S for one image every 5 seconds), or a frame count (For example, 30 for one image every 30 frames), or a relative value to stream duration (For example, 10% for one image every 10% of stream duration). Note: Step value will affect the first generated thumbnail, which may not be exactly the one specified at transform preset start time. This is due to the encoder, which tries to select the best thumbnail between start time and Step position from start time as the first output. As the default value is 10%, it means if stream has long duration, the first generated thumbnail might be far away from the one specified at start time. Try to select reasonable value for Step if the first thumbnail is expected close to start time, or set Range value at 1 if only one thumbnail is needed at start time.
    StretchMode string
    The resizing mode, which indicates how the input video will be resized to fit the desired output resolution(s). Possible values are AutoFit, AutoSize or None. Defaults to AutoSize.
    SyncMode string
    Specifies the synchronization mode for the video. Possible values are Auto, Cfr, Passthrough or Vfr. Defaults to Auto.
    start String
    The position in the input video from where to start generating thumbnails. The value can be in ISO 8601 format (For example, PT05S to start at 5 seconds), or a frame count (For example, 10 to start at the 10th frame), or a relative value to stream duration (For example, 10% to start at 10% of stream duration). Also supports a macro {Best}, which tells the encoder to select the best thumbnail from the first few seconds of the video and will only produce one thumbnail, no matter what other settings are for step and range.
    keyFrameInterval String
    The distance between two key frames. The value should be non-zero in the range 0.5 to 20 seconds, specified in ISO 8601 format. Note that this setting is ignored if sync_mode is set to Passthrough, where the KeyFrameInterval value will follow the input source setting. Defaults to PT2S.
    label String
    Specifies the label for the codec. The label can be used to control muxing behavior.
    layers List<TransformOutputCustomPresetCodecPngImageLayer>
    One or more layer blocks as defined below.
    range String
    The position relative to transform preset start time in the input video at which to stop generating thumbnails. The value can be in ISO 8601 format (For example, PT5M30S to stop at 5 minutes and 30 seconds from start time), or a frame count (For example, 300 to stop at the 300th frame from the frame at start time. If this value is 1, it means only producing one thumbnail at start time), or a relative value to the stream duration (For example, 50% to stop at half of stream duration from start time). The default value is 100%, which means to stop at the end of the stream.
    step String
    The intervals at which thumbnails are generated. The value can be in ISO 8601 format (For example, PT05S for one image every 5 seconds), or a frame count (For example, 30 for one image every 30 frames), or a relative value to stream duration (For example, 10% for one image every 10% of stream duration). Note: Step value will affect the first generated thumbnail, which may not be exactly the one specified at transform preset start time. This is due to the encoder, which tries to select the best thumbnail between start time and Step position from start time as the first output. As the default value is 10%, it means if stream has long duration, the first generated thumbnail might be far away from the one specified at start time. Try to select reasonable value for Step if the first thumbnail is expected close to start time, or set Range value at 1 if only one thumbnail is needed at start time.
    stretchMode String
    The resizing mode, which indicates how the input video will be resized to fit the desired output resolution(s). Possible values are AutoFit, AutoSize or None. Defaults to AutoSize.
    syncMode String
    Specifies the synchronization mode for the video. Possible values are Auto, Cfr, Passthrough or Vfr. Defaults to Auto.
    start string
    The position in the input video from where to start generating thumbnails. The value can be in ISO 8601 format (For example, PT05S to start at 5 seconds), or a frame count (For example, 10 to start at the 10th frame), or a relative value to stream duration (For example, 10% to start at 10% of stream duration). Also supports a macro {Best}, which tells the encoder to select the best thumbnail from the first few seconds of the video and will only produce one thumbnail, no matter what other settings are for step and range.
    keyFrameInterval string
    The distance between two key frames. The value should be non-zero in the range 0.5 to 20 seconds, specified in ISO 8601 format. Note that this setting is ignored if sync_mode is set to Passthrough, where the KeyFrameInterval value will follow the input source setting. Defaults to PT2S.
    label string
    Specifies the label for the codec. The label can be used to control muxing behavior.
    layers TransformOutputCustomPresetCodecPngImageLayer[]
    One or more layer blocks as defined below.
    range string
    The position relative to transform preset start time in the input video at which to stop generating thumbnails. The value can be in ISO 8601 format (For example, PT5M30S to stop at 5 minutes and 30 seconds from start time), or a frame count (For example, 300 to stop at the 300th frame from the frame at start time. If this value is 1, it means only producing one thumbnail at start time), or a relative value to the stream duration (For example, 50% to stop at half of stream duration from start time). The default value is 100%, which means to stop at the end of the stream.
    step string
    The intervals at which thumbnails are generated. The value can be in ISO 8601 format (For example, PT05S for one image every 5 seconds), or a frame count (For example, 30 for one image every 30 frames), or a relative value to stream duration (For example, 10% for one image every 10% of stream duration). Note: Step value will affect the first generated thumbnail, which may not be exactly the one specified at transform preset start time. This is due to the encoder, which tries to select the best thumbnail between start time and Step position from start time as the first output. As the default value is 10%, it means if stream has long duration, the first generated thumbnail might be far away from the one specified at start time. Try to select reasonable value for Step if the first thumbnail is expected close to start time, or set Range value at 1 if only one thumbnail is needed at start time.
    stretchMode string
    The resizing mode, which indicates how the input video will be resized to fit the desired output resolution(s). Possible values are AutoFit, AutoSize or None. Defaults to AutoSize.
    syncMode string
    Specifies the synchronization mode for the video. Possible values are Auto, Cfr, Passthrough or Vfr. Defaults to Auto.
    start str
    The position in the input video from where to start generating thumbnails. The value can be in ISO 8601 format (For example, PT05S to start at 5 seconds), or a frame count (For example, 10 to start at the 10th frame), or a relative value to stream duration (For example, 10% to start at 10% of stream duration). Also supports a macro {Best}, which tells the encoder to select the best thumbnail from the first few seconds of the video and will only produce one thumbnail, no matter what other settings are for step and range.
    key_frame_interval str
    The distance between two key frames. The value should be non-zero in the range 0.5 to 20 seconds, specified in ISO 8601 format. Note that this setting is ignored if sync_mode is set to Passthrough, where the KeyFrameInterval value will follow the input source setting. Defaults to PT2S.
    label str
    Specifies the label for the codec. The label can be used to control muxing behavior.
    layers Sequence[TransformOutputCustomPresetCodecPngImageLayer]
    One or more layer blocks as defined below.
    range str
    The position relative to transform preset start time in the input video at which to stop generating thumbnails. The value can be in ISO 8601 format (For example, PT5M30S to stop at 5 minutes and 30 seconds from start time), or a frame count (For example, 300 to stop at the 300th frame from the frame at start time. If this value is 1, it means only producing one thumbnail at start time), or a relative value to the stream duration (For example, 50% to stop at half of stream duration from start time). The default value is 100%, which means to stop at the end of the stream.
    step str
    The intervals at which thumbnails are generated. The value can be in ISO 8601 format (For example, PT05S for one image every 5 seconds), or a frame count (For example, 30 for one image every 30 frames), or a relative value to stream duration (For example, 10% for one image every 10% of stream duration). Note: Step value will affect the first generated thumbnail, which may not be exactly the one specified at transform preset start time. This is due to the encoder, which tries to select the best thumbnail between start time and Step position from start time as the first output. As the default value is 10%, it means if stream has long duration, the first generated thumbnail might be far away from the one specified at start time. Try to select reasonable value for Step if the first thumbnail is expected close to start time, or set Range value at 1 if only one thumbnail is needed at start time.
    stretch_mode str
    The resizing mode, which indicates how the input video will be resized to fit the desired output resolution(s). Possible values are AutoFit, AutoSize or None. Defaults to AutoSize.
    sync_mode str
    Specifies the synchronization mode for the video. Possible values are Auto, Cfr, Passthrough or Vfr. Defaults to Auto.
    start String
    The position in the input video from where to start generating thumbnails. The value can be in ISO 8601 format (For example, PT05S to start at 5 seconds), or a frame count (For example, 10 to start at the 10th frame), or a relative value to stream duration (For example, 10% to start at 10% of stream duration). Also supports a macro {Best}, which tells the encoder to select the best thumbnail from the first few seconds of the video and will only produce one thumbnail, no matter what other settings are for step and range.
    keyFrameInterval String
    The distance between two key frames. The value should be non-zero in the range 0.5 to 20 seconds, specified in ISO 8601 format. Note that this setting is ignored if sync_mode is set to Passthrough, where the KeyFrameInterval value will follow the input source setting. Defaults to PT2S.
    label String
    Specifies the label for the codec. The label can be used to control muxing behavior.
    layers List<Property Map>
    One or more layer blocks as defined below.
    range String
    The position relative to transform preset start time in the input video at which to stop generating thumbnails. The value can be in ISO 8601 format (For example, PT5M30S to stop at 5 minutes and 30 seconds from start time), or a frame count (For example, 300 to stop at the 300th frame from the frame at start time. If this value is 1, it means only producing one thumbnail at start time), or a relative value to the stream duration (For example, 50% to stop at half of stream duration from start time). The default value is 100%, which means to stop at the end of the stream.
    step String
    The intervals at which thumbnails are generated. The value can be in ISO 8601 format (For example, PT05S for one image every 5 seconds), or a frame count (For example, 30 for one image every 30 frames), or a relative value to stream duration (For example, 10% for one image every 10% of stream duration). Note: Step value will affect the first generated thumbnail, which may not be exactly the one specified at transform preset start time. This is due to the encoder, which tries to select the best thumbnail between start time and Step position from start time as the first output. As the default value is 10%, it means if stream has long duration, the first generated thumbnail might be far away from the one specified at start time. Try to select reasonable value for Step if the first thumbnail is expected close to start time, or set Range value at 1 if only one thumbnail is needed at start time.
    stretchMode String
    The resizing mode, which indicates how the input video will be resized to fit the desired output resolution(s). Possible values are AutoFit, AutoSize or None. Defaults to AutoSize.
    syncMode String
    Specifies the synchronization mode for the video. Possible values are Auto, Cfr, Passthrough or Vfr. Defaults to Auto.

    TransformOutputCustomPresetCodecPngImageLayer, TransformOutputCustomPresetCodecPngImageLayerArgs

    Height string
    The height of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 50% means the output video has half as many pixels in height as the input.
    Label string
    The alphanumeric label for this layer, which can be used in multiplexing different video and audio layers, or in naming the output file.
    Width string
    The width of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 50% means the output video has half as many pixels in width as the input.
    Height string
    The height of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 50% means the output video has half as many pixels in height as the input.
    Label string
    The alphanumeric label for this layer, which can be used in multiplexing different video and audio layers, or in naming the output file.
    Width string
    The width of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 50% means the output video has half as many pixels in width as the input.
    height String
    The height of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 50% means the output video has half as many pixels in height as the input.
    label String
    The alphanumeric label for this layer, which can be used in multiplexing different video and audio layers, or in naming the output file.
    width String
    The width of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 50% means the output video has half as many pixels in width as the input.
    height string
    The height of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 50% means the output video has half as many pixels in height as the input.
    label string
    The alphanumeric label for this layer, which can be used in multiplexing different video and audio layers, or in naming the output file.
    width string
    The width of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 50% means the output video has half as many pixels in width as the input.
    height str
    The height of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 50% means the output video has half as many pixels in height as the input.
    label str
    The alphanumeric label for this layer, which can be used in multiplexing different video and audio layers, or in naming the output file.
    width str
    The width of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 50% means the output video has half as many pixels in width as the input.
    height String
    The height of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 50% means the output video has half as many pixels in height as the input.
    label String
    The alphanumeric label for this layer, which can be used in multiplexing different video and audio layers, or in naming the output file.
    width String
    The width of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 50% means the output video has half as many pixels in width as the input.

    TransformOutputCustomPresetFilter, TransformOutputCustomPresetFilterArgs

    CropRectangle TransformOutputCustomPresetFilterCropRectangle
    A crop_rectangle block as defined above.
    Deinterlace TransformOutputCustomPresetFilterDeinterlace
    A deinterlace block as defined below.
    FadeIn TransformOutputCustomPresetFilterFadeIn
    A fade_in block as defined above.
    FadeOut TransformOutputCustomPresetFilterFadeOut
    A fade_out block as defined above.
    Overlays List<TransformOutputCustomPresetFilterOverlay>
    One or more overlay blocks as defined below.
    Rotation string
    The rotation to be applied to the input video before it is encoded. Possible values are Auto, None, Rotate90, Rotate180, Rotate270, or Rotate0. Defaults to Auto.
    CropRectangle TransformOutputCustomPresetFilterCropRectangle
    A crop_rectangle block as defined above.
    Deinterlace TransformOutputCustomPresetFilterDeinterlace
    A deinterlace block as defined below.
    FadeIn TransformOutputCustomPresetFilterFadeIn
    A fade_in block as defined above.
    FadeOut TransformOutputCustomPresetFilterFadeOut
    A fade_out block as defined above.
    Overlays []TransformOutputCustomPresetFilterOverlay
    One or more overlay blocks as defined below.
    Rotation string
    The rotation to be applied to the input video before it is encoded. Possible values are Auto, None, Rotate90, Rotate180, Rotate270, or Rotate0. Defaults to Auto.
    cropRectangle TransformOutputCustomPresetFilterCropRectangle
    A crop_rectangle block as defined above.
    deinterlace TransformOutputCustomPresetFilterDeinterlace
    A deinterlace block as defined below.
    fadeIn TransformOutputCustomPresetFilterFadeIn
    A fade_in block as defined above.
    fadeOut TransformOutputCustomPresetFilterFadeOut
    A fade_out block as defined above.
    overlays List<TransformOutputCustomPresetFilterOverlay>
    One or more overlay blocks as defined below.
    rotation String
    The rotation to be applied to the input video before it is encoded. Possible values are Auto, None, Rotate90, Rotate180, Rotate270, or Rotate0. Defaults to Auto.
    cropRectangle TransformOutputCustomPresetFilterCropRectangle
    A crop_rectangle block as defined above.
    deinterlace TransformOutputCustomPresetFilterDeinterlace
    A deinterlace block as defined below.
    fadeIn TransformOutputCustomPresetFilterFadeIn
    A fade_in block as defined above.
    fadeOut TransformOutputCustomPresetFilterFadeOut
    A fade_out block as defined above.
    overlays TransformOutputCustomPresetFilterOverlay[]
    One or more overlay blocks as defined below.
    rotation string
    The rotation to be applied to the input video before it is encoded. Possible values are Auto, None, Rotate90, Rotate180, Rotate270, or Rotate0. Defaults to Auto.
    crop_rectangle TransformOutputCustomPresetFilterCropRectangle
    A crop_rectangle block as defined above.
    deinterlace TransformOutputCustomPresetFilterDeinterlace
    A deinterlace block as defined below.
    fade_in TransformOutputCustomPresetFilterFadeIn
    A fade_in block as defined above.
    fade_out TransformOutputCustomPresetFilterFadeOut
    A fade_out block as defined above.
    overlays Sequence[TransformOutputCustomPresetFilterOverlay]
    One or more overlay blocks as defined below.
    rotation str
    The rotation to be applied to the input video before it is encoded. Possible values are Auto, None, Rotate90, Rotate180, Rotate270, or Rotate0. Defaults to Auto.
    cropRectangle Property Map
    A crop_rectangle block as defined above.
    deinterlace Property Map
    A deinterlace block as defined below.
    fadeIn Property Map
    A fade_in block as defined above.
    fadeOut Property Map
    A fade_out block as defined above.
    overlays List<Property Map>
    One or more overlay blocks as defined below.
    rotation String
    The rotation to be applied to the input video before it is encoded. Possible values are Auto, None, Rotate90, Rotate180, Rotate270, or Rotate0. Defaults to Auto.

    TransformOutputCustomPresetFilterCropRectangle, TransformOutputCustomPresetFilterCropRectangleArgs

    Height string
    The height of the rectangular region in pixels. This can be an absolute pixel value (e.g. 100), or relative to the size of the video (for example, 50%).
    Left string
    The number of pixels from the left-margin. This can be an absolute pixel value (e.g. 100), or relative to the size of the video (for example, 50%).
    Top string
    The number of pixels from the top-margin. This can be an absolute pixel value (e.g. 100), or relative to the size of the video (for example, 50%).
    Width string
    The width of the rectangular region in pixels. This can be an absolute pixel value (e.g. 100), or relative to the size of the video (for example, 50%).
    Height string
    The height of the rectangular region in pixels. This can be an absolute pixel value (e.g. 100), or relative to the size of the video (for example, 50%).
    Left string
    The number of pixels from the left-margin. This can be an absolute pixel value (e.g. 100), or relative to the size of the video (for example, 50%).
    Top string
    The number of pixels from the top-margin. This can be an absolute pixel value (e.g. 100), or relative to the size of the video (for example, 50%).
    Width string
    The width of the rectangular region in pixels. This can be an absolute pixel value (e.g. 100), or relative to the size of the video (for example, 50%).
    height String
    The height of the rectangular region in pixels. This can be an absolute pixel value (e.g. 100), or relative to the size of the video (for example, 50%).
    left String
    The number of pixels from the left-margin. This can be an absolute pixel value (e.g. 100), or relative to the size of the video (for example, 50%).
    top String
    The number of pixels from the top-margin. This can be an absolute pixel value (e.g. 100), or relative to the size of the video (for example, 50%).
    width String
    The width of the rectangular region in pixels. This can be an absolute pixel value (e.g. 100), or relative to the size of the video (for example, 50%).
    height string
    The height of the rectangular region in pixels. This can be an absolute pixel value (e.g. 100), or relative to the size of the video (for example, 50%).
    left string
    The number of pixels from the left-margin. This can be an absolute pixel value (e.g. 100), or relative to the size of the video (for example, 50%).
    top string
    The number of pixels from the top-margin. This can be an absolute pixel value (e.g. 100), or relative to the size of the video (for example, 50%).
    width string
    The width of the rectangular region in pixels. This can be an absolute pixel value (e.g. 100), or relative to the size of the video (for example, 50%).
    height str
    The height of the rectangular region in pixels. This can be an absolute pixel value (e.g. 100), or relative to the size of the video (for example, 50%).
    left str
    The number of pixels from the left-margin. This can be an absolute pixel value (e.g. 100), or relative to the size of the video (for example, 50%).
    top str
    The number of pixels from the top-margin. This can be an absolute pixel value (e.g. 100), or relative to the size of the video (for example, 50%).
    width str
    The width of the rectangular region in pixels. This can be an absolute pixel value (e.g. 100), or relative to the size of the video (for example, 50%).
    height String
    The height of the rectangular region in pixels. This can be an absolute pixel value (e.g. 100), or relative to the size of the video (for example, 50%).
    left String
    The number of pixels from the left-margin. This can be an absolute pixel value (e.g. 100), or relative to the size of the video (for example, 50%).
    top String
    The number of pixels from the top-margin. This can be an absolute pixel value (e.g. 100), or relative to the size of the video (for example, 50%).
    width String
    The width of the rectangular region in pixels. This can be an absolute pixel value (e.g. 100), or relative to the size of the video (for example, 50%).

    TransformOutputCustomPresetFilterDeinterlace, TransformOutputCustomPresetFilterDeinterlaceArgs

    Mode string
    The deinterlacing mode. Possible values are AutoPixelAdaptive or Off. Defaults to AutoPixelAdaptive.
    Parity string
    The field parity to use for deinterlacing. Possible values are Auto, TopFieldFirst or BottomFieldFirst. Defaults to Auto.
    Mode string
    The deinterlacing mode. Possible values are AutoPixelAdaptive or Off. Defaults to AutoPixelAdaptive.
    Parity string
    The field parity to use for deinterlacing. Possible values are Auto, TopFieldFirst or BottomFieldFirst. Defaults to Auto.
    mode String
    The deinterlacing mode. Possible values are AutoPixelAdaptive or Off. Defaults to AutoPixelAdaptive.
    parity String
    The field parity to use for deinterlacing. Possible values are Auto, TopFieldFirst or BottomFieldFirst. Defaults to Auto.
    mode string
    The deinterlacing mode. Possible values are AutoPixelAdaptive or Off. Defaults to AutoPixelAdaptive.
    parity string
    The field parity to use for deinterlacing. Possible values are Auto, TopFieldFirst or BottomFieldFirst. Defaults to Auto.
    mode str
    The deinterlacing mode. Possible values are AutoPixelAdaptive or Off. Defaults to AutoPixelAdaptive.
    parity str
    The field parity to use for deinterlacing. Possible values are Auto, TopFieldFirst or BottomFieldFirst. Defaults to Auto.
    mode String
    The deinterlacing mode. Possible values are AutoPixelAdaptive or Off. Defaults to AutoPixelAdaptive.
    parity String
    The field parity to use for deinterlacing. Possible values are Auto, TopFieldFirst or BottomFieldFirst. Defaults to Auto.

    TransformOutputCustomPresetFilterFadeIn, TransformOutputCustomPresetFilterFadeInArgs

    Duration string
    The duration of the fade effect in the video. The value can be in ISO 8601 format (For example, PT05S to fade In/Out a color during 5 seconds), or a frame count (For example, 10 to fade 10 frames from the start time), or a relative value to stream duration (For example, 10% to fade 10% of stream duration).
    FadeColor string
    The color for the fade in/out. It can be one of the CSS Level 1 colors or an RGB/hex value, e.g. rgb(255,0,0), 0xFF0000 or #FF0000.
    Start string
    The position in the input video from where to start fade. The value can be in ISO 8601 format (For example, PT05S to start at 5 seconds), or a frame count (For example, 10 to start at the 10th frame), or a relative value to stream duration (For example, 10% to start at 10% of stream duration). Defaults to 0.
    Duration string
    The duration of the fade effect in the video. The value can be in ISO 8601 format (For example, PT05S to fade In/Out a color during 5 seconds), or a frame count (For example, 10 to fade 10 frames from the start time), or a relative value to stream duration (For example, 10% to fade 10% of stream duration).
    FadeColor string
    The color for the fade in/out. It can be one of the CSS Level 1 colors or an RGB/hex value, e.g. rgb(255,0,0), 0xFF0000 or #FF0000.
    Start string
    The position in the input video from where to start fade. The value can be in ISO 8601 format (For example, PT05S to start at 5 seconds), or a frame count (For example, 10 to start at the 10th frame), or a relative value to stream duration (For example, 10% to start at 10% of stream duration). Defaults to 0.
    duration String
    The duration of the fade effect in the video. The value can be in ISO 8601 format (For example, PT05S to fade In/Out a color during 5 seconds), or a frame count (For example, 10 to fade 10 frames from the start time), or a relative value to stream duration (For example, 10% to fade 10% of stream duration).
    fadeColor String
    The color for the fade in/out. It can be one of the CSS Level 1 colors or an RGB/hex value, e.g. rgb(255,0,0), 0xFF0000 or #FF0000.
    start String
    The position in the input video from where to start fade. The value can be in ISO 8601 format (For example, PT05S to start at 5 seconds), or a frame count (For example, 10 to start at the 10th frame), or a relative value to stream duration (For example, 10% to start at 10% of stream duration). Defaults to 0.
    duration string
    The duration of the fade effect in the video. The value can be in ISO 8601 format (For example, PT05S to fade In/Out a color during 5 seconds), or a frame count (For example, 10 to fade 10 frames from the start time), or a relative value to stream duration (For example, 10% to fade 10% of stream duration).
    fadeColor string
    The color for the fade in/out. It can be one of the CSS Level 1 colors or an RGB/hex value, e.g. rgb(255,0,0), 0xFF0000 or #FF0000.
    start string
    The position in the input video from where to start fade. The value can be in ISO 8601 format (For example, PT05S to start at 5 seconds), or a frame count (For example, 10 to start at the 10th frame), or a relative value to stream duration (For example, 10% to start at 10% of stream duration). Defaults to 0.
    duration str
    The duration of the fade effect in the video. The value can be in ISO 8601 format (For example, PT05S to fade In/Out a color during 5 seconds), or a frame count (For example, 10 to fade 10 frames from the start time), or a relative value to stream duration (For example, 10% to fade 10% of stream duration).
    fade_color str
    The color for the fade in/out. It can be one of the CSS Level 1 colors or an RGB/hex value, e.g. rgb(255,0,0), 0xFF0000 or #FF0000.
    start str
    The position in the input video from where to start fade. The value can be in ISO 8601 format (For example, PT05S to start at 5 seconds), or a frame count (For example, 10 to start at the 10th frame), or a relative value to stream duration (For example, 10% to start at 10% of stream duration). Defaults to 0.
    duration String
    The duration of the fade effect in the video. The value can be in ISO 8601 format (For example, PT05S to fade In/Out a color during 5 seconds), or a frame count (For example, 10 to fade 10 frames from the start time), or a relative value to stream duration (For example, 10% to fade 10% of stream duration).
    fadeColor String
    The color for the fade in/out. It can be one of the CSS Level 1 colors or an RGB/hex value, e.g. rgb(255,0,0), 0xFF0000 or #FF0000.
    start String
    The position in the input video from where to start fade. The value can be in ISO 8601 format (For example, PT05S to start at 5 seconds), or a frame count (For example, 10 to start at the 10th frame), or a relative value to stream duration (For example, 10% to start at 10% of stream duration). Defaults to 0.

    TransformOutputCustomPresetFilterFadeOut, TransformOutputCustomPresetFilterFadeOutArgs

    Duration string
    The duration of the fade effect in the video. The value can be in ISO 8601 format (For example, PT05S to fade In/Out a color during 5 seconds), or a frame count (For example, 10 to fade 10 frames from the start time), or a relative value to stream duration (For example, 10% to fade 10% of stream duration).
    FadeColor string
    The color for the fade in/out. It can be one of the CSS Level 1 colors or an RGB/hex value, e.g. rgb(255,0,0), 0xFF0000 or #FF0000.
    Start string
    The position in the input video from where to start fade. The value can be in ISO 8601 format (For example, PT05S to start at 5 seconds), or a frame count (For example, 10 to start at the 10th frame), or a relative value to stream duration (For example, 10% to start at 10% of stream duration). Defaults to 0.
    Duration string
    The duration of the fade effect in the video. The value can be in ISO 8601 format (For example, PT05S to fade In/Out a color during 5 seconds), or a frame count (For example, 10 to fade 10 frames from the start time), or a relative value to stream duration (For example, 10% to fade 10% of stream duration).
    FadeColor string
    The color for the fade in/out. It can be one of the CSS Level 1 colors or an RGB/hex value, e.g. rgb(255,0,0), 0xFF0000 or #FF0000.
    Start string
    The position in the input video from where to start fade. The value can be in ISO 8601 format (For example, PT05S to start at 5 seconds), or a frame count (For example, 10 to start at the 10th frame), or a relative value to stream duration (For example, 10% to start at 10% of stream duration). Defaults to 0.
    duration String
    The duration of the fade effect in the video. The value can be in ISO 8601 format (For example, PT05S to fade In/Out a color during 5 seconds), or a frame count (For example, 10 to fade 10 frames from the start time), or a relative value to stream duration (For example, 10% to fade 10% of stream duration).
    fadeColor String
    The color for the fade in/out. It can be one of the CSS Level 1 colors or an RGB/hex value, e.g. rgb(255,0,0), 0xFF0000 or #FF0000.
    start String
    The position in the input video from where to start fade. The value can be in ISO 8601 format (For example, PT05S to start at 5 seconds), or a frame count (For example, 10 to start at the 10th frame), or a relative value to stream duration (For example, 10% to start at 10% of stream duration). Defaults to 0.
    duration string
    The duration of the fade effect in the video. The value can be in ISO 8601 format (For example, PT05S to fade In/Out a color during 5 seconds), or a frame count (For example, 10 to fade 10 frames from the start time), or a relative value to stream duration (For example, 10% to fade 10% of stream duration).
    fadeColor string
    The color for the fade in/out. It can be one of the CSS Level 1 colors or an RGB/hex value, e.g. rgb(255,0,0), 0xFF0000 or #FF0000.
    start string
    The position in the input video from where to start fade. The value can be in ISO 8601 format (For example, PT05S to start at 5 seconds), or a frame count (For example, 10 to start at the 10th frame), or a relative value to stream duration (For example, 10% to start at 10% of stream duration). Defaults to 0.
    duration str
    The duration of the fade effect in the video. The value can be in ISO 8601 format (For example, PT05S to fade In/Out a color during 5 seconds), or a frame count (For example, 10 to fade 10 frames from the start time), or a relative value to stream duration (For example, 10% to fade 10% of stream duration).
    fade_color str
    The color for the fade in/out. It can be one of the CSS Level 1 colors or an RGB/hex value, e.g. rgb(255,0,0), 0xFF0000 or #FF0000.
    start str
    The position in the input video from where to start fade. The value can be in ISO 8601 format (For example, PT05S to start at 5 seconds), or a frame count (For example, 10 to start at the 10th frame), or a relative value to stream duration (For example, 10% to start at 10% of stream duration). Defaults to 0.
    duration String
    The duration of the fade effect in the video. The value can be in ISO 8601 format (For example, PT05S to fade In/Out a color during 5 seconds), or a frame count (For example, 10 to fade 10 frames from the start time), or a relative value to stream duration (For example, 10% to fade 10% of stream duration).
    fadeColor String
    The color for the fade in/out. It can be one of the CSS Level 1 colors or an RGB/hex value, e.g. rgb(255,0,0), 0xFF0000 or #FF0000.
    start String
    The position in the input video from where to start fade. The value can be in ISO 8601 format (For example, PT05S to start at 5 seconds), or a frame count (For example, 10 to start at the 10th frame), or a relative value to stream duration (For example, 10% to start at 10% of stream duration). Defaults to 0.

    TransformOutputCustomPresetFilterOverlay, TransformOutputCustomPresetFilterOverlayArgs

    Audio TransformOutputCustomPresetFilterOverlayAudio
    An audio block as defined above.
    Video TransformOutputCustomPresetFilterOverlayVideo

    A video block as defined below.

    NOTE: Each overlay can only have one type: audio or video. If you need to apply a different type, you must create one overlay for each one.

    Audio TransformOutputCustomPresetFilterOverlayAudio
    An audio block as defined above.
    Video TransformOutputCustomPresetFilterOverlayVideo

    A video block as defined below.

    NOTE: Each overlay can only have one type: audio or video. If you need to apply a different type, you must create one overlay for each one.

    audio TransformOutputCustomPresetFilterOverlayAudio
    An audio block as defined above.
    video TransformOutputCustomPresetFilterOverlayVideo

    A video block as defined below.

    NOTE: Each overlay can only have one type: audio or video. If you need to apply a different type, you must create one overlay for each one.

    audio TransformOutputCustomPresetFilterOverlayAudio
    An audio block as defined above.
    video TransformOutputCustomPresetFilterOverlayVideo

    A video block as defined below.

    NOTE: Each overlay can only have one type: audio or video. If you need to apply a different type, you must create one overlay for each one.

    audio TransformOutputCustomPresetFilterOverlayAudio
    An audio block as defined above.
    video TransformOutputCustomPresetFilterOverlayVideo

    A video block as defined below.

    NOTE: Each overlay can only have one type: audio or video. If you need to apply a different type, you must create one overlay for each one.

    audio Property Map
    An audio block as defined above.
    video Property Map

    A video block as defined below.

    NOTE: Each overlay can only have one type: audio or video. If you need to apply a different type, you must create one overlay for each one.

    TransformOutputCustomPresetFilterOverlayAudio, TransformOutputCustomPresetFilterOverlayAudioArgs

    InputLabel string
    The label of the job input which is to be used as an overlay. The input must specify exactly one file. You can specify an image file in JPG, PNG, GIF or BMP format, or an audio file (such as a WAV, MP3, WMA or M4A file), or a video file.
    AudioGainLevel double
    The gain level of audio in the overlay. The value should be in the range 0 to 1.0. The default is 1.0.
    End string
    The end position, with reference to the input video, at which the overlay ends. The value should be in ISO 8601 format. For example, PT30S to end the overlay at 30 seconds into the input video. If not specified or the value is greater than the input video duration, the overlay will be applied until the end of the input video if the overlay media duration is greater than the input video duration, else the overlay will last as long as the overlay media duration.
    FadeInDuration string
    The duration over which the overlay fades in onto the input video. The value should be in ISO 8601 duration format. If not specified the default behavior is to have no fade in (same as PT0S).
    FadeOutDuration string
    The duration over which the overlay fades out of the input video. The value should be in ISO 8601 duration format. If not specified the default behavior is to have no fade out (same as PT0S).
    Start string
    The start position, with reference to the input video, at which the overlay starts. The value should be in ISO 8601 format. For example, PT05S to start the overlay at 5 seconds into the input video. If not specified the overlay starts from the beginning of the input video.
    InputLabel string
    The label of the job input which is to be used as an overlay. The input must specify exactly one file. You can specify an image file in JPG, PNG, GIF or BMP format, or an audio file (such as a WAV, MP3, WMA or M4A file), or a video file.
    AudioGainLevel float64
    The gain level of audio in the overlay. The value should be in the range 0 to 1.0. The default is 1.0.
    End string
    The end position, with reference to the input video, at which the overlay ends. The value should be in ISO 8601 format. For example, PT30S to end the overlay at 30 seconds into the input video. If not specified or the value is greater than the input video duration, the overlay will be applied until the end of the input video if the overlay media duration is greater than the input video duration, else the overlay will last as long as the overlay media duration.
    FadeInDuration string
    The duration over which the overlay fades in onto the input video. The value should be in ISO 8601 duration format. If not specified the default behavior is to have no fade in (same as PT0S).
    FadeOutDuration string
    The duration over which the overlay fades out of the input video. The value should be in ISO 8601 duration format. If not specified the default behavior is to have no fade out (same as PT0S).
    Start string
    The start position, with reference to the input video, at which the overlay starts. The value should be in ISO 8601 format. For example, PT05S to start the overlay at 5 seconds into the input video. If not specified the overlay starts from the beginning of the input video.
    inputLabel String
    The label of the job input which is to be used as an overlay. The input must specify exactly one file. You can specify an image file in JPG, PNG, GIF or BMP format, or an audio file (such as a WAV, MP3, WMA or M4A file), or a video file.
    audioGainLevel Double
    The gain level of audio in the overlay. The value should be in the range 0 to 1.0. The default is 1.0.
    end String
    The end position, with reference to the input video, at which the overlay ends. The value should be in ISO 8601 format. For example, PT30S to end the overlay at 30 seconds into the input video. If not specified or the value is greater than the input video duration, the overlay will be applied until the end of the input video if the overlay media duration is greater than the input video duration, else the overlay will last as long as the overlay media duration.
    fadeInDuration String
    The duration over which the overlay fades in onto the input video. The value should be in ISO 8601 duration format. If not specified the default behavior is to have no fade in (same as PT0S).
    fadeOutDuration String
    The duration over which the overlay fades out of the input video. The value should be in ISO 8601 duration format. If not specified the default behavior is to have no fade out (same as PT0S).
    start String
    The start position, with reference to the input video, at which the overlay starts. The value should be in ISO 8601 format. For example, PT05S to start the overlay at 5 seconds into the input video. If not specified the overlay starts from the beginning of the input video.
    inputLabel string
    The label of the job input which is to be used as an overlay. The input must specify exactly one file. You can specify an image file in JPG, PNG, GIF or BMP format, or an audio file (such as a WAV, MP3, WMA or M4A file), or a video file.
    audioGainLevel number
    The gain level of audio in the overlay. The value should be in the range 0 to 1.0. The default is 1.0.
    end string
    The end position, with reference to the input video, at which the overlay ends. The value should be in ISO 8601 format. For example, PT30S to end the overlay at 30 seconds into the input video. If not specified or the value is greater than the input video duration, the overlay will be applied until the end of the input video if the overlay media duration is greater than the input video duration, else the overlay will last as long as the overlay media duration.
    fadeInDuration string
    The duration over which the overlay fades in onto the input video. The value should be in ISO 8601 duration format. If not specified the default behavior is to have no fade in (same as PT0S).
    fadeOutDuration string
    The duration over which the overlay fades out of the input video. The value should be in ISO 8601 duration format. If not specified the default behavior is to have no fade out (same as PT0S).
    start string
    The start position, with reference to the input video, at which the overlay starts. The value should be in ISO 8601 format. For example, PT05S to start the overlay at 5 seconds into the input video. If not specified the overlay starts from the beginning of the input video.
    input_label str
    The label of the job input which is to be used as an overlay. The input must specify exactly one file. You can specify an image file in JPG, PNG, GIF or BMP format, or an audio file (such as a WAV, MP3, WMA or M4A file), or a video file.
    audio_gain_level float
    The gain level of audio in the overlay. The value should be in the range 0 to 1.0. The default is 1.0.
    end str
    The end position, with reference to the input video, at which the overlay ends. The value should be in ISO 8601 format. For example, PT30S to end the overlay at 30 seconds into the input video. If not specified or the value is greater than the input video duration, the overlay will be applied until the end of the input video if the overlay media duration is greater than the input video duration, else the overlay will last as long as the overlay media duration.
    fade_in_duration str
    The duration over which the overlay fades in onto the input video. The value should be in ISO 8601 duration format. If not specified the default behavior is to have no fade in (same as PT0S).
    fade_out_duration str
    The duration over which the overlay fades out of the input video. The value should be in ISO 8601 duration format. If not specified the default behavior is to have no fade out (same as PT0S).
    start str
    The start position, with reference to the input video, at which the overlay starts. The value should be in ISO 8601 format. For example, PT05S to start the overlay at 5 seconds into the input video. If not specified the overlay starts from the beginning of the input video.
    inputLabel String
    The label of the job input which is to be used as an overlay. The input must specify exactly one file. You can specify an image file in JPG, PNG, GIF or BMP format, or an audio file (such as a WAV, MP3, WMA or M4A file), or a video file.
    audioGainLevel Number
    The gain level of audio in the overlay. The value should be in the range 0 to 1.0. The default is 1.0.
    end String
    The end position, with reference to the input video, at which the overlay ends. The value should be in ISO 8601 format. For example, PT30S to end the overlay at 30 seconds into the input video. If not specified or the value is greater than the input video duration, the overlay will be applied until the end of the input video if the overlay media duration is greater than the input video duration, else the overlay will last as long as the overlay media duration.
    fadeInDuration String
    The duration over which the overlay fades in onto the input video. The value should be in ISO 8601 duration format. If not specified the default behavior is to have no fade in (same as PT0S).
    fadeOutDuration String
    The duration over which the overlay fades out of the input video. The value should be in ISO 8601 duration format. If not specified the default behavior is to have no fade out (same as PT0S).
    start String
    The start position, with reference to the input video, at which the overlay starts. The value should be in ISO 8601 format. For example, PT05S to start the overlay at 5 seconds into the input video. If not specified the overlay starts from the beginning of the input video.

    TransformOutputCustomPresetFilterOverlayVideo, TransformOutputCustomPresetFilterOverlayVideoArgs

    InputLabel string
    The label of the job input which is to be used as an overlay. The input must specify exactly one file. You can specify an image file in JPG, PNG, GIF or BMP format, or an audio file (such as a WAV, MP3, WMA or M4A file), or a video file.
    AudioGainLevel double
    The gain level of audio in the overlay. The value should be in the range 0 to 1.0. The default is 1.0.
    CropRectangle TransformOutputCustomPresetFilterOverlayVideoCropRectangle
    A crop_rectangle block as defined above.
    End string
    The end position, with reference to the input video, at which the overlay ends. The value should be in ISO 8601 format. For example, PT30S to end the overlay at 30 seconds into the input video. If not specified or the value is greater than the input video duration, the overlay will be applied until the end of the input video if the overlay media duration is greater than the input video duration, else the overlay will last as long as the overlay media duration.
    FadeInDuration string
    The duration over which the overlay fades in onto the input video. The value should be in ISO 8601 duration format. If not specified the default behavior is to have no fade in (same as PT0S).
    FadeOutDuration string
    The duration over which the overlay fades out of the input video. The value should be in ISO 8601 duration format. If not specified the default behavior is to have no fade out (same as PT0S).
    Opacity double
    The opacity of the overlay. The value should be in the range 0 to 1.0. Defaults to 1.0, which means the overlay is opaque.
    Position TransformOutputCustomPresetFilterOverlayVideoPosition
    A position block as defined above.
    Start string
    The start position, with reference to the input video, at which the overlay starts. The value should be in ISO 8601 format. For example, PT05S to start the overlay at 5 seconds into the input video. If not specified the overlay starts from the beginning of the input video.
    InputLabel string
    The label of the job input which is to be used as an overlay. The input must specify exactly one file. You can specify an image file in JPG, PNG, GIF or BMP format, or an audio file (such as a WAV, MP3, WMA or M4A file), or a video file.
    AudioGainLevel float64
    The gain level of audio in the overlay. The value should be in the range between 0 and 1.0. The default is 1.0.
    CropRectangle TransformOutputCustomPresetFilterOverlayVideoCropRectangle
    A crop_rectangle block as defined above.
    End string
    The end position, with reference to the input video, at which the overlay ends. The value should be in ISO 8601 format. For example, PT30S to end the overlay at 30 seconds into the input video. If not specified or the value is greater than the input video duration, the overlay will be applied until the end of the input video if the overlay media duration is greater than the input video duration, else the overlay will last as long as the overlay media duration.
    FadeInDuration string
    The duration over which the overlay fades in onto the input video. The value should be in ISO 8601 duration format. If not specified the default behavior is to have no fade in (same as PT0S).
    FadeOutDuration string
    The duration over which the overlay fades out of the input video. The value should be in ISO 8601 duration format. If not specified the default behavior is to have no fade out (same as PT0S).
    Opacity float64
    The opacity of the overlay. The value should be in the range between 0 and 1.0. Defaults to 1.0, which means the overlay is opaque.
    Position TransformOutputCustomPresetFilterOverlayVideoPosition
    A position block as defined above.
    Start string
    The start position, with reference to the input video, at which the overlay starts. The value should be in ISO 8601 format. For example, PT05S to start the overlay at 5 seconds into the input video. If not specified the overlay starts from the beginning of the input video.
    inputLabel String
    The label of the job input which is to be used as an overlay. The input must specify exactly one file. You can specify an image file in JPG, PNG, GIF or BMP format, or an audio file (such as a WAV, MP3, WMA or M4A file), or a video file.
    audioGainLevel Double
    The gain level of audio in the overlay. The value should be in the range between 0 and 1.0. The default is 1.0.
    cropRectangle TransformOutputCustomPresetFilterOverlayVideoCropRectangle
    A crop_rectangle block as defined above.
    end String
    The end position, with reference to the input video, at which the overlay ends. The value should be in ISO 8601 format. For example, PT30S to end the overlay at 30 seconds into the input video. If not specified or the value is greater than the input video duration, the overlay will be applied until the end of the input video if the overlay media duration is greater than the input video duration, else the overlay will last as long as the overlay media duration.
    fadeInDuration String
    The duration over which the overlay fades in onto the input video. The value should be in ISO 8601 duration format. If not specified the default behavior is to have no fade in (same as PT0S).
    fadeOutDuration String
    The duration over which the overlay fades out of the input video. The value should be in ISO 8601 duration format. If not specified the default behavior is to have no fade out (same as PT0S).
    opacity Double
    The opacity of the overlay. The value should be in the range between 0 and 1.0. Defaults to 1.0, which means the overlay is opaque.
    position TransformOutputCustomPresetFilterOverlayVideoPosition
    A position block as defined above.
    start String
    The start position, with reference to the input video, at which the overlay starts. The value should be in ISO 8601 format. For example, PT05S to start the overlay at 5 seconds into the input video. If not specified the overlay starts from the beginning of the input video.
    inputLabel string
    The label of the job input which is to be used as an overlay. The input must specify exactly one file. You can specify an image file in JPG, PNG, GIF or BMP format, or an audio file (such as a WAV, MP3, WMA or M4A file), or a video file.
    audioGainLevel number
    The gain level of audio in the overlay. The value should be in the range between 0 and 1.0. The default is 1.0.
    cropRectangle TransformOutputCustomPresetFilterOverlayVideoCropRectangle
    A crop_rectangle block as defined above.
    end string
    The end position, with reference to the input video, at which the overlay ends. The value should be in ISO 8601 format. For example, PT30S to end the overlay at 30 seconds into the input video. If not specified or the value is greater than the input video duration, the overlay will be applied until the end of the input video if the overlay media duration is greater than the input video duration, else the overlay will last as long as the overlay media duration.
    fadeInDuration string
    The duration over which the overlay fades in onto the input video. The value should be in ISO 8601 duration format. If not specified the default behavior is to have no fade in (same as PT0S).
    fadeOutDuration string
    The duration over which the overlay fades out of the input video. The value should be in ISO 8601 duration format. If not specified the default behavior is to have no fade out (same as PT0S).
    opacity number
    The opacity of the overlay. The value should be in the range between 0 and 1.0. Defaults to 1.0, which means the overlay is opaque.
    position TransformOutputCustomPresetFilterOverlayVideoPosition
    A position block as defined above.
    start string
    The start position, with reference to the input video, at which the overlay starts. The value should be in ISO 8601 format. For example, PT05S to start the overlay at 5 seconds into the input video. If not specified the overlay starts from the beginning of the input video.
    input_label str
    The label of the job input which is to be used as an overlay. The input must specify exactly one file. You can specify an image file in JPG, PNG, GIF or BMP format, or an audio file (such as a WAV, MP3, WMA or M4A file), or a video file.
    audio_gain_level float
    The gain level of audio in the overlay. The value should be in the range between 0 and 1.0. The default is 1.0.
    crop_rectangle TransformOutputCustomPresetFilterOverlayVideoCropRectangle
    A crop_rectangle block as defined above.
    end str
    The end position, with reference to the input video, at which the overlay ends. The value should be in ISO 8601 format. For example, PT30S to end the overlay at 30 seconds into the input video. If not specified or the value is greater than the input video duration, the overlay will be applied until the end of the input video if the overlay media duration is greater than the input video duration, else the overlay will last as long as the overlay media duration.
    fade_in_duration str
    The duration over which the overlay fades in onto the input video. The value should be in ISO 8601 duration format. If not specified the default behavior is to have no fade in (same as PT0S).
    fade_out_duration str
    The duration over which the overlay fades out of the input video. The value should be in ISO 8601 duration format. If not specified the default behavior is to have no fade out (same as PT0S).
    opacity float
    The opacity of the overlay. The value should be in the range between 0 and 1.0. Defaults to 1.0, which means the overlay is opaque.
    position TransformOutputCustomPresetFilterOverlayVideoPosition
    A position block as defined above.
    start str
    The start position, with reference to the input video, at which the overlay starts. The value should be in ISO 8601 format. For example, PT05S to start the overlay at 5 seconds into the input video. If not specified the overlay starts from the beginning of the input video.
    inputLabel String
    The label of the job input which is to be used as an overlay. The input must specify exactly one file. You can specify an image file in JPG, PNG, GIF or BMP format, or an audio file (such as a WAV, MP3, WMA or M4A file), or a video file.
    audioGainLevel Number
    The gain level of audio in the overlay. The value should be in the range between 0 and 1.0. The default is 1.0.
    cropRectangle Property Map
    A crop_rectangle block as defined above.
    end String
    The end position, with reference to the input video, at which the overlay ends. The value should be in ISO 8601 format. For example, PT30S to end the overlay at 30 seconds into the input video. If not specified or the value is greater than the input video duration, the overlay will be applied until the end of the input video if the overlay media duration is greater than the input video duration, else the overlay will last as long as the overlay media duration.
    fadeInDuration String
    The duration over which the overlay fades in onto the input video. The value should be in ISO 8601 duration format. If not specified the default behavior is to have no fade in (same as PT0S).
    fadeOutDuration String
    The duration over which the overlay fades out of the input video. The value should be in ISO 8601 duration format. If not specified the default behavior is to have no fade out (same as PT0S).
    opacity Number
    The opacity of the overlay. The value should be in the range between 0 and 1.0. Defaults to 1.0, which means the overlay is opaque.
    position Property Map
    A position block as defined above.
    start String
    The start position, with reference to the input video, at which the overlay starts. The value should be in ISO 8601 format. For example, PT05S to start the overlay at 5 seconds into the input video. If not specified the overlay starts from the beginning of the input video.

    TransformOutputCustomPresetFilterOverlayVideoCropRectangle, TransformOutputCustomPresetFilterOverlayVideoCropRectangleArgs

    Height string
    The height of the rectangular region in pixels. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%).
    Left string
    The number of pixels from the left-margin. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%).
    Top string
    The number of pixels from the top-margin. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%).
    Width string
    The width of the rectangular region in pixels. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%).
    Height string
    The height of the rectangular region in pixels. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%).
    Left string
    The number of pixels from the left-margin. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%).
    Top string
    The number of pixels from the top-margin. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%).
    Width string
    The width of the rectangular region in pixels. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%).
    height String
    The height of the rectangular region in pixels. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%).
    left String
    The number of pixels from the left-margin. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%).
    top String
    The number of pixels from the top-margin. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%).
    width String
    The width of the rectangular region in pixels. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%).
    height string
    The height of the rectangular region in pixels. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%).
    left string
    The number of pixels from the left-margin. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%).
    top string
    The number of pixels from the top-margin. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%).
    width string
    The width of the rectangular region in pixels. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%).
    height str
    The height of the rectangular region in pixels. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%).
    left str
    The number of pixels from the left-margin. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%).
    top str
    The number of pixels from the top-margin. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%).
    width str
    The width of the rectangular region in pixels. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%).
    height String
    The height of the rectangular region in pixels. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%).
    left String
    The number of pixels from the left-margin. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%).
    top String
    The number of pixels from the top-margin. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%).
    width String
    The width of the rectangular region in pixels. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%).

    TransformOutputCustomPresetFilterOverlayVideoPosition, TransformOutputCustomPresetFilterOverlayVideoPositionArgs

    Height string
    The height of the rectangular region in pixels. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%).
    Left string
    The number of pixels from the left-margin. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%).
    Top string
    The number of pixels from the top-margin. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%).
    Width string
    The width of the rectangular region in pixels. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%).
    Height string
    The height of the rectangular region in pixels. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%).
    Left string
    The number of pixels from the left-margin. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%).
    Top string
    The number of pixels from the top-margin. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%).
    Width string
    The width of the rectangular region in pixels. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%).
    height String
    The height of the rectangular region in pixels. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%).
    left String
    The number of pixels from the left-margin. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%).
    top String
    The number of pixels from the top-margin. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%).
    width String
    The width of the rectangular region in pixels. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%).
    height string
    The height of the rectangular region in pixels. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%).
    left string
    The number of pixels from the left-margin. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%).
    top string
    The number of pixels from the top-margin. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%).
    width string
    The width of the rectangular region in pixels. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%).
    height str
    The height of the rectangular region in pixels. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%).
    left str
    The number of pixels from the left-margin. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%).
    top str
    The number of pixels from the top-margin. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%).
    width str
    The width of the rectangular region in pixels. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%).
    height String
    The height of the rectangular region in pixels. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%).
    left String
    The number of pixels from the left-margin. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%).
    top String
    The number of pixels from the top-margin. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%).
    width String
    The width of the rectangular region in pixels. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%).

    TransformOutputCustomPresetFormat, TransformOutputCustomPresetFormatArgs

    Jpg TransformOutputCustomPresetFormatJpg
    A jpg block as defined below.
    Mp4 TransformOutputCustomPresetFormatMp4
    A mp4 block as defined below.
    Png TransformOutputCustomPresetFormatPng
    A png block as defined below.
    TransportStream TransformOutputCustomPresetFormatTransportStream

    A transport_stream block as defined below.

    NOTE: Each format can only have one type: jpg, mp4, png or transport_stream. If you need to apply a different type, you must create one format for each one.

    Jpg TransformOutputCustomPresetFormatJpg
    A jpg block as defined below.
    Mp4 TransformOutputCustomPresetFormatMp4
    A mp4 block as defined below.
    Png TransformOutputCustomPresetFormatPng
    A png block as defined below.
    TransportStream TransformOutputCustomPresetFormatTransportStream

    A transport_stream block as defined below.

    NOTE: Each format can only have one type: jpg, mp4, png or transport_stream. If you need to apply a different type, you must create one format for each one.

    jpg TransformOutputCustomPresetFormatJpg
    A jpg block as defined below.
    mp4 TransformOutputCustomPresetFormatMp4
    A mp4 block as defined below.
    png TransformOutputCustomPresetFormatPng
    A png block as defined below.
    transportStream TransformOutputCustomPresetFormatTransportStream

    A transport_stream block as defined below.

    NOTE: Each format can only have one type: jpg, mp4, png or transport_stream. If you need to apply a different type, you must create one format for each one.

    jpg TransformOutputCustomPresetFormatJpg
    A jpg block as defined below.
    mp4 TransformOutputCustomPresetFormatMp4
    A mp4 block as defined below.
    png TransformOutputCustomPresetFormatPng
    A png block as defined below.
    transportStream TransformOutputCustomPresetFormatTransportStream

    A transport_stream block as defined below.

    NOTE: Each format can only have one type: jpg, mp4, png or transport_stream. If you need to apply a different type, you must create one format for each one.

    jpg TransformOutputCustomPresetFormatJpg
    A jpg block as defined below.
    mp4 TransformOutputCustomPresetFormatMp4
    A mp4 block as defined below.
    png TransformOutputCustomPresetFormatPng
    A png block as defined below.
    transport_stream TransformOutputCustomPresetFormatTransportStream

    A transport_stream block as defined below.

    NOTE: Each format can only have one type: jpg, mp4, png or transport_stream. If you need to apply a different type, you must create one format for each one.

    jpg Property Map
    A jpg block as defined below.
    mp4 Property Map
    A mp4 block as defined below.
    png Property Map
    A png block as defined below.
    transportStream Property Map

    A transport_stream block as defined below.

    NOTE: Each format can only have one type: jpg, mp4, png or transport_stream. If you need to apply a different type, you must create one format for each one.

    TransformOutputCustomPresetFormatJpg, TransformOutputCustomPresetFormatJpgArgs

    FilenamePattern string
    The file naming pattern used for the creation of output files. The following macros are supported in the file name: {Basename} - An expansion macro that will use the name of the input video file. If the base name(the file suffix is not included) of the input video file is less than 32 characters long, the base name of input video files will be used. If the length of base name of the input video file exceeds 32 characters, the base name is truncated to the first 32 characters in total length. {Extension} - The appropriate extension for this format. {Label} - The label assigned to the codec/layer. {Index} - A unique index for thumbnails. Only applicable to thumbnails. {AudioStream} - string "Audio" plus audio stream number(start from 1). {Bitrate} - The audio/video bitrate in kbps. Not applicable to thumbnails. {Codec} - The type of the audio/video codec. {Resolution} - The video resolution. Any unsubstituted macros will be collapsed and removed from the filename.
    FilenamePattern string
    The file naming pattern used for the creation of output files. The following macros are supported in the file name: {Basename} - An expansion macro that will use the name of the input video file. If the base name(the file suffix is not included) of the input video file is less than 32 characters long, the base name of input video files will be used. If the length of base name of the input video file exceeds 32 characters, the base name is truncated to the first 32 characters in total length. {Extension} - The appropriate extension for this format. {Label} - The label assigned to the codec/layer. {Index} - A unique index for thumbnails. Only applicable to thumbnails. {AudioStream} - string "Audio" plus audio stream number(start from 1). {Bitrate} - The audio/video bitrate in kbps. Not applicable to thumbnails. {Codec} - The type of the audio/video codec. {Resolution} - The video resolution. Any unsubstituted macros will be collapsed and removed from the filename.
    filenamePattern String
    The file naming pattern used for the creation of output files. The following macros are supported in the file name: {Basename} - An expansion macro that will use the name of the input video file. If the base name(the file suffix is not included) of the input video file is less than 32 characters long, the base name of input video files will be used. If the length of base name of the input video file exceeds 32 characters, the base name is truncated to the first 32 characters in total length. {Extension} - The appropriate extension for this format. {Label} - The label assigned to the codec/layer. {Index} - A unique index for thumbnails. Only applicable to thumbnails. {AudioStream} - string "Audio" plus audio stream number(start from 1). {Bitrate} - The audio/video bitrate in kbps. Not applicable to thumbnails. {Codec} - The type of the audio/video codec. {Resolution} - The video resolution. Any unsubstituted macros will be collapsed and removed from the filename.
    filenamePattern string
    The file naming pattern used for the creation of output files. The following macros are supported in the file name: {Basename} - An expansion macro that will use the name of the input video file. If the base name(the file suffix is not included) of the input video file is less than 32 characters long, the base name of input video files will be used. If the length of base name of the input video file exceeds 32 characters, the base name is truncated to the first 32 characters in total length. {Extension} - The appropriate extension for this format. {Label} - The label assigned to the codec/layer. {Index} - A unique index for thumbnails. Only applicable to thumbnails. {AudioStream} - string "Audio" plus audio stream number(start from 1). {Bitrate} - The audio/video bitrate in kbps. Not applicable to thumbnails. {Codec} - The type of the audio/video codec. {Resolution} - The video resolution. Any unsubstituted macros will be collapsed and removed from the filename.
    filename_pattern str
    The file naming pattern used for the creation of output files. The following macros are supported in the file name: {Basename} - An expansion macro that will use the name of the input video file. If the base name(the file suffix is not included) of the input video file is less than 32 characters long, the base name of input video files will be used. If the length of base name of the input video file exceeds 32 characters, the base name is truncated to the first 32 characters in total length. {Extension} - The appropriate extension for this format. {Label} - The label assigned to the codec/layer. {Index} - A unique index for thumbnails. Only applicable to thumbnails. {AudioStream} - string "Audio" plus audio stream number(start from 1). {Bitrate} - The audio/video bitrate in kbps. Not applicable to thumbnails. {Codec} - The type of the audio/video codec. {Resolution} - The video resolution. Any unsubstituted macros will be collapsed and removed from the filename.
    filenamePattern String
    The file naming pattern used for the creation of output files. The following macros are supported in the file name: {Basename} - An expansion macro that will use the name of the input video file. If the base name(the file suffix is not included) of the input video file is less than 32 characters long, the base name of input video files will be used. If the length of base name of the input video file exceeds 32 characters, the base name is truncated to the first 32 characters in total length. {Extension} - The appropriate extension for this format. {Label} - The label assigned to the codec/layer. {Index} - A unique index for thumbnails. Only applicable to thumbnails. {AudioStream} - string "Audio" plus audio stream number(start from 1). {Bitrate} - The audio/video bitrate in kbps. Not applicable to thumbnails. {Codec} - The type of the audio/video codec. {Resolution} - The video resolution. Any unsubstituted macros will be collapsed and removed from the filename.

    TransformOutputCustomPresetFormatMp4, TransformOutputCustomPresetFormatMp4Args

    FilenamePattern string
    The file naming pattern used for the creation of output files. The following macros are supported in the file name: {Basename} - An expansion macro that will use the name of the input video file. If the base name(the file suffix is not included) of the input video file is less than 32 characters long, the base name of input video files will be used. If the length of base name of the input video file exceeds 32 characters, the base name is truncated to the first 32 characters in total length. {Extension} - The appropriate extension for this format. {Label} - The label assigned to the codec/layer. {Index} - A unique index for thumbnails. Only applicable to thumbnails. {AudioStream} - string "Audio" plus audio stream number(start from 1). {Bitrate} - The audio/video bitrate in kbps. Not applicable to thumbnails. {Codec} - The type of the audio/video codec. {Resolution} - The video resolution. Any unsubstituted macros will be collapsed and removed from the filename.
    OutputFiles List<TransformOutputCustomPresetFormatMp4OutputFile>
    One or more output_file blocks as defined below.
    FilenamePattern string
    The file naming pattern used for the creation of output files. The following macros are supported in the file name: {Basename} - An expansion macro that will use the name of the input video file. If the base name(the file suffix is not included) of the input video file is less than 32 characters long, the base name of input video files will be used. If the length of base name of the input video file exceeds 32 characters, the base name is truncated to the first 32 characters in total length. {Extension} - The appropriate extension for this format. {Label} - The label assigned to the codec/layer. {Index} - A unique index for thumbnails. Only applicable to thumbnails. {AudioStream} - string "Audio" plus audio stream number(start from 1). {Bitrate} - The audio/video bitrate in kbps. Not applicable to thumbnails. {Codec} - The type of the audio/video codec. {Resolution} - The video resolution. Any unsubstituted macros will be collapsed and removed from the filename.
    OutputFiles []TransformOutputCustomPresetFormatMp4OutputFile
    One or more output_file blocks as defined below.
    filenamePattern String
    The file naming pattern used for the creation of output files. The following macros are supported in the file name: {Basename} - An expansion macro that will use the name of the input video file. If the base name(the file suffix is not included) of the input video file is less than 32 characters long, the base name of input video files will be used. If the length of base name of the input video file exceeds 32 characters, the base name is truncated to the first 32 characters in total length. {Extension} - The appropriate extension for this format. {Label} - The label assigned to the codec/layer. {Index} - A unique index for thumbnails. Only applicable to thumbnails. {AudioStream} - string "Audio" plus audio stream number(start from 1). {Bitrate} - The audio/video bitrate in kbps. Not applicable to thumbnails. {Codec} - The type of the audio/video codec. {Resolution} - The video resolution. Any unsubstituted macros will be collapsed and removed from the filename.
    outputFiles List<TransformOutputCustomPresetFormatMp4OutputFile>
    One or more output_file blocks as defined below.
    filenamePattern string
    The file naming pattern used for the creation of output files. The following macros are supported in the file name: {Basename} - An expansion macro that will use the name of the input video file. If the base name(the file suffix is not included) of the input video file is less than 32 characters long, the base name of input video files will be used. If the length of base name of the input video file exceeds 32 characters, the base name is truncated to the first 32 characters in total length. {Extension} - The appropriate extension for this format. {Label} - The label assigned to the codec/layer. {Index} - A unique index for thumbnails. Only applicable to thumbnails. {AudioStream} - string "Audio" plus audio stream number(start from 1). {Bitrate} - The audio/video bitrate in kbps. Not applicable to thumbnails. {Codec} - The type of the audio/video codec. {Resolution} - The video resolution. Any unsubstituted macros will be collapsed and removed from the filename.
    outputFiles TransformOutputCustomPresetFormatMp4OutputFile[]
    One or more output_file blocks as defined below.
    filename_pattern str
    The file naming pattern used for the creation of output files. The following macros are supported in the file name: {Basename} - An expansion macro that will use the name of the input video file. If the base name(the file suffix is not included) of the input video file is less than 32 characters long, the base name of input video files will be used. If the length of base name of the input video file exceeds 32 characters, the base name is truncated to the first 32 characters in total length. {Extension} - The appropriate extension for this format. {Label} - The label assigned to the codec/layer. {Index} - A unique index for thumbnails. Only applicable to thumbnails. {AudioStream} - string "Audio" plus audio stream number(start from 1). {Bitrate} - The audio/video bitrate in kbps. Not applicable to thumbnails. {Codec} - The type of the audio/video codec. {Resolution} - The video resolution. Any unsubstituted macros will be collapsed and removed from the filename.
    output_files Sequence[TransformOutputCustomPresetFormatMp4OutputFile]
    One or more output_file blocks as defined below.
    filenamePattern String
    The file naming pattern used for the creation of output files. The following macros are supported in the file name: {Basename} - An expansion macro that will use the name of the input video file. If the base name(the file suffix is not included) of the input video file is less than 32 characters long, the base name of input video files will be used. If the length of base name of the input video file exceeds 32 characters, the base name is truncated to the first 32 characters in total length. {Extension} - The appropriate extension for this format. {Label} - The label assigned to the codec/layer. {Index} - A unique index for thumbnails. Only applicable to thumbnails. {AudioStream} - string "Audio" plus audio stream number(start from 1). {Bitrate} - The audio/video bitrate in kbps. Not applicable to thumbnails. {Codec} - The type of the audio/video codec. {Resolution} - The video resolution. Any unsubstituted macros will be collapsed and removed from the filename.
    outputFiles List<Property Map>
    One or more output_file blocks as defined below.

    TransformOutputCustomPresetFormatMp4OutputFile, TransformOutputCustomPresetFormatMp4OutputFileArgs

    Labels List<string>
    The list of labels that describe how the encoder should multiplex video and audio into an output file. For example, if the encoder is producing two video layers with labels v1 and v2, and one audio layer with label a1, then an array like ["v1", "a1"] tells the encoder to produce an output file with the video track represented by v1 and the audio track represented by a1.
    Labels []string
    The list of labels that describe how the encoder should multiplex video and audio into an output file. For example, if the encoder is producing two video layers with labels v1 and v2, and one audio layer with label a1, then an array like ["v1", "a1"] tells the encoder to produce an output file with the video track represented by v1 and the audio track represented by a1.
    labels List<String>
    The list of labels that describe how the encoder should multiplex video and audio into an output file. For example, if the encoder is producing two video layers with labels v1 and v2, and one audio layer with label a1, then an array like ["v1", "a1"] tells the encoder to produce an output file with the video track represented by v1 and the audio track represented by a1.
    labels string[]
    The list of labels that describe how the encoder should multiplex video and audio into an output file. For example, if the encoder is producing two video layers with labels v1 and v2, and one audio layer with label a1, then an array like ["v1", "a1"] tells the encoder to produce an output file with the video track represented by v1 and the audio track represented by a1.
    labels Sequence[str]
    The list of labels that describe how the encoder should multiplex video and audio into an output file. For example, if the encoder is producing two video layers with labels v1 and v2, and one audio layer with label a1, then an array like ["v1", "a1"] tells the encoder to produce an output file with the video track represented by v1 and the audio track represented by a1.
    labels List<String>
    The list of labels that describe how the encoder should multiplex video and audio into an output file. For example, if the encoder is producing two video layers with labels v1 and v2, and one audio layer with label a1, then an array like ["v1", "a1"] tells the encoder to produce an output file with the video track represented by v1 and the audio track represented by a1.

    TransformOutputCustomPresetFormatPng, TransformOutputCustomPresetFormatPngArgs

    FilenamePattern string
    The file naming pattern used for the creation of output files. The following macros are supported in the file name: {Basename} - An expansion macro that will use the name of the input video file. If the base name(the file suffix is not included) of the input video file is less than 32 characters long, the base name of input video files will be used. If the length of base name of the input video file exceeds 32 characters, the base name is truncated to the first 32 characters in total length. {Extension} - The appropriate extension for this format. {Label} - The label assigned to the codec/layer. {Index} - A unique index for thumbnails. Only applicable to thumbnails. {AudioStream} - string "Audio" plus audio stream number(start from 1). {Bitrate} - The audio/video bitrate in kbps. Not applicable to thumbnails. {Codec} - The type of the audio/video codec. {Resolution} - The video resolution. Any unsubstituted macros will be collapsed and removed from the filename.
    FilenamePattern string
    The file naming pattern used for the creation of output files. The following macros are supported in the file name: {Basename} - An expansion macro that will use the name of the input video file. If the base name(the file suffix is not included) of the input video file is less than 32 characters long, the base name of input video files will be used. If the length of base name of the input video file exceeds 32 characters, the base name is truncated to the first 32 characters in total length. {Extension} - The appropriate extension for this format. {Label} - The label assigned to the codec/layer. {Index} - A unique index for thumbnails. Only applicable to thumbnails. {AudioStream} - string "Audio" plus audio stream number(start from 1). {Bitrate} - The audio/video bitrate in kbps. Not applicable to thumbnails. {Codec} - The type of the audio/video codec. {Resolution} - The video resolution. Any unsubstituted macros will be collapsed and removed from the filename.
    filenamePattern String
    The file naming pattern used for the creation of output files. The following macros are supported in the file name: {Basename} - An expansion macro that will use the name of the input video file. If the base name(the file suffix is not included) of the input video file is less than 32 characters long, the base name of input video files will be used. If the length of base name of the input video file exceeds 32 characters, the base name is truncated to the first 32 characters in total length. {Extension} - The appropriate extension for this format. {Label} - The label assigned to the codec/layer. {Index} - A unique index for thumbnails. Only applicable to thumbnails. {AudioStream} - string "Audio" plus audio stream number(start from 1). {Bitrate} - The audio/video bitrate in kbps. Not applicable to thumbnails. {Codec} - The type of the audio/video codec. {Resolution} - The video resolution. Any unsubstituted macros will be collapsed and removed from the filename.
    filenamePattern string
    The file naming pattern used for the creation of output files. The following macros are supported in the file name: {Basename} - An expansion macro that will use the name of the input video file. If the base name(the file suffix is not included) of the input video file is less than 32 characters long, the base name of input video files will be used. If the length of base name of the input video file exceeds 32 characters, the base name is truncated to the first 32 characters in total length. {Extension} - The appropriate extension for this format. {Label} - The label assigned to the codec/layer. {Index} - A unique index for thumbnails. Only applicable to thumbnails. {AudioStream} - string "Audio" plus audio stream number(start from 1). {Bitrate} - The audio/video bitrate in kbps. Not applicable to thumbnails. {Codec} - The type of the audio/video codec. {Resolution} - The video resolution. Any unsubstituted macros will be collapsed and removed from the filename.
    filename_pattern str
    The file naming pattern used for the creation of output files. The following macros are supported in the file name: {Basename} - An expansion macro that will use the name of the input video file. If the base name(the file suffix is not included) of the input video file is less than 32 characters long, the base name of input video files will be used. If the length of base name of the input video file exceeds 32 characters, the base name is truncated to the first 32 characters in total length. {Extension} - The appropriate extension for this format. {Label} - The label assigned to the codec/layer. {Index} - A unique index for thumbnails. Only applicable to thumbnails. {AudioStream} - string "Audio" plus audio stream number(start from 1). {Bitrate} - The audio/video bitrate in kbps. Not applicable to thumbnails. {Codec} - The type of the audio/video codec. {Resolution} - The video resolution. Any unsubstituted macros will be collapsed and removed from the filename.
    filenamePattern String
    The file naming pattern used for the creation of output files. The following macros are supported in the file name: {Basename} - An expansion macro that will use the name of the input video file. If the base name(the file suffix is not included) of the input video file is less than 32 characters long, the base name of input video files will be used. If the length of base name of the input video file exceeds 32 characters, the base name is truncated to the first 32 characters in total length. {Extension} - The appropriate extension for this format. {Label} - The label assigned to the codec/layer. {Index} - A unique index for thumbnails. Only applicable to thumbnails. {AudioStream} - string "Audio" plus audio stream number(start from 1). {Bitrate} - The audio/video bitrate in kbps. Not applicable to thumbnails. {Codec} - The type of the audio/video codec. {Resolution} - The video resolution. Any unsubstituted macros will be collapsed and removed from the filename.

    TransformOutputCustomPresetFormatTransportStream, TransformOutputCustomPresetFormatTransportStreamArgs

    FilenamePattern string
    The file naming pattern used for the creation of output files. The following macros are supported in the file name: {Basename} - An expansion macro that will use the name of the input video file. If the base name(the file suffix is not included) of the input video file is less than 32 characters long, the base name of input video files will be used. If the length of base name of the input video file exceeds 32 characters, the base name is truncated to the first 32 characters in total length. {Extension} - The appropriate extension for this format. {Label} - The label assigned to the codec/layer. {Index} - A unique index for thumbnails. Only applicable to thumbnails. {AudioStream} - string "Audio" plus audio stream number(start from 1). {Bitrate} - The audio/video bitrate in kbps. Not applicable to thumbnails. {Codec} - The type of the audio/video codec. {Resolution} - The video resolution. Any unsubstituted macros will be collapsed and removed from the filename.
    OutputFiles List<TransformOutputCustomPresetFormatTransportStreamOutputFile>
    One or more output_file blocks as defined above.
    FilenamePattern string
    The file naming pattern used for the creation of output files. The following macros are supported in the file name: {Basename} - An expansion macro that will use the name of the input video file. If the base name(the file suffix is not included) of the input video file is less than 32 characters long, the base name of input video files will be used. If the length of base name of the input video file exceeds 32 characters, the base name is truncated to the first 32 characters in total length. {Extension} - The appropriate extension for this format. {Label} - The label assigned to the codec/layer. {Index} - A unique index for thumbnails. Only applicable to thumbnails. {AudioStream} - string "Audio" plus audio stream number(start from 1). {Bitrate} - The audio/video bitrate in kbps. Not applicable to thumbnails. {Codec} - The type of the audio/video codec. {Resolution} - The video resolution. Any unsubstituted macros will be collapsed and removed from the filename.
    OutputFiles []TransformOutputCustomPresetFormatTransportStreamOutputFile
    One or more output_file blocks as defined above.
    filenamePattern String
    The file naming pattern used for the creation of output files. The following macros are supported in the file name: {Basename} - An expansion macro that will use the name of the input video file. If the base name(the file suffix is not included) of the input video file is less than 32 characters long, the base name of input video files will be used. If the length of base name of the input video file exceeds 32 characters, the base name is truncated to the first 32 characters in total length. {Extension} - The appropriate extension for this format. {Label} - The label assigned to the codec/layer. {Index} - A unique index for thumbnails. Only applicable to thumbnails. {AudioStream} - string "Audio" plus audio stream number(start from 1). {Bitrate} - The audio/video bitrate in kbps. Not applicable to thumbnails. {Codec} - The type of the audio/video codec. {Resolution} - The video resolution. Any unsubstituted macros will be collapsed and removed from the filename.
    outputFiles List<TransformOutputCustomPresetFormatTransportStreamOutputFile>
    One or more output_file blocks as defined above.
    filenamePattern string
    The file naming pattern used for the creation of output files. The following macros are supported in the file name: {Basename} - An expansion macro that will use the name of the input video file. If the base name(the file suffix is not included) of the input video file is less than 32 characters long, the base name of input video files will be used. If the length of base name of the input video file exceeds 32 characters, the base name is truncated to the first 32 characters in total length. {Extension} - The appropriate extension for this format. {Label} - The label assigned to the codec/layer. {Index} - A unique index for thumbnails. Only applicable to thumbnails. {AudioStream} - string "Audio" plus audio stream number(start from 1). {Bitrate} - The audio/video bitrate in kbps. Not applicable to thumbnails. {Codec} - The type of the audio/video codec. {Resolution} - The video resolution. Any unsubstituted macros will be collapsed and removed from the filename.
    outputFiles TransformOutputCustomPresetFormatTransportStreamOutputFile[]
    One or more output_file blocks as defined above.
    filename_pattern str
    The file naming pattern used for the creation of output files. The following macros are supported in the file name: {Basename} - An expansion macro that will use the name of the input video file. If the base name(the file suffix is not included) of the input video file is less than 32 characters long, the base name of input video files will be used. If the length of base name of the input video file exceeds 32 characters, the base name is truncated to the first 32 characters in total length. {Extension} - The appropriate extension for this format. {Label} - The label assigned to the codec/layer. {Index} - A unique index for thumbnails. Only applicable to thumbnails. {AudioStream} - string "Audio" plus audio stream number(start from 1). {Bitrate} - The audio/video bitrate in kbps. Not applicable to thumbnails. {Codec} - The type of the audio/video codec. {Resolution} - The video resolution. Any unsubstituted macros will be collapsed and removed from the filename.
    output_files Sequence[TransformOutputCustomPresetFormatTransportStreamOutputFile]
    One or more output_file blocks as defined above.
    filenamePattern String
    The file naming pattern used for the creation of output files. The following macros are supported in the file name: {Basename} - An expansion macro that will use the name of the input video file. If the base name(the file suffix is not included) of the input video file is less than 32 characters long, the base name of input video files will be used. If the length of base name of the input video file exceeds 32 characters, the base name is truncated to the first 32 characters in total length. {Extension} - The appropriate extension for this format. {Label} - The label assigned to the codec/layer. {Index} - A unique index for thumbnails. Only applicable to thumbnails. {AudioStream} - string "Audio" plus audio stream number(start from 1). {Bitrate} - The audio/video bitrate in kbps. Not applicable to thumbnails. {Codec} - The type of the audio/video codec. {Resolution} - The video resolution. Any unsubstituted macros will be collapsed and removed from the filename.
    outputFiles List<Property Map>
    One or more output_file blocks as defined above.

    TransformOutputCustomPresetFormatTransportStreamOutputFile, TransformOutputCustomPresetFormatTransportStreamOutputFileArgs

    Labels List<string>
    The list of labels that describe how the encoder should multiplex video and audio into an output file. For example, if the encoder is producing two video layers with labels v1 and v2, and one audio layer with label a1, then an array like ["v1", "a1"] tells the encoder to produce an output file with the video track represented by v1 and the audio track represented by a1.
    Labels []string
    The list of labels that describe how the encoder should multiplex video and audio into an output file. For example, if the encoder is producing two video layers with labels v1 and v2, and one audio layer with label a1, then an array like ["v1", "a1"] tells the encoder to produce an output file with the video track represented by v1 and the audio track represented by a1.
    labels List<String>
    The list of labels that describe how the encoder should multiplex video and audio into an output file. For example, if the encoder is producing two video layers with labels v1 and v2, and one audio layer with label a1, then an array like ["v1", "a1"] tells the encoder to produce an output file with the video track represented by v1 and the audio track represented by a1.
    labels string[]
    The list of labels that describe how the encoder should multiplex video and audio into an output file. For example, if the encoder is producing two video layers with labels v1 and v2, and one audio layer with label a1, then an array like ["v1", "a1"] tells the encoder to produce an output file with the video track represented by v1 and the audio track represented by a1.
    labels Sequence[str]
    The list of labels that describe how the encoder should multiplex video and audio into an output file. For example, if the encoder is producing two video layers with labels v1 and v2, and one audio layer with label a1, then an array like ["v1", "a1"] tells the encoder to produce an output file with the video track represented by v1 and the audio track represented by a1.
    labels List<String>
    The list of labels that describe how the encoder should multiplex video and audio into an output file. For example, if the encoder is producing two video layers with labels v1 and v2, and one audio layer with label a1, then an array like ["v1", "a1"] tells the encoder to produce an output file with the video track represented by v1 and the audio track represented by a1.

    TransformOutputFaceDetectorPreset, TransformOutputFaceDetectorPresetArgs

    AnalysisResolution string
    Possible values are SourceResolution or StandardDefinition. Specifies the maximum resolution at which your video is analyzed. Using SourceResolution will keep the input video at its original resolution when analyzed. Using StandardDefinition will resize input videos to standard definition while preserving the appropriate aspect ratio. It will only resize if the video is of higher resolution. For example, a 1920x1080 input would be scaled to 640x360 before processing. Switching to StandardDefinition will reduce the time it takes to process high resolution video. It may also reduce the cost of using this component (see https://azure.microsoft.com/en-us/pricing/details/media-services/#analytics for details). However, faces that end up being too small in the resized video may not be detected. Defaults to SourceResolution.
    BlurType string
    Specifies the type of blur to apply to faces in the output video. Possible values are Black, Box, High, Low, and Med.
    ExperimentalOptions Dictionary<string, string>
    Dictionary containing key value pairs for parameters not exposed in the preset itself.
    FaceRedactorMode string
    This mode provides the ability to choose between the following settings: 1) Analyze - For detection only. This mode generates a metadata JSON file marking appearances of faces throughout the video. Where possible, appearances of the same person are assigned the same ID. 2) Combined - Additionally redacts(blurs) detected faces. 3) Redact - This enables a 2-pass process, allowing for selective redaction of a subset of detected faces. It takes in the metadata file from a prior analyze pass, along with the source video, and a user-selected subset of IDs that require redaction. Default to Analyze.
    AnalysisResolution string
    Possible values are SourceResolution or StandardDefinition. Specifies the maximum resolution at which your video is analyzed. Using SourceResolution will keep the input video at its original resolution when analyzed. Using StandardDefinition will resize input videos to standard definition while preserving the appropriate aspect ratio. It will only resize if the video is of higher resolution. For example, a 1920x1080 input would be scaled to 640x360 before processing. Switching to StandardDefinition will reduce the time it takes to process high resolution video. It may also reduce the cost of using this component (see https://azure.microsoft.com/en-us/pricing/details/media-services/#analytics for details). However, faces that end up being too small in the resized video may not be detected. Defaults to SourceResolution.
    BlurType string
    Specifies the type of blur to apply to faces in the output video. Possible values are Black, Box, High, Low, and Med.
    ExperimentalOptions map[string]string
    Dictionary containing key value pairs for parameters not exposed in the preset itself.
    FaceRedactorMode string
    This mode provides the ability to choose between the following settings: 1) Analyze - For detection only. This mode generates a metadata JSON file marking appearances of faces throughout the video. Where possible, appearances of the same person are assigned the same ID. 2) Combined - Additionally redacts(blurs) detected faces. 3) Redact - This enables a 2-pass process, allowing for selective redaction of a subset of detected faces. It takes in the metadata file from a prior analyze pass, along with the source video, and a user-selected subset of IDs that require redaction. Default to Analyze.
    analysisResolution String
    Possible values are SourceResolution or StandardDefinition. Specifies the maximum resolution at which your video is analyzed. Using SourceResolution will keep the input video at its original resolution when analyzed. Using StandardDefinition will resize input videos to standard definition while preserving the appropriate aspect ratio. It will only resize if the video is of higher resolution. For example, a 1920x1080 input would be scaled to 640x360 before processing. Switching to StandardDefinition will reduce the time it takes to process high resolution video. It may also reduce the cost of using this component (see https://azure.microsoft.com/en-us/pricing/details/media-services/#analytics for details). However, faces that end up being too small in the resized video may not be detected. Defaults to SourceResolution.
    blurType String
    Specifies the type of blur to apply to faces in the output video. Possible values are Black, Box, High, Low, and Med.
    experimentalOptions Map<String,String>
    Dictionary containing key value pairs for parameters not exposed in the preset itself.
    faceRedactorMode String
    This mode provides the ability to choose between the following settings: 1) Analyze - For detection only. This mode generates a metadata JSON file marking appearances of faces throughout the video. Where possible, appearances of the same person are assigned the same ID. 2) Combined - Additionally redacts(blurs) detected faces. 3) Redact - This enables a 2-pass process, allowing for selective redaction of a subset of detected faces. It takes in the metadata file from a prior analyze pass, along with the source video, and a user-selected subset of IDs that require redaction. Default to Analyze.
    analysisResolution string
    Possible values are SourceResolution or StandardDefinition. Specifies the maximum resolution at which your video is analyzed. Using SourceResolution will keep the input video at its original resolution when analyzed. Using StandardDefinition will resize input videos to standard definition while preserving the appropriate aspect ratio. It will only resize if the video is of higher resolution. For example, a 1920x1080 input would be scaled to 640x360 before processing. Switching to StandardDefinition will reduce the time it takes to process high resolution video. It may also reduce the cost of using this component (see https://azure.microsoft.com/en-us/pricing/details/media-services/#analytics for details). However, faces that end up being too small in the resized video may not be detected. Defaults to SourceResolution.
    blurType string
    Specifies the type of blur to apply to faces in the output video. Possible values are Black, Box, High, Low, and Med.
    experimentalOptions {[key: string]: string}
    Dictionary containing key value pairs for parameters not exposed in the preset itself.
    faceRedactorMode string
    This mode provides the ability to choose between the following settings: 1) Analyze - For detection only. This mode generates a metadata JSON file marking appearances of faces throughout the video. Where possible, appearances of the same person are assigned the same ID. 2) Combined - Additionally redacts(blurs) detected faces. 3) Redact - This enables a 2-pass process, allowing for selective redaction of a subset of detected faces. It takes in the metadata file from a prior analyze pass, along with the source video, and a user-selected subset of IDs that require redaction. Default to Analyze.
    analysis_resolution str
    Possible values are SourceResolution or StandardDefinition. Specifies the maximum resolution at which your video is analyzed. Using SourceResolution will keep the input video at its original resolution when analyzed. Using StandardDefinition will resize input videos to standard definition while preserving the appropriate aspect ratio. It will only resize if the video is of higher resolution. For example, a 1920x1080 input would be scaled to 640x360 before processing. Switching to StandardDefinition will reduce the time it takes to process high resolution video. It may also reduce the cost of using this component (see https://azure.microsoft.com/en-us/pricing/details/media-services/#analytics for details). However, faces that end up being too small in the resized video may not be detected. Defaults to SourceResolution.
    blur_type str
    Specifies the type of blur to apply to faces in the output video. Possible values are Black, Box, High, Low, and Med.
    experimental_options Mapping[str, str]
    Dictionary containing key value pairs for parameters not exposed in the preset itself.
    face_redactor_mode str
    This mode provides the ability to choose between the following settings: 1) Analyze - For detection only. This mode generates a metadata JSON file marking appearances of faces throughout the video. Where possible, appearances of the same person are assigned the same ID. 2) Combined - Additionally redacts(blurs) detected faces. 3) Redact - This enables a 2-pass process, allowing for selective redaction of a subset of detected faces. It takes in the metadata file from a prior analyze pass, along with the source video, and a user-selected subset of IDs that require redaction. Default to Analyze.
    analysisResolution String
    Possible values are SourceResolution or StandardDefinition. Specifies the maximum resolution at which your video is analyzed. Using SourceResolution will keep the input video at its original resolution when analyzed. Using StandardDefinition will resize input videos to standard definition while preserving the appropriate aspect ratio. It will only resize if the video is of higher resolution. For example, a 1920x1080 input would be scaled to 640x360 before processing. Switching to StandardDefinition will reduce the time it takes to process high resolution video. It may also reduce the cost of using this component (see https://azure.microsoft.com/en-us/pricing/details/media-services/#analytics for details). However, faces that end up being too small in the resized video may not be detected. Defaults to SourceResolution.
    blurType String
    Specifies the type of blur to apply to faces in the output video. Possible values are Black, Box, High, Low, and Med.
    experimentalOptions Map<String>
    Dictionary containing key value pairs for parameters not exposed in the preset itself.
    faceRedactorMode String
    This mode provides the ability to choose between the following settings: 1) Analyze - For detection only. This mode generates a metadata JSON file marking appearances of faces throughout the video. Where possible, appearances of the same person are assigned the same ID. 2) Combined - Additionally redacts(blurs) detected faces. 3) Redact - This enables a 2-pass process, allowing for selective redaction of a subset of detected faces. It takes in the metadata file from a prior analyze pass, along with the source video, and a user-selected subset of IDs that require redaction. Default to Analyze.

    TransformOutputVideoAnalyzerPreset, TransformOutputVideoAnalyzerPresetArgs

    AudioAnalysisMode string
    Possible values are Basic or Standard. Determines the set of audio analysis operations to be performed. Default to Standard.
    AudioLanguage string
    The language for the audio payload in the input using the BCP-47 format of 'language tag-region' (e.g: 'en-US'). If you know the language of your content, it is recommended that you specify it. The language must be specified explicitly for AudioAnalysisMode:Basic, since automatic language detection is not included in basic mode. If the language isn't specified, automatic language detection will choose the first language detected and process with the selected language for the duration of the file. It does not currently support dynamically switching between languages after the first language is detected. The automatic detection works best with audio recordings with clearly discernible speech. If automatic detection fails to find the language, transcription would fall back to en-US. The list of supported languages is available here: https://go.microsoft.com/fwlink/?linkid=2109463.
    ExperimentalOptions Dictionary<string, string>
    Dictionary containing key value pairs for parameters not exposed in the preset itself.
    InsightsType string
    Defines the type of insights that you want the service to generate. The allowed values are AudioInsightsOnly, VideoInsightsOnly, and AllInsights. If you set this to AllInsights and the input is audio only, then only audio insights are generated. Similarly, if the input is video only, then only video insights are generated. It is recommended that you not use AudioInsightsOnly if you expect some of your inputs to be video only; or use VideoInsightsOnly if you expect some of your inputs to be audio only. Your Jobs in such conditions would error out. Default to AllInsights.
    AudioAnalysisMode string
    Possible values are Basic or Standard. Determines the set of audio analysis operations to be performed. Default to Standard.
    AudioLanguage string
    The language for the audio payload in the input using the BCP-47 format of 'language tag-region' (e.g. 'en-US'). If you know the language of your content, it is recommended that you specify it. The language must be specified explicitly for AudioAnalysisMode:Basic, since automatic language detection is not included in basic mode. If the language isn't specified, automatic language detection will choose the first language detected and process with the selected language for the duration of the file. It does not currently support dynamically switching between languages after the first language is detected. The automatic detection works best with audio recordings with clearly discernible speech. If automatic detection fails to find the language, transcription would fall back to en-US. The list of supported languages is available here: https://go.microsoft.com/fwlink/?linkid=2109463.
    ExperimentalOptions map[string]string
    Dictionary containing key value pairs for parameters not exposed in the preset itself.
    InsightsType string
    Defines the type of insights that you want the service to generate. The allowed values are AudioInsightsOnly, VideoInsightsOnly, and AllInsights. If you set this to AllInsights and the input is audio only, then only audio insights are generated. Similarly, if the input is video only, then only video insights are generated. It is recommended that you not use AudioInsightsOnly if you expect some of your inputs to be video only; or use VideoInsightsOnly if you expect some of your inputs to be audio only. Your Jobs in such conditions would error out. Defaults to AllInsights.
    audioAnalysisMode String
    Possible values are Basic or Standard. Determines the set of audio analysis operations to be performed. Defaults to Standard.
    audioLanguage String
    The language for the audio payload in the input using the BCP-47 format of 'language tag-region' (e.g. 'en-US'). If you know the language of your content, it is recommended that you specify it. The language must be specified explicitly for AudioAnalysisMode:Basic, since automatic language detection is not included in basic mode. If the language isn't specified, automatic language detection will choose the first language detected and process with the selected language for the duration of the file. It does not currently support dynamically switching between languages after the first language is detected. The automatic detection works best with audio recordings with clearly discernible speech. If automatic detection fails to find the language, transcription would fall back to en-US. The list of supported languages is available here: https://go.microsoft.com/fwlink/?linkid=2109463.
    experimentalOptions Map<String,String>
    Dictionary containing key value pairs for parameters not exposed in the preset itself.
    insightsType String
    Defines the type of insights that you want the service to generate. The allowed values are AudioInsightsOnly, VideoInsightsOnly, and AllInsights. If you set this to AllInsights and the input is audio only, then only audio insights are generated. Similarly, if the input is video only, then only video insights are generated. It is recommended that you not use AudioInsightsOnly if you expect some of your inputs to be video only; or use VideoInsightsOnly if you expect some of your inputs to be audio only. Your Jobs in such conditions would error out. Defaults to AllInsights.
    audioAnalysisMode string
    Possible values are Basic or Standard. Determines the set of audio analysis operations to be performed. Defaults to Standard.
    audioLanguage string
    The language for the audio payload in the input using the BCP-47 format of 'language tag-region' (e.g. 'en-US'). If you know the language of your content, it is recommended that you specify it. The language must be specified explicitly for AudioAnalysisMode:Basic, since automatic language detection is not included in basic mode. If the language isn't specified, automatic language detection will choose the first language detected and process with the selected language for the duration of the file. It does not currently support dynamically switching between languages after the first language is detected. The automatic detection works best with audio recordings with clearly discernible speech. If automatic detection fails to find the language, transcription would fall back to en-US. The list of supported languages is available here: https://go.microsoft.com/fwlink/?linkid=2109463.
    experimentalOptions {[key: string]: string}
    Dictionary containing key value pairs for parameters not exposed in the preset itself.
    insightsType string
    Defines the type of insights that you want the service to generate. The allowed values are AudioInsightsOnly, VideoInsightsOnly, and AllInsights. If you set this to AllInsights and the input is audio only, then only audio insights are generated. Similarly, if the input is video only, then only video insights are generated. It is recommended that you not use AudioInsightsOnly if you expect some of your inputs to be video only; or use VideoInsightsOnly if you expect some of your inputs to be audio only. Your Jobs in such conditions would error out. Defaults to AllInsights.
    audio_analysis_mode str
    Possible values are Basic or Standard. Determines the set of audio analysis operations to be performed. Defaults to Standard.
    audio_language str
    The language for the audio payload in the input using the BCP-47 format of 'language tag-region' (e.g. 'en-US'). If you know the language of your content, it is recommended that you specify it. The language must be specified explicitly for AudioAnalysisMode:Basic, since automatic language detection is not included in basic mode. If the language isn't specified, automatic language detection will choose the first language detected and process with the selected language for the duration of the file. It does not currently support dynamically switching between languages after the first language is detected. The automatic detection works best with audio recordings with clearly discernible speech. If automatic detection fails to find the language, transcription would fall back to en-US. The list of supported languages is available here: https://go.microsoft.com/fwlink/?linkid=2109463.
    experimental_options Mapping[str, str]
    Dictionary containing key value pairs for parameters not exposed in the preset itself.
    insights_type str
    Defines the type of insights that you want the service to generate. The allowed values are AudioInsightsOnly, VideoInsightsOnly, and AllInsights. If you set this to AllInsights and the input is audio only, then only audio insights are generated. Similarly, if the input is video only, then only video insights are generated. It is recommended that you not use AudioInsightsOnly if you expect some of your inputs to be video only; or use VideoInsightsOnly if you expect some of your inputs to be audio only. Your Jobs in such conditions would error out. Defaults to AllInsights.
    audioAnalysisMode String
    Possible values are Basic or Standard. Determines the set of audio analysis operations to be performed. Defaults to Standard.
    audioLanguage String
    The language for the audio payload in the input using the BCP-47 format of 'language tag-region' (e.g. 'en-US'). If you know the language of your content, it is recommended that you specify it. The language must be specified explicitly for AudioAnalysisMode:Basic, since automatic language detection is not included in basic mode. If the language isn't specified, automatic language detection will choose the first language detected and process with the selected language for the duration of the file. It does not currently support dynamically switching between languages after the first language is detected. The automatic detection works best with audio recordings with clearly discernible speech. If automatic detection fails to find the language, transcription would fall back to en-US. The list of supported languages is available here: https://go.microsoft.com/fwlink/?linkid=2109463.
    experimentalOptions Map<String>
    Dictionary containing key value pairs for parameters not exposed in the preset itself.
    insightsType String
    Defines the type of insights that you want the service to generate. The allowed values are AudioInsightsOnly, VideoInsightsOnly, and AllInsights. If you set this to AllInsights and the input is audio only, then only audio insights are generated. Similarly, if the input is video only, then only video insights are generated. It is recommended that you not use AudioInsightsOnly if you expect some of your inputs to be video only; or use VideoInsightsOnly if you expect some of your inputs to be audio only. Your Jobs in such conditions would error out. Defaults to AllInsights.

    Import

    Transforms can be imported using the resource id, e.g.

    $ pulumi import azure:media/transform:Transform example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.Media/mediaServices/media1/transforms/transform1
    

    To learn more about importing existing cloud resources, see Importing resources.

    Package Details

    Repository
    Azure Classic pulumi/pulumi-azure
    License
    Apache-2.0
    Notes
    This Pulumi package is based on the azurerm Terraform Provider.
    azure logo

    We recommend using Azure Native.

    Azure Classic v5.73.0 published on Monday, Apr 22, 2024 by Pulumi