datarobot.BatchPredictionJobDefinition
Batch Prediction Job Definition
Example Usage
TypeScript, Go, and C# examples are coming soon.
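In the meantime, here is a minimal Python sketch mirroring the Java and YAML examples below. It assumes a datarobot.Deployment named deployment and a datarobot.BasicCredential named credential are defined elsewhere in the program.
import pulumi
import pulumi_datarobot as datarobot

# Assumes `deployment` (datarobot.Deployment) and `credential`
# (datarobot.BasicCredential) are defined elsewhere in this program.
example = datarobot.BatchPredictionJobDefinition(
    "example",
    deployment_id=deployment.id,
    intake_settings={
        "type": "s3",
        "url": "s3://datarobot-public-datasets-redistributable/1k_diabetes_simplified_features.csv",
        "credential_id": credential.id,
    },
    # Optional parameters
    output_settings={
        "type": "s3",
        "url": "s3://my-test-bucket/predictions.csv",
        "credential_id": credential.id,
    },
    num_concurrent=1,
    schedule={
        "minutes": ["15", "45"],
        "hours": ["*"],
        "months": ["*"],
        "day_of_months": ["*"],
        "day_of_weeks": ["*"],
    },
)

pulumi.export("exampleId", example.id)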
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.datarobot.BatchPredictionJobDefinition;
import com.pulumi.datarobot.BatchPredictionJobDefinitionArgs;
import com.pulumi.datarobot.inputs.BatchPredictionJobDefinitionIntakeSettingsArgs;
import com.pulumi.datarobot.inputs.BatchPredictionJobDefinitionOutputSettingsArgs;
import com.pulumi.datarobot.inputs.BatchPredictionJobDefinitionCsvSettingsArgs;
import com.pulumi.datarobot.inputs.BatchPredictionJobDefinitionScheduleArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        // Assumes `deployment` (a datarobot.Deployment) and `credential`
        // (a datarobot.BasicCredential) are defined elsewhere in the program.
        var example = new BatchPredictionJobDefinition("example", BatchPredictionJobDefinitionArgs.builder()
            .deploymentId(deployment.id())
            .intakeSettings(BatchPredictionJobDefinitionIntakeSettingsArgs.builder()
                .type("s3")
                .url("s3://datarobot-public-datasets-redistributable/1k_diabetes_simplified_features.csv")
                .credentialId(credential.id())
                .build())
            .outputSettings(BatchPredictionJobDefinitionOutputSettingsArgs.builder()
                .type("s3")
                .url("s3://my-test-bucket/predictions.csv")
                .credentialId(credential.id())
                .build())
            .csvSettings(BatchPredictionJobDefinitionCsvSettingsArgs.builder()
                .delimiter(".")
                .quotechar("'")
                .encoding("utf-8")
                .build())
            .numConcurrent(1)
            .chunkSize(10)
            .maxExplanations(5)
            .thresholdHigh(0.8)
            .thresholdLow(0.2)
            .predictionThreshold(0.5)
            .includePredictionStatus(true)
            .skipDriftTracking(true)
            .passthroughColumnsSet("all")
            .abortOnError(false)
            .includeProbabilities(true)
            .columnNamesRemapping(Map.of("col1", "newCol1"))
            .schedule(BatchPredictionJobDefinitionScheduleArgs.builder()
                .minutes("15", "45")
                .hours("*")
                .months("*")
                .dayOfMonths("*")
                .dayOfWeeks("*")
                .build())
            .build());

        ctx.export("exampleId", example.id());
    }
}
resources:
  # Assumes `deployment` (datarobot:Deployment) and `credential`
  # (datarobot:BasicCredential) are defined elsewhere in the program.
  example:
    type: datarobot:BatchPredictionJobDefinition
    properties:
      deploymentId: ${deployment.id}
      intakeSettings:
        type: s3
        url: s3://datarobot-public-datasets-redistributable/1k_diabetes_simplified_features.csv
        credentialId: ${credential.id}
      # Optional parameters
      outputSettings:
        type: s3
        url: s3://my-test-bucket/predictions.csv
        credentialId: ${credential.id}
      csvSettings:
        delimiter: .
        quotechar: "'"
        encoding: utf-8
      numConcurrent: 1
      chunkSize: 10
      maxExplanations: 5
      thresholdHigh: 0.8
      thresholdLow: 0.2
      predictionThreshold: 0.5
      includePredictionStatus: true
      skipDriftTracking: true
      passthroughColumnsSet: all
      abortOnError: false
      includeProbabilities: true
      columnNamesRemapping:
        col1: newCol1
      schedule:
        minutes:
          - '15'
          - '45'
        hours:
          - '*'
        months:
          - '*'
        dayOfMonths:
          - '*'
        dayOfWeeks:
          - '*'
outputs:
  exampleId: ${example.id}
Create BatchPredictionJobDefinition Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new BatchPredictionJobDefinition(name: string, args: BatchPredictionJobDefinitionArgs, opts?: CustomResourceOptions);
@overload
def BatchPredictionJobDefinition(resource_name: str,
args: BatchPredictionJobDefinitionArgs,
opts: Optional[ResourceOptions] = None)
@overload
def BatchPredictionJobDefinition(resource_name: str,
opts: Optional[ResourceOptions] = None,
deployment_id: Optional[str] = None,
intake_settings: Optional[BatchPredictionJobDefinitionIntakeSettingsArgs] = None,
name: Optional[str] = None,
timeseries_settings: Optional[BatchPredictionJobDefinitionTimeseriesSettingsArgs] = None,
abort_on_error: Optional[bool] = None,
enabled: Optional[bool] = None,
explanation_algorithm: Optional[str] = None,
include_prediction_status: Optional[bool] = None,
include_probabilities: Optional[bool] = None,
include_probabilities_classes: Optional[Sequence[str]] = None,
chunk_size: Optional[Any] = None,
output_settings: Optional[BatchPredictionJobDefinitionOutputSettingsArgs] = None,
column_names_remapping: Optional[Mapping[str, str]] = None,
csv_settings: Optional[BatchPredictionJobDefinitionCsvSettingsArgs] = None,
max_explanations: Optional[int] = None,
passthrough_columns: Optional[Sequence[str]] = None,
passthrough_columns_set: Optional[str] = None,
prediction_instance: Optional[BatchPredictionJobDefinitionPredictionInstanceArgs] = None,
prediction_threshold: Optional[float] = None,
prediction_warning_enabled: Optional[bool] = None,
schedule: Optional[BatchPredictionJobDefinitionScheduleArgs] = None,
skip_drift_tracking: Optional[bool] = None,
threshold_high: Optional[float] = None,
threshold_low: Optional[float] = None,
num_concurrent: Optional[int] = None)
func NewBatchPredictionJobDefinition(ctx *Context, name string, args BatchPredictionJobDefinitionArgs, opts ...ResourceOption) (*BatchPredictionJobDefinition, error)
public BatchPredictionJobDefinition(string name, BatchPredictionJobDefinitionArgs args, CustomResourceOptions? opts = null)
public BatchPredictionJobDefinition(String name, BatchPredictionJobDefinitionArgs args)
public BatchPredictionJobDefinition(String name, BatchPredictionJobDefinitionArgs args, CustomResourceOptions options)
type: datarobot:BatchPredictionJobDefinition
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args BatchPredictionJobDefinitionArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args BatchPredictionJobDefinitionArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args BatchPredictionJobDefinitionArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args BatchPredictionJobDefinitionArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args BatchPredictionJobDefinitionArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
Constructor example
The following reference example uses placeholder values for all input properties.
var batchPredictionJobDefinitionResource = new Datarobot.BatchPredictionJobDefinition("batchPredictionJobDefinitionResource", new()
{
DeploymentId = "string",
IntakeSettings = new Datarobot.Inputs.BatchPredictionJobDefinitionIntakeSettingsArgs
{
Type = "string",
Catalog = "string",
CredentialId = "string",
DataStoreId = "string",
DatasetId = "string",
EndpointUrl = "string",
FetchSize = 0,
File = "string",
Query = "string",
Schema = "string",
Table = "string",
Url = "string",
},
Name = "string",
TimeseriesSettings = new Datarobot.Inputs.BatchPredictionJobDefinitionTimeseriesSettingsArgs
{
ForecastPoint = "string",
PredictionsEndDate = "string",
PredictionsStartDate = "string",
RelaxKnownInAdvanceFeaturesCheck = false,
Type = "string",
},
AbortOnError = false,
Enabled = false,
ExplanationAlgorithm = "string",
IncludePredictionStatus = false,
IncludeProbabilities = false,
IncludeProbabilitiesClasses = new[]
{
"string",
},
ChunkSize = "any",
OutputSettings = new Datarobot.Inputs.BatchPredictionJobDefinitionOutputSettingsArgs
{
Catalog = "string",
CreateTableIfNotExists = false,
CredentialId = "string",
DataStoreId = "string",
EndpointUrl = "string",
Path = "string",
Schema = "string",
StatementType = "string",
Table = "string",
Type = "string",
UpdateColumns = new[]
{
"string",
},
Url = "string",
WhereColumns = new[]
{
"string",
},
},
ColumnNamesRemapping =
{
{ "string", "string" },
},
CsvSettings = new Datarobot.Inputs.BatchPredictionJobDefinitionCsvSettingsArgs
{
Delimiter = "string",
Encoding = "string",
Quotechar = "string",
},
MaxExplanations = 0,
PassthroughColumns = new[]
{
"string",
},
PassthroughColumnsSet = "string",
PredictionInstance = new Datarobot.Inputs.BatchPredictionJobDefinitionPredictionInstanceArgs
{
HostName = "string",
ApiKey = "string",
DatarobotKey = "string",
SslEnabled = false,
},
PredictionThreshold = 0,
PredictionWarningEnabled = false,
Schedule = new Datarobot.Inputs.BatchPredictionJobDefinitionScheduleArgs
{
DayOfMonths = new[]
{
"string",
},
DayOfWeeks = new[]
{
"string",
},
Hours = new[]
{
"string",
},
Minutes = new[]
{
"string",
},
Months = new[]
{
"string",
},
},
SkipDriftTracking = false,
ThresholdHigh = 0,
ThresholdLow = 0,
NumConcurrent = 0,
});
example, err := datarobot.NewBatchPredictionJobDefinition(ctx, "batchPredictionJobDefinitionResource", &datarobot.BatchPredictionJobDefinitionArgs{
DeploymentId: pulumi.String("string"),
IntakeSettings: &datarobot.BatchPredictionJobDefinitionIntakeSettingsArgs{
Type: pulumi.String("string"),
Catalog: pulumi.String("string"),
CredentialId: pulumi.String("string"),
DataStoreId: pulumi.String("string"),
DatasetId: pulumi.String("string"),
EndpointUrl: pulumi.String("string"),
FetchSize: pulumi.Int(0),
File: pulumi.String("string"),
Query: pulumi.String("string"),
Schema: pulumi.String("string"),
Table: pulumi.String("string"),
Url: pulumi.String("string"),
},
Name: pulumi.String("string"),
TimeseriesSettings: &datarobot.BatchPredictionJobDefinitionTimeseriesSettingsArgs{
ForecastPoint: pulumi.String("string"),
PredictionsEndDate: pulumi.String("string"),
PredictionsStartDate: pulumi.String("string"),
RelaxKnownInAdvanceFeaturesCheck: pulumi.Bool(false),
Type: pulumi.String("string"),
},
AbortOnError: pulumi.Bool(false),
Enabled: pulumi.Bool(false),
ExplanationAlgorithm: pulumi.String("string"),
IncludePredictionStatus: pulumi.Bool(false),
IncludeProbabilities: pulumi.Bool(false),
IncludeProbabilitiesClasses: pulumi.StringArray{
pulumi.String("string"),
},
ChunkSize: pulumi.Any("any"),
OutputSettings: &datarobot.BatchPredictionJobDefinitionOutputSettingsArgs{
Catalog: pulumi.String("string"),
CreateTableIfNotExists: pulumi.Bool(false),
CredentialId: pulumi.String("string"),
DataStoreId: pulumi.String("string"),
EndpointUrl: pulumi.String("string"),
Path: pulumi.String("string"),
Schema: pulumi.String("string"),
StatementType: pulumi.String("string"),
Table: pulumi.String("string"),
Type: pulumi.String("string"),
UpdateColumns: pulumi.StringArray{
pulumi.String("string"),
},
Url: pulumi.String("string"),
WhereColumns: pulumi.StringArray{
pulumi.String("string"),
},
},
ColumnNamesRemapping: pulumi.StringMap{
"string": pulumi.String("string"),
},
CsvSettings: &datarobot.BatchPredictionJobDefinitionCsvSettingsArgs{
Delimiter: pulumi.String("string"),
Encoding: pulumi.String("string"),
Quotechar: pulumi.String("string"),
},
MaxExplanations: pulumi.Int(0),
PassthroughColumns: pulumi.StringArray{
pulumi.String("string"),
},
PassthroughColumnsSet: pulumi.String("string"),
PredictionInstance: &datarobot.BatchPredictionJobDefinitionPredictionInstanceArgs{
HostName: pulumi.String("string"),
ApiKey: pulumi.String("string"),
DatarobotKey: pulumi.String("string"),
SslEnabled: pulumi.Bool(false),
},
PredictionThreshold: pulumi.Float64(0),
PredictionWarningEnabled: pulumi.Bool(false),
Schedule: &datarobot.BatchPredictionJobDefinitionScheduleArgs{
DayOfMonths: pulumi.StringArray{
pulumi.String("string"),
},
DayOfWeeks: pulumi.StringArray{
pulumi.String("string"),
},
Hours: pulumi.StringArray{
pulumi.String("string"),
},
Minutes: pulumi.StringArray{
pulumi.String("string"),
},
Months: pulumi.StringArray{
pulumi.String("string"),
},
},
SkipDriftTracking: pulumi.Bool(false),
ThresholdHigh: pulumi.Float64(0),
ThresholdLow: pulumi.Float64(0),
NumConcurrent: pulumi.Int(0),
})
var batchPredictionJobDefinitionResource = new BatchPredictionJobDefinition("batchPredictionJobDefinitionResource", BatchPredictionJobDefinitionArgs.builder()
.deploymentId("string")
.intakeSettings(BatchPredictionJobDefinitionIntakeSettingsArgs.builder()
.type("string")
.catalog("string")
.credentialId("string")
.dataStoreId("string")
.datasetId("string")
.endpointUrl("string")
.fetchSize(0)
.file("string")
.query("string")
.schema("string")
.table("string")
.url("string")
.build())
.name("string")
.timeseriesSettings(BatchPredictionJobDefinitionTimeseriesSettingsArgs.builder()
.forecastPoint("string")
.predictionsEndDate("string")
.predictionsStartDate("string")
.relaxKnownInAdvanceFeaturesCheck(false)
.type("string")
.build())
.abortOnError(false)
.enabled(false)
.explanationAlgorithm("string")
.includePredictionStatus(false)
.includeProbabilities(false)
.includeProbabilitiesClasses("string")
.chunkSize("any")
.outputSettings(BatchPredictionJobDefinitionOutputSettingsArgs.builder()
.catalog("string")
.createTableIfNotExists(false)
.credentialId("string")
.dataStoreId("string")
.endpointUrl("string")
.path("string")
.schema("string")
.statementType("string")
.table("string")
.type("string")
.updateColumns("string")
.url("string")
.whereColumns("string")
.build())
.columnNamesRemapping(Map.of("string", "string"))
.csvSettings(BatchPredictionJobDefinitionCsvSettingsArgs.builder()
.delimiter("string")
.encoding("string")
.quotechar("string")
.build())
.maxExplanations(0)
.passthroughColumns("string")
.passthroughColumnsSet("string")
.predictionInstance(BatchPredictionJobDefinitionPredictionInstanceArgs.builder()
.hostName("string")
.apiKey("string")
.datarobotKey("string")
.sslEnabled(false)
.build())
.predictionThreshold(0)
.predictionWarningEnabled(false)
.schedule(BatchPredictionJobDefinitionScheduleArgs.builder()
.dayOfMonths("string")
.dayOfWeeks("string")
.hours("string")
.minutes("string")
.months("string")
.build())
.skipDriftTracking(false)
.thresholdHigh(0)
.thresholdLow(0)
.numConcurrent(0)
.build());
batch_prediction_job_definition_resource = datarobot.BatchPredictionJobDefinition("batchPredictionJobDefinitionResource",
deployment_id="string",
intake_settings={
"type": "string",
"catalog": "string",
"credential_id": "string",
"data_store_id": "string",
"dataset_id": "string",
"endpoint_url": "string",
"fetch_size": 0,
"file": "string",
"query": "string",
"schema": "string",
"table": "string",
"url": "string",
},
name="string",
timeseries_settings={
"forecast_point": "string",
"predictions_end_date": "string",
"predictions_start_date": "string",
"relax_known_in_advance_features_check": False,
"type": "string",
},
abort_on_error=False,
enabled=False,
explanation_algorithm="string",
include_prediction_status=False,
include_probabilities=False,
include_probabilities_classes=["string"],
chunk_size="any",
output_settings={
"catalog": "string",
"create_table_if_not_exists": False,
"credential_id": "string",
"data_store_id": "string",
"endpoint_url": "string",
"path": "string",
"schema": "string",
"statement_type": "string",
"table": "string",
"type": "string",
"update_columns": ["string"],
"url": "string",
"where_columns": ["string"],
},
column_names_remapping={
"string": "string",
},
csv_settings={
"delimiter": "string",
"encoding": "string",
"quotechar": "string",
},
max_explanations=0,
passthrough_columns=["string"],
passthrough_columns_set="string",
prediction_instance={
"host_name": "string",
"api_key": "string",
"datarobot_key": "string",
"ssl_enabled": False,
},
prediction_threshold=0,
prediction_warning_enabled=False,
schedule={
"day_of_months": ["string"],
"day_of_weeks": ["string"],
"hours": ["string"],
"minutes": ["string"],
"months": ["string"],
},
skip_drift_tracking=False,
threshold_high=0,
threshold_low=0,
num_concurrent=0)
const batchPredictionJobDefinitionResource = new datarobot.BatchPredictionJobDefinition("batchPredictionJobDefinitionResource", {
deploymentId: "string",
intakeSettings: {
type: "string",
catalog: "string",
credentialId: "string",
dataStoreId: "string",
datasetId: "string",
endpointUrl: "string",
fetchSize: 0,
file: "string",
query: "string",
schema: "string",
table: "string",
url: "string",
},
name: "string",
timeseriesSettings: {
forecastPoint: "string",
predictionsEndDate: "string",
predictionsStartDate: "string",
relaxKnownInAdvanceFeaturesCheck: false,
type: "string",
},
abortOnError: false,
enabled: false,
explanationAlgorithm: "string",
includePredictionStatus: false,
includeProbabilities: false,
includeProbabilitiesClasses: ["string"],
chunkSize: "any",
outputSettings: {
catalog: "string",
createTableIfNotExists: false,
credentialId: "string",
dataStoreId: "string",
endpointUrl: "string",
path: "string",
schema: "string",
statementType: "string",
table: "string",
type: "string",
updateColumns: ["string"],
url: "string",
whereColumns: ["string"],
},
columnNamesRemapping: {
string: "string",
},
csvSettings: {
delimiter: "string",
encoding: "string",
quotechar: "string",
},
maxExplanations: 0,
passthroughColumns: ["string"],
passthroughColumnsSet: "string",
predictionInstance: {
hostName: "string",
apiKey: "string",
datarobotKey: "string",
sslEnabled: false,
},
predictionThreshold: 0,
predictionWarningEnabled: false,
schedule: {
dayOfMonths: ["string"],
dayOfWeeks: ["string"],
hours: ["string"],
minutes: ["string"],
months: ["string"],
},
skipDriftTracking: false,
thresholdHigh: 0,
thresholdLow: 0,
numConcurrent: 0,
});
type: datarobot:BatchPredictionJobDefinition
properties:
abortOnError: false
chunkSize: any
columnNamesRemapping:
string: string
csvSettings:
delimiter: string
encoding: string
quotechar: string
deploymentId: string
enabled: false
explanationAlgorithm: string
includePredictionStatus: false
includeProbabilities: false
includeProbabilitiesClasses:
- string
intakeSettings:
catalog: string
credentialId: string
dataStoreId: string
datasetId: string
endpointUrl: string
fetchSize: 0
file: string
query: string
schema: string
table: string
type: string
url: string
maxExplanations: 0
name: string
numConcurrent: 0
outputSettings:
catalog: string
createTableIfNotExists: false
credentialId: string
dataStoreId: string
endpointUrl: string
path: string
schema: string
statementType: string
table: string
type: string
updateColumns:
- string
url: string
whereColumns:
- string
passthroughColumns:
- string
passthroughColumnsSet: string
predictionInstance:
apiKey: string
datarobotKey: string
hostName: string
sslEnabled: false
predictionThreshold: 0
predictionWarningEnabled: false
schedule:
dayOfMonths:
- string
dayOfWeeks:
- string
hours:
- string
minutes:
- string
months:
- string
skipDriftTracking: false
thresholdHigh: 0
thresholdLow: 0
timeseriesSettings:
forecastPoint: string
predictionsEndDate: string
predictionsStartDate: string
relaxKnownInAdvanceFeaturesCheck: false
type: string
BatchPredictionJobDefinition Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
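For instance, the intake settings below are given first as an argument class and then as an equivalent dictionary literal; this is a minimal sketch, and the deployment and credential IDs are placeholders.
import pulumi_datarobot as datarobot

# Argument-class form.
job = datarobot.BatchPredictionJobDefinition(
    "job-args",
    deployment_id="<deployment-id>",  # placeholder
    intake_settings=datarobot.BatchPredictionJobDefinitionIntakeSettingsArgs(
        type="s3",
        url="s3://my-bucket/input.csv",
        credential_id="<credential-id>",  # placeholder
    ),
)

# Equivalent dictionary-literal form.
job_from_dict = datarobot.BatchPredictionJobDefinition(
    "job-dict",
    deployment_id="<deployment-id>",  # placeholder
    intake_settings={
        "type": "s3",
        "url": "s3://my-bucket/input.csv",
        "credential_id": "<credential-id>",  # placeholder
    },
)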
The BatchPredictionJobDefinition resource accepts the following input properties:
- DeploymentId string
- The ID of the deployment to use for the batch prediction job.
- IntakeSettings DataRobotBatchPredictionJobDefinitionIntakeSettings
- A dict configuring where the intake data comes from.
- AbortOnError bool
- Default behavior is to abort the job if too many rows fail scoring; this frees up resources for other jobs that may score successfully. Set to false to unconditionally score every row, no matter how many errors are encountered. Defaults to True.
- ChunkSize object
- Which strategy should be used to determine the chunk size. Can be either a named strategy or a fixed size in bytes.
- ColumnNamesRemapping Dictionary<string, string>
- Mapping with column renaming for the output table.
- CsvSettings DataRobotBatchPredictionJobDefinitionCsvSettings
- CSV intake and output settings.
- Enabled bool
- Whether the job definition should be active on a scheduled basis. If True, schedule is required.
- ExplanationAlgorithm string
- Which algorithm will be used to calculate prediction explanations.
- IncludePredictionStatus bool
- Include the prediction_status column in the output. Defaults to False.
- IncludeProbabilities bool
- Flag that enables returning all probability columns. Defaults to True.
- IncludeProbabilitiesClasses List<string>
- List a subset of classes if you do not want all classes returned. Defaults to [].
- MaxExplanations int
- Compute prediction explanations for this number of features.
- Name string
- The name your job will be identified by. Must be unique across the organization's existing jobs.
- NumConcurrent int
- Number of chunks to score concurrently. Defaults to the number of cores available to the deployment. Lower it to leave resources for real-time scoring.
- OutputSettings DataRobotBatchPredictionJobDefinitionOutputSettings
- A dict configuring how scored data is saved.
- PassthroughColumns List<string>
- Keep these columns from the scoring dataset in the scored dataset. Useful for correlating predictions with source data.
- PassthroughColumnsSet string
- To pass through every column from the scoring dataset, set this to all.
- PredictionInstance DataRobotBatchPredictionJobDefinitionPredictionInstance
- Defaults to the instance specified by the deployment or system configuration.
- PredictionThreshold double
- The threshold that sets the class boundary for a predicted value. Can be set between 0.0 and 1.0.
- PredictionWarningEnabled bool
- Add prediction warnings to the scored data. Currently only supported for regression models. Defaults to False.
- Schedule DataRobotBatchPredictionJobDefinitionSchedule
- Defines at what intervals the job should run.
- SkipDriftTracking bool
- Skips drift tracking on any predictions made from this job. Useful for non-production workloads, to avoid affecting drift tracking and causing unnecessary alerts. Defaults to False.
- ThresholdHigh double
- Only compute prediction explanations for predictions above this threshold. Can be combined with threshold_low.
- ThresholdLow double
- Only compute prediction explanations for predictions below this threshold. Can be combined with threshold_high.
- TimeseriesSettings DataRobotBatchPredictionJobDefinitionTimeseriesSettings
- Configuration for time-series scoring.
- DeploymentId string
- The ID of the deployment to use for the batch prediction job.
- IntakeSettings BatchPredictionJobDefinitionIntakeSettingsArgs
- A dict configuring where the intake data comes from.
- AbortOnError bool
- Default behavior is to abort the job if too many rows fail scoring; this frees up resources for other jobs that may score successfully. Set to false to unconditionally score every row, no matter how many errors are encountered. Defaults to True.
- ChunkSize interface{}
- Which strategy should be used to determine the chunk size. Can be either a named strategy or a fixed size in bytes.
- ColumnNamesRemapping map[string]string
- Mapping with column renaming for the output table.
- CsvSettings BatchPredictionJobDefinitionCsvSettingsArgs
- CSV intake and output settings.
- Enabled bool
- Whether the job definition should be active on a scheduled basis. If True, schedule is required.
- ExplanationAlgorithm string
- Which algorithm will be used to calculate prediction explanations.
- IncludePredictionStatus bool
- Include the prediction_status column in the output. Defaults to False.
- IncludeProbabilities bool
- Flag that enables returning all probability columns. Defaults to True.
- IncludeProbabilitiesClasses []string
- List a subset of classes if you do not want all classes returned. Defaults to [].
- MaxExplanations int
- Compute prediction explanations for this number of features.
- Name string
- The name your job will be identified by. Must be unique across the organization's existing jobs.
- NumConcurrent int
- Number of chunks to score concurrently. Defaults to the number of cores available to the deployment. Lower it to leave resources for real-time scoring.
- OutputSettings BatchPredictionJobDefinitionOutputSettingsArgs
- A dict configuring how scored data is saved.
- PassthroughColumns []string
- Keep these columns from the scoring dataset in the scored dataset. Useful for correlating predictions with source data.
- PassthroughColumnsSet string
- To pass through every column from the scoring dataset, set this to all.
- PredictionInstance BatchPredictionJobDefinitionPredictionInstanceArgs
- Defaults to the instance specified by the deployment or system configuration.
- PredictionThreshold float64
- The threshold that sets the class boundary for a predicted value. Can be set between 0.0 and 1.0.
- PredictionWarningEnabled bool
- Add prediction warnings to the scored data. Currently only supported for regression models. Defaults to False.
- Schedule BatchPredictionJobDefinitionScheduleArgs
- Defines at what intervals the job should run.
- SkipDriftTracking bool
- Skips drift tracking on any predictions made from this job. Useful for non-production workloads, to avoid affecting drift tracking and causing unnecessary alerts. Defaults to False.
- ThresholdHigh float64
- Only compute prediction explanations for predictions above this threshold. Can be combined with threshold_low.
- ThresholdLow float64
- Only compute prediction explanations for predictions below this threshold. Can be combined with threshold_high.
- TimeseriesSettings BatchPredictionJobDefinitionTimeseriesSettingsArgs
- Configuration for time-series scoring.
- deploymentId String
- The ID of the deployment to use for the batch prediction job.
- intakeSettings BatchPredictionJobDefinitionIntakeSettings
- A dict configuring where the intake data comes from.
- abortOnError Boolean
- Default behavior is to abort the job if too many rows fail scoring; this frees up resources for other jobs that may score successfully. Set to false to unconditionally score every row, no matter how many errors are encountered. Defaults to True.
- chunkSize Object
- Which strategy should be used to determine the chunk size. Can be either a named strategy or a fixed size in bytes.
- columnNamesRemapping Map<String,String>
- Mapping with column renaming for the output table.
- csvSettings BatchPredictionJobDefinitionCsvSettings
- CSV intake and output settings.
- enabled Boolean
- Whether the job definition should be active on a scheduled basis. If True, schedule is required.
- explanationAlgorithm String
- Which algorithm will be used to calculate prediction explanations.
- includePredictionStatus Boolean
- Include the prediction_status column in the output. Defaults to False.
- includeProbabilities Boolean
- Flag that enables returning all probability columns. Defaults to True.
- includeProbabilitiesClasses List<String>
- List a subset of classes if you do not want all classes returned. Defaults to [].
- maxExplanations Integer
- Compute prediction explanations for this number of features.
- name String
- The name your job will be identified by. Must be unique across the organization's existing jobs.
- numConcurrent Integer
- Number of chunks to score concurrently. Defaults to the number of cores available to the deployment. Lower it to leave resources for real-time scoring.
- outputSettings BatchPredictionJobDefinitionOutputSettings
- A dict configuring how scored data is saved.
- passthroughColumns List<String>
- Keep these columns from the scoring dataset in the scored dataset. Useful for correlating predictions with source data.
- passthroughColumnsSet String
- To pass through every column from the scoring dataset, set this to all.
- predictionInstance BatchPredictionJobDefinitionPredictionInstance
- Defaults to the instance specified by the deployment or system configuration.
- predictionThreshold Double
- The threshold that sets the class boundary for a predicted value. Can be set between 0.0 and 1.0.
- predictionWarningEnabled Boolean
- Add prediction warnings to the scored data. Currently only supported for regression models. Defaults to False.
- schedule BatchPredictionJobDefinitionSchedule
- Defines at what intervals the job should run.
- skipDriftTracking Boolean
- Skips drift tracking on any predictions made from this job. Useful for non-production workloads, to avoid affecting drift tracking and causing unnecessary alerts. Defaults to False.
- thresholdHigh Double
- Only compute prediction explanations for predictions above this threshold. Can be combined with threshold_low.
- thresholdLow Double
- Only compute prediction explanations for predictions below this threshold. Can be combined with threshold_high.
- timeseriesSettings BatchPredictionJobDefinitionTimeseriesSettings
- Configuration for time-series scoring.
- deploymentId string
- The ID of the deployment to use for the batch prediction job.
- intakeSettings BatchPredictionJobDefinitionIntakeSettings
- A dict configuring where the intake data comes from.
- abortOnError boolean
- Default behavior is to abort the job if too many rows fail scoring; this frees up resources for other jobs that may score successfully. Set to false to unconditionally score every row, no matter how many errors are encountered. Defaults to True.
- chunkSize any
- Which strategy should be used to determine the chunk size. Can be either a named strategy or a fixed size in bytes.
- columnNamesRemapping {[key: string]: string}
- Mapping with column renaming for the output table.
- csvSettings BatchPredictionJobDefinitionCsvSettings
- CSV intake and output settings.
- enabled boolean
- Whether the job definition should be active on a scheduled basis. If True, schedule is required.
- explanationAlgorithm string
- Which algorithm will be used to calculate prediction explanations.
- includePredictionStatus boolean
- Include the prediction_status column in the output. Defaults to False.
- includeProbabilities boolean
- Flag that enables returning all probability columns. Defaults to True.
- includeProbabilitiesClasses string[]
- List a subset of classes if you do not want all classes returned. Defaults to [].
- maxExplanations number
- Compute prediction explanations for this number of features.
- name string
- The name your job will be identified by. Must be unique across the organization's existing jobs.
- numConcurrent number
- Number of chunks to score concurrently. Defaults to the number of cores available to the deployment. Lower it to leave resources for real-time scoring.
- outputSettings BatchPredictionJobDefinitionOutputSettings
- A dict configuring how scored data is saved.
- passthroughColumns string[]
- Keep these columns from the scoring dataset in the scored dataset. Useful for correlating predictions with source data.
- passthroughColumnsSet string
- To pass through every column from the scoring dataset, set this to all.
- predictionInstance BatchPredictionJobDefinitionPredictionInstance
- Defaults to the instance specified by the deployment or system configuration.
- predictionThreshold number
- The threshold that sets the class boundary for a predicted value. Can be set between 0.0 and 1.0.
- predictionWarningEnabled boolean
- Add prediction warnings to the scored data. Currently only supported for regression models. Defaults to False.
- schedule BatchPredictionJobDefinitionSchedule
- Defines at what intervals the job should run.
- skipDriftTracking boolean
- Skips drift tracking on any predictions made from this job. Useful for non-production workloads, to avoid affecting drift tracking and causing unnecessary alerts. Defaults to False.
- thresholdHigh number
- Only compute prediction explanations for predictions above this threshold. Can be combined with threshold_low.
- thresholdLow number
- Only compute prediction explanations for predictions below this threshold. Can be combined with threshold_high.
- timeseriesSettings BatchPredictionJobDefinitionTimeseriesSettings
- Configuration for time-series scoring.
- deployment_id str
- The ID of the deployment to use for the batch prediction job.
- intake_settings BatchPredictionJobDefinitionIntakeSettingsArgs
- A dict configuring where the intake data comes from.
- abort_on_error bool
- Default behavior is to abort the job if too many rows fail scoring; this frees up resources for other jobs that may score successfully. Set to false to unconditionally score every row, no matter how many errors are encountered. Defaults to True.
- chunk_size Any
- Which strategy should be used to determine the chunk size. Can be either a named strategy or a fixed size in bytes.
- column_names_remapping Mapping[str, str]
- Mapping with column renaming for the output table.
- csv_settings BatchPredictionJobDefinitionCsvSettingsArgs
- CSV intake and output settings.
- enabled bool
- Whether the job definition should be active on a scheduled basis. If True, schedule is required.
- explanation_algorithm str
- Which algorithm will be used to calculate prediction explanations.
- include_prediction_status bool
- Include the prediction_status column in the output. Defaults to False.
- include_probabilities bool
- Flag that enables returning all probability columns. Defaults to True.
- include_probabilities_classes Sequence[str]
- List a subset of classes if you do not want all classes returned. Defaults to [].
- max_explanations int
- Compute prediction explanations for this number of features.
- name str
- The name your job will be identified by. Must be unique across the organization's existing jobs.
- num_concurrent int
- Number of chunks to score concurrently. Defaults to the number of cores available to the deployment. Lower it to leave resources for real-time scoring.
- output_settings BatchPredictionJobDefinitionOutputSettingsArgs
- A dict configuring how scored data is saved.
- passthrough_columns Sequence[str]
- Keep these columns from the scoring dataset in the scored dataset. Useful for correlating predictions with source data.
- passthrough_columns_set str
- To pass through every column from the scoring dataset, set this to all.
- prediction_instance BatchPredictionJobDefinitionPredictionInstanceArgs
- Defaults to the instance specified by the deployment or system configuration.
- prediction_threshold float
- The threshold that sets the class boundary for a predicted value. Can be set between 0.0 and 1.0.
- prediction_warning_enabled bool
- Add prediction warnings to the scored data. Currently only supported for regression models. Defaults to False.
- schedule BatchPredictionJobDefinitionScheduleArgs
- Defines at what intervals the job should run.
- skip_drift_tracking bool
- Skips drift tracking on any predictions made from this job. Useful for non-production workloads, to avoid affecting drift tracking and causing unnecessary alerts. Defaults to False.
- threshold_high float
- Only compute prediction explanations for predictions above this threshold. Can be combined with threshold_low.
- threshold_low float
- Only compute prediction explanations for predictions below this threshold. Can be combined with threshold_high.
- timeseries_settings BatchPredictionJobDefinitionTimeseriesSettingsArgs
- Configuration for time-series scoring.
- deploymentId String
- The ID of the deployment to use for the batch prediction job.
- intakeSettings Property Map
- A dict configuring where the intake data comes from.
- abortOnError Boolean
- Default behavior is to abort the job if too many rows fail scoring; this frees up resources for other jobs that may score successfully. Set to false to unconditionally score every row, no matter how many errors are encountered. Defaults to True.
- chunkSize Any
- Which strategy should be used to determine the chunk size. Can be either a named strategy or a fixed size in bytes.
- columnNamesRemapping Map<String>
- Mapping with column renaming for the output table.
- csvSettings Property Map
- CSV intake and output settings.
- enabled Boolean
- Whether the job definition should be active on a scheduled basis. If True, schedule is required.
- explanationAlgorithm String
- Which algorithm will be used to calculate prediction explanations.
- includePredictionStatus Boolean
- Include the prediction_status column in the output. Defaults to False.
- includeProbabilities Boolean
- Flag that enables returning all probability columns. Defaults to True.
- includeProbabilitiesClasses List<String>
- List a subset of classes if you do not want all classes returned. Defaults to [].
- maxExplanations Number
- Compute prediction explanations for this number of features.
- name String
- The name your job will be identified by. Must be unique across the organization's existing jobs.
- numConcurrent Number
- Number of chunks to score concurrently. Defaults to the number of cores available to the deployment. Lower it to leave resources for real-time scoring.
- outputSettings Property Map
- A dict configuring how scored data is saved.
- passthroughColumns List<String>
- Keep these columns from the scoring dataset in the scored dataset. Useful for correlating predictions with source data.
- passthroughColumnsSet String
- To pass through every column from the scoring dataset, set this to all.
- predictionInstance Property Map
- Defaults to the instance specified by the deployment or system configuration.
- predictionThreshold Number
- The threshold that sets the class boundary for a predicted value. Can be set between 0.0 and 1.0.
- predictionWarningEnabled Boolean
- Add prediction warnings to the scored data. Currently only supported for regression models. Defaults to False.
- schedule Property Map
- Defines at what intervals the job should run.
- skipDriftTracking Boolean
- Skips drift tracking on any predictions made from this job. Useful for non-production workloads, to avoid affecting drift tracking and causing unnecessary alerts. Defaults to False.
- thresholdHigh Number
- Only compute prediction explanations for predictions above this threshold. Can be combined with threshold_low.
- thresholdLow Number
- Only compute prediction explanations for predictions below this threshold. Can be combined with threshold_high.
- timeseriesSettings Property Map
- Configuration for time-series scoring.
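Several of these inputs work together for prediction explanations: explanation_algorithm selects the method, max_explanations caps how many features are explained, and threshold_high/threshold_low restrict which predictions get explanations. A minimal Python sketch; the IDs are placeholders, and "shap" is an assumed algorithm value (check your DataRobot instance for supported values):
import pulumi_datarobot as datarobot

# A sketch of explanation-related inputs; "shap" is an assumed value
# and the IDs are placeholders.
explained = datarobot.BatchPredictionJobDefinition(
    "explained",
    deployment_id="<deployment-id>",
    intake_settings={
        "type": "s3",
        "url": "s3://my-bucket/input.csv",
        "credential_id": "<credential-id>",
    },
    explanation_algorithm="shap",  # assumed algorithm name
    max_explanations=5,            # explain at most 5 features per row
    threshold_high=0.8,            # explain confident positive predictions
    threshold_low=0.2,             # and confident negative predictions
)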
Outputs
All input properties are implicitly available as output properties. Additionally, the BatchPredictionJobDefinition resource produces the following output properties:
- Id string
- The provider-assigned unique ID for this managed resource.
- Id string
- The provider-assigned unique ID for this managed resource.
- id String
- The provider-assigned unique ID for this managed resource.
- id string
- The provider-assigned unique ID for this managed resource.
- id str
- The provider-assigned unique ID for this managed resource.
- id String
- The provider-assigned unique ID for this managed resource.
Look up Existing BatchPredictionJobDefinition Resource
Get an existing BatchPredictionJobDefinition resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: BatchPredictionJobDefinitionState, opts?: CustomResourceOptions): BatchPredictionJobDefinition
@staticmethod
def get(resource_name: str,
id: str,
opts: Optional[ResourceOptions] = None,
abort_on_error: Optional[bool] = None,
chunk_size: Optional[Any] = None,
column_names_remapping: Optional[Mapping[str, str]] = None,
csv_settings: Optional[BatchPredictionJobDefinitionCsvSettingsArgs] = None,
deployment_id: Optional[str] = None,
enabled: Optional[bool] = None,
explanation_algorithm: Optional[str] = None,
include_prediction_status: Optional[bool] = None,
include_probabilities: Optional[bool] = None,
include_probabilities_classes: Optional[Sequence[str]] = None,
intake_settings: Optional[BatchPredictionJobDefinitionIntakeSettingsArgs] = None,
max_explanations: Optional[int] = None,
name: Optional[str] = None,
num_concurrent: Optional[int] = None,
output_settings: Optional[BatchPredictionJobDefinitionOutputSettingsArgs] = None,
passthrough_columns: Optional[Sequence[str]] = None,
passthrough_columns_set: Optional[str] = None,
prediction_instance: Optional[BatchPredictionJobDefinitionPredictionInstanceArgs] = None,
prediction_threshold: Optional[float] = None,
prediction_warning_enabled: Optional[bool] = None,
schedule: Optional[BatchPredictionJobDefinitionScheduleArgs] = None,
skip_drift_tracking: Optional[bool] = None,
threshold_high: Optional[float] = None,
threshold_low: Optional[float] = None,
timeseries_settings: Optional[BatchPredictionJobDefinitionTimeseriesSettingsArgs] = None) -> BatchPredictionJobDefinition
func GetBatchPredictionJobDefinition(ctx *Context, name string, id IDInput, state *BatchPredictionJobDefinitionState, opts ...ResourceOption) (*BatchPredictionJobDefinition, error)
public static BatchPredictionJobDefinition Get(string name, Input<string> id, BatchPredictionJobDefinitionState? state, CustomResourceOptions? opts = null)
public static BatchPredictionJobDefinition get(String name, Output<String> id, BatchPredictionJobDefinitionState state, CustomResourceOptions options)
Resource lookup is not supported in YAML
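For example, a minimal Python sketch that restores state for an existing definition (the ID value is a placeholder):
import pulumi
import pulumi_datarobot as datarobot

# Look up an existing job definition by its provider-assigned ID (placeholder).
existing = datarobot.BatchPredictionJobDefinition.get(
    "existing-definition",
    id="<job-definition-id>",
)

pulumi.export("existingName", existing.name)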
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- AbortOnError bool
- Default behavior is to abort the job if too many rows fail scoring; this frees up resources for other jobs that may score successfully. Set to false to unconditionally score every row, no matter how many errors are encountered. Defaults to True.
- ChunkSize object
- Which strategy should be used to determine the chunk size. Can be either a named strategy or a fixed size in bytes.
- ColumnNamesRemapping Dictionary<string, string>
- Mapping with column renaming for the output table.
- CsvSettings DataRobotBatchPredictionJobDefinitionCsvSettings
- CSV intake and output settings.
- DeploymentId string
- The ID of the deployment to use for the batch prediction job.
- Enabled bool
- Whether the job definition should be active on a scheduled basis. If True, schedule is required.
- ExplanationAlgorithm string
- Which algorithm will be used to calculate prediction explanations.
- IncludePredictionStatus bool
- Include the prediction_status column in the output. Defaults to False.
- IncludeProbabilities bool
- Flag that enables returning all probability columns. Defaults to True.
- IncludeProbabilitiesClasses List<string>
- List a subset of classes if you do not want all classes returned. Defaults to [].
- IntakeSettings DataRobotBatchPredictionJobDefinitionIntakeSettings
- A dict configuring where the intake data comes from.
- MaxExplanations int
- Compute prediction explanations for this number of features.
- Name string
- The name your job will be identified by. Must be unique across the organization's existing jobs.
- NumConcurrent int
- Number of chunks to score concurrently. Defaults to the number of cores available to the deployment. Lower it to leave resources for real-time scoring.
- OutputSettings DataRobotBatchPredictionJobDefinitionOutputSettings
- A dict configuring how scored data is saved.
- PassthroughColumns List<string>
- Keep these columns from the scoring dataset in the scored dataset. Useful for correlating predictions with source data.
- PassthroughColumnsSet string
- To pass through every column from the scoring dataset, set this to all.
- PredictionInstance DataRobotBatchPredictionJobDefinitionPredictionInstance
- Defaults to the instance specified by the deployment or system configuration.
- PredictionThreshold double
- The threshold that sets the class boundary for a predicted value. Can be set between 0.0 and 1.0.
- PredictionWarningEnabled bool
- Add prediction warnings to the scored data. Currently only supported for regression models. Defaults to False.
- Schedule DataRobotBatchPredictionJobDefinitionSchedule
- Defines at what intervals the job should run.
- SkipDriftTracking bool
- Skips drift tracking on any predictions made from this job. Useful for non-production workloads, to avoid affecting drift tracking and causing unnecessary alerts. Defaults to False.
- ThresholdHigh double
- Only compute prediction explanations for predictions above this threshold. Can be combined with threshold_low.
- ThresholdLow double
- Only compute prediction explanations for predictions below this threshold. Can be combined with threshold_high.
- TimeseriesSettings DataRobotBatchPredictionJobDefinitionTimeseriesSettings
- Configuration for time-series scoring.
- AbortOnError bool
- Default behavior is to abort the job if too many rows fail scoring; this frees up resources for other jobs that may score successfully. Set to false to unconditionally score every row, no matter how many errors are encountered. Defaults to True.
- ChunkSize interface{}
- Which strategy should be used to determine the chunk size. Can be either a named strategy or a fixed size in bytes.
- ColumnNamesRemapping map[string]string
- Mapping with column renaming for the output table.
- CsvSettings BatchPredictionJobDefinitionCsvSettingsArgs
- CSV intake and output settings.
- DeploymentId string
- The ID of the deployment to use for the batch prediction job.
- Enabled bool
- Whether the job definition should be active on a scheduled basis. If True, schedule is required.
- ExplanationAlgorithm string
- Which algorithm will be used to calculate prediction explanations.
- IncludePredictionStatus bool
- Include the prediction_status column in the output. Defaults to False.
- IncludeProbabilities bool
- Flag that enables returning all probability columns. Defaults to True.
- IncludeProbabilitiesClasses []string
- List a subset of classes if you do not want all classes returned. Defaults to [].
- IntakeSettings BatchPredictionJobDefinitionIntakeSettingsArgs
- A dict configuring where the intake data comes from.
- MaxExplanations int
- Compute prediction explanations for this number of features.
- Name string
- The name your job will be identified by. Must be unique across the organization's existing jobs.
- NumConcurrent int
- Number of chunks to score concurrently. Defaults to the number of cores available to the deployment. Lower it to leave resources for real-time scoring.
- OutputSettings BatchPredictionJobDefinitionOutputSettingsArgs
- A dict configuring how scored data is saved.
- PassthroughColumns []string
- Keep these columns from the scoring dataset in the scored dataset. Useful for correlating predictions with source data.
- PassthroughColumnsSet string
- To pass through every column from the scoring dataset, set this to all.
- PredictionInstance BatchPredictionJobDefinitionPredictionInstanceArgs
- Defaults to the instance specified by the deployment or system configuration.
- PredictionThreshold float64
- The threshold that sets the class boundary for a predicted value. Can be set between 0.0 and 1.0.
- PredictionWarningEnabled bool
- Add prediction warnings to the scored data. Currently only supported for regression models. Defaults to False.
- Schedule BatchPredictionJobDefinitionScheduleArgs
- Defines at what intervals the job should run.
- SkipDriftTracking bool
- Skips drift tracking on any predictions made from this job. Useful for non-production workloads, to avoid affecting drift tracking and causing unnecessary alerts. Defaults to False.
- ThresholdHigh float64
- Only compute prediction explanations for predictions above this threshold. Can be combined with threshold_low.
- ThresholdLow float64
- Only compute prediction explanations for predictions below this threshold. Can be combined with threshold_high.
- TimeseriesSettings BatchPredictionJobDefinitionTimeseriesSettingsArgs
- Configuration for time-series scoring.
- abortOnError Boolean
- Default behavior is to abort the job if too many rows fail scoring; this frees up resources for other jobs that may score successfully. Set to false to unconditionally score every row, no matter how many errors are encountered. Defaults to True.
- chunkSize Object
- Which strategy should be used to determine the chunk size. Can be either a named strategy or a fixed size in bytes.
- columnNamesRemapping Map<String,String>
- Mapping with column renaming for the output table.
- csvSettings BatchPredictionJobDefinitionCsvSettings
- CSV intake and output settings.
- deploymentId String
- The ID of the deployment to use for the batch prediction job.
- enabled Boolean
- Whether the job definition should be active on a scheduled basis. If True, schedule is required.
- explanationAlgorithm String
- Which algorithm will be used to calculate prediction explanations.
- includePredictionStatus Boolean
- Include the prediction_status column in the output. Defaults to False.
- includeProbabilities Boolean
- Flag that enables returning all probability columns. Defaults to True.
- includeProbabilitiesClasses List<String>
- List a subset of classes if you do not want all classes returned. Defaults to [].
- intakeSettings BatchPredictionJobDefinitionIntakeSettings
- A dict configuring where the intake data comes from.
- maxExplanations Integer
- Compute prediction explanations for this number of features.
- name String
- The name your job will be identified by. Must be unique across the organization's existing jobs.
- numConcurrent Integer
- Number of chunks to score concurrently. Defaults to the number of cores available to the deployment. Lower it to leave resources for real-time scoring.
- outputSettings BatchPredictionJobDefinitionOutputSettings
- A dict configuring how scored data is saved.
- passthroughColumns List<String>
- Keep these columns from the scoring dataset in the scored dataset. Useful for correlating predictions with source data.
- passthroughColumnsSet String
- To pass through every column from the scoring dataset, set this to all.
- predictionInstance BatchPredictionJobDefinitionPredictionInstance
- Defaults to the instance specified by the deployment or system configuration.
- predictionThreshold Double
- The threshold that sets the class boundary for a predicted value. Can be set between 0.0 and 1.0.
- predictionWarningEnabled Boolean
- Add prediction warnings to the scored data. Currently only supported for regression models. Defaults to False.
- schedule BatchPredictionJobDefinitionSchedule
- Defines at what intervals the job should run.
- skipDriftTracking Boolean
- Skips drift tracking on any predictions made from this job. Useful for non-production workloads, to avoid affecting drift tracking and causing unnecessary alerts. Defaults to False.
- thresholdHigh Double
- Only compute prediction explanations for predictions above this threshold. Can be combined with threshold_low.
- thresholdLow Double
- Only compute prediction explanations for predictions below this threshold. Can be combined with threshold_high.
- timeseriesSettings BatchPredictionJobDefinitionTimeseriesSettings
- Configuration for time-series scoring.
- abortOnError boolean
- Default behavior is to abort the job if too many rows fail scoring; this frees up resources for other jobs that may score successfully. Set to false to unconditionally score every row, no matter how many errors are encountered. Defaults to True.
- chunkSize any
- Which strategy should be used to determine the chunk size. Can be either a named strategy or a fixed size in bytes.
- columnNamesRemapping {[key: string]: string}
- Mapping with column renaming for the output table.
- csvSettings BatchPredictionJobDefinitionCsvSettings
- CSV intake and output settings.
- deploymentId string
- The ID of the deployment to use for the batch prediction job.
- enabled boolean
- Whether the job definition should be active on a scheduled basis. If True, schedule is required.
- explanationAlgorithm string
- Which algorithm will be used to calculate prediction explanations.
- includePredictionStatus boolean
- Include the prediction_status column in the output. Defaults to False.
- includeProbabilities boolean
- Flag that enables returning all probability columns. Defaults to True.
- includeProbabilitiesClasses string[]
- List a subset of classes if you do not want all classes returned. Defaults to [].
- intakeSettings BatchPredictionJobDefinitionIntakeSettings
- A dict configuring where the intake data comes from.
- maxExplanations number
- Compute prediction explanations for this number of features.
- name string
- The name your job will be identified by. Must be unique across the organization's existing jobs.
- numConcurrent number
- Number of chunks to score concurrently. Defaults to the number of cores available to the deployment. Lower it to leave resources for real-time scoring.
- outputSettings BatchPredictionJobDefinitionOutputSettings
- A dict configuring how scored data is saved.
- passthroughColumns string[]
- Keep these columns from the scoring dataset in the scored dataset. Useful for correlating predictions with source data.
- passthroughColumnsSet string
- To pass through every column from the scoring dataset, set this to all.
- predictionInstance BatchPredictionJobDefinitionPredictionInstance
- Defaults to the instance specified by the deployment or system configuration.
- predictionThreshold number
- The threshold that sets the class boundary for a predicted value. Can be set between 0.0 and 1.0.
- predictionWarningEnabled boolean
- Add prediction warnings to the scored data. Currently only supported for regression models. Defaults to False.
- schedule BatchPredictionJobDefinitionSchedule
- Defines at what intervals the job should run.
- skipDriftTracking boolean
- Skips drift tracking on any predictions made from this job. Useful for non-production workloads, to avoid affecting drift tracking and causing unnecessary alerts. Defaults to False.
- thresholdHigh number
- Only compute prediction explanations for predictions above this threshold. Can be combined with threshold_low.
- thresholdLow number
- Only compute prediction explanations for predictions below this threshold. Can be combined with threshold_high.
- timeseriesSettings BatchPredictionJobDefinitionTimeseriesSettings
- Configuration for time-series scoring.
- abort_on_error bool
- Default behavior is to abort the job if too many rows fail scoring; this frees up resources for other jobs that may score successfully. Set to false to unconditionally score every row, no matter how many errors are encountered. Defaults to True.
- chunk_size Any
- Which strategy should be used to determine the chunk size. Can be either a named strategy or a fixed size in bytes.
- column_names_remapping Mapping[str, str]
- Mapping with column renaming for the output table.
- csv_settings BatchPredictionJobDefinitionCsvSettingsArgs
- CSV intake and output settings.
- deployment_id str
- The ID of the deployment to use for the batch prediction job.
- enabled bool
- Whether the job definition should be active on a scheduled basis. If True, schedule is required.
- explanation_algorithm str
- Which algorithm will be used to calculate prediction explanations.
- include_prediction_status bool
- Include the prediction_status column in the output. Defaults to False.
- include_probabilities bool
- Flag that enables returning all probability columns. Defaults to True.
- include_probabilities_classes Sequence[str]
- List a subset of classes if you do not want all classes returned. Defaults to [].
- intake_settings BatchPredictionJobDefinitionIntakeSettingsArgs
- A dict configuring where the intake data comes from.
- max_explanations int
- Compute prediction explanations for this number of features.
- name str
- The name your job will be identified by. Must be unique across the organization's existing jobs.
- num_concurrent int
- Number of chunks to score concurrently. Defaults to the number of cores available to the deployment. Lower it to leave resources for real-time scoring.
- output_settings BatchPredictionJobDefinitionOutputSettingsArgs
- A dict configuring how scored data is saved.
- passthrough_columns Sequence[str]
- Keep these columns from the scoring dataset in the scored dataset. Useful for correlating predictions with source data.
- passthrough_columns_set str
- To pass through every column from the scoring dataset, set this to all.
- prediction_instance BatchPredictionJobDefinitionPredictionInstanceArgs
- Defaults to the instance specified by the deployment or system configuration.
- prediction_threshold float
- The threshold that sets the class boundary for a predicted value. Can be set between 0.0 and 1.0.
- prediction_warning_enabled bool
- Add prediction warnings to the scored data. Currently only supported for regression models. Defaults to False.
- schedule BatchPredictionJobDefinitionScheduleArgs
- Defines at what intervals the job should run.
- skip_drift_tracking bool
- Skips drift tracking on any predictions made from this job. Useful for non-production workloads, to avoid affecting drift tracking and causing unnecessary alerts. Defaults to False.
- threshold_high float
- Only compute prediction explanations for predictions above this threshold. Can be combined with threshold_low.
- threshold_low float
- Only compute prediction explanations for predictions below this threshold. Can be combined with threshold_high.
- timeseries_settings BatchPredictionJobDefinitionTimeseriesSettingsArgs
- Configuration for time-series scoring.
- abortOnError Boolean
- Default behavior is to abort the job if too many rows fail scoring. This will free up resources for other jobs that may score successfully. Set to false to unconditionally score every row, no matter how many errors are encountered. Defaults to true.
- chunkSize Any
- Which strategy should be used to determine the chunk size. Can be either a named strategy or a fixed size in bytes.
- columnNamesRemapping Map<String>
- Mapping with column renaming for the output table.
- csvSettings Property Map
- CSV intake and output settings.
- deploymentId String
- The ID of the deployment to use for the batch prediction job.
- enabled Boolean
- Whether or not the job definition should be active on a scheduled basis. If true, schedule is required.
- explanationAlgorithm String
- Which algorithm will be used to calculate prediction explanations.
- includePredictionStatus Boolean
- Include the prediction_status column in the output. Defaults to false.
- includeProbabilities Boolean
- Flag that enables returning all probability columns. Defaults to true.
- includeProbabilitiesClasses List<String>
- The subset of classes to return, if you do not want all the classes. Defaults to [].
- intakeSettings Property Map
- Settings for where the scoring data comes from.
- maxExplanations Number
- Compute prediction explanations for this number of features.
- name String
- The name you want your job to be identified with. Must be unique across the organization's existing jobs.
- numConcurrent Number
- Number of concurrent chunks to score simultaneously. Defaults to the available number of cores of the deployment. Lower it to leave resources for real-time scoring.
- outputSettings Property Map
- Settings for how the scored data is saved.
- passthroughColumns List<String>
- Keep these columns from the scoring dataset in the scored dataset. This is useful for correlating predictions with source data.
- passthroughColumnsSet String
- To pass through every column from the scoring dataset, set this to all.
- predictionInstance Property Map
- Defaults to the instance specified by the deployment or system configuration.
- predictionThreshold Number
- Threshold is the point that sets the class boundary for a predicted value. This value can be set between 0.0 and 1.0.
- predictionWarningEnabled Boolean
- Add prediction warnings to the scored data. Currently only supported for regression models. Defaults to false.
- schedule Property Map
- Defines at what intervals the job should run.
- skipDriftTracking Boolean
- Skips drift tracking on any predictions made from this job. This is useful when running non-production workloads, to avoid affecting drift tracking and causing unnecessary alerts. Defaults to false.
- thresholdHigh Number
- Only compute prediction explanations for predictions above this threshold. Can be combined with thresholdLow.
- thresholdLow Number
- Only compute prediction explanations for predictions below this threshold. Can be combined with thresholdHigh.
- timeseriesSettings Property Map
- Configuration for time-series scoring.
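Taken together, a minimal TypeScript program wiring these inputs could look like the following sketch. It assumes the provider's Node SDK is importable as @datarobot/pulumi-datarobot; the package name, IDs, and bucket paths are placeholders, not values from this page.
import * as datarobot from "@datarobot/pulumi-datarobot";

// Minimal scheduled batch prediction job definition; all IDs are placeholders.
const example = new datarobot.BatchPredictionJobDefinition("example", {
    deploymentId: "deployment-id",
    intakeSettings: {
        type: "s3",
        url: "s3://my-bucket/scoring-data.csv",
        credentialId: "credential-id",
    },
    outputSettings: {
        type: "s3",
        url: "s3://my-bucket/predictions.csv",
        credentialId: "credential-id",
    },
    enabled: true, // a schedule is required when enabled is true
    schedule: {
        minutes: ["15", "45"],
        hours: ["*"],
        dayOfMonths: ["*"],
        months: ["*"],
        dayOfWeeks: ["*"],
    },
    thresholdLow: 0.2,  // explanations computed only outside the 0.2-0.8 band
    thresholdHigh: 0.8,
    maxExplanations: 5,
});

export const exampleId = example.id;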
Supporting Types
BatchPredictionJobDefinitionCsvSettings, BatchPredictionJobDefinitionCsvSettingsArgs
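Based on the example usage earlier, the CSV settings object carries a delimiter, a quote character, and an encoding, applied to both intake and output. A minimal TypeScript sketch (field names follow that example and are assumptions, not a full schema):
// CSV settings shared by intake and output; field names are assumptions
// taken from the example usage above, not a complete schema.
const csvSettings = {
    delimiter: ",",    // column separator
    quotechar: "\"",   // character used to quote fields
    encoding: "utf-8", // file encoding
};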
BatchPredictionJobDefinitionIntakeSettings, BatchPredictionJobDefinitionIntakeSettingsArgs
- Type string
- Type of data source.
- Catalog string
- The name of the specified database catalog, for JDBC type.
- CredentialId string
- The ID of the credentials for the S3 or JDBC data source.
- DataStoreId string
- The ID of the external data store connected to the JDBC data source.
- DatasetId string
- The ID of the dataset to score, for dataset type.
- EndpointUrl string
- Any non-default endpoint URL for S3 access.
- FetchSize int
- Changing the fetchSize can be used to balance throughput and memory usage, for JDBC type.
- File string
- Path to the file of scoring data, for localFile type.
- Query string
- A self-supplied SELECT statement for the data set you wish to predict on, for JDBC type.
- Schema string
- The name of the specified database schema, for JDBC type.
- Table string
- The name of the specified database table, for JDBC type.
- Url string
- The URL to score (e.g. s3://bucket/key), for S3 type.
- Type string
- Type of data source.
- Catalog string
- The name of the specified database catalog, for JDBC type.
- CredentialId string
- The ID of the credentials for the S3 or JDBC data source.
- DataStoreId string
- The ID of the external data store connected to the JDBC data source.
- DatasetId string
- The ID of the dataset to score, for dataset type.
- EndpointUrl string
- Any non-default endpoint URL for S3 access.
- FetchSize int
- Changing the fetchSize can be used to balance throughput and memory usage, for JDBC type.
- File string
- Path to the file of scoring data, for localFile type.
- Query string
- A self-supplied SELECT statement for the data set you wish to predict on, for JDBC type.
- Schema string
- The name of the specified database schema, for JDBC type.
- Table string
- The name of the specified database table, for JDBC type.
- Url string
- The URL to score (e.g. s3://bucket/key), for S3 type.
- type String
- Type of data source.
- catalog String
- The name of the specified database catalog, for JDBC type.
- credentialId String
- The ID of the credentials for the S3 or JDBC data source.
- dataStoreId String
- The ID of the external data store connected to the JDBC data source.
- datasetId String
- The ID of the dataset to score, for dataset type.
- endpointUrl String
- Any non-default endpoint URL for S3 access.
- fetchSize Integer
- Changing the fetchSize can be used to balance throughput and memory usage, for JDBC type.
- file String
- Path to the file of scoring data, for localFile type.
- query String
- A self-supplied SELECT statement for the data set you wish to predict on, for JDBC type.
- schema String
- The name of the specified database schema, for JDBC type.
- table String
- The name of the specified database table, for JDBC type.
- url String
- The URL to score (e.g. s3://bucket/key), for S3 type.
- type string
- Type of data source.
- catalog string
- The name of the specified database catalog, for JDBC type.
- credentialId string
- The ID of the credentials for the S3 or JDBC data source.
- dataStoreId string
- The ID of the external data store connected to the JDBC data source.
- datasetId string
- The ID of the dataset to score, for dataset type.
- endpointUrl string
- Any non-default endpoint URL for S3 access.
- fetchSize number
- Changing the fetchSize can be used to balance throughput and memory usage, for JDBC type.
- file string
- Path to the file of scoring data, for localFile type.
- query string
- A self-supplied SELECT statement for the data set you wish to predict on, for JDBC type.
- schema string
- The name of the specified database schema, for JDBC type.
- table string
- The name of the specified database table, for JDBC type.
- url string
- The URL to score (e.g. s3://bucket/key), for S3 type.
- type str
- Type of data source.
- catalog str
- The name of the specified database catalog, for JDBC type.
- credential_id str
- The ID of the credentials for the S3 or JDBC data source.
- data_store_id str
- The ID of the external data store connected to the JDBC data source.
- dataset_id str
- The ID of the dataset to score, for dataset type.
- endpoint_url str
- Any non-default endpoint URL for S3 access.
- fetch_size int
- Changing the fetchSize can be used to balance throughput and memory usage, for JDBC type.
- file str
- Path to the file of scoring data, for localFile type.
- query str
- A self-supplied SELECT statement for the data set you wish to predict on, for JDBC type.
- schema str
- The name of the specified database schema, for JDBC type.
- table str
- The name of the specified database table, for JDBC type.
- url str
- The URL to score (e.g. s3://bucket/key), for S3 type.
- type String
- Type of data source.
- catalog String
- The name of the specified database catalog, for JDBC type.
- credentialId String
- The ID of the credentials for the S3 or JDBC data source.
- dataStoreId String
- The ID of the external data store connected to the JDBC data source.
- datasetId String
- The ID of the dataset to score, for dataset type.
- endpointUrl String
- Any non-default endpoint URL for S3 access.
- fetchSize Number
- Changing the fetchSize can be used to balance throughput and memory usage, for JDBC type.
- file String
- Path to the file of scoring data, for localFile type.
- query String
- A self-supplied SELECT statement for the data set you wish to predict on, for JDBC type.
- schema String
- The name of the specified database schema, for JDBC type.
- table String
- The name of the specified database table, for JDBC type.
- url String
- The URL to score (e.g. s3://bucket/key), for S3 type.
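For JDBC intake, dataStoreId and credentialId point at a registered connection, and the data is identified either by catalog/schema/table or by a self-supplied query, as the fields above describe. A hedged TypeScript sketch of both styles (all IDs and names are placeholders):
// JDBC intake by table reference.
const tableIntake = {
    type: "jdbc",
    dataStoreId: "data-store-id",  // registered external data store
    credentialId: "credential-id", // credentials for that data store
    catalog: "analytics",
    schema: "public",
    table: "scoring_rows",
    fetchSize: 10000, // tune to balance throughput against memory
};

// JDBC intake by self-supplied SELECT statement.
const queryIntake = {
    type: "jdbc",
    dataStoreId: "data-store-id",
    credentialId: "credential-id",
    query: "SELECT * FROM public.scoring_rows",
};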
BatchPredictionJobDefinitionOutputSettings, BatchPredictionJobDefinitionOutputSettingsArgs
- Catalog string
- The name of the specified database catalog, for JDBC type.
- CreateTableIfNotExists bool
- If no existing table is detected, attempt to create it before writing data, for JDBC type.
- CredentialId string
- The ID of the credentials for the S3 or JDBC data source.
- DataStoreId string
- The ID of the external data store connected to the JDBC data source.
- EndpointUrl string
- Any non-default endpoint URL for S3 access.
- Path string
- Path to save the scored data as CSV, for localFile type.
- Schema string
- The name of the specified database schema, for JDBC type.
- StatementType string
- The type of insertion statement to create, for JDBC type.
- Table string
- The name of the specified database table, for JDBC type.
- Type string
- Type of output.
- UpdateColumns List<string>
- A list of column names to be updated, for JDBC type.
- Url string
- The URL for storing the results (e.g. s3://bucket/key), for S3 type.
- WhereColumns List<string>
- A list of column names to be selected, for JDBC type.
- Catalog string
- The name of the specified database catalog, for JDBC type.
- CreateTableIfNotExists bool
- If no existing table is detected, attempt to create it before writing data, for JDBC type.
- CredentialId string
- The ID of the credentials for the S3 or JDBC data source.
- DataStoreId string
- The ID of the external data store connected to the JDBC data source.
- EndpointUrl string
- Any non-default endpoint URL for S3 access.
- Path string
- Path to save the scored data as CSV, for localFile type.
- Schema string
- The name of the specified database schema, for JDBC type.
- StatementType string
- The type of insertion statement to create, for JDBC type.
- Table string
- The name of the specified database table, for JDBC type.
- Type string
- Type of output.
- UpdateColumns []string
- A list of column names to be updated, for JDBC type.
- Url string
- The URL for storing the results (e.g. s3://bucket/key), for S3 type.
- WhereColumns []string
- A list of column names to be selected, for JDBC type.
- catalog String
- The name of the specified database catalog, for JDBC type.
- createTableIfNotExists Boolean
- If no existing table is detected, attempt to create it before writing data, for JDBC type.
- credentialId String
- The ID of the credentials for the S3 or JDBC data source.
- dataStoreId String
- The ID of the external data store connected to the JDBC data source.
- endpointUrl String
- Any non-default endpoint URL for S3 access.
- path String
- Path to save the scored data as CSV, for localFile type.
- schema String
- The name of the specified database schema, for JDBC type.
- statementType String
- The type of insertion statement to create, for JDBC type.
- table String
- The name of the specified database table, for JDBC type.
- type String
- Type of output.
- updateColumns List<String>
- A list of column names to be updated, for JDBC type.
- url String
- The URL for storing the results (e.g. s3://bucket/key), for S3 type.
- whereColumns List<String>
- A list of column names to be selected, for JDBC type.
- catalog string
- The name of the specified database catalog, for JDBC type.
- createTableIfNotExists boolean
- If no existing table is detected, attempt to create it before writing data, for JDBC type.
- credentialId string
- The ID of the credentials for the S3 or JDBC data source.
- dataStoreId string
- The ID of the external data store connected to the JDBC data source.
- endpointUrl string
- Any non-default endpoint URL for S3 access.
- path string
- Path to save the scored data as CSV, for localFile type.
- schema string
- The name of the specified database schema, for JDBC type.
- statementType string
- The type of insertion statement to create, for JDBC type.
- table string
- The name of the specified database table, for JDBC type.
- type string
- Type of output.
- updateColumns string[]
- A list of column names to be updated, for JDBC type.
- url string
- The URL for storing the results (e.g. s3://bucket/key), for S3 type.
- whereColumns string[]
- A list of column names to be selected, for JDBC type.
- catalog str
- The name of the specified database catalog, for JDBC type.
- create_table_if_not_exists bool
- If no existing table is detected, attempt to create it before writing data, for JDBC type.
- credential_id str
- The ID of the credentials for the S3 or JDBC data source.
- data_store_id str
- The ID of the external data store connected to the JDBC data source.
- endpoint_url str
- Any non-default endpoint URL for S3 access.
- path str
- Path to save the scored data as CSV, for localFile type.
- schema str
- The name of the specified database schema, for JDBC type.
- statement_type str
- The type of insertion statement to create, for JDBC type.
- table str
- The name of the specified database table, for JDBC type.
- type str
- Type of output.
- update_columns Sequence[str]
- A list of column names to be updated, for JDBC type.
- url str
- The URL for storing the results (e.g. s3://bucket/key), for S3 type.
- where_columns Sequence[str]
- A list of column names to be selected, for JDBC type.
- catalog String
- The name of the specified database catalog, for JDBC type.
- createTableIfNotExists Boolean
- If no existing table is detected, attempt to create it before writing data, for JDBC type.
- credentialId String
- The ID of the credentials for the S3 or JDBC data source.
- dataStoreId String
- The ID of the external data store connected to the JDBC data source.
- endpointUrl String
- Any non-default endpoint URL for S3 access.
- path String
- Path to save the scored data as CSV, for localFile type.
- schema String
- The name of the specified database schema, for JDBC type.
- statementType String
- The type of insertion statement to create, for JDBC type.
- table String
- The name of the specified database table, for JDBC type.
- type String
- Type of output.
- updateColumns List<String>
- A list of column names to be updated, for JDBC type.
- url String
- The URL for storing the results (e.g. s3://bucket/key), for S3 type.
- whereColumns List<String>
- A list of column names to be selected, for JDBC type.
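On the output side, JDBC writes are shaped by statementType together with the updateColumns and whereColumns lists described above. A hedged TypeScript sketch (the statementType value and all names are illustrative assumptions):
// JDBC output writing predictions back to a table; IDs are placeholders.
const jdbcOutput = {
    type: "jdbc",
    dataStoreId: "data-store-id",
    credentialId: "credential-id",
    schema: "public",
    table: "scoring_rows",
    statementType: "update",       // illustrative value; check provider docs for accepted types
    updateColumns: ["prediction"], // columns to be updated
    whereColumns: ["row_id"],      // columns to be selected
    createTableIfNotExists: false,
};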
BatchPredictionJobDefinitionPredictionInstance, BatchPredictionJobDefinitionPredictionInstanceArgs
- HostName string
- Hostname of the prediction instance.
- ApiKey string
- By default, prediction requests will use the API key of the user that created the job. This allows you to make requests on behalf of other users.
- DatarobotKey string
- If running a job against a prediction instance in the Managed AI Cloud, you must provide the organization-level DataRobot-Key.
- SslEnabled bool
- Set to false to run prediction requests from the batch prediction job without SSL. Defaults to true.
- HostName string
- Hostname of the prediction instance.
- ApiKey string
- By default, prediction requests will use the API key of the user that created the job. This allows you to make requests on behalf of other users.
- DatarobotKey string
- If running a job against a prediction instance in the Managed AI Cloud, you must provide the organization-level DataRobot-Key.
- SslEnabled bool
- Set to false to run prediction requests from the batch prediction job without SSL. Defaults to true.
- hostName String
- Hostname of the prediction instance.
- apiKey String
- By default, prediction requests will use the API key of the user that created the job. This allows you to make requests on behalf of other users.
- datarobotKey String
- If running a job against a prediction instance in the Managed AI Cloud, you must provide the organization-level DataRobot-Key.
- sslEnabled Boolean
- Set to false to run prediction requests from the batch prediction job without SSL. Defaults to true.
- hostName string
- Hostname of the prediction instance.
- apiKey string
- By default, prediction requests will use the API key of the user that created the job. This allows you to make requests on behalf of other users.
- datarobotKey string
- If running a job against a prediction instance in the Managed AI Cloud, you must provide the organization-level DataRobot-Key.
- sslEnabled boolean
- Set to false to run prediction requests from the batch prediction job without SSL. Defaults to true.
- host_name str
- Hostname of the prediction instance.
- api_key str
- By default, prediction requests will use the API key of the user that created the job. This allows you to make requests on behalf of other users.
- datarobot_key str
- If running a job against a prediction instance in the Managed AI Cloud, you must provide the organization-level DataRobot-Key.
- ssl_enabled bool
- Set to False to run prediction requests from the batch prediction job without SSL. Defaults to True.
- hostName String
- Hostname of the prediction instance.
- apiKey String
- By default, prediction requests will use the API key of the user that created the job. This allows you to make requests on behalf of other users.
- datarobotKey String
- If running a job against a prediction instance in the Managed AI Cloud, you must provide the organization-level DataRobot-Key.
- sslEnabled Boolean
- Set to false to run prediction requests from the batch prediction job without SSL. Defaults to true.
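Overriding the prediction instance mostly means pointing the job at a different host using the fields above. A minimal TypeScript sketch (hostname and keys are placeholders):
// Route the job to a specific prediction instance instead of the deployment default.
const predictionInstance = {
    hostName: "prediction-instance.example.com",
    apiKey: "user-api-key",            // make requests on behalf of another user
    datarobotKey: "org-datarobot-key", // required for Managed AI Cloud instances
    sslEnabled: true,                  // defaults to true; set false to disable SSL
};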
BatchPredictionJobDefinitionSchedule, BatchPredictionJobDefinitionScheduleArgs
- DayOfMonths List<string>
- Days of the month when the job will run.
- DayOfWeeks List<string>
- Days of the week when the job will run.
- Hours List<string>
- Hours of the day when the job will run.
- Minutes List<string>
- Minutes of the day when the job will run.
- Months List<string>
- Months of the year when the job will run.
- DayOfMonths []string
- Days of the month when the job will run.
- DayOfWeeks []string
- Days of the week when the job will run.
- Hours []string
- Hours of the day when the job will run.
- Minutes []string
- Minutes of the day when the job will run.
- Months []string
- Months of the year when the job will run.
- dayOfMonths List<String>
- Days of the month when the job will run.
- dayOfWeeks List<String>
- Days of the week when the job will run.
- hours List<String>
- Hours of the day when the job will run.
- minutes List<String>
- Minutes of the day when the job will run.
- months List<String>
- Months of the year when the job will run.
- dayOfMonths string[]
- Days of the month when the job will run.
- dayOfWeeks string[]
- Days of the week when the job will run.
- hours string[]
- Hours of the day when the job will run.
- minutes string[]
- Minutes of the day when the job will run.
- months string[]
- Months of the year when the job will run.
- day_of_months Sequence[str]
- Days of the month when the job will run.
- day_of_weeks Sequence[str]
- Days of the week when the job will run.
- hours Sequence[str]
- Hours of the day when the job will run.
- minutes Sequence[str]
- Minutes of the day when the job will run.
- months Sequence[str]
- Months of the year when the job will run.
- dayOfMonths List<String>
- Days of the month when the job will run.
- dayOfWeeks List<String>
- Days of the week when the job will run.
- hours List<String>
- Hours of the day when the job will run.
- minutes List<String>
- Minutes of the day when the job will run.
- months List<String>
- Months of the year when the job will run.
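Each schedule field above is a list of string values, with "*" acting as a wildcard, much like the fields of a cron expression. A TypeScript sketch of a job running at :00 and :30 past every hour on a subset of weekdays (the weekday numbering is an assumption):
// Run at minutes 0 and 30 of every hour on the listed weekdays.
const schedule = {
    minutes: ["0", "30"],
    hours: ["*"],
    dayOfMonths: ["*"],
    months: ["*"],
    dayOfWeeks: ["1", "2", "3", "4", "5"], // assumption: 1 = Monday
};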
BatchPredictionJobDefinitionTimeseriesSettings, BatchPredictionJobDefinitionTimeseriesSettingsArgs
- ForecastPoint string
- Forecast point for the dataset, used for forecast predictions. May be passed if timeseries_settings.type=forecast.
- PredictionsEndDate string
- End date for historical predictions. May be passed if timeseries_settings.type=historical.
- PredictionsStartDate string
- Start date for historical predictions. May be passed if timeseries_settings.type=historical.
- RelaxKnownInAdvanceFeaturesCheck bool
- If true, missing values in the known-in-advance features are allowed in the forecast window at prediction time. Defaults to false.
- Type string
- Type of time-series prediction. Must be 'forecast' or 'historical'. Defaults to 'forecast'.
- ForecastPoint string
- Forecast point for the dataset, used for forecast predictions. May be passed if timeseries_settings.type=forecast.
- PredictionsEndDate string
- End date for historical predictions. May be passed if timeseries_settings.type=historical.
- PredictionsStartDate string
- Start date for historical predictions. May be passed if timeseries_settings.type=historical.
- RelaxKnownInAdvanceFeaturesCheck bool
- If true, missing values in the known-in-advance features are allowed in the forecast window at prediction time. Defaults to false.
- Type string
- Type of time-series prediction. Must be 'forecast' or 'historical'. Defaults to 'forecast'.
- forecastPoint String
- Forecast point for the dataset, used for forecast predictions. May be passed if timeseries_settings.type=forecast.
- predictionsEndDate String
- End date for historical predictions. May be passed if timeseries_settings.type=historical.
- predictionsStartDate String
- Start date for historical predictions. May be passed if timeseries_settings.type=historical.
- relaxKnownInAdvanceFeaturesCheck Boolean
- If true, missing values in the known-in-advance features are allowed in the forecast window at prediction time. Defaults to false.
- type String
- Type of time-series prediction. Must be 'forecast' or 'historical'. Defaults to 'forecast'.
- forecastPoint string
- Forecast point for the dataset, used for forecast predictions. May be passed if timeseries_settings.type=forecast.
- predictionsEndDate string
- End date for historical predictions. May be passed if timeseries_settings.type=historical.
- predictionsStartDate string
- Start date for historical predictions. May be passed if timeseries_settings.type=historical.
- relaxKnownInAdvanceFeaturesCheck boolean
- If true, missing values in the known-in-advance features are allowed in the forecast window at prediction time. Defaults to false.
- type string
- Type of time-series prediction. Must be 'forecast' or 'historical'. Defaults to 'forecast'.
- forecast_point str
- Forecast point for the dataset, used for forecast predictions. May be passed if timeseries_settings.type=forecast.
- predictions_end_date str
- End date for historical predictions. May be passed if timeseries_settings.type=historical.
- predictions_start_date str
- Start date for historical predictions. May be passed if timeseries_settings.type=historical.
- relax_known_in_advance_features_check bool
- If True, missing values in the known-in-advance features are allowed in the forecast window at prediction time. Defaults to False.
- type str
- Type of time-series prediction. Must be 'forecast' or 'historical'. Defaults to 'forecast'.
- forecastPoint String
- Forecast point for the dataset, used for forecast predictions. May be passed if timeseries_settings.type=forecast.
- predictionsEndDate String
- End date for historical predictions. May be passed if timeseries_settings.type=historical.
- predictionsStartDate String
- Start date for historical predictions. May be passed if timeseries_settings.type=historical.
- relaxKnownInAdvanceFeaturesCheck Boolean
- If true, missing values in the known-in-advance features are allowed in the forecast window at prediction time. Defaults to false.
- type String
- Type of time-series prediction. Must be 'forecast' or 'historical'. Defaults to 'forecast'.
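The type field selects between the two time-series modes described above: forecast predicts forward from a single forecastPoint, while historical backtests over a start/end date range. A hedged TypeScript sketch (dates are illustrative):
// Forecast mode: predict forward from a single point in time.
const forecastSettings = {
    type: "forecast",
    forecastPoint: "2024-01-01T00:00:00Z",
};

// Historical mode: predictions over a date range.
const historicalSettings = {
    type: "historical",
    predictionsStartDate: "2023-01-01T00:00:00Z",
    predictionsEndDate: "2023-12-31T00:00:00Z",
    relaxKnownInAdvanceFeaturesCheck: true, // allow missing known-in-advance values
};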
Package Details
- Repository
- datarobot datarobot-community/pulumi-datarobot
- License
- Apache-2.0
- Notes
- This Pulumi package is based on the datarobot Terraform Provider.