// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
impl super::Client {
    /// Constructs a fluent builder for the [`CreateMLEndpoint`](crate::operation::create_ml_endpoint::builders::CreateMLEndpointFluentBuilder) operation.
    ///
    /// - The fluent builder is configurable:
    ///   - [`id(impl Into<String>)`](crate::operation::create_ml_endpoint::builders::CreateMLEndpointFluentBuilder::id) / [`set_id(Option<String>)`](crate::operation::create_ml_endpoint::builders::CreateMLEndpointFluentBuilder::set_id):<br>required: **false**<br><p>A unique identifier for the new inference endpoint. The default is an autogenerated timestamped name.</p><br>
    ///   - [`ml_model_training_job_id(impl Into<String>)`](crate::operation::create_ml_endpoint::builders::CreateMLEndpointFluentBuilder::ml_model_training_job_id) / [`set_ml_model_training_job_id(Option<String>)`](crate::operation::create_ml_endpoint::builders::CreateMLEndpointFluentBuilder::set_ml_model_training_job_id):<br>required: **false**<br><p>The job ID of the completed model-training job that created the model the inference endpoint will point to. You must supply either the <code>mlModelTrainingJobId</code> or the <code>mlModelTransformJobId</code>.</p><br>
    ///   - [`ml_model_transform_job_id(impl Into<String>)`](crate::operation::create_ml_endpoint::builders::CreateMLEndpointFluentBuilder::ml_model_transform_job_id) / [`set_ml_model_transform_job_id(Option<String>)`](crate::operation::create_ml_endpoint::builders::CreateMLEndpointFluentBuilder::set_ml_model_transform_job_id):<br>required: **false**<br><p>The job ID of the completed model-transform job. You must supply either the <code>mlModelTrainingJobId</code> or the <code>mlModelTransformJobId</code>.</p><br>
    ///   - [`update(bool)`](crate::operation::create_ml_endpoint::builders::CreateMLEndpointFluentBuilder::update) / [`set_update(Option<bool>)`](crate::operation::create_ml_endpoint::builders::CreateMLEndpointFluentBuilder::set_update):<br>required: **false**<br><p>If set to <code>true</code>, <code>update</code> indicates that this is an update request. The default is <code>false</code>. You must supply either the <code>mlModelTrainingJobId</code> or the <code>mlModelTransformJobId</code>.</p><br>
    ///   - [`neptune_iam_role_arn(impl Into<String>)`](crate::operation::create_ml_endpoint::builders::CreateMLEndpointFluentBuilder::neptune_iam_role_arn) / [`set_neptune_iam_role_arn(Option<String>)`](crate::operation::create_ml_endpoint::builders::CreateMLEndpointFluentBuilder::set_neptune_iam_role_arn):<br>required: **false**<br><p>The ARN of an IAM role providing Neptune access to SageMaker and Amazon S3 resources. This must be listed in your DB cluster parameter group or an error will be thrown.</p><br>
    ///   - [`model_name(impl Into<String>)`](crate::operation::create_ml_endpoint::builders::CreateMLEndpointFluentBuilder::model_name) / [`set_model_name(Option<String>)`](crate::operation::create_ml_endpoint::builders::CreateMLEndpointFluentBuilder::set_model_name):<br>required: **false**<br><p>Model type for training. By default, the Neptune ML model is based on the <code>modelType</code> used in data processing, but you can specify a different model type here. The default is <code>rgcn</code> for heterogeneous graphs and <code>kge</code> for knowledge graphs. The only valid value for heterogeneous graphs is <code>rgcn</code>. Valid values for knowledge graphs are: <code>kge</code>, <code>transe</code>, <code>distmult</code>, and <code>rotate</code>.</p><br>
    ///   - [`instance_type(impl Into<String>)`](crate::operation::create_ml_endpoint::builders::CreateMLEndpointFluentBuilder::instance_type) / [`set_instance_type(Option<String>)`](crate::operation::create_ml_endpoint::builders::CreateMLEndpointFluentBuilder::set_instance_type):<br>required: **false**<br><p>The type of Neptune ML instance to use for online servicing. The default is <code>ml.m5.xlarge</code>. Choosing the ML instance for an inference endpoint depends on the task type, the graph size, and your budget.</p><br>
    ///   - [`instance_count(i32)`](crate::operation::create_ml_endpoint::builders::CreateMLEndpointFluentBuilder::instance_count) / [`set_instance_count(Option<i32>)`](crate::operation::create_ml_endpoint::builders::CreateMLEndpointFluentBuilder::set_instance_count):<br>required: **false**<br><p>The minimum number of Amazon EC2 instances to deploy to an endpoint for prediction. The default is 1.</p><br>
    ///   - [`volume_encryption_kms_key(impl Into<String>)`](crate::operation::create_ml_endpoint::builders::CreateMLEndpointFluentBuilder::volume_encryption_kms_key) / [`set_volume_encryption_kms_key(Option<String>)`](crate::operation::create_ml_endpoint::builders::CreateMLEndpointFluentBuilder::set_volume_encryption_kms_key):<br>required: **false**<br><p>The Amazon Key Management Service (Amazon KMS) key that SageMaker uses to encrypt data on the storage volume attached to the ML compute instances that run the training job. The default is None.</p><br>
    /// - On success, responds with [`CreateMlEndpointOutput`](crate::operation::create_ml_endpoint::CreateMlEndpointOutput) with field(s):
    ///   - [`id(Option<String>)`](crate::operation::create_ml_endpoint::CreateMlEndpointOutput::id): <p>The unique ID of the new inference endpoint.</p>
    ///   - [`arn(Option<String>)`](crate::operation::create_ml_endpoint::CreateMlEndpointOutput::arn): <p>The ARN for the new inference endpoint.</p>
    ///   - [`creation_time_in_millis(Option<i64>)`](crate::operation::create_ml_endpoint::CreateMlEndpointOutput::creation_time_in_millis): <p>The endpoint creation time, in milliseconds.</p>
    /// - On failure, responds with [`SdkError<CreateMLEndpointError>`](crate::operation::create_ml_endpoint::CreateMLEndpointError)
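    ///
    /// # Example
    ///
    /// A minimal usage sketch for this builder. The crate path (`aws_sdk_neptunedata`), the
    /// training-job ID, and the IAM role ARN below are illustrative assumptions, not values taken
    /// from this file; substitute your own resources.
    ///
    /// ```no_run
    /// # async fn example(client: &aws_sdk_neptunedata::Client) -> Result<(), aws_sdk_neptunedata::Error> {
    /// // Create an inference endpoint from a completed model-training job.
    /// let output = client
    ///     .create_ml_endpoint()
    ///     .ml_model_training_job_id("my-training-job-id") // placeholder job ID
    ///     .neptune_iam_role_arn("arn:aws:iam::123456789012:role/NeptuneMLRole") // placeholder ARN
    ///     .instance_type("ml.m5.xlarge")
    ///     .instance_count(1)
    ///     .send()
    ///     .await?;
    /// println!("endpoint id: {:?}", output.id());
    /// # Ok(())
    /// # }
    /// ```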
    pub fn create_ml_endpoint(&self) -> crate::operation::create_ml_endpoint::builders::CreateMLEndpointFluentBuilder {
        crate::operation::create_ml_endpoint::builders::CreateMLEndpointFluentBuilder::new(self.handle.clone())
    }
}