aws_sdk_bedrock/operation/create_automated_reasoning_policy_test_case/builders.rs

1// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
2pub use crate::operation::create_automated_reasoning_policy_test_case::_create_automated_reasoning_policy_test_case_output::CreateAutomatedReasoningPolicyTestCaseOutputBuilder;
3
4pub use crate::operation::create_automated_reasoning_policy_test_case::_create_automated_reasoning_policy_test_case_input::CreateAutomatedReasoningPolicyTestCaseInputBuilder;
5
6impl crate::operation::create_automated_reasoning_policy_test_case::builders::CreateAutomatedReasoningPolicyTestCaseInputBuilder {
7    /// Sends a request with this input using the given client.
8    pub async fn send_with(
9        self,
10        client: &crate::Client,
11    ) -> ::std::result::Result<
12        crate::operation::create_automated_reasoning_policy_test_case::CreateAutomatedReasoningPolicyTestCaseOutput,
13        ::aws_smithy_runtime_api::client::result::SdkError<
14            crate::operation::create_automated_reasoning_policy_test_case::CreateAutomatedReasoningPolicyTestCaseError,
15            ::aws_smithy_runtime_api::client::orchestrator::HttpResponse,
16        >,
17    > {
18        let mut fluent_builder = client.create_automated_reasoning_policy_test_case();
19        fluent_builder.inner = self;
20        fluent_builder.send().await
21    }
22}
/// Fluent builder constructing a request to `CreateAutomatedReasoningPolicyTestCase`.
///
/// <p>Creates a test for an Automated Reasoning policy. Tests validate that your policy works as expected by providing sample inputs and expected outcomes. Use tests to verify policy behavior before deploying to production.</p>
#[derive(::std::clone::Clone, ::std::fmt::Debug)]
pub struct CreateAutomatedReasoningPolicyTestCaseFluentBuilder {
    // Shared client state (runtime plugins + resolved config) used when the request is sent.
    handle: ::std::sync::Arc<crate::client::Handle>,
    // Accumulates the operation input as setters are called; built into the final input in `send`.
    inner: crate::operation::create_automated_reasoning_policy_test_case::builders::CreateAutomatedReasoningPolicyTestCaseInputBuilder,
    // Optional per-operation config override, layered on top of the client config at send time.
    config_override: ::std::option::Option<crate::config::Builder>,
}
// Bridge used by `.customize()`: lets a customizable operation inject a
// per-request config override into this builder and then send it.
impl
    crate::client::customize::internal::CustomizableSend<
        crate::operation::create_automated_reasoning_policy_test_case::CreateAutomatedReasoningPolicyTestCaseOutput,
        crate::operation::create_automated_reasoning_policy_test_case::CreateAutomatedReasoningPolicyTestCaseError,
    > for CreateAutomatedReasoningPolicyTestCaseFluentBuilder
{
    fn send(
        self,
        config_override: crate::config::Builder,
    ) -> crate::client::customize::internal::BoxFuture<
        crate::client::customize::internal::SendResult<
            crate::operation::create_automated_reasoning_policy_test_case::CreateAutomatedReasoningPolicyTestCaseOutput,
            crate::operation::create_automated_reasoning_policy_test_case::CreateAutomatedReasoningPolicyTestCaseError,
        >,
    > {
        // Store the override on the builder, then pin-box the send future so the
        // trait can return a type-erased `BoxFuture`.
        ::std::boxed::Box::pin(async move { self.config_override(config_override).send().await })
    }
}
impl CreateAutomatedReasoningPolicyTestCaseFluentBuilder {
    /// Creates a new `CreateAutomatedReasoningPolicyTestCaseFluentBuilder`.
    pub(crate) fn new(handle: ::std::sync::Arc<crate::client::Handle>) -> Self {
        Self {
            handle,
            inner: ::std::default::Default::default(),
            config_override: ::std::option::Option::None,
        }
    }
    /// Access the CreateAutomatedReasoningPolicyTestCase as a reference.
    pub fn as_input(
        &self,
    ) -> &crate::operation::create_automated_reasoning_policy_test_case::builders::CreateAutomatedReasoningPolicyTestCaseInputBuilder {
        &self.inner
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> ::std::result::Result<
        crate::operation::create_automated_reasoning_policy_test_case::CreateAutomatedReasoningPolicyTestCaseOutput,
        ::aws_smithy_runtime_api::client::result::SdkError<
            crate::operation::create_automated_reasoning_policy_test_case::CreateAutomatedReasoningPolicyTestCaseError,
            ::aws_smithy_runtime_api::client::orchestrator::HttpResponse,
        >,
    > {
        // Build the final input; builder validation failures surface as a construction error.
        let input = self
            .inner
            .build()
            .map_err(::aws_smithy_runtime_api::client::result::SdkError::construction_failure)?;
        // Layer the client's runtime plugins, its resolved config, and any
        // per-operation config override, then hand off to the orchestrator.
        let runtime_plugins =
            crate::operation::create_automated_reasoning_policy_test_case::CreateAutomatedReasoningPolicyTestCase::operation_runtime_plugins(
                self.handle.runtime_plugins.clone(),
                &self.handle.conf,
                self.config_override,
            );
        crate::operation::create_automated_reasoning_policy_test_case::CreateAutomatedReasoningPolicyTestCase::orchestrate(&runtime_plugins, input)
            .await
    }

    /// Consumes this builder, creating a customizable operation that can be modified before being sent.
    pub fn customize(
        self,
    ) -> crate::client::customize::CustomizableOperation<
        crate::operation::create_automated_reasoning_policy_test_case::CreateAutomatedReasoningPolicyTestCaseOutput,
        crate::operation::create_automated_reasoning_policy_test_case::CreateAutomatedReasoningPolicyTestCaseError,
        Self,
    > {
        crate::client::customize::CustomizableOperation::new(self)
    }
    // Stores a per-operation config override; it is merged into the client
    // config when the request is sent (see `send`).
    pub(crate) fn config_override(mut self, config_override: impl ::std::convert::Into<crate::config::Builder>) -> Self {
        self.set_config_override(::std::option::Option::Some(config_override.into()));
        self
    }

    // Sets or clears the per-operation config override.
    pub(crate) fn set_config_override(&mut self, config_override: ::std::option::Option<crate::config::Builder>) -> &mut Self {
        self.config_override = config_override;
        self
    }
    /// <p>The Amazon Resource Name (ARN) of the Automated Reasoning policy for which to create the test.</p>
    pub fn policy_arn(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.policy_arn(input.into());
        self
    }
    /// <p>The Amazon Resource Name (ARN) of the Automated Reasoning policy for which to create the test.</p>
    pub fn set_policy_arn(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_policy_arn(input);
        self
    }
    /// <p>The Amazon Resource Name (ARN) of the Automated Reasoning policy for which to create the test.</p>
    pub fn get_policy_arn(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_policy_arn()
    }
    /// <p>The output content that's validated by the Automated Reasoning policy. This represents the foundation model response that will be checked for accuracy.</p>
    pub fn guard_content(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.guard_content(input.into());
        self
    }
    /// <p>The output content that's validated by the Automated Reasoning policy. This represents the foundation model response that will be checked for accuracy.</p>
    pub fn set_guard_content(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_guard_content(input);
        self
    }
    /// <p>The output content that's validated by the Automated Reasoning policy. This represents the foundation model response that will be checked for accuracy.</p>
    pub fn get_guard_content(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_guard_content()
    }
    /// <p>The input query or prompt that generated the content. This provides context for the validation.</p>
    pub fn query_content(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.query_content(input.into());
        self
    }
    /// <p>The input query or prompt that generated the content. This provides context for the validation.</p>
    pub fn set_query_content(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_query_content(input);
        self
    }
    /// <p>The input query or prompt that generated the content. This provides context for the validation.</p>
    pub fn get_query_content(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_query_content()
    }
    /// <p>The expected result of the Automated Reasoning check. Valid values include: <code>VALID</code>, <code>INVALID</code>, <code>SATISFIABLE</code>, <code>IMPOSSIBLE</code>, <code>TRANSLATION_AMBIGUOUS</code>, <code>TOO_COMPLEX</code>, and <code>NO_TRANSLATIONS</code>.</p>
    /// <ul>
    /// <li>
    /// <p><code>VALID</code> - The claims are true. The claims are implied by the premises and the Automated Reasoning policy. Given the Automated Reasoning policy and premises, it is not possible for these claims to be false. In other words, there are no alternative answers that are true that contradict the claims.</p></li>
    /// <li>
    /// <p><code>INVALID</code> - The claims are false. The claims are not implied by the premises and Automated Reasoning policy. Furthermore, there exists different claims that are consistent with the premises and Automated Reasoning policy.</p></li>
    /// <li>
    /// <p><code>SATISFIABLE</code> - The claims can be true or false. It depends on what assumptions are made for the claim to be implied from the premises and Automated Reasoning policy rules. In this situation, different assumptions can make input claims false and alternative claims true.</p></li>
    /// <li>
    /// <p><code>IMPOSSIBLE</code> - Automated Reasoning can’t make a statement about the claims. This can happen if the premises are logically incorrect, or if there is a conflict within the Automated Reasoning policy itself.</p></li>
    /// <li>
    /// <p><code>TRANSLATION_AMBIGUOUS</code> - Detected an ambiguity in the translation meant it would be unsound to continue with validity checking. Additional context or follow-up questions might be needed to get translation to succeed.</p></li>
    /// <li>
    /// <p><code>TOO_COMPLEX</code> - The input contains too much information for Automated Reasoning to process within its latency limits.</p></li>
    /// <li>
    /// <p><code>NO_TRANSLATIONS</code> - Identifies that some or all of the input prompt wasn't translated into logic. This can happen if the input isn't relevant to the Automated Reasoning policy, or if the policy doesn't have variables to model relevant input. If Automated Reasoning can't translate anything, you get a single <code>NO_TRANSLATIONS</code> finding. You might also see a <code>NO_TRANSLATIONS</code> (along with other findings) if some part of the validation isn't translated.</p></li>
    /// </ul>
    pub fn expected_aggregated_findings_result(mut self, input: crate::types::AutomatedReasoningCheckResult) -> Self {
        self.inner = self.inner.expected_aggregated_findings_result(input);
        self
    }
    /// <p>The expected result of the Automated Reasoning check. Valid values include: <code>VALID</code>, <code>INVALID</code>, <code>SATISFIABLE</code>, <code>IMPOSSIBLE</code>, <code>TRANSLATION_AMBIGUOUS</code>, <code>TOO_COMPLEX</code>, and <code>NO_TRANSLATIONS</code>.</p>
    /// <ul>
    /// <li>
    /// <p><code>VALID</code> - The claims are true. The claims are implied by the premises and the Automated Reasoning policy. Given the Automated Reasoning policy and premises, it is not possible for these claims to be false. In other words, there are no alternative answers that are true that contradict the claims.</p></li>
    /// <li>
    /// <p><code>INVALID</code> - The claims are false. The claims are not implied by the premises and Automated Reasoning policy. Furthermore, there exists different claims that are consistent with the premises and Automated Reasoning policy.</p></li>
    /// <li>
    /// <p><code>SATISFIABLE</code> - The claims can be true or false. It depends on what assumptions are made for the claim to be implied from the premises and Automated Reasoning policy rules. In this situation, different assumptions can make input claims false and alternative claims true.</p></li>
    /// <li>
    /// <p><code>IMPOSSIBLE</code> - Automated Reasoning can’t make a statement about the claims. This can happen if the premises are logically incorrect, or if there is a conflict within the Automated Reasoning policy itself.</p></li>
    /// <li>
    /// <p><code>TRANSLATION_AMBIGUOUS</code> - Detected an ambiguity in the translation meant it would be unsound to continue with validity checking. Additional context or follow-up questions might be needed to get translation to succeed.</p></li>
    /// <li>
    /// <p><code>TOO_COMPLEX</code> - The input contains too much information for Automated Reasoning to process within its latency limits.</p></li>
    /// <li>
    /// <p><code>NO_TRANSLATIONS</code> - Identifies that some or all of the input prompt wasn't translated into logic. This can happen if the input isn't relevant to the Automated Reasoning policy, or if the policy doesn't have variables to model relevant input. If Automated Reasoning can't translate anything, you get a single <code>NO_TRANSLATIONS</code> finding. You might also see a <code>NO_TRANSLATIONS</code> (along with other findings) if some part of the validation isn't translated.</p></li>
    /// </ul>
    pub fn set_expected_aggregated_findings_result(mut self, input: ::std::option::Option<crate::types::AutomatedReasoningCheckResult>) -> Self {
        self.inner = self.inner.set_expected_aggregated_findings_result(input);
        self
    }
    /// <p>The expected result of the Automated Reasoning check. Valid values include: <code>VALID</code>, <code>INVALID</code>, <code>SATISFIABLE</code>, <code>IMPOSSIBLE</code>, <code>TRANSLATION_AMBIGUOUS</code>, <code>TOO_COMPLEX</code>, and <code>NO_TRANSLATIONS</code>.</p>
    /// <ul>
    /// <li>
    /// <p><code>VALID</code> - The claims are true. The claims are implied by the premises and the Automated Reasoning policy. Given the Automated Reasoning policy and premises, it is not possible for these claims to be false. In other words, there are no alternative answers that are true that contradict the claims.</p></li>
    /// <li>
    /// <p><code>INVALID</code> - The claims are false. The claims are not implied by the premises and Automated Reasoning policy. Furthermore, there exists different claims that are consistent with the premises and Automated Reasoning policy.</p></li>
    /// <li>
    /// <p><code>SATISFIABLE</code> - The claims can be true or false. It depends on what assumptions are made for the claim to be implied from the premises and Automated Reasoning policy rules. In this situation, different assumptions can make input claims false and alternative claims true.</p></li>
    /// <li>
    /// <p><code>IMPOSSIBLE</code> - Automated Reasoning can’t make a statement about the claims. This can happen if the premises are logically incorrect, or if there is a conflict within the Automated Reasoning policy itself.</p></li>
    /// <li>
    /// <p><code>TRANSLATION_AMBIGUOUS</code> - Detected an ambiguity in the translation meant it would be unsound to continue with validity checking. Additional context or follow-up questions might be needed to get translation to succeed.</p></li>
    /// <li>
    /// <p><code>TOO_COMPLEX</code> - The input contains too much information for Automated Reasoning to process within its latency limits.</p></li>
    /// <li>
    /// <p><code>NO_TRANSLATIONS</code> - Identifies that some or all of the input prompt wasn't translated into logic. This can happen if the input isn't relevant to the Automated Reasoning policy, or if the policy doesn't have variables to model relevant input. If Automated Reasoning can't translate anything, you get a single <code>NO_TRANSLATIONS</code> finding. You might also see a <code>NO_TRANSLATIONS</code> (along with other findings) if some part of the validation isn't translated.</p></li>
    /// </ul>
    pub fn get_expected_aggregated_findings_result(&self) -> &::std::option::Option<crate::types::AutomatedReasoningCheckResult> {
        self.inner.get_expected_aggregated_findings_result()
    }
    /// <p>A unique, case-sensitive identifier to ensure that the operation completes no more than one time. If this token matches a previous request, Amazon Bedrock ignores the request, but does not return an error.</p>
    pub fn client_request_token(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.client_request_token(input.into());
        self
    }
    /// <p>A unique, case-sensitive identifier to ensure that the operation completes no more than one time. If this token matches a previous request, Amazon Bedrock ignores the request, but does not return an error.</p>
    pub fn set_client_request_token(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_client_request_token(input);
        self
    }
    /// <p>A unique, case-sensitive identifier to ensure that the operation completes no more than one time. If this token matches a previous request, Amazon Bedrock ignores the request, but does not return an error.</p>
    pub fn get_client_request_token(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_client_request_token()
    }
    /// <p>The minimum confidence level for logic validation. Content that meets the threshold is considered a high-confidence finding that can be validated.</p>
    pub fn confidence_threshold(mut self, input: f64) -> Self {
        self.inner = self.inner.confidence_threshold(input);
        self
    }
    /// <p>The minimum confidence level for logic validation. Content that meets the threshold is considered a high-confidence finding that can be validated.</p>
    pub fn set_confidence_threshold(mut self, input: ::std::option::Option<f64>) -> Self {
        self.inner = self.inner.set_confidence_threshold(input);
        self
    }
    /// <p>The minimum confidence level for logic validation. Content that meets the threshold is considered a high-confidence finding that can be validated.</p>
    pub fn get_confidence_threshold(&self) -> &::std::option::Option<f64> {
        self.inner.get_confidence_threshold()
    }
}