1 1 | // Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
|
2 2 | #[allow(missing_docs)] // documentation missing in model
|
3 3 | #[non_exhaustive]
|
4 - | #[derive(::std::clone::Clone, ::std::cmp::PartialEq, ::std::fmt::Debug)]
|
4 + | #[derive(::std::clone::Clone, ::std::cmp::PartialEq)]
|
5 5 | pub struct ConverseInput {
|
6 - | /// <p>The identifier for the model that you want to call.</p>
|
7 - | /// <p>The <code>modelId</code> to provide depends on the type of model that you use:</p>
|
6 + | /// <p>Specifies the model or throughput with which to run inference, or the prompt resource to use in inference. The value depends on the resource that you use:</p>
|
8 7 | /// <ul>
|
9 8 | /// <li>
|
10 9 | /// <p>If you use a base model, specify the model ID or its ARN. For a list of model IDs for base models, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html#model-ids-arns">Amazon Bedrock base model IDs (on-demand throughput)</a> in the Amazon Bedrock User Guide.</p></li>
|
11 10 | /// <li>
|
11 + | /// <p>If you use an inference profile, specify the inference profile ID or its ARN. For a list of inference profile IDs, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference-support.html">Supported Regions and models for cross-region inference</a> in the Amazon Bedrock User Guide.</p></li>
|
12 + | /// <li>
|
12 13 | /// <p>If you use a provisioned model, specify the ARN of the Provisioned Throughput. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/prov-thru-use.html">Run inference using a Provisioned Throughput</a> in the Amazon Bedrock User Guide.</p></li>
|
13 14 | /// <li>
|
14 15 | /// <p>If you use a custom model, first purchase Provisioned Throughput for it. Then specify the ARN of the resulting provisioned model. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-use.html">Use a custom model in Amazon Bedrock</a> in the Amazon Bedrock User Guide.</p></li>
|
16 + | /// <li>
|
17 + | /// <p>To include a prompt that was defined in <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/prompt-management.html">Prompt management</a>, specify the ARN of the prompt version to use.</p></li>
|
15 18 | /// </ul>
|
19 + | /// <p>The Converse API doesn't support <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-import-model.html">imported models</a>.</p>
|
16 20 | pub model_id: ::std::option::Option<::std::string::String>,
|
17 21 | /// <p>The messages that you want to send to the model.</p>
|
18 22 | pub messages: ::std::option::Option<::std::vec::Vec<crate::types::Message>>,
|
19 - | /// <p>A system prompt to pass to the model.</p>
|
23 + | /// <p>A prompt that provides instructions or context to the model about the task it should perform, or the persona it should adopt during the conversation.</p>
|
20 24 | pub system: ::std::option::Option<::std::vec::Vec<crate::types::SystemContentBlock>>,
|
21 - | /// <p>Inference parameters to pass to the model. <code>Converse</code> supports a base set of inference parameters. If you need to pass additional parameters that the model supports, use the <code>additionalModelRequestFields</code> request field.</p>
|
25 + | /// <p>Inference parameters to pass to the model. <code>Converse</code> and <code>ConverseStream</code> support a base set of inference parameters. If you need to pass additional parameters that the model supports, use the <code>additionalModelRequestFields</code> request field.</p>
|
22 26 | pub inference_config: ::std::option::Option<crate::types::InferenceConfiguration>,
|
23 - | /// <p>Configuration information for the tools that the model can use when generating a response.</p><note>
|
24 - | /// <p>This field is only supported by Anthropic Claude 3, Cohere Command R, Cohere Command R+, and Mistral Large models.</p>
|
25 - | /// </note>
|
27 + | /// <p>Configuration information for the tools that the model can use when generating a response.</p>
|
28 + | /// <p>For information about models that support tool use, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/conversation-inference.html#conversation-inference-supported-models-features">Supported models and model features</a>.</p>
|
26 29 | pub tool_config: ::std::option::Option<crate::types::ToolConfiguration>,
|
27 - | /// <p>Additional inference parameters that the model supports, beyond the base set of inference parameters that <code>Converse</code> supports in the <code>inferenceConfig</code> field. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters.html">Model parameters</a>.</p>
|
30 + | /// <p>Configuration information for a guardrail that you want to use in the request. If you include <code>guardContent</code> blocks in the <code>content</code> field in the <code>messages</code> field, the guardrail operates only on those messages. If you include no <code>guardContent</code> blocks, the guardrail operates on all messages in the request body and in any included prompt resource.</p>
|
31 + | pub guardrail_config: ::std::option::Option<crate::types::GuardrailConfiguration>,
|
32 + | /// <p>Additional inference parameters that the model supports, beyond the base set of inference parameters that <code>Converse</code> and <code>ConverseStream</code> support in the <code>inferenceConfig</code> field. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters.html">Model parameters</a>.</p>
|
28 33 | pub additional_model_request_fields: ::std::option::Option<::aws_smithy_types::Document>,
|
29 - | /// <p>Additional model parameters field paths to return in the response. <code>Converse</code> returns the requested fields as a JSON Pointer object in the <code>additionalModelResultFields</code> field. The following is example JSON for <code>additionalModelResponseFieldPaths</code>.</p>
|
34 + | /// <p>Contains a map of variables in a prompt from Prompt management to objects containing the values to fill in for them when running model invocation. This field is ignored if you don't specify a prompt resource in the <code>modelId</code> field.</p>
|
35 + | pub prompt_variables: ::std::option::Option<::std::collections::HashMap<::std::string::String, crate::types::PromptVariableValues>>,
|
36 + | /// <p>Additional model parameters field paths to return in the response. <code>Converse</code> and <code>ConverseStream</code> return the requested fields as a JSON Pointer object in the <code>additionalModelResponseFields</code> field. The following is example JSON for <code>additionalModelResponseFieldPaths</code>.</p>
|
30 37 | /// <p><code>\[ "/stop_sequence" \]</code></p>
|
31 38 | /// <p>For information about the JSON Pointer syntax, see the <a href="https://datatracker.ietf.org/doc/html/rfc6901">Internet Engineering Task Force (IETF)</a> documentation.</p>
|
32 - | /// <p><code>Converse</code> rejects an empty JSON Pointer or incorrectly structured JSON Pointer with a <code>400</code> error code. if the JSON Pointer is valid, but the requested field is not in the model response, it is ignored by <code>Converse</code>.</p>
|
39 + | /// <p><code>Converse</code> and <code>ConverseStream</code> reject an empty JSON Pointer or incorrectly structured JSON Pointer with a <code>400</code> error code. If the JSON Pointer is valid, but the requested field is not in the model response, it is ignored by <code>Converse</code>.</p>
|
33 40 | pub additional_model_response_field_paths: ::std::option::Option<::std::vec::Vec<::std::string::String>>,
|
41 + | /// <p>Key-value pairs that you can use to filter invocation logs.</p>
|
42 + | pub request_metadata: ::std::option::Option<::std::collections::HashMap<::std::string::String, ::std::string::String>>,
|
43 + | /// <p>Model performance settings for the request.</p>
|
44 + | pub performance_config: ::std::option::Option<crate::types::PerformanceConfiguration>,
|
34 45 | }
|
35 46 | impl ConverseInput {
|
36 - | /// <p>The identifier for the model that you want to call.</p>
|
37 - | /// <p>The <code>modelId</code> to provide depends on the type of model that you use:</p>
|
47 + | /// <p>Specifies the model or throughput with which to run inference, or the prompt resource to use in inference. The value depends on the resource that you use:</p>
|
38 48 | /// <ul>
|
39 49 | /// <li>
|
40 50 | /// <p>If you use a base model, specify the model ID or its ARN. For a list of model IDs for base models, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html#model-ids-arns">Amazon Bedrock base model IDs (on-demand throughput)</a> in the Amazon Bedrock User Guide.</p></li>
|
41 51 | /// <li>
|
52 + | /// <p>If you use an inference profile, specify the inference profile ID or its ARN. For a list of inference profile IDs, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference-support.html">Supported Regions and models for cross-region inference</a> in the Amazon Bedrock User Guide.</p></li>
|
53 + | /// <li>
|
42 54 | /// <p>If you use a provisioned model, specify the ARN of the Provisioned Throughput. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/prov-thru-use.html">Run inference using a Provisioned Throughput</a> in the Amazon Bedrock User Guide.</p></li>
|
43 55 | /// <li>
|
44 56 | /// <p>If you use a custom model, first purchase Provisioned Throughput for it. Then specify the ARN of the resulting provisioned model. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-use.html">Use a custom model in Amazon Bedrock</a> in the Amazon Bedrock User Guide.</p></li>
|
57 + | /// <li>
|
58 + | /// <p>To include a prompt that was defined in <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/prompt-management.html">Prompt management</a>, specify the ARN of the prompt version to use.</p></li>
|
45 59 | /// </ul>
|
60 + | /// <p>The Converse API doesn't support <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-import-model.html">imported models</a>.</p>
|
46 61 | pub fn model_id(&self) -> ::std::option::Option<&str> {
|
47 62 | self.model_id.as_deref()
|
48 63 | }
|
49 64 | /// <p>The messages that you want to send to the model.</p>
|
50 65 | ///
|
51 66 | /// If no value was sent for this field, a default will be set. If you want to determine if no value was sent, use `.messages.is_none()`.
|
52 67 | pub fn messages(&self) -> &[crate::types::Message] {
|
53 68 | self.messages.as_deref().unwrap_or_default()
|
54 69 | }
|
55 - | /// <p>A system prompt to pass to the model.</p>
|
70 + | /// <p>A prompt that provides instructions or context to the model about the task it should perform, or the persona it should adopt during the conversation.</p>
|
56 71 | ///
|
57 72 | /// If no value was sent for this field, a default will be set. If you want to determine if no value was sent, use `.system.is_none()`.
|
58 73 | pub fn system(&self) -> &[crate::types::SystemContentBlock] {
|
59 74 | self.system.as_deref().unwrap_or_default()
|
60 75 | }
|
61 - | /// <p>Inference parameters to pass to the model. <code>Converse</code> supports a base set of inference parameters. If you need to pass additional parameters that the model supports, use the <code>additionalModelRequestFields</code> request field.</p>
|
76 + | /// <p>Inference parameters to pass to the model. <code>Converse</code> and <code>ConverseStream</code> support a base set of inference parameters. If you need to pass additional parameters that the model supports, use the <code>additionalModelRequestFields</code> request field.</p>
|
62 77 | pub fn inference_config(&self) -> ::std::option::Option<&crate::types::InferenceConfiguration> {
|
63 78 | self.inference_config.as_ref()
|
64 79 | }
|
65 - | /// <p>Configuration information for the tools that the model can use when generating a response.</p><note>
|
66 - | /// <p>This field is only supported by Anthropic Claude 3, Cohere Command R, Cohere Command R+, and Mistral Large models.</p>
|
67 - | /// </note>
|
80 + | /// <p>Configuration information for the tools that the model can use when generating a response.</p>
|
81 + | /// <p>For information about models that support tool use, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/conversation-inference.html#conversation-inference-supported-models-features">Supported models and model features</a>.</p>
|
68 82 | pub fn tool_config(&self) -> ::std::option::Option<&crate::types::ToolConfiguration> {
|
69 83 | self.tool_config.as_ref()
|
70 84 | }
|
71 - | /// <p>Additional inference parameters that the model supports, beyond the base set of inference parameters that <code>Converse</code> supports in the <code>inferenceConfig</code> field. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters.html">Model parameters</a>.</p>
|
85 + | /// <p>Configuration information for a guardrail that you want to use in the request. If you include <code>guardContent</code> blocks in the <code>content</code> field in the <code>messages</code> field, the guardrail operates only on those messages. If you include no <code>guardContent</code> blocks, the guardrail operates on all messages in the request body and in any included prompt resource.</p>
|
86 + | pub fn guardrail_config(&self) -> ::std::option::Option<&crate::types::GuardrailConfiguration> {
|
87 + | self.guardrail_config.as_ref()
|
88 + | }
|
89 + | /// <p>Additional inference parameters that the model supports, beyond the base set of inference parameters that <code>Converse</code> and <code>ConverseStream</code> support in the <code>inferenceConfig</code> field. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters.html">Model parameters</a>.</p>
|
72 90 | pub fn additional_model_request_fields(&self) -> ::std::option::Option<&::aws_smithy_types::Document> {
|
73 91 | self.additional_model_request_fields.as_ref()
|
74 92 | }
|
75 - | /// <p>Additional model parameters field paths to return in the response. <code>Converse</code> returns the requested fields as a JSON Pointer object in the <code>additionalModelResultFields</code> field. The following is example JSON for <code>additionalModelResponseFieldPaths</code>.</p>
|
93 + | /// <p>Contains a map of variables in a prompt from Prompt management to objects containing the values to fill in for them when running model invocation. This field is ignored if you don't specify a prompt resource in the <code>modelId</code> field.</p>
|
94 + | pub fn prompt_variables(&self) -> ::std::option::Option<&::std::collections::HashMap<::std::string::String, crate::types::PromptVariableValues>> {
|
95 + | self.prompt_variables.as_ref()
|
96 + | }
|
97 + | /// <p>Additional model parameters field paths to return in the response. <code>Converse</code> and <code>ConverseStream</code> return the requested fields as a JSON Pointer object in the <code>additionalModelResponseFields</code> field. The following is example JSON for <code>additionalModelResponseFieldPaths</code>.</p>
|
76 98 | /// <p><code>\[ "/stop_sequence" \]</code></p>
|
77 99 | /// <p>For information about the JSON Pointer syntax, see the <a href="https://datatracker.ietf.org/doc/html/rfc6901">Internet Engineering Task Force (IETF)</a> documentation.</p>
|
78 - | /// <p><code>Converse</code> rejects an empty JSON Pointer or incorrectly structured JSON Pointer with a <code>400</code> error code. if the JSON Pointer is valid, but the requested field is not in the model response, it is ignored by <code>Converse</code>.</p>
|
100 + | /// <p><code>Converse</code> and <code>ConverseStream</code> reject an empty JSON Pointer or incorrectly structured JSON Pointer with a <code>400</code> error code. If the JSON Pointer is valid, but the requested field is not in the model response, it is ignored by <code>Converse</code>.</p>
|
79 101 | ///
|
80 102 | /// If no value was sent for this field, a default will be set. If you want to determine if no value was sent, use `.additional_model_response_field_paths.is_none()`.
|
81 103 | pub fn additional_model_response_field_paths(&self) -> &[::std::string::String] {
|
82 104 | self.additional_model_response_field_paths.as_deref().unwrap_or_default()
|
83 105 | }
|
106 + | /// <p>Key-value pairs that you can use to filter invocation logs.</p>
|
107 + | pub fn request_metadata(&self) -> ::std::option::Option<&::std::collections::HashMap<::std::string::String, ::std::string::String>> {
|
108 + | self.request_metadata.as_ref()
|
109 + | }
|
110 + | /// <p>Model performance settings for the request.</p>
|
111 + | pub fn performance_config(&self) -> ::std::option::Option<&crate::types::PerformanceConfiguration> {
|
112 + | self.performance_config.as_ref()
|
113 + | }
|
114 + | }
|
115 + | impl ::std::fmt::Debug for ConverseInput {
|
116 + | fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
|
117 + | let mut formatter = f.debug_struct("ConverseInput");
|
118 + | formatter.field("model_id", &self.model_id);
|
119 + | formatter.field("messages", &self.messages);
|
120 + | formatter.field("system", &self.system);
|
121 + | formatter.field("inference_config", &self.inference_config);
|
122 + | formatter.field("tool_config", &self.tool_config);
|
123 + | formatter.field("guardrail_config", &self.guardrail_config);
|
124 + | formatter.field("additional_model_request_fields", &self.additional_model_request_fields);
|
125 + | formatter.field("prompt_variables", &"*** Sensitive Data Redacted ***");
|
126 + | formatter.field("additional_model_response_field_paths", &self.additional_model_response_field_paths);
|
127 + | formatter.field("request_metadata", &"*** Sensitive Data Redacted ***");
|
128 + | formatter.field("performance_config", &self.performance_config);
|
129 + | formatter.finish()
|
130 + | }
|
84 131 | }
|
85 132 | impl ConverseInput {
|
86 133 | /// Creates a new builder-style object to manufacture [`ConverseInput`](crate::operation::converse::ConverseInput).
|
87 134 | pub fn builder() -> crate::operation::converse::builders::ConverseInputBuilder {
|
88 135 | crate::operation::converse::builders::ConverseInputBuilder::default()
|
89 136 | }
|
90 137 | }
|
91 138 |
|
92 139 | /// A builder for [`ConverseInput`](crate::operation::converse::ConverseInput).
|
93 - | #[derive(::std::clone::Clone, ::std::cmp::PartialEq, ::std::default::Default, ::std::fmt::Debug)]
|
140 + | #[derive(::std::clone::Clone, ::std::cmp::PartialEq, ::std::default::Default)]
|
94 141 | #[non_exhaustive]
|
95 142 | pub struct ConverseInputBuilder {
|
96 143 | pub(crate) model_id: ::std::option::Option<::std::string::String>,
|
97 144 | pub(crate) messages: ::std::option::Option<::std::vec::Vec<crate::types::Message>>,
|
98 145 | pub(crate) system: ::std::option::Option<::std::vec::Vec<crate::types::SystemContentBlock>>,
|
99 146 | pub(crate) inference_config: ::std::option::Option<crate::types::InferenceConfiguration>,
|
100 147 | pub(crate) tool_config: ::std::option::Option<crate::types::ToolConfiguration>,
|
148 + | pub(crate) guardrail_config: ::std::option::Option<crate::types::GuardrailConfiguration>,
|
101 149 | pub(crate) additional_model_request_fields: ::std::option::Option<::aws_smithy_types::Document>,
|
150 + | pub(crate) prompt_variables: ::std::option::Option<::std::collections::HashMap<::std::string::String, crate::types::PromptVariableValues>>,
|
102 151 | pub(crate) additional_model_response_field_paths: ::std::option::Option<::std::vec::Vec<::std::string::String>>,
|
152 + | pub(crate) request_metadata: ::std::option::Option<::std::collections::HashMap<::std::string::String, ::std::string::String>>,
|
153 + | pub(crate) performance_config: ::std::option::Option<crate::types::PerformanceConfiguration>,
|
103 154 | }
|
104 155 | impl ConverseInputBuilder {
|
105 - | /// <p>The identifier for the model that you want to call.</p>
|
106 - | /// <p>The <code>modelId</code> to provide depends on the type of model that you use:</p>
|
156 + | /// <p>Specifies the model or throughput with which to run inference, or the prompt resource to use in inference. The value depends on the resource that you use:</p>
|
107 157 | /// <ul>
|
108 158 | /// <li>
|
109 159 | /// <p>If you use a base model, specify the model ID or its ARN. For a list of model IDs for base models, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html#model-ids-arns">Amazon Bedrock base model IDs (on-demand throughput)</a> in the Amazon Bedrock User Guide.</p></li>
|
110 160 | /// <li>
|
161 + | /// <p>If you use an inference profile, specify the inference profile ID or its ARN. For a list of inference profile IDs, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference-support.html">Supported Regions and models for cross-region inference</a> in the Amazon Bedrock User Guide.</p></li>
|
162 + | /// <li>
|
111 163 | /// <p>If you use a provisioned model, specify the ARN of the Provisioned Throughput. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/prov-thru-use.html">Run inference using a Provisioned Throughput</a> in the Amazon Bedrock User Guide.</p></li>
|
112 164 | /// <li>
|
113 165 | /// <p>If you use a custom model, first purchase Provisioned Throughput for it. Then specify the ARN of the resulting provisioned model. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-use.html">Use a custom model in Amazon Bedrock</a> in the Amazon Bedrock User Guide.</p></li>
|
166 + | /// <li>
|
167 + | /// <p>To include a prompt that was defined in <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/prompt-management.html">Prompt management</a>, specify the ARN of the prompt version to use.</p></li>
|
114 168 | /// </ul>
|
169 + | /// <p>The Converse API doesn't support <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-import-model.html">imported models</a>.</p>
|
115 170 | /// This field is required.
|
116 171 | pub fn model_id(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
|
117 172 | self.model_id = ::std::option::Option::Some(input.into());
|
118 173 | self
|
119 174 | }
|
120 - | /// <p>The identifier for the model that you want to call.</p>
|
121 - | /// <p>The <code>modelId</code> to provide depends on the type of model that you use:</p>
|
175 + | /// <p>Specifies the model or throughput with which to run inference, or the prompt resource to use in inference. The value depends on the resource that you use:</p>
|
122 176 | /// <ul>
|
123 177 | /// <li>
|
124 178 | /// <p>If you use a base model, specify the model ID or its ARN. For a list of model IDs for base models, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html#model-ids-arns">Amazon Bedrock base model IDs (on-demand throughput)</a> in the Amazon Bedrock User Guide.</p></li>
|
125 179 | /// <li>
|
180 + | /// <p>If you use an inference profile, specify the inference profile ID or its ARN. For a list of inference profile IDs, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference-support.html">Supported Regions and models for cross-region inference</a> in the Amazon Bedrock User Guide.</p></li>
|
181 + | /// <li>
|
126 182 | /// <p>If you use a provisioned model, specify the ARN of the Provisioned Throughput. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/prov-thru-use.html">Run inference using a Provisioned Throughput</a> in the Amazon Bedrock User Guide.</p></li>
|
127 183 | /// <li>
|
128 184 | /// <p>If you use a custom model, first purchase Provisioned Throughput for it. Then specify the ARN of the resulting provisioned model. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-use.html">Use a custom model in Amazon Bedrock</a> in the Amazon Bedrock User Guide.</p></li>
|
185 + | /// <li>
|
186 + | /// <p>To include a prompt that was defined in <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/prompt-management.html">Prompt management</a>, specify the ARN of the prompt version to use.</p></li>
|
129 187 | /// </ul>
|
188 + | /// <p>The Converse API doesn't support <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-import-model.html">imported models</a>.</p>
|
130 189 | pub fn set_model_id(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
|
131 190 | self.model_id = input;
|
132 191 | self
|
133 192 | }
|
134 - | /// <p>The identifier for the model that you want to call.</p>
|
135 - | /// <p>The <code>modelId</code> to provide depends on the type of model that you use:</p>
|
193 + | /// <p>Specifies the model or throughput with which to run inference, or the prompt resource to use in inference. The value depends on the resource that you use:</p>
|
136 194 | /// <ul>
|
137 195 | /// <li>
|
138 196 | /// <p>If you use a base model, specify the model ID or its ARN. For a list of model IDs for base models, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html#model-ids-arns">Amazon Bedrock base model IDs (on-demand throughput)</a> in the Amazon Bedrock User Guide.</p></li>
|
139 197 | /// <li>
|
198 + | /// <p>If you use an inference profile, specify the inference profile ID or its ARN. For a list of inference profile IDs, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference-support.html">Supported Regions and models for cross-region inference</a> in the Amazon Bedrock User Guide.</p></li>
|
199 + | /// <li>
|
140 200 | /// <p>If you use a provisioned model, specify the ARN of the Provisioned Throughput. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/prov-thru-use.html">Run inference using a Provisioned Throughput</a> in the Amazon Bedrock User Guide.</p></li>
|
141 201 | /// <li>
|
142 202 | /// <p>If you use a custom model, first purchase Provisioned Throughput for it. Then specify the ARN of the resulting provisioned model. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-use.html">Use a custom model in Amazon Bedrock</a> in the Amazon Bedrock User Guide.</p></li>
|
203 + | /// <li>
|
204 + | /// <p>To include a prompt that was defined in <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/prompt-management.html">Prompt management</a>, specify the ARN of the prompt version to use.</p></li>
|
143 205 | /// </ul>
|
206 + | /// <p>The Converse API doesn't support <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-import-model.html">imported models</a>.</p>
|
144 207 | pub fn get_model_id(&self) -> &::std::option::Option<::std::string::String> {
|
145 208 | &self.model_id
|
146 209 | }
|
147 210 | /// Appends an item to `messages`.
|
148 211 | ///
|
149 212 | /// To override the contents of this collection use [`set_messages`](Self::set_messages).
|
150 213 | ///
|
151 214 | /// <p>The messages that you want to send to the model.</p>
|
152 215 | pub fn messages(mut self, input: crate::types::Message) -> Self {
|
153 216 | let mut v = self.messages.unwrap_or_default();
|
154 217 | v.push(input);
|
155 218 | self.messages = ::std::option::Option::Some(v);
|
156 219 | self
|
157 220 | }
|
158 221 | /// <p>The messages that you want to send to the model.</p>
|
159 222 | pub fn set_messages(mut self, input: ::std::option::Option<::std::vec::Vec<crate::types::Message>>) -> Self {
|
160 223 | self.messages = input;
|
161 224 | self
|
162 225 | }
|
163 226 | /// <p>The messages that you want to send to the model.</p>
|
164 227 | pub fn get_messages(&self) -> &::std::option::Option<::std::vec::Vec<crate::types::Message>> {
|
165 228 | &self.messages
|
166 229 | }
|
167 230 | /// Appends an item to `system`.
|
168 231 | ///
|
169 232 | /// To override the contents of this collection use [`set_system`](Self::set_system).
|
170 233 | ///
|
171 - | /// <p>A system prompt to pass to the model.</p>
|
234 + | /// <p>A prompt that provides instructions or context to the model about the task it should perform, or the persona it should adopt during the conversation.</p>
|
172 235 | pub fn system(mut self, input: crate::types::SystemContentBlock) -> Self {
|
173 236 | let mut v = self.system.unwrap_or_default();
|
174 237 | v.push(input);
|
175 238 | self.system = ::std::option::Option::Some(v);
|
176 239 | self
|
177 240 | }
|
178 - | /// <p>A system prompt to pass to the model.</p>
|
241 + | /// <p>A prompt that provides instructions or context to the model about the task it should perform, or the persona it should adopt during the conversation.</p>
|
179 242 | pub fn set_system(mut self, input: ::std::option::Option<::std::vec::Vec<crate::types::SystemContentBlock>>) -> Self {
|
180 243 | self.system = input;
|
181 244 | self
|
182 245 | }
|
183 - | /// <p>A system prompt to pass to the model.</p>
|
246 + | /// <p>A prompt that provides instructions or context to the model about the task it should perform, or the persona it should adopt during the conversation.</p>
|
184 247 | pub fn get_system(&self) -> &::std::option::Option<::std::vec::Vec<crate::types::SystemContentBlock>> {
|
185 248 | &self.system
|
186 249 | }
|
187 - | /// <p>Inference parameters to pass to the model. <code>Converse</code> supports a base set of inference parameters. If you need to pass additional parameters that the model supports, use the <code>additionalModelRequestFields</code> request field.</p>
|
250 + | /// <p>Inference parameters to pass to the model. <code>Converse</code> and <code>ConverseStream</code> support a base set of inference parameters. If you need to pass additional parameters that the model supports, use the <code>additionalModelRequestFields</code> request field.</p>
|
188 251 | pub fn inference_config(mut self, input: crate::types::InferenceConfiguration) -> Self {
|
189 252 | self.inference_config = ::std::option::Option::Some(input);
|
190 253 | self
|
191 254 | }
|
192 - | /// <p>Inference parameters to pass to the model. <code>Converse</code> supports a base set of inference parameters. If you need to pass additional parameters that the model supports, use the <code>additionalModelRequestFields</code> request field.</p>
|
255 + | /// <p>Inference parameters to pass to the model. <code>Converse</code> and <code>ConverseStream</code> support a base set of inference parameters. If you need to pass additional parameters that the model supports, use the <code>additionalModelRequestFields</code> request field.</p>
|
193 256 | pub fn set_inference_config(mut self, input: ::std::option::Option<crate::types::InferenceConfiguration>) -> Self {
|
194 257 | self.inference_config = input;
|
195 258 | self
|
196 259 | }
|
197 - | /// <p>Inference parameters to pass to the model. <code>Converse</code> supports a base set of inference parameters. If you need to pass additional parameters that the model supports, use the <code>additionalModelRequestFields</code> request field.</p>
|
260 + | /// <p>Inference parameters to pass to the model. <code>Converse</code> and <code>ConverseStream</code> support a base set of inference parameters. If you need to pass additional parameters that the model supports, use the <code>additionalModelRequestFields</code> request field.</p>
|
198 261 | pub fn get_inference_config(&self) -> &::std::option::Option<crate::types::InferenceConfiguration> {
|
199 262 | &self.inference_config
|
200 263 | }
|
201 - | /// <p>Configuration information for the tools that the model can use when generating a response.</p><note>
|
202 - | /// <p>This field is only supported by Anthropic Claude 3, Cohere Command R, Cohere Command R+, and Mistral Large models.</p>
|
203 - | /// </note>
|
264 + | /// <p>Configuration information for the tools that the model can use when generating a response.</p>
|
265 + | /// <p>For information about models that support tool use, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/conversation-inference.html#conversation-inference-supported-models-features">Supported models and model features</a>.</p>
|
204 266 | pub fn tool_config(mut self, input: crate::types::ToolConfiguration) -> Self {
|
205 267 | self.tool_config = ::std::option::Option::Some(input);
|
206 268 | self
|
207 269 | }
|
208 - | /// <p>Configuration information for the tools that the model can use when generating a response.</p><note>
|
209 - | /// <p>This field is only supported by Anthropic Claude 3, Cohere Command R, Cohere Command R+, and Mistral Large models.</p>
|
210 - | /// </note>
|
270 + | /// <p>Configuration information for the tools that the model can use when generating a response.</p>
|
271 + | /// <p>For information about models that support tool use, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/conversation-inference.html#conversation-inference-supported-models-features">Supported models and model features</a>.</p>
|
211 272 | pub fn set_tool_config(mut self, input: ::std::option::Option<crate::types::ToolConfiguration>) -> Self {
|
212 273 | self.tool_config = input;
|
213 274 | self
|
214 275 | }
|
215 - | /// <p>Configuration information for the tools that the model can use when generating a response.</p><note>
|
216 - | /// <p>This field is only supported by Anthropic Claude 3, Cohere Command R, Cohere Command R+, and Mistral Large models.</p>
|
217 - | /// </note>
|
276 + | /// <p>Configuration information for the tools that the model can use when generating a response.</p>
|
277 + | /// <p>For information about models that support tool use, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/conversation-inference.html#conversation-inference-supported-models-features">Supported models and model features</a>.</p>
|
218 278 | pub fn get_tool_config(&self) -> &::std::option::Option<crate::types::ToolConfiguration> {
|
219 279 | &self.tool_config
|
220 280 | }
|
221 - | /// <p>Additional inference parameters that the model supports, beyond the base set of inference parameters that <code>Converse</code> supports in the <code>inferenceConfig</code> field. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters.html">Model parameters</a>.</p>
|
281 + | /// <p>Configuration information for a guardrail that you want to use in the request. If you include <code>guardContent</code> blocks in the <code>content</code> field in the <code>messages</code> field, the guardrail operates only on those messages. If you include no <code>guardContent</code> blocks, the guardrail operates on all messages in the request body and in any included prompt resource.</p>
|
282 + | pub fn guardrail_config(mut self, input: crate::types::GuardrailConfiguration) -> Self {
|
283 + | self.guardrail_config = ::std::option::Option::Some(input);
|
284 + | self
|
285 + | }
|
286 + | /// <p>Configuration information for a guardrail that you want to use in the request. If you include <code>guardContent</code> blocks in the <code>content</code> field in the <code>messages</code> field, the guardrail operates only on those messages. If you include no <code>guardContent</code> blocks, the guardrail operates on all messages in the request body and in any included prompt resource.</p>
|
287 + | pub fn set_guardrail_config(mut self, input: ::std::option::Option<crate::types::GuardrailConfiguration>) -> Self {
|
288 + | self.guardrail_config = input;
|
289 + | self
|
290 + | }
|
291 + | /// <p>Configuration information for a guardrail that you want to use in the request. If you include <code>guardContent</code> blocks in the <code>content</code> field in the <code>messages</code> field, the guardrail operates only on those messages. If you include no <code>guardContent</code> blocks, the guardrail operates on all messages in the request body and in any included prompt resource.</p>
|
292 + | pub fn get_guardrail_config(&self) -> &::std::option::Option<crate::types::GuardrailConfiguration> {
|
293 + | &self.guardrail_config
|
294 + | }
|
295 + | /// <p>Additional inference parameters that the model supports, beyond the base set of inference parameters that <code>Converse</code> and <code>ConverseStream</code> support in the <code>inferenceConfig</code> field. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters.html">Model parameters</a>.</p>
|
222 296 | pub fn additional_model_request_fields(mut self, input: ::aws_smithy_types::Document) -> Self {
|
223 297 | self.additional_model_request_fields = ::std::option::Option::Some(input);
|
224 298 | self
|
225 299 | }
|
226 - | /// <p>Additional inference parameters that the model supports, beyond the base set of inference parameters that <code>Converse</code> supports in the <code>inferenceConfig</code> field. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters.html">Model parameters</a>.</p>
|
300 + | /// <p>Additional inference parameters that the model supports, beyond the base set of inference parameters that <code>Converse</code> and <code>ConverseStream</code> support in the <code>inferenceConfig</code> field. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters.html">Model parameters</a>.</p>
|
227 301 | pub fn set_additional_model_request_fields(mut self, input: ::std::option::Option<::aws_smithy_types::Document>) -> Self {
|
228 302 | self.additional_model_request_fields = input;
|
229 303 | self
|
230 304 | }
|
231 - | /// <p>Additional inference parameters that the model supports, beyond the base set of inference parameters that <code>Converse</code> supports in the <code>inferenceConfig</code> field. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters.html">Model parameters</a>.</p>
|
305 + | /// <p>Additional inference parameters that the model supports, beyond the base set of inference parameters that <code>Converse</code> and <code>ConverseStream</code> support in the <code>inferenceConfig</code> field. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters.html">Model parameters</a>.</p>
|
232 306 | pub fn get_additional_model_request_fields(&self) -> &::std::option::Option<::aws_smithy_types::Document> {
|
233 307 | &self.additional_model_request_fields
|
234 308 | }
|
309 + | /// Adds a key-value pair to `prompt_variables`.
|
310 + | ///
|
311 + | /// To override the contents of this collection use [`set_prompt_variables`](Self::set_prompt_variables).
|
312 + | ///
|
313 + | /// <p>Contains a map of variables in a prompt from Prompt management to objects containing the values to fill in for them when running model invocation. This field is ignored if you don't specify a prompt resource in the <code>modelId</code> field.</p>
|
314 + | pub fn prompt_variables(mut self, k: impl ::std::convert::Into<::std::string::String>, v: crate::types::PromptVariableValues) -> Self {
|
315 + | let mut hash_map = self.prompt_variables.unwrap_or_default();
|
316 + | hash_map.insert(k.into(), v);
|
317 + | self.prompt_variables = ::std::option::Option::Some(hash_map);
|
318 + | self
|
319 + | }
|
320 + | /// <p>Contains a map of variables in a prompt from Prompt management to objects containing the values to fill in for them when running model invocation. This field is ignored if you don't specify a prompt resource in the <code>modelId</code> field.</p>
|
321 + | pub fn set_prompt_variables(
|
322 + | mut self,
|
323 + | input: ::std::option::Option<::std::collections::HashMap<::std::string::String, crate::types::PromptVariableValues>>,
|
324 + | ) -> Self {
|
325 + | self.prompt_variables = input;
|
326 + | self
|
327 + | }
|
328 + | /// <p>Contains a map of variables in a prompt from Prompt management to objects containing the values to fill in for them when running model invocation. This field is ignored if you don't specify a prompt resource in the <code>modelId</code> field.</p>
|
329 + | pub fn get_prompt_variables(
|
330 + | &self,
|
331 + | ) -> &::std::option::Option<::std::collections::HashMap<::std::string::String, crate::types::PromptVariableValues>> {
|
332 + | &self.prompt_variables
|
333 + | }
|
235 334 | /// Appends an item to `additional_model_response_field_paths`.
|
236 335 | ///
|
237 336 | /// To override the contents of this collection use [`set_additional_model_response_field_paths`](Self::set_additional_model_response_field_paths).
|
238 337 | ///
|
239 - | /// <p>Additional model parameters field paths to return in the response. <code>Converse</code> returns the requested fields as a JSON Pointer object in the <code>additionalModelResultFields</code> field. The following is example JSON for <code>additionalModelResponseFieldPaths</code>.</p>
|
338 + | /// <p>Additional model parameters field paths to return in the response. <code>Converse</code> and <code>ConverseStream</code> return the requested fields as a JSON Pointer object in the <code>additionalModelResponseFields</code> field. The following is example JSON for <code>additionalModelResponseFieldPaths</code>.</p>
|
240 339 | /// <p><code>\[ "/stop_sequence" \]</code></p>
|
241 340 | /// <p>For information about the JSON Pointer syntax, see the <a href="https://datatracker.ietf.org/doc/html/rfc6901">Internet Engineering Task Force (IETF)</a> documentation.</p>
|
242 - | /// <p><code>Converse</code> rejects an empty JSON Pointer or incorrectly structured JSON Pointer with a <code>400</code> error code. if the JSON Pointer is valid, but the requested field is not in the model response, it is ignored by <code>Converse</code>.</p>
|
341 + | /// <p><code>Converse</code> and <code>ConverseStream</code> reject an empty JSON Pointer or incorrectly structured JSON Pointer with a <code>400</code> error code. If the JSON Pointer is valid, but the requested field is not in the model response, it is ignored by <code>Converse</code>.</p>
|
243 342 | pub fn additional_model_response_field_paths(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
|
244 343 | let mut v = self.additional_model_response_field_paths.unwrap_or_default();
|
245 344 | v.push(input.into());
|
246 345 | self.additional_model_response_field_paths = ::std::option::Option::Some(v);
|
247 346 | self
|
248 347 | }
|
249 - | /// <p>Additional model parameters field paths to return in the response. <code>Converse</code> returns the requested fields as a JSON Pointer object in the <code>additionalModelResultFields</code> field. The following is example JSON for <code>additionalModelResponseFieldPaths</code>.</p>
|
348 + | /// <p>Additional model parameters field paths to return in the response. <code>Converse</code> and <code>ConverseStream</code> return the requested fields as a JSON Pointer object in the <code>additionalModelResponseFields</code> field. The following is example JSON for <code>additionalModelResponseFieldPaths</code>.</p>
|
250 349 | /// <p><code>\[ "/stop_sequence" \]</code></p>
|
251 350 | /// <p>For information about the JSON Pointer syntax, see the <a href="https://datatracker.ietf.org/doc/html/rfc6901">Internet Engineering Task Force (IETF)</a> documentation.</p>
|
252 - | /// <p><code>Converse</code> rejects an empty JSON Pointer or incorrectly structured JSON Pointer with a <code>400</code> error code. if the JSON Pointer is valid, but the requested field is not in the model response, it is ignored by <code>Converse</code>.</p>
|
351 + | /// <p><code>Converse</code> and <code>ConverseStream</code> reject an empty JSON Pointer or incorrectly structured JSON Pointer with a <code>400</code> error code. If the JSON Pointer is valid, but the requested field is not in the model response, it is ignored by <code>Converse</code>.</p>
|
253 352 | pub fn set_additional_model_response_field_paths(mut self, input: ::std::option::Option<::std::vec::Vec<::std::string::String>>) -> Self {
|
254 353 | self.additional_model_response_field_paths = input;
|
255 354 | self
|
256 355 | }
|
257 - | /// <p>Additional model parameters field paths to return in the response. <code>Converse</code> returns the requested fields as a JSON Pointer object in the <code>additionalModelResultFields</code> field. The following is example JSON for <code>additionalModelResponseFieldPaths</code>.</p>
|
356 + | /// <p>Additional model parameters field paths to return in the response. <code>Converse</code> and <code>ConverseStream</code> return the requested fields as a JSON Pointer object in the <code>additionalModelResponseFields</code> field. The following is example JSON for <code>additionalModelResponseFieldPaths</code>.</p>
|
258 357 | /// <p><code>\[ "/stop_sequence" \]</code></p>
|
259 358 | /// <p>For information about the JSON Pointer syntax, see the <a href="https://datatracker.ietf.org/doc/html/rfc6901">Internet Engineering Task Force (IETF)</a> documentation.</p>
|
260 - | /// <p><code>Converse</code> rejects an empty JSON Pointer or incorrectly structured JSON Pointer with a <code>400</code> error code. if the JSON Pointer is valid, but the requested field is not in the model response, it is ignored by <code>Converse</code>.</p>
|
359 + | /// <p><code>Converse</code> and <code>ConverseStream</code> reject an empty JSON Pointer or incorrectly structured JSON Pointer with a <code>400</code> error code. If the JSON Pointer is valid, but the requested field is not in the model response, it is ignored by <code>Converse</code>.</p>
|
261 360 | pub fn get_additional_model_response_field_paths(&self) -> &::std::option::Option<::std::vec::Vec<::std::string::String>> {
|
262 361 | &self.additional_model_response_field_paths
|
263 362 | }
|
363 + | /// Adds a key-value pair to `request_metadata`.
|
364 + | ///
|
365 + | /// To override the contents of this collection use [`set_request_metadata`](Self::set_request_metadata).
|
366 + | ///
|
367 + | /// <p>Key-value pairs that you can use to filter invocation logs.</p>
|
368 + | pub fn request_metadata(
|
369 + | mut self,
|
370 + | k: impl ::std::convert::Into<::std::string::String>,
|
371 + | v: impl ::std::convert::Into<::std::string::String>,
|
372 + | ) -> Self {
|
373 + | let mut hash_map = self.request_metadata.unwrap_or_default();
|
374 + | hash_map.insert(k.into(), v.into());
|
375 + | self.request_metadata = ::std::option::Option::Some(hash_map);
|
376 + | self
|
377 + | }
|
378 + | /// <p>Key-value pairs that you can use to filter invocation logs.</p>
|
379 + | pub fn set_request_metadata(
|
380 + | mut self,
|
381 + | input: ::std::option::Option<::std::collections::HashMap<::std::string::String, ::std::string::String>>,
|
382 + | ) -> Self {
|
383 + | self.request_metadata = input;
|
384 + | self
|
385 + | }
|
386 + | /// <p>Key-value pairs that you can use to filter invocation logs.</p>
|
387 + | pub fn get_request_metadata(&self) -> &::std::option::Option<::std::collections::HashMap<::std::string::String, ::std::string::String>> {
|
388 + | &self.request_metadata
|
389 + | }
|
390 + | /// <p>Model performance settings for the request.</p>
|
391 + | pub fn performance_config(mut self, input: crate::types::PerformanceConfiguration) -> Self {
|
392 + | self.performance_config = ::std::option::Option::Some(input);
|
393 + | self
|
394 + | }
|
395 + | /// <p>Model performance settings for the request.</p>
|
396 + | pub fn set_performance_config(mut self, input: ::std::option::Option<crate::types::PerformanceConfiguration>) -> Self {
|
397 + | self.performance_config = input;
|
398 + | self
|
399 + | }
|
400 + | /// <p>Model performance settings for the request.</p>
|
401 + | pub fn get_performance_config(&self) -> &::std::option::Option<crate::types::PerformanceConfiguration> {
|
402 + | &self.performance_config
|
403 + | }
|
264 404 | /// Consumes the builder and constructs a [`ConverseInput`](crate::operation::converse::ConverseInput).
|
265 405 | pub fn build(self) -> ::std::result::Result<crate::operation::converse::ConverseInput, ::aws_smithy_types::error::operation::BuildError> {
|
266 406 | ::std::result::Result::Ok(crate::operation::converse::ConverseInput {
|
267 407 | model_id: self.model_id,
|
268 408 | messages: self.messages,
|
269 409 | system: self.system,
|
270 410 | inference_config: self.inference_config,
|
271 411 | tool_config: self.tool_config,
|
412 + | guardrail_config: self.guardrail_config,
|
272 413 | additional_model_request_fields: self.additional_model_request_fields,
|
414 + | prompt_variables: self.prompt_variables,
|
273 415 | additional_model_response_field_paths: self.additional_model_response_field_paths,
|
416 + | request_metadata: self.request_metadata,
|
417 + | performance_config: self.performance_config,
|
274 418 | })
|
275 419 | }
|
276 420 | }
|
421 + | impl ::std::fmt::Debug for ConverseInputBuilder {
|
422 + | fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
|
423 + | let mut formatter = f.debug_struct("ConverseInputBuilder");
|
424 + | formatter.field("model_id", &self.model_id);
|
425 + | formatter.field("messages", &self.messages);
|
426 + | formatter.field("system", &self.system);
|
427 + | formatter.field("inference_config", &self.inference_config);
|
428 + | formatter.field("tool_config", &self.tool_config);
|
429 + | formatter.field("guardrail_config", &self.guardrail_config);
|
430 + | formatter.field("additional_model_request_fields", &self.additional_model_request_fields);
|
431 + | formatter.field("prompt_variables", &"*** Sensitive Data Redacted ***");
|
432 + | formatter.field("additional_model_response_field_paths", &self.additional_model_response_field_paths);
|
433 + | formatter.field("request_metadata", &"*** Sensitive Data Redacted ***");
|
434 + | formatter.field("performance_config", &self.performance_config);
|
435 + | formatter.finish()
|
436 + | }
|
437 + | }
|