1 1 | // Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
|
2 2 | #[allow(missing_docs)] // documentation missing in model
|
3 3 | #[non_exhaustive]
|
4 - | #[derive(::std::clone::Clone, ::std::cmp::PartialEq)]
|
4 + | #[derive(::std::clone::Clone, ::std::cmp::PartialEq, ::std::fmt::Debug)]
|
5 5 | pub struct ConverseStreamInput {
|
6 - | /// <p>Specifies the model or throughput with which to run inference, or the prompt resource to use in inference. The value depends on the resource that you use:</p>
|
6 + | /// <p>The ID for the model.</p>
|
7 + | /// <p>The <code>modelId</code> to provide depends on the type of model that you use:</p>
|
7 8 | /// <ul>
|
8 9 | /// <li>
|
9 10 | /// <p>If you use a base model, specify the model ID or its ARN. For a list of model IDs for base models, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html#model-ids-arns">Amazon Bedrock base model IDs (on-demand throughput)</a> in the Amazon Bedrock User Guide.</p></li>
|
10 11 | /// <li>
|
11 - | /// <p>If you use an inference profile, specify the inference profile ID or its ARN. For a list of inference profile IDs, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference-support.html">Supported Regions and models for cross-region inference</a> in the Amazon Bedrock User Guide.</p></li>
|
12 - | /// <li>
|
13 12 | /// <p>If you use a provisioned model, specify the ARN of the Provisioned Throughput. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/prov-thru-use.html">Run inference using a Provisioned Throughput</a> in the Amazon Bedrock User Guide.</p></li>
|
14 13 | /// <li>
|
15 14 | /// <p>If you use a custom model, first purchase Provisioned Throughput for it. Then specify the ARN of the resulting provisioned model. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-use.html">Use a custom model in Amazon Bedrock</a> in the Amazon Bedrock User Guide.</p></li>
|
16 - | /// <li>
|
17 - | /// <p>To include a prompt that was defined in <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/prompt-management.html">Prompt management</a>, specify the ARN of the prompt version to use.</p></li>
|
18 15 | /// </ul>
|
19 - | /// <p>The Converse API doesn't support <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-import-model.html">imported models</a>.</p>
|
20 16 | pub model_id: ::std::option::Option<::std::string::String>,
|
21 17 | /// <p>The messages that you want to send to the model.</p>
|
22 18 | pub messages: ::std::option::Option<::std::vec::Vec<crate::types::Message>>,
|
23 - | /// <p>A prompt that provides instructions or context to the model about the task it should perform, or the persona it should adopt during the conversation.</p>
|
19 + | /// <p>A system prompt to send to the model.</p>
|
24 20 | pub system: ::std::option::Option<::std::vec::Vec<crate::types::SystemContentBlock>>,
|
25 - | /// <p>Inference parameters to pass to the model. <code>Converse</code> and <code>ConverseStream</code> support a base set of inference parameters. If you need to pass additional parameters that the model supports, use the <code>additionalModelRequestFields</code> request field.</p>
|
21 + | /// <p>Inference parameters to pass to the model. <code>ConverseStream</code> supports a base set of inference parameters. If you need to pass additional parameters that the model supports, use the <code>additionalModelRequestFields</code> request field.</p>
|
26 22 | pub inference_config: ::std::option::Option<crate::types::InferenceConfiguration>,
|
27 - | /// <p>Configuration information for the tools that the model can use when generating a response.</p>
|
28 - | /// <p>For information about models that support streaming tool use, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/conversation-inference.html#conversation-inference-supported-models-features">Supported models and model features</a>.</p>
|
23 + | /// <p>Configuration information for the tools that the model can use when generating a response.</p><note>
|
24 + | /// <p>This field is only supported by Anthropic Claude 3 models.</p>
|
25 + | /// </note>
|
29 26 | pub tool_config: ::std::option::Option<crate::types::ToolConfiguration>,
|
30 - | /// <p>Configuration information for a guardrail that you want to use in the request. If you include <code>guardContent</code> blocks in the <code>content</code> field in the <code>messages</code> field, the guardrail operates only on those messages. If you include no <code>guardContent</code> blocks, the guardrail operates on all messages in the request body and in any included prompt resource.</p>
|
31 - | pub guardrail_config: ::std::option::Option<crate::types::GuardrailStreamConfiguration>,
|
32 - | /// <p>Additional inference parameters that the model supports, beyond the base set of inference parameters that <code>Converse</code> and <code>ConverseStream</code> support in the <code>inferenceConfig</code> field. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters.html">Model parameters</a>.</p>
|
27 + | /// <p>Additional inference parameters that the model supports, beyond the base set of inference parameters that <code>ConverseStream</code> supports in the <code>inferenceConfig</code> field.</p>
|
33 28 | pub additional_model_request_fields: ::std::option::Option<::aws_smithy_types::Document>,
|
34 - | /// <p>Contains a map of variables in a prompt from Prompt management to objects containing the values to fill in for them when running model invocation. This field is ignored if you don't specify a prompt resource in the <code>modelId</code> field.</p>
|
35 - | pub prompt_variables: ::std::option::Option<::std::collections::HashMap<::std::string::String, crate::types::PromptVariableValues>>,
|
36 - | /// <p>Additional model parameters field paths to return in the response. <code>Converse</code> and <code>ConverseStream</code> return the requested fields as a JSON Pointer object in the <code>additionalModelResponseFields</code> field. The following is example JSON for <code>additionalModelResponseFieldPaths</code>.</p>
|
29 + | /// <p>Additional model parameters field paths to return in the response. <code>ConverseStream</code> returns the requested fields as a JSON Pointer object in the <code>additionalModelResultFields</code> field. The following is example JSON for <code>additionalModelResponseFieldPaths</code>.</p>
|
37 30 | /// <p><code>\[ "/stop_sequence" \]</code></p>
|
38 31 | /// <p>For information about the JSON Pointer syntax, see the <a href="https://datatracker.ietf.org/doc/html/rfc6901">Internet Engineering Task Force (IETF)</a> documentation.</p>
|
39 - | /// <p><code>Converse</code> and <code>ConverseStream</code> reject an empty JSON Pointer or incorrectly structured JSON Pointer with a <code>400</code> error code. if the JSON Pointer is valid, but the requested field is not in the model response, it is ignored by <code>Converse</code>.</p>
|
32 + | /// <p><code>ConverseStream</code> rejects an empty JSON Pointer or incorrectly structured JSON Pointer with a <code>400</code> error code. If the JSON Pointer is valid, but the requested field is not in the model response, it is ignored by <code>ConverseStream</code>.</p>
|
40 33 | pub additional_model_response_field_paths: ::std::option::Option<::std::vec::Vec<::std::string::String>>,
|
41 - | /// <p>Key-value pairs that you can use to filter invocation logs.</p>
|
42 - | pub request_metadata: ::std::option::Option<::std::collections::HashMap<::std::string::String, ::std::string::String>>,
|
43 - | /// <p>Model performance settings for the request.</p>
|
44 - | pub performance_config: ::std::option::Option<crate::types::PerformanceConfiguration>,
|
45 34 | }
|
46 35 | impl ConverseStreamInput {
|
47 - | /// <p>Specifies the model or throughput with which to run inference, or the prompt resource to use in inference. The value depends on the resource that you use:</p>
|
36 + | /// <p>The ID for the model.</p>
|
37 + | /// <p>The <code>modelId</code> to provide depends on the type of model that you use:</p>
|
48 38 | /// <ul>
|
49 39 | /// <li>
|
50 40 | /// <p>If you use a base model, specify the model ID or its ARN. For a list of model IDs for base models, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html#model-ids-arns">Amazon Bedrock base model IDs (on-demand throughput)</a> in the Amazon Bedrock User Guide.</p></li>
|
51 41 | /// <li>
|
52 - | /// <p>If you use an inference profile, specify the inference profile ID or its ARN. For a list of inference profile IDs, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference-support.html">Supported Regions and models for cross-region inference</a> in the Amazon Bedrock User Guide.</p></li>
|
53 - | /// <li>
|
54 42 | /// <p>If you use a provisioned model, specify the ARN of the Provisioned Throughput. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/prov-thru-use.html">Run inference using a Provisioned Throughput</a> in the Amazon Bedrock User Guide.</p></li>
|
55 43 | /// <li>
|
56 44 | /// <p>If you use a custom model, first purchase Provisioned Throughput for it. Then specify the ARN of the resulting provisioned model. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-use.html">Use a custom model in Amazon Bedrock</a> in the Amazon Bedrock User Guide.</p></li>
|
57 - | /// <li>
|
58 - | /// <p>To include a prompt that was defined in <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/prompt-management.html">Prompt management</a>, specify the ARN of the prompt version to use.</p></li>
|
59 45 | /// </ul>
|
60 - | /// <p>The Converse API doesn't support <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-import-model.html">imported models</a>.</p>
|
61 46 | pub fn model_id(&self) -> ::std::option::Option<&str> {
|
62 47 | self.model_id.as_deref()
|
63 48 | }
|
64 49 | /// <p>The messages that you want to send to the model.</p>
|
65 50 | ///
|
66 51 | /// If no value was sent for this field, a default will be set. If you want to determine if no value was sent, use `.messages.is_none()`.
|
67 52 | pub fn messages(&self) -> &[crate::types::Message] {
|
68 53 | self.messages.as_deref().unwrap_or_default()
|
69 54 | }
|
70 - | /// <p>A prompt that provides instructions or context to the model about the task it should perform, or the persona it should adopt during the conversation.</p>
|
55 + | /// <p>A system prompt to send to the model.</p>
|
71 56 | ///
|
72 57 | /// If no value was sent for this field, a default will be set. If you want to determine if no value was sent, use `.system.is_none()`.
|
73 58 | pub fn system(&self) -> &[crate::types::SystemContentBlock] {
|
74 59 | self.system.as_deref().unwrap_or_default()
|
75 60 | }
|
76 - | /// <p>Inference parameters to pass to the model. <code>Converse</code> and <code>ConverseStream</code> support a base set of inference parameters. If you need to pass additional parameters that the model supports, use the <code>additionalModelRequestFields</code> request field.</p>
|
61 + | /// <p>Inference parameters to pass to the model. <code>ConverseStream</code> supports a base set of inference parameters. If you need to pass additional parameters that the model supports, use the <code>additionalModelRequestFields</code> request field.</p>
|
77 62 | pub fn inference_config(&self) -> ::std::option::Option<&crate::types::InferenceConfiguration> {
|
78 63 | self.inference_config.as_ref()
|
79 64 | }
|
80 - | /// <p>Configuration information for the tools that the model can use when generating a response.</p>
|
81 - | /// <p>For information about models that support streaming tool use, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/conversation-inference.html#conversation-inference-supported-models-features">Supported models and model features</a>.</p>
|
65 + | /// <p>Configuration information for the tools that the model can use when generating a response.</p><note>
|
66 + | /// <p>This field is only supported by Anthropic Claude 3 models.</p>
|
67 + | /// </note>
|
82 68 | pub fn tool_config(&self) -> ::std::option::Option<&crate::types::ToolConfiguration> {
|
83 69 | self.tool_config.as_ref()
|
84 70 | }
|
85 - | /// <p>Configuration information for a guardrail that you want to use in the request. If you include <code>guardContent</code> blocks in the <code>content</code> field in the <code>messages</code> field, the guardrail operates only on those messages. If you include no <code>guardContent</code> blocks, the guardrail operates on all messages in the request body and in any included prompt resource.</p>
|
86 - | pub fn guardrail_config(&self) -> ::std::option::Option<&crate::types::GuardrailStreamConfiguration> {
|
87 - | self.guardrail_config.as_ref()
|
88 - | }
|
89 - | /// <p>Additional inference parameters that the model supports, beyond the base set of inference parameters that <code>Converse</code> and <code>ConverseStream</code> support in the <code>inferenceConfig</code> field. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters.html">Model parameters</a>.</p>
|
71 + | /// <p>Additional inference parameters that the model supports, beyond the base set of inference parameters that <code>ConverseStream</code> supports in the <code>inferenceConfig</code> field.</p>
|
90 72 | pub fn additional_model_request_fields(&self) -> ::std::option::Option<&::aws_smithy_types::Document> {
|
91 73 | self.additional_model_request_fields.as_ref()
|
92 74 | }
|
93 - | /// <p>Contains a map of variables in a prompt from Prompt management to objects containing the values to fill in for them when running model invocation. This field is ignored if you don't specify a prompt resource in the <code>modelId</code> field.</p>
|
94 - | pub fn prompt_variables(&self) -> ::std::option::Option<&::std::collections::HashMap<::std::string::String, crate::types::PromptVariableValues>> {
|
95 - | self.prompt_variables.as_ref()
|
96 - | }
|
97 - | /// <p>Additional model parameters field paths to return in the response. <code>Converse</code> and <code>ConverseStream</code> return the requested fields as a JSON Pointer object in the <code>additionalModelResponseFields</code> field. The following is example JSON for <code>additionalModelResponseFieldPaths</code>.</p>
|
75 + | /// <p>Additional model parameters field paths to return in the response. <code>ConverseStream</code> returns the requested fields as a JSON Pointer object in the <code>additionalModelResultFields</code> field. The following is example JSON for <code>additionalModelResponseFieldPaths</code>.</p>
|
98 76 | /// <p><code>\[ "/stop_sequence" \]</code></p>
|
99 77 | /// <p>For information about the JSON Pointer syntax, see the <a href="https://datatracker.ietf.org/doc/html/rfc6901">Internet Engineering Task Force (IETF)</a> documentation.</p>
|
100 - | /// <p><code>Converse</code> and <code>ConverseStream</code> reject an empty JSON Pointer or incorrectly structured JSON Pointer with a <code>400</code> error code. if the JSON Pointer is valid, but the requested field is not in the model response, it is ignored by <code>Converse</code>.</p>
|
78 + | /// <p><code>ConverseStream</code> rejects an empty JSON Pointer or incorrectly structured JSON Pointer with a <code>400</code> error code. If the JSON Pointer is valid, but the requested field is not in the model response, it is ignored by <code>ConverseStream</code>.</p>
|
101 79 | ///
|
102 80 | /// If no value was sent for this field, a default will be set. If you want to determine if no value was sent, use `.additional_model_response_field_paths.is_none()`.
|
103 81 | pub fn additional_model_response_field_paths(&self) -> &[::std::string::String] {
|
104 82 | self.additional_model_response_field_paths.as_deref().unwrap_or_default()
|
105 83 | }
|
106 - | /// <p>Key-value pairs that you can use to filter invocation logs.</p>
|
107 - | pub fn request_metadata(&self) -> ::std::option::Option<&::std::collections::HashMap<::std::string::String, ::std::string::String>> {
|
108 - | self.request_metadata.as_ref()
|
109 - | }
|
110 - | /// <p>Model performance settings for the request.</p>
|
111 - | pub fn performance_config(&self) -> ::std::option::Option<&crate::types::PerformanceConfiguration> {
|
112 - | self.performance_config.as_ref()
|
113 - | }
|
114 - | }
|
115 - | impl ::std::fmt::Debug for ConverseStreamInput {
|
116 - | fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
|
117 - | let mut formatter = f.debug_struct("ConverseStreamInput");
|
118 - | formatter.field("model_id", &self.model_id);
|
119 - | formatter.field("messages", &self.messages);
|
120 - | formatter.field("system", &self.system);
|
121 - | formatter.field("inference_config", &self.inference_config);
|
122 - | formatter.field("tool_config", &self.tool_config);
|
123 - | formatter.field("guardrail_config", &self.guardrail_config);
|
124 - | formatter.field("additional_model_request_fields", &self.additional_model_request_fields);
|
125 - | formatter.field("prompt_variables", &"*** Sensitive Data Redacted ***");
|
126 - | formatter.field("additional_model_response_field_paths", &self.additional_model_response_field_paths);
|
127 - | formatter.field("request_metadata", &"*** Sensitive Data Redacted ***");
|
128 - | formatter.field("performance_config", &self.performance_config);
|
129 - | formatter.finish()
|
130 - | }
|
131 84 | }
|
132 85 | impl ConverseStreamInput {
|
133 86 | /// Creates a new builder-style object to manufacture [`ConverseStreamInput`](crate::operation::converse_stream::ConverseStreamInput).
|
134 87 | pub fn builder() -> crate::operation::converse_stream::builders::ConverseStreamInputBuilder {
|
135 88 | crate::operation::converse_stream::builders::ConverseStreamInputBuilder::default()
|
136 89 | }
|
137 90 | }
|
138 91 |
|
139 92 | /// A builder for [`ConverseStreamInput`](crate::operation::converse_stream::ConverseStreamInput).
|
140 - | #[derive(::std::clone::Clone, ::std::cmp::PartialEq, ::std::default::Default)]
|
93 + | #[derive(::std::clone::Clone, ::std::cmp::PartialEq, ::std::default::Default, ::std::fmt::Debug)]
|
141 94 | #[non_exhaustive]
|
142 95 | pub struct ConverseStreamInputBuilder {
|
143 96 | pub(crate) model_id: ::std::option::Option<::std::string::String>,
|
144 97 | pub(crate) messages: ::std::option::Option<::std::vec::Vec<crate::types::Message>>,
|
145 98 | pub(crate) system: ::std::option::Option<::std::vec::Vec<crate::types::SystemContentBlock>>,
|
146 99 | pub(crate) inference_config: ::std::option::Option<crate::types::InferenceConfiguration>,
|
147 100 | pub(crate) tool_config: ::std::option::Option<crate::types::ToolConfiguration>,
|
148 - | pub(crate) guardrail_config: ::std::option::Option<crate::types::GuardrailStreamConfiguration>,
|
149 101 | pub(crate) additional_model_request_fields: ::std::option::Option<::aws_smithy_types::Document>,
|
150 - | pub(crate) prompt_variables: ::std::option::Option<::std::collections::HashMap<::std::string::String, crate::types::PromptVariableValues>>,
|
151 102 | pub(crate) additional_model_response_field_paths: ::std::option::Option<::std::vec::Vec<::std::string::String>>,
|
152 - | pub(crate) request_metadata: ::std::option::Option<::std::collections::HashMap<::std::string::String, ::std::string::String>>,
|
153 - | pub(crate) performance_config: ::std::option::Option<crate::types::PerformanceConfiguration>,
|
154 103 | }
|
155 104 | impl ConverseStreamInputBuilder {
|
156 - | /// <p>Specifies the model or throughput with which to run inference, or the prompt resource to use in inference. The value depends on the resource that you use:</p>
|
105 + | /// <p>The ID for the model.</p>
|
106 + | /// <p>The <code>modelId</code> to provide depends on the type of model that you use:</p>
|
157 107 | /// <ul>
|
158 108 | /// <li>
|
159 109 | /// <p>If you use a base model, specify the model ID or its ARN. For a list of model IDs for base models, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html#model-ids-arns">Amazon Bedrock base model IDs (on-demand throughput)</a> in the Amazon Bedrock User Guide.</p></li>
|
160 110 | /// <li>
|
161 - | /// <p>If you use an inference profile, specify the inference profile ID or its ARN. For a list of inference profile IDs, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference-support.html">Supported Regions and models for cross-region inference</a> in the Amazon Bedrock User Guide.</p></li>
|
162 - | /// <li>
|
163 111 | /// <p>If you use a provisioned model, specify the ARN of the Provisioned Throughput. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/prov-thru-use.html">Run inference using a Provisioned Throughput</a> in the Amazon Bedrock User Guide.</p></li>
|
164 112 | /// <li>
|
165 113 | /// <p>If you use a custom model, first purchase Provisioned Throughput for it. Then specify the ARN of the resulting provisioned model. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-use.html">Use a custom model in Amazon Bedrock</a> in the Amazon Bedrock User Guide.</p></li>
|
166 - | /// <li>
|
167 - | /// <p>To include a prompt that was defined in <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/prompt-management.html">Prompt management</a>, specify the ARN of the prompt version to use.</p></li>
|
168 114 | /// </ul>
|
169 - | /// <p>The Converse API doesn't support <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-import-model.html">imported models</a>.</p>
|
170 115 | /// This field is required.
|
171 116 | pub fn model_id(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
|
172 117 | self.model_id = ::std::option::Option::Some(input.into());
|
173 118 | self
|
174 119 | }
|
175 - | /// <p>Specifies the model or throughput with which to run inference, or the prompt resource to use in inference. The value depends on the resource that you use:</p>
|
120 + | /// <p>The ID for the model.</p>
|
121 + | /// <p>The <code>modelId</code> to provide depends on the type of model that you use:</p>
|
176 122 | /// <ul>
|
177 123 | /// <li>
|
178 124 | /// <p>If you use a base model, specify the model ID or its ARN. For a list of model IDs for base models, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html#model-ids-arns">Amazon Bedrock base model IDs (on-demand throughput)</a> in the Amazon Bedrock User Guide.</p></li>
|
179 125 | /// <li>
|
180 - | /// <p>If you use an inference profile, specify the inference profile ID or its ARN. For a list of inference profile IDs, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference-support.html">Supported Regions and models for cross-region inference</a> in the Amazon Bedrock User Guide.</p></li>
|
181 - | /// <li>
|
182 126 | /// <p>If you use a provisioned model, specify the ARN of the Provisioned Throughput. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/prov-thru-use.html">Run inference using a Provisioned Throughput</a> in the Amazon Bedrock User Guide.</p></li>
|
183 127 | /// <li>
|
184 128 | /// <p>If you use a custom model, first purchase Provisioned Throughput for it. Then specify the ARN of the resulting provisioned model. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-use.html">Use a custom model in Amazon Bedrock</a> in the Amazon Bedrock User Guide.</p></li>
|
185 - | /// <li>
|
186 - | /// <p>To include a prompt that was defined in <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/prompt-management.html">Prompt management</a>, specify the ARN of the prompt version to use.</p></li>
|
187 129 | /// </ul>
|
188 - | /// <p>The Converse API doesn't support <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-import-model.html">imported models</a>.</p>
|
189 130 | pub fn set_model_id(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
|
190 131 | self.model_id = input;
|
191 132 | self
|
192 133 | }
|
193 - | /// <p>Specifies the model or throughput with which to run inference, or the prompt resource to use in inference. The value depends on the resource that you use:</p>
|
134 + | /// <p>The ID for the model.</p>
|
135 + | /// <p>The <code>modelId</code> to provide depends on the type of model that you use:</p>
|
194 136 | /// <ul>
|
195 137 | /// <li>
|
196 138 | /// <p>If you use a base model, specify the model ID or its ARN. For a list of model IDs for base models, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html#model-ids-arns">Amazon Bedrock base model IDs (on-demand throughput)</a> in the Amazon Bedrock User Guide.</p></li>
|
197 139 | /// <li>
|
198 - | /// <p>If you use an inference profile, specify the inference profile ID or its ARN. For a list of inference profile IDs, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference-support.html">Supported Regions and models for cross-region inference</a> in the Amazon Bedrock User Guide.</p></li>
|
199 - | /// <li>
|
200 140 | /// <p>If you use a provisioned model, specify the ARN of the Provisioned Throughput. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/prov-thru-use.html">Run inference using a Provisioned Throughput</a> in the Amazon Bedrock User Guide.</p></li>
|
201 141 | /// <li>
|
202 142 | /// <p>If you use a custom model, first purchase Provisioned Throughput for it. Then specify the ARN of the resulting provisioned model. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-use.html">Use a custom model in Amazon Bedrock</a> in the Amazon Bedrock User Guide.</p></li>
|
203 - | /// <li>
|
204 - | /// <p>To include a prompt that was defined in <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/prompt-management.html">Prompt management</a>, specify the ARN of the prompt version to use.</p></li>
|
205 143 | /// </ul>
|
206 - | /// <p>The Converse API doesn't support <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-import-model.html">imported models</a>.</p>
|
207 144 | pub fn get_model_id(&self) -> &::std::option::Option<::std::string::String> {
|
208 145 | &self.model_id
|
209 146 | }
|
210 147 | /// Appends an item to `messages`.
|
211 148 | ///
|
212 149 | /// To override the contents of this collection use [`set_messages`](Self::set_messages).
|
213 150 | ///
|
214 151 | /// <p>The messages that you want to send to the model.</p>
|
215 152 | pub fn messages(mut self, input: crate::types::Message) -> Self {
|
216 153 | let mut v = self.messages.unwrap_or_default();
|
217 154 | v.push(input);
|
218 155 | self.messages = ::std::option::Option::Some(v);
|
219 156 | self
|
220 157 | }
|
221 158 | /// <p>The messages that you want to send to the model.</p>
|
222 159 | pub fn set_messages(mut self, input: ::std::option::Option<::std::vec::Vec<crate::types::Message>>) -> Self {
|
223 160 | self.messages = input;
|
224 161 | self
|
225 162 | }
|
226 163 | /// <p>The messages that you want to send to the model.</p>
|
227 164 | pub fn get_messages(&self) -> &::std::option::Option<::std::vec::Vec<crate::types::Message>> {
|
228 165 | &self.messages
|
229 166 | }
|
230 167 | /// Appends an item to `system`.
|
231 168 | ///
|
232 169 | /// To override the contents of this collection use [`set_system`](Self::set_system).
|
233 170 | ///
|
234 - | /// <p>A prompt that provides instructions or context to the model about the task it should perform, or the persona it should adopt during the conversation.</p>
|
171 + | /// <p>A system prompt to send to the model.</p>
|
235 172 | pub fn system(mut self, input: crate::types::SystemContentBlock) -> Self {
|
236 173 | let mut v = self.system.unwrap_or_default();
|
237 174 | v.push(input);
|
238 175 | self.system = ::std::option::Option::Some(v);
|
239 176 | self
|
240 177 | }
|
241 - | /// <p>A prompt that provides instructions or context to the model about the task it should perform, or the persona it should adopt during the conversation.</p>
|
178 + | /// <p>A system prompt to send to the model.</p>
|
242 179 | pub fn set_system(mut self, input: ::std::option::Option<::std::vec::Vec<crate::types::SystemContentBlock>>) -> Self {
|
243 180 | self.system = input;
|
244 181 | self
|
245 182 | }
|
246 - | /// <p>A prompt that provides instructions or context to the model about the task it should perform, or the persona it should adopt during the conversation.</p>
|
183 + | /// <p>A system prompt to send to the model.</p>
|
247 184 | pub fn get_system(&self) -> &::std::option::Option<::std::vec::Vec<crate::types::SystemContentBlock>> {
|
248 185 | &self.system
|
249 186 | }
|
250 - | /// <p>Inference parameters to pass to the model. <code>Converse</code> and <code>ConverseStream</code> support a base set of inference parameters. If you need to pass additional parameters that the model supports, use the <code>additionalModelRequestFields</code> request field.</p>
|
187 + | /// <p>Inference parameters to pass to the model. <code>ConverseStream</code> supports a base set of inference parameters. If you need to pass additional parameters that the model supports, use the <code>additionalModelRequestFields</code> request field.</p>
|
251 188 | pub fn inference_config(mut self, input: crate::types::InferenceConfiguration) -> Self {
|
252 189 | self.inference_config = ::std::option::Option::Some(input);
|
253 190 | self
|
254 191 | }
|
255 - | /// <p>Inference parameters to pass to the model. <code>Converse</code> and <code>ConverseStream</code> support a base set of inference parameters. If you need to pass additional parameters that the model supports, use the <code>additionalModelRequestFields</code> request field.</p>
|
192 + | /// <p>Inference parameters to pass to the model. <code>ConverseStream</code> supports a base set of inference parameters. If you need to pass additional parameters that the model supports, use the <code>additionalModelRequestFields</code> request field.</p>
|
256 193 | pub fn set_inference_config(mut self, input: ::std::option::Option<crate::types::InferenceConfiguration>) -> Self {
|
257 194 | self.inference_config = input;
|
258 195 | self
|
259 196 | }
|
260 - | /// <p>Inference parameters to pass to the model. <code>Converse</code> and <code>ConverseStream</code> support a base set of inference parameters. If you need to pass additional parameters that the model supports, use the <code>additionalModelRequestFields</code> request field.</p>
|
197 + | /// <p>Inference parameters to pass to the model. <code>ConverseStream</code> supports a base set of inference parameters. If you need to pass additional parameters that the model supports, use the <code>additionalModelRequestFields</code> request field.</p>
|
261 198 | pub fn get_inference_config(&self) -> &::std::option::Option<crate::types::InferenceConfiguration> {
|
262 199 | &self.inference_config
|
263 200 | }
|
264 - | /// <p>Configuration information for the tools that the model can use when generating a response.</p>
|
265 - | /// <p>For information about models that support streaming tool use, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/conversation-inference.html#conversation-inference-supported-models-features">Supported models and model features</a>.</p>
|
201 + | /// <p>Configuration information for the tools that the model can use when generating a response.</p><note>
|
202 + | /// <p>This field is only supported by Anthropic Claude 3 models.</p>
|
203 + | /// </note>
|
266 204 | pub fn tool_config(mut self, input: crate::types::ToolConfiguration) -> Self {
|
267 205 | self.tool_config = ::std::option::Option::Some(input);
|
268 206 | self
|
269 207 | }
|
270 - | /// <p>Configuration information for the tools that the model can use when generating a response.</p>
|
271 - | /// <p>For information about models that support streaming tool use, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/conversation-inference.html#conversation-inference-supported-models-features">Supported models and model features</a>.</p>
|
208 + | /// <p>Configuration information for the tools that the model can use when generating a response.</p><note>
|
209 + | /// <p>This field is only supported by Anthropic Claude 3 models.</p>
|
210 + | /// </note>
|
272 211 | pub fn set_tool_config(mut self, input: ::std::option::Option<crate::types::ToolConfiguration>) -> Self {
|
273 212 | self.tool_config = input;
|
274 213 | self
|
275 214 | }
|
276 - | /// <p>Configuration information for the tools that the model can use when generating a response.</p>
|
277 - | /// <p>For information about models that support streaming tool use, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/conversation-inference.html#conversation-inference-supported-models-features">Supported models and model features</a>.</p>
|
215 + | /// <p>Configuration information for the tools that the model can use when generating a response.</p><note>
|
216 + | /// <p>This field is only supported by Anthropic Claude 3 models.</p>
|
217 + | /// </note>
|
278 218 | pub fn get_tool_config(&self) -> &::std::option::Option<crate::types::ToolConfiguration> {
|
279 219 | &self.tool_config
|
280 220 | }
|
281 - | /// <p>Configuration information for a guardrail that you want to use in the request. If you include <code>guardContent</code> blocks in the <code>content</code> field in the <code>messages</code> field, the guardrail operates only on those messages. If you include no <code>guardContent</code> blocks, the guardrail operates on all messages in the request body and in any included prompt resource.</p>
|
282 - | pub fn guardrail_config(mut self, input: crate::types::GuardrailStreamConfiguration) -> Self {
|
283 - | self.guardrail_config = ::std::option::Option::Some(input);
|
284 - | self
|
285 - | }
|
286 - | /// <p>Configuration information for a guardrail that you want to use in the request. If you include <code>guardContent</code> blocks in the <code>content</code> field in the <code>messages</code> field, the guardrail operates only on those messages. If you include no <code>guardContent</code> blocks, the guardrail operates on all messages in the request body and in any included prompt resource.</p>
|
287 - | pub fn set_guardrail_config(mut self, input: ::std::option::Option<crate::types::GuardrailStreamConfiguration>) -> Self {
|
288 - | self.guardrail_config = input;
|
289 - | self
|
290 - | }
|
291 - | /// <p>Configuration information for a guardrail that you want to use in the request. If you include <code>guardContent</code> blocks in the <code>content</code> field in the <code>messages</code> field, the guardrail operates only on those messages. If you include no <code>guardContent</code> blocks, the guardrail operates on all messages in the request body and in any included prompt resource.</p>
|
292 - | pub fn get_guardrail_config(&self) -> &::std::option::Option<crate::types::GuardrailStreamConfiguration> {
|
293 - | &self.guardrail_config
|
294 - | }
|
295 - | /// <p>Additional inference parameters that the model supports, beyond the base set of inference parameters that <code>Converse</code> and <code>ConverseStream</code> support in the <code>inferenceConfig</code> field. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters.html">Model parameters</a>.</p>
|
221 + | /// <p>Additional inference parameters that the model supports, beyond the base set of inference parameters that <code>ConverseStream</code> supports in the <code>inferenceConfig</code> field.</p>
|
296 222 | pub fn additional_model_request_fields(mut self, input: ::aws_smithy_types::Document) -> Self {
|
297 223 | self.additional_model_request_fields = ::std::option::Option::Some(input);
|
298 224 | self
|
299 225 | }
|
300 - | /// <p>Additional inference parameters that the model supports, beyond the base set of inference parameters that <code>Converse</code> and <code>ConverseStream</code> support in the <code>inferenceConfig</code> field. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters.html">Model parameters</a>.</p>
|
226 + | /// <p>Additional inference parameters that the model supports, beyond the base set of inference parameters that <code>ConverseStream</code> supports in the <code>inferenceConfig</code> field.</p>
|
301 227 | pub fn set_additional_model_request_fields(mut self, input: ::std::option::Option<::aws_smithy_types::Document>) -> Self {
|
302 228 | self.additional_model_request_fields = input;
|
303 229 | self
|
304 230 | }
|
305 - | /// <p>Additional inference parameters that the model supports, beyond the base set of inference parameters that <code>Converse</code> and <code>ConverseStream</code> support in the <code>inferenceConfig</code> field. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters.html">Model parameters</a>.</p>
|
231 + | /// <p>Additional inference parameters that the model supports, beyond the base set of inference parameters that <code>ConverseStream</code> supports in the <code>inferenceConfig</code> field.</p>
|
306 232 | pub fn get_additional_model_request_fields(&self) -> &::std::option::Option<::aws_smithy_types::Document> {
|
307 233 | &self.additional_model_request_fields
|
308 234 | }
|
309 - | /// Adds a key-value pair to `prompt_variables`.
|
310 - | ///
|
311 - | /// To override the contents of this collection use [`set_prompt_variables`](Self::set_prompt_variables).
|
312 - | ///
|
313 - | /// <p>Contains a map of variables in a prompt from Prompt management to objects containing the values to fill in for them when running model invocation. This field is ignored if you don't specify a prompt resource in the <code>modelId</code> field.</p>
|
314 - | pub fn prompt_variables(mut self, k: impl ::std::convert::Into<::std::string::String>, v: crate::types::PromptVariableValues) -> Self {
|
315 - | let mut hash_map = self.prompt_variables.unwrap_or_default();
|
316 - | hash_map.insert(k.into(), v);
|
317 - | self.prompt_variables = ::std::option::Option::Some(hash_map);
|
318 - | self
|
319 - | }
|
320 - | /// <p>Contains a map of variables in a prompt from Prompt management to objects containing the values to fill in for them when running model invocation. This field is ignored if you don't specify a prompt resource in the <code>modelId</code> field.</p>
|
321 - | pub fn set_prompt_variables(
|
322 - | mut self,
|
323 - | input: ::std::option::Option<::std::collections::HashMap<::std::string::String, crate::types::PromptVariableValues>>,
|
324 - | ) -> Self {
|
325 - | self.prompt_variables = input;
|
326 - | self
|
327 - | }
|
328 - | /// <p>Contains a map of variables in a prompt from Prompt management to objects containing the values to fill in for them when running model invocation. This field is ignored if you don't specify a prompt resource in the <code>modelId</code> field.</p>
|
329 - | pub fn get_prompt_variables(
|
330 - | &self,
|
331 - | ) -> &::std::option::Option<::std::collections::HashMap<::std::string::String, crate::types::PromptVariableValues>> {
|
332 - | &self.prompt_variables
|
333 - | }
|
334 235 | /// Appends an item to `additional_model_response_field_paths`.
|
335 236 | ///
|
336 237 | /// To override the contents of this collection use [`set_additional_model_response_field_paths`](Self::set_additional_model_response_field_paths).
|
337 238 | ///
|
338 - | /// <p>Additional model parameters field paths to return in the response. <code>Converse</code> and <code>ConverseStream</code> return the requested fields as a JSON Pointer object in the <code>additionalModelResponseFields</code> field. The following is example JSON for <code>additionalModelResponseFieldPaths</code>.</p>
|
239 + | /// <p>Additional model parameters field paths to return in the response. <code>ConverseStream</code> returns the requested fields as a JSON Pointer object in the <code>additionalModelResultFields</code> field. The following is example JSON for <code>additionalModelResponseFieldPaths</code>.</p>
|
339 240 | /// <p><code>\[ "/stop_sequence" \]</code></p>
|
340 241 | /// <p>For information about the JSON Pointer syntax, see the <a href="https://datatracker.ietf.org/doc/html/rfc6901">Internet Engineering Task Force (IETF)</a> documentation.</p>
|
341 - | /// <p><code>Converse</code> and <code>ConverseStream</code> reject an empty JSON Pointer or incorrectly structured JSON Pointer with a <code>400</code> error code. if the JSON Pointer is valid, but the requested field is not in the model response, it is ignored by <code>Converse</code>.</p>
|
242 + | /// <p><code>ConverseStream</code> rejects an empty JSON Pointer or incorrectly structured JSON Pointer with a <code>400</code> error code. if the JSON Pointer is valid, but the requested field is not in the model response, it is ignored by <code>ConverseStream</code>.</p>
|
342 243 | pub fn additional_model_response_field_paths(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
|
343 244 | let mut v = self.additional_model_response_field_paths.unwrap_or_default();
|
344 245 | v.push(input.into());
|
345 246 | self.additional_model_response_field_paths = ::std::option::Option::Some(v);
|
346 247 | self
|
347 248 | }
|
348 - | /// <p>Additional model parameters field paths to return in the response. <code>Converse</code> and <code>ConverseStream</code> return the requested fields as a JSON Pointer object in the <code>additionalModelResponseFields</code> field. The following is example JSON for <code>additionalModelResponseFieldPaths</code>.</p>
|
249 + | /// <p>Additional model parameters field paths to return in the response. <code>ConverseStream</code> returns the requested fields as a JSON Pointer object in the <code>additionalModelResultFields</code> field. The following is example JSON for <code>additionalModelResponseFieldPaths</code>.</p>
|
349 250 | /// <p><code>\[ "/stop_sequence" \]</code></p>
|
350 251 | /// <p>For information about the JSON Pointer syntax, see the <a href="https://datatracker.ietf.org/doc/html/rfc6901">Internet Engineering Task Force (IETF)</a> documentation.</p>
|
351 - | /// <p><code>Converse</code> and <code>ConverseStream</code> reject an empty JSON Pointer or incorrectly structured JSON Pointer with a <code>400</code> error code. if the JSON Pointer is valid, but the requested field is not in the model response, it is ignored by <code>Converse</code>.</p>
|
252 + | /// <p><code>ConverseStream</code> rejects an empty JSON Pointer or incorrectly structured JSON Pointer with a <code>400</code> error code. if the JSON Pointer is valid, but the requested field is not in the model response, it is ignored by <code>ConverseStream</code>.</p>
|
352 253 | pub fn set_additional_model_response_field_paths(mut self, input: ::std::option::Option<::std::vec::Vec<::std::string::String>>) -> Self {
|
353 254 | self.additional_model_response_field_paths = input;
|
354 255 | self
|
355 256 | }
|
356 - | /// <p>Additional model parameters field paths to return in the response. <code>Converse</code> and <code>ConverseStream</code> return the requested fields as a JSON Pointer object in the <code>additionalModelResponseFields</code> field. The following is example JSON for <code>additionalModelResponseFieldPaths</code>.</p>
|
257 + | /// <p>Additional model parameters field paths to return in the response. <code>ConverseStream</code> returns the requested fields as a JSON Pointer object in the <code>additionalModelResultFields</code> field. The following is example JSON for <code>additionalModelResponseFieldPaths</code>.</p>
|
357 258 | /// <p><code>\[ "/stop_sequence" \]</code></p>
|
358 259 | /// <p>For information about the JSON Pointer syntax, see the <a href="https://datatracker.ietf.org/doc/html/rfc6901">Internet Engineering Task Force (IETF)</a> documentation.</p>
|
359 - | /// <p><code>Converse</code> and <code>ConverseStream</code> reject an empty JSON Pointer or incorrectly structured JSON Pointer with a <code>400</code> error code. if the JSON Pointer is valid, but the requested field is not in the model response, it is ignored by <code>Converse</code>.</p>
|
260 + | /// <p><code>ConverseStream</code> rejects an empty JSON Pointer or incorrectly structured JSON Pointer with a <code>400</code> error code. if the JSON Pointer is valid, but the requested field is not in the model response, it is ignored by <code>ConverseStream</code>.</p>
|
360 261 | pub fn get_additional_model_response_field_paths(&self) -> &::std::option::Option<::std::vec::Vec<::std::string::String>> {
|
361 262 | &self.additional_model_response_field_paths
|
362 263 | }
|
363 - | /// Adds a key-value pair to `request_metadata`.
|
364 - | ///
|
365 - | /// To override the contents of this collection use [`set_request_metadata`](Self::set_request_metadata).
|
366 - | ///
|
367 - | /// <p>Key-value pairs that you can use to filter invocation logs.</p>
|
368 - | pub fn request_metadata(
|
369 - | mut self,
|
370 - | k: impl ::std::convert::Into<::std::string::String>,
|
371 - | v: impl ::std::convert::Into<::std::string::String>,
|
372 - | ) -> Self {
|
373 - | let mut hash_map = self.request_metadata.unwrap_or_default();
|
374 - | hash_map.insert(k.into(), v.into());
|
375 - | self.request_metadata = ::std::option::Option::Some(hash_map);
|
376 - | self
|
377 - | }
|
378 - | /// <p>Key-value pairs that you can use to filter invocation logs.</p>
|
379 - | pub fn set_request_metadata(
|
380 - | mut self,
|
381 - | input: ::std::option::Option<::std::collections::HashMap<::std::string::String, ::std::string::String>>,
|
382 - | ) -> Self {
|
383 - | self.request_metadata = input;
|
384 - | self
|
385 - | }
|
386 - | /// <p>Key-value pairs that you can use to filter invocation logs.</p>
|
387 - | pub fn get_request_metadata(&self) -> &::std::option::Option<::std::collections::HashMap<::std::string::String, ::std::string::String>> {
|
388 - | &self.request_metadata
|
389 - | }
|
390 - | /// <p>Model performance settings for the request.</p>
|
391 - | pub fn performance_config(mut self, input: crate::types::PerformanceConfiguration) -> Self {
|
392 - | self.performance_config = ::std::option::Option::Some(input);
|
393 - | self
|
394 - | }
|
395 - | /// <p>Model performance settings for the request.</p>
|
396 - | pub fn set_performance_config(mut self, input: ::std::option::Option<crate::types::PerformanceConfiguration>) -> Self {
|
397 - | self.performance_config = input;
|
398 - | self
|
399 - | }
|
400 - | /// <p>Model performance settings for the request.</p>
|
401 - | pub fn get_performance_config(&self) -> &::std::option::Option<crate::types::PerformanceConfiguration> {
|
402 - | &self.performance_config
|
403 - | }
|
404 264 | /// Consumes the builder and constructs a [`ConverseStreamInput`](crate::operation::converse_stream::ConverseStreamInput).
|
405 265 | pub fn build(
|
406 266 | self,
|
407 267 | ) -> ::std::result::Result<crate::operation::converse_stream::ConverseStreamInput, ::aws_smithy_types::error::operation::BuildError> {
|
408 268 | ::std::result::Result::Ok(crate::operation::converse_stream::ConverseStreamInput {
|
409 269 | model_id: self.model_id,
|
410 270 | messages: self.messages,
|
411 271 | system: self.system,
|
412 272 | inference_config: self.inference_config,
|
413 273 | tool_config: self.tool_config,
|
414 - | guardrail_config: self.guardrail_config,
|
415 274 | additional_model_request_fields: self.additional_model_request_fields,
|
416 - | prompt_variables: self.prompt_variables,
|
417 275 | additional_model_response_field_paths: self.additional_model_response_field_paths,
|
418 - | request_metadata: self.request_metadata,
|
419 - | performance_config: self.performance_config,
|
420 276 | })
|
421 277 | }
|
422 278 | }
|
423 - | impl ::std::fmt::Debug for ConverseStreamInputBuilder {
|
424 - | fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
|
425 - | let mut formatter = f.debug_struct("ConverseStreamInputBuilder");
|
426 - | formatter.field("model_id", &self.model_id);
|
427 - | formatter.field("messages", &self.messages);
|
428 - | formatter.field("system", &self.system);
|
429 - | formatter.field("inference_config", &self.inference_config);
|
430 - | formatter.field("tool_config", &self.tool_config);
|
431 - | formatter.field("guardrail_config", &self.guardrail_config);
|
432 - | formatter.field("additional_model_request_fields", &self.additional_model_request_fields);
|
433 - | formatter.field("prompt_variables", &"*** Sensitive Data Redacted ***");
|
434 - | formatter.field("additional_model_response_field_paths", &self.additional_model_response_field_paths);
|
435 - | formatter.field("request_metadata", &"*** Sensitive Data Redacted ***");
|
436 - | formatter.field("performance_config", &self.performance_config);
|
437 - | formatter.finish()
|
438 - | }
|
439 - | }
|