// aws_sdk_transcribestreaming/operation/start_medical_stream_transcription/builders.rs
// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
pub use crate::operation::start_medical_stream_transcription::_start_medical_stream_transcription_output::StartMedicalStreamTranscriptionOutputBuilder;

pub use crate::operation::start_medical_stream_transcription::_start_medical_stream_transcription_input::StartMedicalStreamTranscriptionInputBuilder;

impl crate::operation::start_medical_stream_transcription::builders::StartMedicalStreamTranscriptionInputBuilder {
    /// Sends a request with this input using the given client.
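    ///
    /// A minimal sketch of the pattern, assuming an already-configured `client`.
    /// The other required fields (media encoding, sample rate, audio stream) are
    /// omitted for brevity, so this exact request would be rejected by the service:
    ///
    /// ```no_run
    /// # async fn example(client: &aws_sdk_transcribestreaming::Client) -> Result<(), Box<dyn std::error::Error>> {
    /// use aws_sdk_transcribestreaming::operation::start_medical_stream_transcription::builders::StartMedicalStreamTranscriptionInputBuilder;
    /// use aws_sdk_transcribestreaming::types::LanguageCode;
    ///
    /// // Build the input separately, then send it through an existing client.
    /// let output = StartMedicalStreamTranscriptionInputBuilder::default()
    ///     .language_code(LanguageCode::EnUs)
    ///     .send_with(client)
    ///     .await?;
    /// # let _ = output;
    /// # Ok(()) }
    /// ```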
    pub async fn send_with(self, client: &crate::Client) -> ::std::result::Result<
        crate::operation::start_medical_stream_transcription::StartMedicalStreamTranscriptionOutput,
        ::aws_smithy_runtime_api::client::result::SdkError<
            crate::operation::start_medical_stream_transcription::StartMedicalStreamTranscriptionError,
            ::aws_smithy_runtime_api::client::orchestrator::HttpResponse
        >
    > {
        let mut fluent_builder = client.start_medical_stream_transcription();
        fluent_builder.inner = self;
        fluent_builder.send().await
    }
}
/// Fluent builder constructing a request to `StartMedicalStreamTranscription`.
///
/// <p>Starts a bidirectional HTTP/2 or WebSocket stream where audio is streamed to Amazon Transcribe Medical and the transcription results are streamed to your application.</p>
/// <p>The following parameters are required:</p>
/// <ul>
/// <li>
/// <p><code>language-code</code></p></li>
/// <li>
/// <p><code>media-encoding</code></p></li>
/// <li>
/// <p><code>sample-rate</code></p></li>
/// </ul>
/// <p>For more information on streaming with Amazon Transcribe Medical, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/streaming.html">Transcribing streaming audio</a>.</p>
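///
/// # Example
///
/// A minimal sketch of starting a session with the three required parameters and
/// draining the result stream; the `audio` sender is assumed to have been built
/// elsewhere (see the `audio_stream` method below):
///
/// ```no_run
/// # async fn example(
/// #     client: &aws_sdk_transcribestreaming::Client,
/// #     audio: aws_smithy_http::event_stream::EventStreamSender<
/// #         aws_sdk_transcribestreaming::types::AudioStream,
/// #         aws_sdk_transcribestreaming::types::error::AudioStreamError,
/// #     >,
/// # ) -> Result<(), Box<dyn std::error::Error>> {
/// use aws_sdk_transcribestreaming::types::{LanguageCode, MediaEncoding};
///
/// let mut output = client
///     .start_medical_stream_transcription()
///     .language_code(LanguageCode::EnUs)   // required; only en-US is supported
///     .media_encoding(MediaEncoding::Pcm)  // required
///     .media_sample_rate_hertz(16_000)     // required; 16,000-48,000 Hz
///     .audio_stream(audio)
///     .send()
///     .await?;
///
/// // Transcription results arrive as an event stream.
/// while let Some(event) = output.transcript_result_stream.recv().await? {
///     println!("{event:?}");
/// }
/// # Ok(()) }
/// ```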
#[derive(::std::fmt::Debug)]
pub struct StartMedicalStreamTranscriptionFluentBuilder {
    handle: ::std::sync::Arc<crate::client::Handle>,
    inner: crate::operation::start_medical_stream_transcription::builders::StartMedicalStreamTranscriptionInputBuilder,
    config_override: ::std::option::Option<crate::config::Builder>,
}
impl
    crate::client::customize::internal::CustomizableSend<
        crate::operation::start_medical_stream_transcription::StartMedicalStreamTranscriptionOutput,
        crate::operation::start_medical_stream_transcription::StartMedicalStreamTranscriptionError,
    > for StartMedicalStreamTranscriptionFluentBuilder
{
    fn send(
        self,
        config_override: crate::config::Builder,
    ) -> crate::client::customize::internal::BoxFuture<
        crate::client::customize::internal::SendResult<
            crate::operation::start_medical_stream_transcription::StartMedicalStreamTranscriptionOutput,
            crate::operation::start_medical_stream_transcription::StartMedicalStreamTranscriptionError,
        >,
    > {
        ::std::boxed::Box::pin(async move { self.config_override(config_override).send().await })
    }
}
impl StartMedicalStreamTranscriptionFluentBuilder {
    /// Creates a new `StartMedicalStreamTranscriptionFluentBuilder`.
    pub(crate) fn new(handle: ::std::sync::Arc<crate::client::Handle>) -> Self {
        Self {
            handle,
            inner: ::std::default::Default::default(),
            config_override: ::std::option::Option::None,
        }
    }
    /// Access the StartMedicalStreamTranscription input as a reference.
    pub fn as_input(&self) -> &crate::operation::start_medical_stream_transcription::builders::StartMedicalStreamTranscriptionInputBuilder {
        &self.inner
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
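    ///
    /// A short sketch of matching the returned error; service-specific variants are
    /// reachable via `SdkError::into_service_error` (non-service failures surface
    /// through the error's unhandled variant):
    ///
    /// ```no_run
    /// # async fn example(
    /// #     builder: aws_sdk_transcribestreaming::operation::start_medical_stream_transcription::builders::StartMedicalStreamTranscriptionFluentBuilder,
    /// # ) {
    /// match builder.send().await {
    ///     Ok(output) => println!("session: {:?}", output.session_id()),
    ///     Err(sdk_err) => {
    ///         // Narrow the SdkError to StartMedicalStreamTranscriptionError.
    ///         eprintln!("error: {}", sdk_err.into_service_error());
    ///     }
    /// }
    /// # }
    /// ```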
    pub async fn send(self) -> ::std::result::Result<crate::operation::start_medical_stream_transcription::StartMedicalStreamTranscriptionOutput, ::aws_smithy_runtime_api::client::result::SdkError<crate::operation::start_medical_stream_transcription::StartMedicalStreamTranscriptionError, ::aws_smithy_runtime_api::client::orchestrator::HttpResponse>> {
        let input = self.inner.build().map_err(::aws_smithy_runtime_api::client::result::SdkError::construction_failure)?;
        let runtime_plugins = crate::operation::start_medical_stream_transcription::StartMedicalStreamTranscription::operation_runtime_plugins(
            self.handle.runtime_plugins.clone(),
            &self.handle.conf,
            self.config_override,
        );
        let mut output =
            crate::operation::start_medical_stream_transcription::StartMedicalStreamTranscription::orchestrate(
                &runtime_plugins,
                input,
            )
            .await?;

        // Converts any error encountered beyond this point into an `SdkError` response error
        // with an `HttpResponse`. However, since we have already exited the `orchestrate`
        // function, the original `HttpResponse` is no longer available and cannot be restored.
        // This means that header information from the original response has been lost.
        //
        // Note that the response body would have been consumed by the deserializer
        // regardless, even if the initial message was hypothetically processed during
        // the orchestrator's deserialization phase but later resulted in an error.
        fn response_error(
            err: impl ::std::convert::Into<::aws_smithy_runtime_api::box_error::BoxError>,
        ) -> ::aws_smithy_runtime_api::client::result::SdkError<crate::operation::start_medical_stream_transcription::StartMedicalStreamTranscriptionError, ::aws_smithy_runtime_api::client::orchestrator::HttpResponse> {
            ::aws_smithy_runtime_api::client::result::SdkError::response_error(err, ::aws_smithy_runtime_api::client::orchestrator::HttpResponse::new(
                ::aws_smithy_runtime_api::http::StatusCode::try_from(200).expect("valid successful code"),
                ::aws_smithy_types::body::SdkBody::empty()))
        }

        // Wait for the initial response message. Its presence (or absence) only
        // confirms that the stream started successfully, so the message itself
        // can be discarded.
        let _message = output.transcript_result_stream.try_recv_initial_response().await.map_err(response_error)?;
        ::std::result::Result::Ok(output)
    }

    /// Consumes this builder, creating a customizable operation that can be modified before being sent.
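    ///
    /// A hedged sketch, assuming the request should carry an extra header before it
    /// is dispatched (the header name here is purely illustrative):
    ///
    /// ```no_run
    /// # async fn example(client: &aws_sdk_transcribestreaming::Client) -> Result<(), Box<dyn std::error::Error>> {
    /// let output = client
    ///     .start_medical_stream_transcription()
    ///     .customize()
    ///     .mutate_request(|req| {
    ///         req.headers_mut().insert("x-example-trace-id", "abc-123");
    ///     })
    ///     .send()
    ///     .await?;
    /// # let _ = output;
    /// # Ok(()) }
    /// ```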
    pub fn customize(
        self,
    ) -> crate::client::customize::CustomizableOperation<crate::operation::start_medical_stream_transcription::StartMedicalStreamTranscriptionOutput, crate::operation::start_medical_stream_transcription::StartMedicalStreamTranscriptionError, Self> {
        crate::client::customize::CustomizableOperation::new(self)
    }
    pub(crate) fn config_override(
        mut self,
        config_override: impl ::std::convert::Into<crate::config::Builder>,
    ) -> Self {
        self.set_config_override(::std::option::Option::Some(config_override.into()));
        self
    }

    pub(crate) fn set_config_override(
        &mut self,
        config_override: ::std::option::Option<crate::config::Builder>,
    ) -> &mut Self {
        self.config_override = config_override;
        self
    }
    /// <p>Specify the language code that represents the language spoken in your audio.</p><important>
    /// <p>Amazon Transcribe Medical only supports US English (<code>en-US</code>).</p>
    /// </important>
    pub fn language_code(mut self, input: crate::types::LanguageCode) -> Self {
        self.inner = self.inner.language_code(input);
        self
    }
    /// <p>Specify the language code that represents the language spoken in your audio.</p><important>
    /// <p>Amazon Transcribe Medical only supports US English (<code>en-US</code>).</p>
    /// </important>
    pub fn set_language_code(mut self, input: ::std::option::Option<crate::types::LanguageCode>) -> Self {
        self.inner = self.inner.set_language_code(input);
        self
    }
    /// <p>Specify the language code that represents the language spoken in your audio.</p><important>
    /// <p>Amazon Transcribe Medical only supports US English (<code>en-US</code>).</p>
    /// </important>
    pub fn get_language_code(&self) -> &::std::option::Option<crate::types::LanguageCode> {
        self.inner.get_language_code()
    }
    /// <p>The sample rate of the input audio (in hertz). Amazon Transcribe Medical supports a range from 16,000 Hz to 48,000 Hz. Note that the sample rate you specify must match that of your audio.</p>
    pub fn media_sample_rate_hertz(mut self, input: i32) -> Self {
        self.inner = self.inner.media_sample_rate_hertz(input);
        self
    }
    /// <p>The sample rate of the input audio (in hertz). Amazon Transcribe Medical supports a range from 16,000 Hz to 48,000 Hz. Note that the sample rate you specify must match that of your audio.</p>
    pub fn set_media_sample_rate_hertz(mut self, input: ::std::option::Option<i32>) -> Self {
        self.inner = self.inner.set_media_sample_rate_hertz(input);
        self
    }
    /// <p>The sample rate of the input audio (in hertz). Amazon Transcribe Medical supports a range from 16,000 Hz to 48,000 Hz. Note that the sample rate you specify must match that of your audio.</p>
    pub fn get_media_sample_rate_hertz(&self) -> &::std::option::Option<i32> {
        self.inner.get_media_sample_rate_hertz()
    }
    /// <p>Specify the encoding used for the input audio. Supported formats are:</p>
    /// <ul>
    /// <li>
    /// <p>FLAC</p></li>
    /// <li>
    /// <p>OPUS-encoded audio in an Ogg container</p></li>
    /// <li>
    /// <p>PCM (only signed 16-bit little-endian audio formats, which does not include WAV)</p></li>
    /// </ul>
    /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/how-input.html#how-input-audio">Media formats</a>.</p>
    pub fn media_encoding(mut self, input: crate::types::MediaEncoding) -> Self {
        self.inner = self.inner.media_encoding(input);
        self
    }
    /// <p>Specify the encoding used for the input audio. Supported formats are:</p>
    /// <ul>
    /// <li>
    /// <p>FLAC</p></li>
    /// <li>
    /// <p>OPUS-encoded audio in an Ogg container</p></li>
    /// <li>
    /// <p>PCM (only signed 16-bit little-endian audio formats, which does not include WAV)</p></li>
    /// </ul>
    /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/how-input.html#how-input-audio">Media formats</a>.</p>
    pub fn set_media_encoding(mut self, input: ::std::option::Option<crate::types::MediaEncoding>) -> Self {
        self.inner = self.inner.set_media_encoding(input);
        self
    }
    /// <p>Specify the encoding used for the input audio. Supported formats are:</p>
    /// <ul>
    /// <li>
    /// <p>FLAC</p></li>
    /// <li>
    /// <p>OPUS-encoded audio in an Ogg container</p></li>
    /// <li>
    /// <p>PCM (only signed 16-bit little-endian audio formats, which does not include WAV)</p></li>
    /// </ul>
    /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/how-input.html#how-input-audio">Media formats</a>.</p>
    pub fn get_media_encoding(&self) -> &::std::option::Option<crate::types::MediaEncoding> {
        self.inner.get_media_encoding()
    }
    /// <p>Specify the name of the custom vocabulary that you want to use when processing your transcription. Note that vocabulary names are case sensitive.</p>
    pub fn vocabulary_name(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.vocabulary_name(input.into());
        self
    }
    /// <p>Specify the name of the custom vocabulary that you want to use when processing your transcription. Note that vocabulary names are case sensitive.</p>
    pub fn set_vocabulary_name(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_vocabulary_name(input);
        self
    }
    /// <p>Specify the name of the custom vocabulary that you want to use when processing your transcription. Note that vocabulary names are case sensitive.</p>
    pub fn get_vocabulary_name(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_vocabulary_name()
    }
    /// <p>Specify the medical specialty contained in your audio.</p>
    pub fn specialty(mut self, input: crate::types::Specialty) -> Self {
        self.inner = self.inner.specialty(input);
        self
    }
    /// <p>Specify the medical specialty contained in your audio.</p>
    pub fn set_specialty(mut self, input: ::std::option::Option<crate::types::Specialty>) -> Self {
        self.inner = self.inner.set_specialty(input);
        self
    }
    /// <p>Specify the medical specialty contained in your audio.</p>
    pub fn get_specialty(&self) -> &::std::option::Option<crate::types::Specialty> {
        self.inner.get_specialty()
    }
    /// <p>Specify the type of input audio. For example, choose <code>DICTATION</code> for a provider dictating patient notes and <code>CONVERSATION</code> for a dialogue between a patient and a medical professional.</p>
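    ///
    /// Because `type` is a Rust keyword, this setter uses a raw identifier. A small
    /// sketch of calling it:
    ///
    /// ```no_run
    /// # fn example(builder: aws_sdk_transcribestreaming::operation::start_medical_stream_transcription::builders::StartMedicalStreamTranscriptionFluentBuilder) {
    /// let builder = builder.r#type(aws_sdk_transcribestreaming::types::Type::Dictation);
    /// # let _ = builder;
    /// # }
    /// ```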
    pub fn r#type(mut self, input: crate::types::Type) -> Self {
        self.inner = self.inner.r#type(input);
        self
    }
    /// <p>Specify the type of input audio. For example, choose <code>DICTATION</code> for a provider dictating patient notes and <code>CONVERSATION</code> for a dialogue between a patient and a medical professional.</p>
    pub fn set_type(mut self, input: ::std::option::Option<crate::types::Type>) -> Self {
        self.inner = self.inner.set_type(input);
        self
    }
    /// <p>Specify the type of input audio. For example, choose <code>DICTATION</code> for a provider dictating patient notes and <code>CONVERSATION</code> for a dialogue between a patient and a medical professional.</p>
    pub fn get_type(&self) -> &::std::option::Option<crate::types::Type> {
        self.inner.get_type()
    }
    /// <p>Enables speaker partitioning (diarization) in your transcription output. Speaker partitioning labels the speech from individual speakers in your media file.</p>
    /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/diarization.html">Partitioning speakers (diarization)</a>.</p>
    pub fn show_speaker_label(mut self, input: bool) -> Self {
        self.inner = self.inner.show_speaker_label(input);
        self
    }
    /// <p>Enables speaker partitioning (diarization) in your transcription output. Speaker partitioning labels the speech from individual speakers in your media file.</p>
    /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/diarization.html">Partitioning speakers (diarization)</a>.</p>
    pub fn set_show_speaker_label(mut self, input: ::std::option::Option<bool>) -> Self {
        self.inner = self.inner.set_show_speaker_label(input);
        self
    }
    /// <p>Enables speaker partitioning (diarization) in your transcription output. Speaker partitioning labels the speech from individual speakers in your media file.</p>
    /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/diarization.html">Partitioning speakers (diarization)</a>.</p>
    pub fn get_show_speaker_label(&self) -> &::std::option::Option<bool> {
        self.inner.get_show_speaker_label()
    }
    /// <p>Specify a name for your transcription session. If you don't include this parameter in your request, Amazon Transcribe Medical generates an ID and returns it in the response.</p>
    pub fn session_id(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.session_id(input.into());
        self
    }
    /// <p>Specify a name for your transcription session. If you don't include this parameter in your request, Amazon Transcribe Medical generates an ID and returns it in the response.</p>
    pub fn set_session_id(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_session_id(input);
        self
    }
    /// <p>Specify a name for your transcription session. If you don't include this parameter in your request, Amazon Transcribe Medical generates an ID and returns it in the response.</p>
    pub fn get_session_id(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_session_id()
    }
    /// <p>An encoded stream of audio blobs. Audio streams are encoded as either HTTP/2 or WebSocket data frames.</p>
    /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/streaming.html">Transcribing streaming audio</a>.</p>
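    ///
    /// One way to construct the sender, sketched here from an in-memory PCM buffer.
    /// The file name and chunk size are illustrative, and this assumes
    /// `EventStreamSender` can be built from a compatible `futures` stream via
    /// `.into()`, as in the SDK's streaming examples:
    ///
    /// ```no_run
    /// use aws_sdk_transcribestreaming::primitives::Blob;
    /// use aws_sdk_transcribestreaming::types::{AudioEvent, AudioStream};
    /// use aws_sdk_transcribestreaming::types::error::AudioStreamError;
    ///
    /// // Split raw signed 16-bit little-endian PCM into audio-event chunks.
    /// let pcm_bytes: Vec<u8> = std::fs::read("audio.pcm").unwrap();
    /// let events: Vec<Result<AudioStream, AudioStreamError>> = pcm_bytes
    ///     .chunks(8192)
    ///     .map(|chunk| {
    ///         Ok(AudioStream::AudioEvent(
    ///             AudioEvent::builder().audio_chunk(Blob::new(chunk)).build(),
    ///         ))
    ///     })
    ///     .collect();
    /// let sender: aws_smithy_http::event_stream::EventStreamSender<AudioStream, AudioStreamError> =
    ///     futures::stream::iter(events).into();
    /// // `sender` can now be passed to `.audio_stream(...)`.
    /// ```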
    pub fn audio_stream(mut self, input: ::aws_smithy_http::event_stream::EventStreamSender<crate::types::AudioStream, crate::types::error::AudioStreamError>) -> Self {
        self.inner = self.inner.audio_stream(input);
        self
    }
    /// <p>An encoded stream of audio blobs. Audio streams are encoded as either HTTP/2 or WebSocket data frames.</p>
    /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/streaming.html">Transcribing streaming audio</a>.</p>
    pub fn set_audio_stream(mut self, input: ::std::option::Option<::aws_smithy_http::event_stream::EventStreamSender<crate::types::AudioStream, crate::types::error::AudioStreamError>>) -> Self {
        self.inner = self.inner.set_audio_stream(input);
        self
    }
    /// <p>An encoded stream of audio blobs. Audio streams are encoded as either HTTP/2 or WebSocket data frames.</p>
    /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/streaming.html">Transcribing streaming audio</a>.</p>
    pub fn get_audio_stream(&self) -> &::std::option::Option<::aws_smithy_http::event_stream::EventStreamSender<crate::types::AudioStream, crate::types::error::AudioStreamError>> {
        self.inner.get_audio_stream()
    }
    /// <p>Enables channel identification in multi-channel audio.</p>
    /// <p>Channel identification transcribes the audio on each channel independently, then appends the output for each channel into one transcript.</p>
    /// <p>If you have multi-channel audio and do not enable channel identification, your audio is transcribed in a continuous manner and your transcript is not separated by channel.</p>
    /// <p>If you include <code>EnableChannelIdentification</code> in your request, you must also include <code>NumberOfChannels</code>.</p>
    /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/channel-id.html">Transcribing multi-channel audio</a>.</p>
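    ///
    /// A small sketch of the paired settings (the two must appear together):
    ///
    /// ```no_run
    /// # fn example(builder: aws_sdk_transcribestreaming::operation::start_medical_stream_transcription::builders::StartMedicalStreamTranscriptionFluentBuilder) {
    /// // Two-channel audio: enable identification and declare the channel count.
    /// let builder = builder
    ///     .enable_channel_identification(true)
    ///     .number_of_channels(2);
    /// # let _ = builder;
    /// # }
    /// ```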
    pub fn enable_channel_identification(mut self, input: bool) -> Self {
        self.inner = self.inner.enable_channel_identification(input);
        self
    }
    /// <p>Enables channel identification in multi-channel audio.</p>
    /// <p>Channel identification transcribes the audio on each channel independently, then appends the output for each channel into one transcript.</p>
    /// <p>If you have multi-channel audio and do not enable channel identification, your audio is transcribed in a continuous manner and your transcript is not separated by channel.</p>
    /// <p>If you include <code>EnableChannelIdentification</code> in your request, you must also include <code>NumberOfChannels</code>.</p>
    /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/channel-id.html">Transcribing multi-channel audio</a>.</p>
    pub fn set_enable_channel_identification(mut self, input: ::std::option::Option<bool>) -> Self {
        self.inner = self.inner.set_enable_channel_identification(input);
        self
    }
    /// <p>Enables channel identification in multi-channel audio.</p>
    /// <p>Channel identification transcribes the audio on each channel independently, then appends the output for each channel into one transcript.</p>
    /// <p>If you have multi-channel audio and do not enable channel identification, your audio is transcribed in a continuous manner and your transcript is not separated by channel.</p>
    /// <p>If you include <code>EnableChannelIdentification</code> in your request, you must also include <code>NumberOfChannels</code>.</p>
    /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/channel-id.html">Transcribing multi-channel audio</a>.</p>
    pub fn get_enable_channel_identification(&self) -> &::std::option::Option<bool> {
        self.inner.get_enable_channel_identification()
    }
    /// <p>Specify the number of channels in your audio stream. This value must be <code>2</code>, as only two channels are supported. If your audio doesn't contain multiple channels, do not include this parameter in your request.</p>
    /// <p>If you include <code>NumberOfChannels</code> in your request, you must also include <code>EnableChannelIdentification</code>.</p>
    pub fn number_of_channels(mut self, input: i32) -> Self {
        self.inner = self.inner.number_of_channels(input);
        self
    }
    /// <p>Specify the number of channels in your audio stream. This value must be <code>2</code>, as only two channels are supported. If your audio doesn't contain multiple channels, do not include this parameter in your request.</p>
    /// <p>If you include <code>NumberOfChannels</code> in your request, you must also include <code>EnableChannelIdentification</code>.</p>
    pub fn set_number_of_channels(mut self, input: ::std::option::Option<i32>) -> Self {
        self.inner = self.inner.set_number_of_channels(input);
        self
    }
    /// <p>Specify the number of channels in your audio stream. This value must be <code>2</code>, as only two channels are supported. If your audio doesn't contain multiple channels, do not include this parameter in your request.</p>
    /// <p>If you include <code>NumberOfChannels</code> in your request, you must also include <code>EnableChannelIdentification</code>.</p>
    pub fn get_number_of_channels(&self) -> &::std::option::Option<i32> {
        self.inner.get_number_of_channels()
    }
    /// <p>Labels all personal health information (PHI) identified in your transcript.</p>
    /// <p>Content identification is performed at the segment level; PHI is flagged upon complete transcription of an audio segment.</p>
    /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/phi-id.html">Identifying personal health information (PHI) in a transcription</a>.</p>
    pub fn content_identification_type(mut self, input: crate::types::MedicalContentIdentificationType) -> Self {
        self.inner = self.inner.content_identification_type(input);
        self
    }
    /// <p>Labels all personal health information (PHI) identified in your transcript.</p>
    /// <p>Content identification is performed at the segment level; PHI is flagged upon complete transcription of an audio segment.</p>
    /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/phi-id.html">Identifying personal health information (PHI) in a transcription</a>.</p>
    pub fn set_content_identification_type(mut self, input: ::std::option::Option<crate::types::MedicalContentIdentificationType>) -> Self {
        self.inner = self.inner.set_content_identification_type(input);
        self
    }
    /// <p>Labels all personal health information (PHI) identified in your transcript.</p>
    /// <p>Content identification is performed at the segment level; PHI is flagged upon complete transcription of an audio segment.</p>
    /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/phi-id.html">Identifying personal health information (PHI) in a transcription</a>.</p>
    pub fn get_content_identification_type(&self) -> &::std::option::Option<crate::types::MedicalContentIdentificationType> {
        self.inner.get_content_identification_type()
    }
}