// aws_sdk_transcribestreaming/operation/start_medical_stream_transcription/builders.rs

// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
pub use crate::operation::start_medical_stream_transcription::_start_medical_stream_transcription_output::StartMedicalStreamTranscriptionOutputBuilder;

pub use crate::operation::start_medical_stream_transcription::_start_medical_stream_transcription_input::StartMedicalStreamTranscriptionInputBuilder;
6impl crate::operation::start_medical_stream_transcription::builders::StartMedicalStreamTranscriptionInputBuilder {
7                    /// Sends a request with this input using the given client.
8                    pub async fn send_with(self, client: &crate::Client) -> ::std::result::Result<
9                        crate::operation::start_medical_stream_transcription::StartMedicalStreamTranscriptionOutput,
10                        ::aws_smithy_runtime_api::client::result::SdkError<
11                            crate::operation::start_medical_stream_transcription::StartMedicalStreamTranscriptionError,
12                            ::aws_smithy_runtime_api::client::orchestrator::HttpResponse
13                        >
14                    > {
15                        let mut fluent_builder = client.start_medical_stream_transcription();
16                        fluent_builder.inner = self;
17                        fluent_builder.send().await
18                    }
19                }
20/// Fluent builder constructing a request to `StartMedicalStreamTranscription`.
21/// 
22/// <p>Starts a bidirectional HTTP/2 or WebSocket stream where audio is streamed to Amazon Transcribe Medical and the transcription results are streamed to your application.</p>
23/// <p>The following parameters are required:</p>
24/// <ul>
25/// <li>
26/// <p><code>language-code</code></p></li>
27/// <li>
28/// <p><code>media-encoding</code></p></li>
29/// <li>
30/// <p><code>sample-rate</code></p></li>
31/// </ul>
32/// <p>For more information on streaming with Amazon Transcribe Medical, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/streaming.html">Transcribing streaming audio</a>.</p>
33#[derive(::std::fmt::Debug)]
34pub struct StartMedicalStreamTranscriptionFluentBuilder {
35                handle: ::std::sync::Arc<crate::client::Handle>,
36                inner: crate::operation::start_medical_stream_transcription::builders::StartMedicalStreamTranscriptionInputBuilder,
37config_override: ::std::option::Option<crate::config::Builder>,
38            }
39impl
40                crate::client::customize::internal::CustomizableSend<
41                    crate::operation::start_medical_stream_transcription::StartMedicalStreamTranscriptionOutput,
42                    crate::operation::start_medical_stream_transcription::StartMedicalStreamTranscriptionError,
43                > for StartMedicalStreamTranscriptionFluentBuilder
44            {
45                fn send(
46                    self,
47                    config_override: crate::config::Builder,
48                ) -> crate::client::customize::internal::BoxFuture<
49                    crate::client::customize::internal::SendResult<
50                        crate::operation::start_medical_stream_transcription::StartMedicalStreamTranscriptionOutput,
51                        crate::operation::start_medical_stream_transcription::StartMedicalStreamTranscriptionError,
52                    >,
53                > {
54                    ::std::boxed::Box::pin(async move { self.config_override(config_override).send().await })
55                }
56            }
57impl StartMedicalStreamTranscriptionFluentBuilder {
58    /// Creates a new `StartMedicalStreamTranscriptionFluentBuilder`.
59                    pub(crate) fn new(handle: ::std::sync::Arc<crate::client::Handle>) -> Self {
60                        Self {
61                            handle,
62                            inner: ::std::default::Default::default(),
63    config_override: ::std::option::Option::None,
64                        }
65                    }
66    /// Access the StartMedicalStreamTranscription as a reference.
67                    pub fn as_input(&self) -> &crate::operation::start_medical_stream_transcription::builders::StartMedicalStreamTranscriptionInputBuilder {
68                        &self.inner
69                    }
70    /// Sends the request and returns the response.
71                    ///
72                    /// If an error occurs, an `SdkError` will be returned with additional details that
73                    /// can be matched against.
74                    ///
75                    /// By default, any retryable failures will be retried twice. Retry behavior
76                    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
77                    /// set when configuring the client. Note: retries are enabled by default when using
78                    /// `aws_config::load_from_env()` or when using `BehaviorVersion::v2025_01_17()` or later.
79                    pub async fn send(self) -> ::std::result::Result<crate::operation::start_medical_stream_transcription::StartMedicalStreamTranscriptionOutput, ::aws_smithy_runtime_api::client::result::SdkError<crate::operation::start_medical_stream_transcription::StartMedicalStreamTranscriptionError, ::aws_smithy_runtime_api::client::orchestrator::HttpResponse>> {
80                        let input = self.inner.build().map_err(::aws_smithy_runtime_api::client::result::SdkError::construction_failure)?;
81                        let runtime_plugins = crate::operation::start_medical_stream_transcription::StartMedicalStreamTranscription::operation_runtime_plugins(
82                            self.handle.runtime_plugins.clone(),
83                            &self.handle.conf,
84                            self.config_override,
85                        );
86                        let mut output =
87                        crate::operation::start_medical_stream_transcription::StartMedicalStreamTranscription::orchestrate(
88                            &runtime_plugins,
89                            input,
90                        )
91                        .await?;
92    
93                    // Converts any error encountered beyond this point into an `SdkError` response error
94                    // with an `HttpResponse`. However, since we have already exited the `orchestrate`
95                    // function, the original `HttpResponse` is no longer available and cannot be restored.
96                    // This means that header information from the original response has been lost.
97                    //
98                    // Note that the response body would have been consumed by the deserializer
99                    // regardless, even if the initial message was hypothetically processed during
100                    // the orchestrator's deserialization phase but later resulted in an error.
101                    fn response_error(
102                        err: impl ::std::convert::Into<::aws_smithy_runtime_api::box_error::BoxError>
103                    ) -> ::aws_smithy_runtime_api::client::result::SdkError<crate::operation::start_medical_stream_transcription::StartMedicalStreamTranscriptionError, ::aws_smithy_runtime_api::client::orchestrator::HttpResponse> {
104                        ::aws_smithy_runtime_api::client::result::SdkError::response_error(err, ::aws_smithy_runtime_api::client::orchestrator::HttpResponse::new(
105                            ::aws_smithy_runtime_api::http::StatusCode::try_from(200).expect("valid successful code"),
106                            ::aws_smithy_types::body::SdkBody::empty()))
107                    }
108    
109                    let message = output.transcript_result_stream.try_recv_initial_response().await.map_err(response_error)?;
110    
111                    match message {
112                        ::std::option::Option::Some(_message) => {
113                            
114                            ::std::result::Result::Ok(output)
115                        }
116                        ::std::option::Option::None => ::std::result::Result::Ok(output),
117                    }
118                    }
119    
120                    /// Consumes this builder, creating a customizable operation that can be modified before being sent.
121                    pub fn customize(
122                        self,
123                    ) -> crate::client::customize::CustomizableOperation<crate::operation::start_medical_stream_transcription::StartMedicalStreamTranscriptionOutput, crate::operation::start_medical_stream_transcription::StartMedicalStreamTranscriptionError, Self> {
124                        crate::client::customize::CustomizableOperation::new(self)
125                    }
126    pub(crate) fn config_override(
127                            mut self,
128                            config_override: impl ::std::convert::Into<crate::config::Builder>,
129                        ) -> Self {
130                            self.set_config_override(::std::option::Option::Some(config_override.into()));
131                            self
132                        }
133    
134                        pub(crate) fn set_config_override(
135                            &mut self,
136                            config_override: ::std::option::Option<crate::config::Builder>,
137                        ) -> &mut Self {
138                            self.config_override = config_override;
139                            self
140                        }
141    /// <p>Specify the language code that represents the language spoken in your audio.</p><important>
142    /// <p>Amazon Transcribe Medical only supports US English (<code>en-US</code>).</p>
143    /// </important>
144    pub fn language_code(mut self, input: crate::types::LanguageCode) -> Self {
145                    self.inner = self.inner.language_code(input);
146                    self
147                }
148    /// <p>Specify the language code that represents the language spoken in your audio.</p><important>
149    /// <p>Amazon Transcribe Medical only supports US English (<code>en-US</code>).</p>
150    /// </important>
151    pub fn set_language_code(mut self, input: ::std::option::Option<crate::types::LanguageCode>) -> Self {
152                    self.inner = self.inner.set_language_code(input);
153                    self
154                }
155    /// <p>Specify the language code that represents the language spoken in your audio.</p><important>
156    /// <p>Amazon Transcribe Medical only supports US English (<code>en-US</code>).</p>
157    /// </important>
158    pub fn get_language_code(&self) -> &::std::option::Option<crate::types::LanguageCode> {
159                    self.inner.get_language_code()
160                }
161    /// <p>The sample rate of the input audio (in hertz). Amazon Transcribe Medical supports a range from 16,000 Hz to 48,000 Hz. Note that the sample rate you specify must match that of your audio.</p>
162    pub fn media_sample_rate_hertz(mut self, input: i32) -> Self {
163                    self.inner = self.inner.media_sample_rate_hertz(input);
164                    self
165                }
166    /// <p>The sample rate of the input audio (in hertz). Amazon Transcribe Medical supports a range from 16,000 Hz to 48,000 Hz. Note that the sample rate you specify must match that of your audio.</p>
167    pub fn set_media_sample_rate_hertz(mut self, input: ::std::option::Option<i32>) -> Self {
168                    self.inner = self.inner.set_media_sample_rate_hertz(input);
169                    self
170                }
171    /// <p>The sample rate of the input audio (in hertz). Amazon Transcribe Medical supports a range from 16,000 Hz to 48,000 Hz. Note that the sample rate you specify must match that of your audio.</p>
172    pub fn get_media_sample_rate_hertz(&self) -> &::std::option::Option<i32> {
173                    self.inner.get_media_sample_rate_hertz()
174                }
175    /// <p>Specify the encoding used for the input audio. Supported formats are:</p>
176    /// <ul>
177    /// <li>
178    /// <p>FLAC</p></li>
179    /// <li>
180    /// <p>OPUS-encoded audio in an Ogg container</p></li>
181    /// <li>
182    /// <p>PCM (only signed 16-bit little-endian audio formats, which does not include WAV)</p></li>
183    /// </ul>
184    /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/how-input.html#how-input-audio">Media formats</a>.</p>
185    pub fn media_encoding(mut self, input: crate::types::MediaEncoding) -> Self {
186                    self.inner = self.inner.media_encoding(input);
187                    self
188                }
189    /// <p>Specify the encoding used for the input audio. Supported formats are:</p>
190    /// <ul>
191    /// <li>
192    /// <p>FLAC</p></li>
193    /// <li>
194    /// <p>OPUS-encoded audio in an Ogg container</p></li>
195    /// <li>
196    /// <p>PCM (only signed 16-bit little-endian audio formats, which does not include WAV)</p></li>
197    /// </ul>
198    /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/how-input.html#how-input-audio">Media formats</a>.</p>
199    pub fn set_media_encoding(mut self, input: ::std::option::Option<crate::types::MediaEncoding>) -> Self {
200                    self.inner = self.inner.set_media_encoding(input);
201                    self
202                }
203    /// <p>Specify the encoding used for the input audio. Supported formats are:</p>
204    /// <ul>
205    /// <li>
206    /// <p>FLAC</p></li>
207    /// <li>
208    /// <p>OPUS-encoded audio in an Ogg container</p></li>
209    /// <li>
210    /// <p>PCM (only signed 16-bit little-endian audio formats, which does not include WAV)</p></li>
211    /// </ul>
212    /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/how-input.html#how-input-audio">Media formats</a>.</p>
213    pub fn get_media_encoding(&self) -> &::std::option::Option<crate::types::MediaEncoding> {
214                    self.inner.get_media_encoding()
215                }
216    /// <p>Specify the name of the custom vocabulary that you want to use when processing your transcription. Note that vocabulary names are case sensitive.</p>
217    pub fn vocabulary_name(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
218                    self.inner = self.inner.vocabulary_name(input.into());
219                    self
220                }
221    /// <p>Specify the name of the custom vocabulary that you want to use when processing your transcription. Note that vocabulary names are case sensitive.</p>
222    pub fn set_vocabulary_name(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
223                    self.inner = self.inner.set_vocabulary_name(input);
224                    self
225                }
226    /// <p>Specify the name of the custom vocabulary that you want to use when processing your transcription. Note that vocabulary names are case sensitive.</p>
227    pub fn get_vocabulary_name(&self) -> &::std::option::Option<::std::string::String> {
228                    self.inner.get_vocabulary_name()
229                }
230    /// <p>Specify the medical specialty contained in your audio.</p>
231    pub fn specialty(mut self, input: crate::types::Specialty) -> Self {
232                    self.inner = self.inner.specialty(input);
233                    self
234                }
235    /// <p>Specify the medical specialty contained in your audio.</p>
236    pub fn set_specialty(mut self, input: ::std::option::Option<crate::types::Specialty>) -> Self {
237                    self.inner = self.inner.set_specialty(input);
238                    self
239                }
240    /// <p>Specify the medical specialty contained in your audio.</p>
241    pub fn get_specialty(&self) -> &::std::option::Option<crate::types::Specialty> {
242                    self.inner.get_specialty()
243                }
244    /// <p>Specify the type of input audio. For example, choose <code>DICTATION</code> for a provider dictating patient notes and <code>CONVERSATION</code> for a dialogue between a patient and a medical professional.</p>
245    pub fn r#type(mut self, input: crate::types::Type) -> Self {
246                    self.inner = self.inner.r#type(input);
247                    self
248                }
249    /// <p>Specify the type of input audio. For example, choose <code>DICTATION</code> for a provider dictating patient notes and <code>CONVERSATION</code> for a dialogue between a patient and a medical professional.</p>
250    pub fn set_type(mut self, input: ::std::option::Option<crate::types::Type>) -> Self {
251                    self.inner = self.inner.set_type(input);
252                    self
253                }
254    /// <p>Specify the type of input audio. For example, choose <code>DICTATION</code> for a provider dictating patient notes and <code>CONVERSATION</code> for a dialogue between a patient and a medical professional.</p>
255    pub fn get_type(&self) -> &::std::option::Option<crate::types::Type> {
256                    self.inner.get_type()
257                }
258    /// <p>Enables speaker partitioning (diarization) in your transcription output. Speaker partitioning labels the speech from individual speakers in your media file.</p>
259    /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/diarization.html">Partitioning speakers (diarization)</a>.</p>
260    pub fn show_speaker_label(mut self, input: bool) -> Self {
261                    self.inner = self.inner.show_speaker_label(input);
262                    self
263                }
264    /// <p>Enables speaker partitioning (diarization) in your transcription output. Speaker partitioning labels the speech from individual speakers in your media file.</p>
265    /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/diarization.html">Partitioning speakers (diarization)</a>.</p>
266    pub fn set_show_speaker_label(mut self, input: ::std::option::Option<bool>) -> Self {
267                    self.inner = self.inner.set_show_speaker_label(input);
268                    self
269                }
270    /// <p>Enables speaker partitioning (diarization) in your transcription output. Speaker partitioning labels the speech from individual speakers in your media file.</p>
271    /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/diarization.html">Partitioning speakers (diarization)</a>.</p>
272    pub fn get_show_speaker_label(&self) -> &::std::option::Option<bool> {
273                    self.inner.get_show_speaker_label()
274                }
275    /// <p>Specify a name for your transcription session. If you don't include this parameter in your request, Amazon Transcribe Medical generates an ID and returns it in the response.</p>
276    pub fn session_id(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
277                    self.inner = self.inner.session_id(input.into());
278                    self
279                }
280    /// <p>Specify a name for your transcription session. If you don't include this parameter in your request, Amazon Transcribe Medical generates an ID and returns it in the response.</p>
281    pub fn set_session_id(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
282                    self.inner = self.inner.set_session_id(input);
283                    self
284                }
285    /// <p>Specify a name for your transcription session. If you don't include this parameter in your request, Amazon Transcribe Medical generates an ID and returns it in the response.</p>
286    pub fn get_session_id(&self) -> &::std::option::Option<::std::string::String> {
287                    self.inner.get_session_id()
288                }
289    /// <p>An encoded stream of audio blobs. Audio streams are encoded as either HTTP/2 or WebSocket data frames.</p>
290    /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/streaming.html">Transcribing streaming audio</a>.</p>
291    pub fn audio_stream(mut self, input: ::aws_smithy_http::event_stream::EventStreamSender<crate::types::AudioStream, crate::types::error::AudioStreamError>) -> Self {
292                    self.inner = self.inner.audio_stream(input);
293                    self
294                }
295    /// <p>An encoded stream of audio blobs. Audio streams are encoded as either HTTP/2 or WebSocket data frames.</p>
296    /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/streaming.html">Transcribing streaming audio</a>.</p>
297    pub fn set_audio_stream(mut self, input: ::std::option::Option<::aws_smithy_http::event_stream::EventStreamSender<crate::types::AudioStream, crate::types::error::AudioStreamError>>) -> Self {
298                    self.inner = self.inner.set_audio_stream(input);
299                    self
300                }
301    /// <p>An encoded stream of audio blobs. Audio streams are encoded as either HTTP/2 or WebSocket data frames.</p>
302    /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/streaming.html">Transcribing streaming audio</a>.</p>
303    pub fn get_audio_stream(&self) -> &::std::option::Option<::aws_smithy_http::event_stream::EventStreamSender<crate::types::AudioStream, crate::types::error::AudioStreamError>> {
304                    self.inner.get_audio_stream()
305                }
306    /// <p>Enables channel identification in multi-channel audio.</p>
307    /// <p>Channel identification transcribes the audio on each channel independently, then appends the output for each channel into one transcript.</p>
308    /// <p>If you have multi-channel audio and do not enable channel identification, your audio is transcribed in a continuous manner and your transcript is not separated by channel.</p>
309    /// <p>If you include <code>EnableChannelIdentification</code> in your request, you must also include <code>NumberOfChannels</code>.</p>
310    /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/channel-id.html">Transcribing multi-channel audio</a>.</p>
311    pub fn enable_channel_identification(mut self, input: bool) -> Self {
312                    self.inner = self.inner.enable_channel_identification(input);
313                    self
314                }
315    /// <p>Enables channel identification in multi-channel audio.</p>
316    /// <p>Channel identification transcribes the audio on each channel independently, then appends the output for each channel into one transcript.</p>
317    /// <p>If you have multi-channel audio and do not enable channel identification, your audio is transcribed in a continuous manner and your transcript is not separated by channel.</p>
318    /// <p>If you include <code>EnableChannelIdentification</code> in your request, you must also include <code>NumberOfChannels</code>.</p>
319    /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/channel-id.html">Transcribing multi-channel audio</a>.</p>
320    pub fn set_enable_channel_identification(mut self, input: ::std::option::Option<bool>) -> Self {
321                    self.inner = self.inner.set_enable_channel_identification(input);
322                    self
323                }
324    /// <p>Enables channel identification in multi-channel audio.</p>
325    /// <p>Channel identification transcribes the audio on each channel independently, then appends the output for each channel into one transcript.</p>
326    /// <p>If you have multi-channel audio and do not enable channel identification, your audio is transcribed in a continuous manner and your transcript is not separated by channel.</p>
327    /// <p>If you include <code>EnableChannelIdentification</code> in your request, you must also include <code>NumberOfChannels</code>.</p>
328    /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/channel-id.html">Transcribing multi-channel audio</a>.</p>
329    pub fn get_enable_channel_identification(&self) -> &::std::option::Option<bool> {
330                    self.inner.get_enable_channel_identification()
331                }
332    /// <p>Specify the number of channels in your audio stream. This value must be <code>2</code>, as only two channels are supported. If your audio doesn't contain multiple channels, do not include this parameter in your request.</p>
333    /// <p>If you include <code>NumberOfChannels</code> in your request, you must also include <code>EnableChannelIdentification</code>.</p>
334    pub fn number_of_channels(mut self, input: i32) -> Self {
335                    self.inner = self.inner.number_of_channels(input);
336                    self
337                }
338    /// <p>Specify the number of channels in your audio stream. This value must be <code>2</code>, as only two channels are supported. If your audio doesn't contain multiple channels, do not include this parameter in your request.</p>
339    /// <p>If you include <code>NumberOfChannels</code> in your request, you must also include <code>EnableChannelIdentification</code>.</p>
340    pub fn set_number_of_channels(mut self, input: ::std::option::Option<i32>) -> Self {
341                    self.inner = self.inner.set_number_of_channels(input);
342                    self
343                }
344    /// <p>Specify the number of channels in your audio stream. This value must be <code>2</code>, as only two channels are supported. If your audio doesn't contain multiple channels, do not include this parameter in your request.</p>
345    /// <p>If you include <code>NumberOfChannels</code> in your request, you must also include <code>EnableChannelIdentification</code>.</p>
346    pub fn get_number_of_channels(&self) -> &::std::option::Option<i32> {
347                    self.inner.get_number_of_channels()
348                }
349    /// <p>Labels all personal health information (PHI) identified in your transcript.</p>
350    /// <p>Content identification is performed at the segment level; PHI is flagged upon complete transcription of an audio segment.</p>
351    /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/phi-id.html">Identifying personal health information (PHI) in a transcription</a>.</p>
352    pub fn content_identification_type(mut self, input: crate::types::MedicalContentIdentificationType) -> Self {
353                    self.inner = self.inner.content_identification_type(input);
354                    self
355                }
356    /// <p>Labels all personal health information (PHI) identified in your transcript.</p>
357    /// <p>Content identification is performed at the segment level; PHI is flagged upon complete transcription of an audio segment.</p>
358    /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/phi-id.html">Identifying personal health information (PHI) in a transcription</a>.</p>
359    pub fn set_content_identification_type(mut self, input: ::std::option::Option<crate::types::MedicalContentIdentificationType>) -> Self {
360                    self.inner = self.inner.set_content_identification_type(input);
361                    self
362                }
363    /// <p>Labels all personal health information (PHI) identified in your transcript.</p>
364    /// <p>Content identification is performed at the segment level; PHI is flagged upon complete transcription of an audio segment.</p>
365    /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/phi-id.html">Identifying personal health information (PHI) in a transcription</a>.</p>
366    pub fn get_content_identification_type(&self) -> &::std::option::Option<crate::types::MedicalContentIdentificationType> {
367                    self.inner.get_content_identification_type()
368                }
369}
370