aws_sdk_transcribestreaming/operation/start_medical_stream_transcription/builders.rs

// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
pub use crate::operation::start_medical_stream_transcription::_start_medical_stream_transcription_output::StartMedicalStreamTranscriptionOutputBuilder;

pub use crate::operation::start_medical_stream_transcription::_start_medical_stream_transcription_input::StartMedicalStreamTranscriptionInputBuilder;

impl crate::operation::start_medical_stream_transcription::builders::StartMedicalStreamTranscriptionInputBuilder {
    /// Sends a request with this input using the given client.
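    ///
    /// # Examples
    ///
    /// An illustrative sketch (not part of the generated code): build the input
    /// separately, then send it with an existing client. The enum variant names
    /// shown (for example `LanguageCode::EnUs`) and the pre-built `audio` event
    /// stream are assumptions.
    ///
    /// ```no_run
    /// # async fn example(
    /// #     client: &aws_sdk_transcribestreaming::Client,
    /// #     audio: aws_smithy_http::event_stream::EventStreamSender<
    /// #         aws_sdk_transcribestreaming::types::AudioStream,
    /// #         aws_sdk_transcribestreaming::types::error::AudioStreamError,
    /// #     >,
    /// # ) -> Result<(), Box<dyn std::error::Error>> {
    /// use aws_sdk_transcribestreaming::operation::start_medical_stream_transcription::StartMedicalStreamTranscriptionInput;
    /// use aws_sdk_transcribestreaming::types::{LanguageCode, MediaEncoding};
    ///
    /// // The variant names below are illustrative assumptions.
    /// let output = StartMedicalStreamTranscriptionInput::builder()
    ///     .language_code(LanguageCode::EnUs)
    ///     .media_sample_rate_hertz(16_000)
    ///     .media_encoding(MediaEncoding::Pcm)
    ///     .audio_stream(audio)
    ///     .send_with(client)
    ///     .await?;
    /// # let _ = output;
    /// # Ok(())
    /// # }
    /// ```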
    pub async fn send_with(
        self,
        client: &crate::Client,
    ) -> ::std::result::Result<
        crate::operation::start_medical_stream_transcription::StartMedicalStreamTranscriptionOutput,
        ::aws_smithy_runtime_api::client::result::SdkError<
            crate::operation::start_medical_stream_transcription::StartMedicalStreamTranscriptionError,
            ::aws_smithy_runtime_api::client::orchestrator::HttpResponse,
        >,
    > {
        let mut fluent_builder = client.start_medical_stream_transcription();
        fluent_builder.inner = self;
        fluent_builder.send().await
    }
}
/// Fluent builder constructing a request to `StartMedicalStreamTranscription`.
///
/// <p>Starts a bidirectional HTTP/2 or WebSocket stream where audio is streamed to Amazon Transcribe Medical and the transcription results are streamed to your application.</p>
/// <p>The following parameters are required:</p>
/// <ul>
/// <li>
/// <p><code>language-code</code></p></li>
/// <li>
/// <p><code>media-encoding</code></p></li>
/// <li>
/// <p><code>sample-rate</code></p></li>
/// </ul>
/// <p>For more information on streaming with Amazon Transcribe Medical, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/streaming.html">Transcribing streaming audio</a>.</p>
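///
/// # Examples
///
/// An illustrative sketch (not part of the generated code) showing the required
/// parameters on the fluent builder. The enum variant names (`LanguageCode::EnUs`,
/// `MediaEncoding::Pcm`, `Specialty::Primarycare`, `Type::Conversation`) and the
/// pre-built `audio` event stream are assumptions.
///
/// ```no_run
/// # async fn example(
/// #     client: &aws_sdk_transcribestreaming::Client,
/// #     audio: aws_smithy_http::event_stream::EventStreamSender<
/// #         aws_sdk_transcribestreaming::types::AudioStream,
/// #         aws_sdk_transcribestreaming::types::error::AudioStreamError,
/// #     >,
/// # ) -> Result<(), Box<dyn std::error::Error>> {
/// use aws_sdk_transcribestreaming::types::{LanguageCode, MediaEncoding, Specialty, Type};
///
/// // The enum variants below are illustrative assumptions.
/// let output = client
///     .start_medical_stream_transcription()
///     .language_code(LanguageCode::EnUs)
///     .media_sample_rate_hertz(16_000)
///     .media_encoding(MediaEncoding::Pcm)
///     .specialty(Specialty::Primarycare)
///     .r#type(Type::Conversation)
///     .audio_stream(audio)
///     .send()
///     .await?;
/// // `output` exposes the transcript result event stream for this session.
/// # let _ = output;
/// # Ok(())
/// # }
/// ```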
#[derive(::std::fmt::Debug)]
pub struct StartMedicalStreamTranscriptionFluentBuilder {
    handle: ::std::sync::Arc<crate::client::Handle>,
    inner: crate::operation::start_medical_stream_transcription::builders::StartMedicalStreamTranscriptionInputBuilder,
    config_override: ::std::option::Option<crate::config::Builder>,
}
impl
    crate::client::customize::internal::CustomizableSend<
        crate::operation::start_medical_stream_transcription::StartMedicalStreamTranscriptionOutput,
        crate::operation::start_medical_stream_transcription::StartMedicalStreamTranscriptionError,
    > for StartMedicalStreamTranscriptionFluentBuilder
{
    fn send(
        self,
        config_override: crate::config::Builder,
    ) -> crate::client::customize::internal::BoxFuture<
        crate::client::customize::internal::SendResult<
            crate::operation::start_medical_stream_transcription::StartMedicalStreamTranscriptionOutput,
            crate::operation::start_medical_stream_transcription::StartMedicalStreamTranscriptionError,
        >,
    > {
        ::std::boxed::Box::pin(async move { self.config_override(config_override).send().await })
    }
}
impl StartMedicalStreamTranscriptionFluentBuilder {
    /// Creates a new `StartMedicalStreamTranscriptionFluentBuilder`.
    pub(crate) fn new(handle: ::std::sync::Arc<crate::client::Handle>) -> Self {
        Self {
            handle,
            inner: ::std::default::Default::default(),
            config_override: ::std::option::Option::None,
        }
    }
    /// Access the StartMedicalStreamTranscription input builder as a reference.
    pub fn as_input(&self) -> &crate::operation::start_medical_stream_transcription::builders::StartMedicalStreamTranscriptionInputBuilder {
        &self.inner
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
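    ///
    /// # Examples
    ///
    /// A hedged sketch (not part of the generated code) of handling the result of
    /// `send`. It assumes `SdkError::into_service_error` is available in the SDK
    /// version in use.
    ///
    /// ```no_run
    /// # async fn example(
    /// #     builder: aws_sdk_transcribestreaming::operation::start_medical_stream_transcription::builders::StartMedicalStreamTranscriptionFluentBuilder,
    /// # ) {
    /// match builder.send().await {
    ///     Ok(output) => {
    ///         // The output carries the transcript result event stream.
    ///         let _ = output;
    ///     }
    ///     Err(err) => {
    ///         // Unwrap transport-level wrappers to get the modeled operation error.
    ///         eprintln!("StartMedicalStreamTranscription failed: {}", err.into_service_error());
    ///     }
    /// }
    /// # }
    /// ```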
    pub async fn send(
        self,
    ) -> ::std::result::Result<
        crate::operation::start_medical_stream_transcription::StartMedicalStreamTranscriptionOutput,
        ::aws_smithy_runtime_api::client::result::SdkError<
            crate::operation::start_medical_stream_transcription::StartMedicalStreamTranscriptionError,
            ::aws_smithy_runtime_api::client::orchestrator::HttpResponse,
        >,
    > {
        let input = self
            .inner
            .build()
            .map_err(::aws_smithy_runtime_api::client::result::SdkError::construction_failure)?;
        let runtime_plugins = crate::operation::start_medical_stream_transcription::StartMedicalStreamTranscription::operation_runtime_plugins(
            self.handle.runtime_plugins.clone(),
            &self.handle.conf,
            self.config_override,
        );
        crate::operation::start_medical_stream_transcription::StartMedicalStreamTranscription::orchestrate(&runtime_plugins, input).await
    }

    /// Consumes this builder, creating a customizable operation that can be modified before being sent.
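    ///
    /// # Examples
    ///
    /// An illustrative sketch (not part of the generated code) of overriding
    /// configuration for just this operation; the `Region` value is a placeholder.
    ///
    /// ```no_run
    /// # async fn example(
    /// #     builder: aws_sdk_transcribestreaming::operation::start_medical_stream_transcription::builders::StartMedicalStreamTranscriptionFluentBuilder,
    /// # ) -> Result<(), Box<dyn std::error::Error>> {
    /// use aws_sdk_transcribestreaming::config::Region;
    ///
    /// // "us-west-2" is a placeholder region for illustration.
    /// let output = builder
    ///     .customize()
    ///     .config_override(aws_sdk_transcribestreaming::config::Config::builder().region(Region::new("us-west-2")))
    ///     .send()
    ///     .await?;
    /// # let _ = output;
    /// # Ok(())
    /// # }
    /// ```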
    pub fn customize(
        self,
    ) -> crate::client::customize::CustomizableOperation<
        crate::operation::start_medical_stream_transcription::StartMedicalStreamTranscriptionOutput,
        crate::operation::start_medical_stream_transcription::StartMedicalStreamTranscriptionError,
        Self,
    > {
        crate::client::customize::CustomizableOperation::new(self)
    }
    pub(crate) fn config_override(mut self, config_override: impl ::std::convert::Into<crate::config::Builder>) -> Self {
        self.set_config_override(::std::option::Option::Some(config_override.into()));
        self
    }

    pub(crate) fn set_config_override(&mut self, config_override: ::std::option::Option<crate::config::Builder>) -> &mut Self {
        self.config_override = config_override;
        self
    }
    /// <p>Specify the language code that represents the language spoken in your audio.</p><important>
    /// <p>Amazon Transcribe Medical only supports US English (<code>en-US</code>).</p>
    /// </important>
    pub fn language_code(mut self, input: crate::types::LanguageCode) -> Self {
        self.inner = self.inner.language_code(input);
        self
    }
    /// <p>Specify the language code that represents the language spoken in your audio.</p><important>
    /// <p>Amazon Transcribe Medical only supports US English (<code>en-US</code>).</p>
    /// </important>
    pub fn set_language_code(mut self, input: ::std::option::Option<crate::types::LanguageCode>) -> Self {
        self.inner = self.inner.set_language_code(input);
        self
    }
    /// <p>Specify the language code that represents the language spoken in your audio.</p><important>
    /// <p>Amazon Transcribe Medical only supports US English (<code>en-US</code>).</p>
    /// </important>
    pub fn get_language_code(&self) -> &::std::option::Option<crate::types::LanguageCode> {
        self.inner.get_language_code()
    }
    /// <p>The sample rate of the input audio (in hertz). Amazon Transcribe Medical supports a range from 16,000 Hz to 48,000 Hz. Note that the sample rate you specify must match that of your audio.</p>
    pub fn media_sample_rate_hertz(mut self, input: i32) -> Self {
        self.inner = self.inner.media_sample_rate_hertz(input);
        self
    }
    /// <p>The sample rate of the input audio (in hertz). Amazon Transcribe Medical supports a range from 16,000 Hz to 48,000 Hz. Note that the sample rate you specify must match that of your audio.</p>
    pub fn set_media_sample_rate_hertz(mut self, input: ::std::option::Option<i32>) -> Self {
        self.inner = self.inner.set_media_sample_rate_hertz(input);
        self
    }
    /// <p>The sample rate of the input audio (in hertz). Amazon Transcribe Medical supports a range from 16,000 Hz to 48,000 Hz. Note that the sample rate you specify must match that of your audio.</p>
    pub fn get_media_sample_rate_hertz(&self) -> &::std::option::Option<i32> {
        self.inner.get_media_sample_rate_hertz()
    }
    /// <p>Specify the encoding used for the input audio. Supported formats are:</p>
    /// <ul>
    /// <li>
    /// <p>FLAC</p></li>
    /// <li>
    /// <p>OPUS-encoded audio in an Ogg container</p></li>
    /// <li>
    /// <p>PCM (only signed 16-bit little-endian audio formats, which does not include WAV)</p></li>
    /// </ul>
    /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/how-input.html#how-input-audio">Media formats</a>.</p>
    pub fn media_encoding(mut self, input: crate::types::MediaEncoding) -> Self {
        self.inner = self.inner.media_encoding(input);
        self
    }
    /// <p>Specify the encoding used for the input audio. Supported formats are:</p>
    /// <ul>
    /// <li>
    /// <p>FLAC</p></li>
    /// <li>
    /// <p>OPUS-encoded audio in an Ogg container</p></li>
    /// <li>
    /// <p>PCM (only signed 16-bit little-endian audio formats, which does not include WAV)</p></li>
    /// </ul>
    /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/how-input.html#how-input-audio">Media formats</a>.</p>
    pub fn set_media_encoding(mut self, input: ::std::option::Option<crate::types::MediaEncoding>) -> Self {
        self.inner = self.inner.set_media_encoding(input);
        self
    }
    /// <p>Specify the encoding used for the input audio. Supported formats are:</p>
    /// <ul>
    /// <li>
    /// <p>FLAC</p></li>
    /// <li>
    /// <p>OPUS-encoded audio in an Ogg container</p></li>
    /// <li>
    /// <p>PCM (only signed 16-bit little-endian audio formats, which does not include WAV)</p></li>
    /// </ul>
    /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/how-input.html#how-input-audio">Media formats</a>.</p>
    pub fn get_media_encoding(&self) -> &::std::option::Option<crate::types::MediaEncoding> {
        self.inner.get_media_encoding()
    }
    /// <p>Specify the name of the custom vocabulary that you want to use when processing your transcription. Note that vocabulary names are case sensitive.</p>
    pub fn vocabulary_name(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.vocabulary_name(input.into());
        self
    }
    /// <p>Specify the name of the custom vocabulary that you want to use when processing your transcription. Note that vocabulary names are case sensitive.</p>
    pub fn set_vocabulary_name(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_vocabulary_name(input);
        self
    }
    /// <p>Specify the name of the custom vocabulary that you want to use when processing your transcription. Note that vocabulary names are case sensitive.</p>
    pub fn get_vocabulary_name(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_vocabulary_name()
    }
    /// <p>Specify the medical specialty contained in your audio.</p>
    pub fn specialty(mut self, input: crate::types::Specialty) -> Self {
        self.inner = self.inner.specialty(input);
        self
    }
    /// <p>Specify the medical specialty contained in your audio.</p>
    pub fn set_specialty(mut self, input: ::std::option::Option<crate::types::Specialty>) -> Self {
        self.inner = self.inner.set_specialty(input);
        self
    }
    /// <p>Specify the medical specialty contained in your audio.</p>
    pub fn get_specialty(&self) -> &::std::option::Option<crate::types::Specialty> {
        self.inner.get_specialty()
    }
    /// <p>Specify the type of input audio. For example, choose <code>DICTATION</code> for a provider dictating patient notes and <code>CONVERSATION</code> for a dialogue between a patient and a medical professional.</p>
    pub fn r#type(mut self, input: crate::types::Type) -> Self {
        self.inner = self.inner.r#type(input);
        self
    }
    /// <p>Specify the type of input audio. For example, choose <code>DICTATION</code> for a provider dictating patient notes and <code>CONVERSATION</code> for a dialogue between a patient and a medical professional.</p>
    pub fn set_type(mut self, input: ::std::option::Option<crate::types::Type>) -> Self {
        self.inner = self.inner.set_type(input);
        self
    }
    /// <p>Specify the type of input audio. For example, choose <code>DICTATION</code> for a provider dictating patient notes and <code>CONVERSATION</code> for a dialogue between a patient and a medical professional.</p>
    pub fn get_type(&self) -> &::std::option::Option<crate::types::Type> {
        self.inner.get_type()
    }
    /// <p>Enables speaker partitioning (diarization) in your transcription output. Speaker partitioning labels the speech from individual speakers in your media file.</p>
    /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/diarization.html">Partitioning speakers (diarization)</a>.</p>
    pub fn show_speaker_label(mut self, input: bool) -> Self {
        self.inner = self.inner.show_speaker_label(input);
        self
    }
    /// <p>Enables speaker partitioning (diarization) in your transcription output. Speaker partitioning labels the speech from individual speakers in your media file.</p>
    /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/diarization.html">Partitioning speakers (diarization)</a>.</p>
    pub fn set_show_speaker_label(mut self, input: ::std::option::Option<bool>) -> Self {
        self.inner = self.inner.set_show_speaker_label(input);
        self
    }
    /// <p>Enables speaker partitioning (diarization) in your transcription output. Speaker partitioning labels the speech from individual speakers in your media file.</p>
    /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/diarization.html">Partitioning speakers (diarization)</a>.</p>
    pub fn get_show_speaker_label(&self) -> &::std::option::Option<bool> {
        self.inner.get_show_speaker_label()
    }
    /// <p>Specify a name for your transcription session. If you don't include this parameter in your request, Amazon Transcribe Medical generates an ID and returns it in the response.</p>
    /// <p>You can use a session ID to retry a streaming session.</p>
    pub fn session_id(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.session_id(input.into());
        self
    }
    /// <p>Specify a name for your transcription session. If you don't include this parameter in your request, Amazon Transcribe Medical generates an ID and returns it in the response.</p>
    /// <p>You can use a session ID to retry a streaming session.</p>
    pub fn set_session_id(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_session_id(input);
        self
    }
    /// <p>Specify a name for your transcription session. If you don't include this parameter in your request, Amazon Transcribe Medical generates an ID and returns it in the response.</p>
    /// <p>You can use a session ID to retry a streaming session.</p>
    pub fn get_session_id(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_session_id()
    }
    /// <p>An encoded stream of audio blobs. Audio streams are encoded as either HTTP/2 or WebSocket data frames.</p>
    /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/streaming.html">Transcribing streaming audio</a>.</p>
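    ///
    /// # Examples
    ///
    /// A hedged sketch (not part of the generated code) of building the audio
    /// event stream from in-memory PCM bytes. It assumes a stream adapter such as
    /// `futures_util::stream::iter` is available; `pcm_bytes`, `client`, and the
    /// chunk size are placeholders.
    ///
    /// ```ignore
    /// use aws_sdk_transcribestreaming::primitives::Blob;
    /// use aws_sdk_transcribestreaming::types::error::AudioStreamError;
    /// use aws_sdk_transcribestreaming::types::{AudioEvent, AudioStream};
    ///
    /// // Split the raw PCM bytes into audio events; the 8 KiB chunk size is illustrative.
    /// let events: Vec<Result<AudioStream, AudioStreamError>> = pcm_bytes
    ///     .chunks(8 * 1024)
    ///     .map(|chunk| {
    ///         Ok(AudioStream::AudioEvent(
    ///             AudioEvent::builder().audio_chunk(Blob::new(chunk)).build(),
    ///         ))
    ///     })
    ///     .collect();
    ///
    /// // `EventStreamSender` can be created from a `Stream` of event results.
    /// let audio = futures_util::stream::iter(events).into();
    /// let builder = client.start_medical_stream_transcription().audio_stream(audio);
    /// ```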
    pub fn audio_stream(
        mut self,
        input: ::aws_smithy_http::event_stream::EventStreamSender<crate::types::AudioStream, crate::types::error::AudioStreamError>,
    ) -> Self {
        self.inner = self.inner.audio_stream(input);
        self
    }
    /// <p>An encoded stream of audio blobs. Audio streams are encoded as either HTTP/2 or WebSocket data frames.</p>
    /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/streaming.html">Transcribing streaming audio</a>.</p>
    pub fn set_audio_stream(
        mut self,
        input: ::std::option::Option<
            ::aws_smithy_http::event_stream::EventStreamSender<crate::types::AudioStream, crate::types::error::AudioStreamError>,
        >,
    ) -> Self {
        self.inner = self.inner.set_audio_stream(input);
        self
    }
    /// <p>An encoded stream of audio blobs. Audio streams are encoded as either HTTP/2 or WebSocket data frames.</p>
    /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/streaming.html">Transcribing streaming audio</a>.</p>
    pub fn get_audio_stream(
        &self,
    ) -> &::std::option::Option<::aws_smithy_http::event_stream::EventStreamSender<crate::types::AudioStream, crate::types::error::AudioStreamError>>
    {
        self.inner.get_audio_stream()
    }
    /// <p>Enables channel identification in multi-channel audio.</p>
    /// <p>Channel identification transcribes the audio on each channel independently, then appends the output for each channel into one transcript.</p>
    /// <p>If you have multi-channel audio and do not enable channel identification, your audio is transcribed in a continuous manner and your transcript is not separated by channel.</p>
    /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/channel-id.html">Transcribing multi-channel audio</a>.</p>
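    ///
    /// # Examples
    ///
    /// An illustrative sketch (not part of the generated code): pairing channel
    /// identification with the channel count for two-channel audio.
    ///
    /// ```no_run
    /// # fn example(
    /// #     builder: aws_sdk_transcribestreaming::operation::start_medical_stream_transcription::builders::StartMedicalStreamTranscriptionFluentBuilder,
    /// # ) {
    /// // Transcribe each channel independently and merge the results into one transcript.
    /// let builder = builder.enable_channel_identification(true).number_of_channels(2);
    /// # let _ = builder;
    /// # }
    /// ```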
    pub fn enable_channel_identification(mut self, input: bool) -> Self {
        self.inner = self.inner.enable_channel_identification(input);
        self
    }
    /// <p>Enables channel identification in multi-channel audio.</p>
    /// <p>Channel identification transcribes the audio on each channel independently, then appends the output for each channel into one transcript.</p>
    /// <p>If you have multi-channel audio and do not enable channel identification, your audio is transcribed in a continuous manner and your transcript is not separated by channel.</p>
    /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/channel-id.html">Transcribing multi-channel audio</a>.</p>
    pub fn set_enable_channel_identification(mut self, input: ::std::option::Option<bool>) -> Self {
        self.inner = self.inner.set_enable_channel_identification(input);
        self
    }
    /// <p>Enables channel identification in multi-channel audio.</p>
    /// <p>Channel identification transcribes the audio on each channel independently, then appends the output for each channel into one transcript.</p>
    /// <p>If you have multi-channel audio and do not enable channel identification, your audio is transcribed in a continuous manner and your transcript is not separated by channel.</p>
    /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/channel-id.html">Transcribing multi-channel audio</a>.</p>
    pub fn get_enable_channel_identification(&self) -> &::std::option::Option<bool> {
        self.inner.get_enable_channel_identification()
    }
    /// <p>Specify the number of channels in your audio stream. Up to two channels are supported.</p>
    pub fn number_of_channels(mut self, input: i32) -> Self {
        self.inner = self.inner.number_of_channels(input);
        self
    }
    /// <p>Specify the number of channels in your audio stream. Up to two channels are supported.</p>
    pub fn set_number_of_channels(mut self, input: ::std::option::Option<i32>) -> Self {
        self.inner = self.inner.set_number_of_channels(input);
        self
    }
    /// <p>Specify the number of channels in your audio stream. Up to two channels are supported.</p>
    pub fn get_number_of_channels(&self) -> &::std::option::Option<i32> {
        self.inner.get_number_of_channels()
    }
    /// <p>Labels all personal health information (PHI) identified in your transcript.</p>
    /// <p>Content identification is performed at the segment level; PHI is flagged upon complete transcription of an audio segment.</p>
    /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/phi-id.html">Identifying personal health information (PHI) in a transcription</a>.</p>
    pub fn content_identification_type(mut self, input: crate::types::MedicalContentIdentificationType) -> Self {
        self.inner = self.inner.content_identification_type(input);
        self
    }
    /// <p>Labels all personal health information (PHI) identified in your transcript.</p>
    /// <p>Content identification is performed at the segment level; PHI is flagged upon complete transcription of an audio segment.</p>
    /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/phi-id.html">Identifying personal health information (PHI) in a transcription</a>.</p>
    pub fn set_content_identification_type(mut self, input: ::std::option::Option<crate::types::MedicalContentIdentificationType>) -> Self {
        self.inner = self.inner.set_content_identification_type(input);
        self
    }
    /// <p>Labels all personal health information (PHI) identified in your transcript.</p>
    /// <p>Content identification is performed at the segment level; PHI is flagged upon complete transcription of an audio segment.</p>
    /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/phi-id.html">Identifying personal health information (PHI) in a transcription</a>.</p>
    pub fn get_content_identification_type(&self) -> &::std::option::Option<crate::types::MedicalContentIdentificationType> {
        self.inner.get_content_identification_type()
    }
}