// aws_sdk_transcribestreaming/operation/start_stream_transcription/builders.rs
1// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
2pub use crate::operation::start_stream_transcription::_start_stream_transcription_output::StartStreamTranscriptionOutputBuilder;
3
4pub use crate::operation::start_stream_transcription::_start_stream_transcription_input::StartStreamTranscriptionInputBuilder;
5
6impl crate::operation::start_stream_transcription::builders::StartStreamTranscriptionInputBuilder {
7 /// Sends a request with this input using the given client.
8 pub async fn send_with(self, client: &crate::Client) -> ::std::result::Result<
9 crate::operation::start_stream_transcription::StartStreamTranscriptionOutput,
10 ::aws_smithy_runtime_api::client::result::SdkError<
11 crate::operation::start_stream_transcription::StartStreamTranscriptionError,
12 ::aws_smithy_runtime_api::client::orchestrator::HttpResponse
13 >
14 > {
15 let mut fluent_builder = client.start_stream_transcription();
16 fluent_builder.inner = self;
17 fluent_builder.send().await
18 }
19 }
20/// Fluent builder constructing a request to `StartStreamTranscription`.
21///
22/// <p>Starts a bidirectional HTTP/2 or WebSocket stream where audio is streamed to Amazon Transcribe and the transcription results are streamed to your application.</p>
23/// <p>The following parameters are required:</p>
24/// <ul>
25/// <li>
26/// <p><code>language-code</code> or <code>identify-language</code> or <code>identify-multiple-language</code></p></li>
27/// <li>
28/// <p><code>media-encoding</code></p></li>
29/// <li>
30/// <p><code>sample-rate</code></p></li>
31/// </ul>
32/// <p>For more information on streaming with Amazon Transcribe, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/streaming.html">Transcribing streaming audio</a>.</p>
33#[derive(::std::fmt::Debug)]
34pub struct StartStreamTranscriptionFluentBuilder {
35 handle: ::std::sync::Arc<crate::client::Handle>,
36 inner: crate::operation::start_stream_transcription::builders::StartStreamTranscriptionInputBuilder,
37config_override: ::std::option::Option<crate::config::Builder>,
38 }
39impl
40 crate::client::customize::internal::CustomizableSend<
41 crate::operation::start_stream_transcription::StartStreamTranscriptionOutput,
42 crate::operation::start_stream_transcription::StartStreamTranscriptionError,
43 > for StartStreamTranscriptionFluentBuilder
44 {
45 fn send(
46 self,
47 config_override: crate::config::Builder,
48 ) -> crate::client::customize::internal::BoxFuture<
49 crate::client::customize::internal::SendResult<
50 crate::operation::start_stream_transcription::StartStreamTranscriptionOutput,
51 crate::operation::start_stream_transcription::StartStreamTranscriptionError,
52 >,
53 > {
54 ::std::boxed::Box::pin(async move { self.config_override(config_override).send().await })
55 }
56 }
57impl StartStreamTranscriptionFluentBuilder {
58 /// Creates a new `StartStreamTranscriptionFluentBuilder`.
59 pub(crate) fn new(handle: ::std::sync::Arc<crate::client::Handle>) -> Self {
60 Self {
61 handle,
62 inner: ::std::default::Default::default(),
63 config_override: ::std::option::Option::None,
64 }
65 }
66 /// Access the StartStreamTranscription as a reference.
67 pub fn as_input(&self) -> &crate::operation::start_stream_transcription::builders::StartStreamTranscriptionInputBuilder {
68 &self.inner
69 }
70 /// Sends the request and returns the response.
71 ///
72 /// If an error occurs, an `SdkError` will be returned with additional details that
73 /// can be matched against.
74 ///
75 /// By default, any retryable failures will be retried twice. Retry behavior
76 /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
77 /// set when configuring the client. Note: retries are enabled by default when using
78 /// `aws_config::load_from_env()` or when using `BehaviorVersion::v2025_01_17()` or later.
79 pub async fn send(self) -> ::std::result::Result<crate::operation::start_stream_transcription::StartStreamTranscriptionOutput, ::aws_smithy_runtime_api::client::result::SdkError<crate::operation::start_stream_transcription::StartStreamTranscriptionError, ::aws_smithy_runtime_api::client::orchestrator::HttpResponse>> {
80 let input = self.inner.build().map_err(::aws_smithy_runtime_api::client::result::SdkError::construction_failure)?;
81 let runtime_plugins = crate::operation::start_stream_transcription::StartStreamTranscription::operation_runtime_plugins(
82 self.handle.runtime_plugins.clone(),
83 &self.handle.conf,
84 self.config_override,
85 );
86 let mut output =
87 crate::operation::start_stream_transcription::StartStreamTranscription::orchestrate(
88 &runtime_plugins,
89 input,
90 )
91 .await?;
92
93 // Converts any error encountered beyond this point into an `SdkError` response error
94 // with an `HttpResponse`. However, since we have already exited the `orchestrate`
95 // function, the original `HttpResponse` is no longer available and cannot be restored.
96 // This means that header information from the original response has been lost.
97 //
98 // Note that the response body would have been consumed by the deserializer
99 // regardless, even if the initial message was hypothetically processed during
100 // the orchestrator's deserialization phase but later resulted in an error.
101 fn response_error(
102 err: impl ::std::convert::Into<::aws_smithy_runtime_api::box_error::BoxError>
103 ) -> ::aws_smithy_runtime_api::client::result::SdkError<crate::operation::start_stream_transcription::StartStreamTranscriptionError, ::aws_smithy_runtime_api::client::orchestrator::HttpResponse> {
104 ::aws_smithy_runtime_api::client::result::SdkError::response_error(err, ::aws_smithy_runtime_api::client::orchestrator::HttpResponse::new(
105 ::aws_smithy_runtime_api::http::StatusCode::try_from(200).expect("valid successful code"),
106 ::aws_smithy_types::body::SdkBody::empty()))
107 }
108
109 let message = output.transcript_result_stream.try_recv_initial_response().await.map_err(response_error)?;
110
111 match message {
112 ::std::option::Option::Some(_message) => {
113
114 ::std::result::Result::Ok(output)
115 }
116 ::std::option::Option::None => ::std::result::Result::Ok(output),
117 }
118 }
119
120 /// Consumes this builder, creating a customizable operation that can be modified before being sent.
121 pub fn customize(
122 self,
123 ) -> crate::client::customize::CustomizableOperation<crate::operation::start_stream_transcription::StartStreamTranscriptionOutput, crate::operation::start_stream_transcription::StartStreamTranscriptionError, Self> {
124 crate::client::customize::CustomizableOperation::new(self)
125 }
126 pub(crate) fn config_override(
127 mut self,
128 config_override: impl ::std::convert::Into<crate::config::Builder>,
129 ) -> Self {
130 self.set_config_override(::std::option::Option::Some(config_override.into()));
131 self
132 }
133
134 pub(crate) fn set_config_override(
135 &mut self,
136 config_override: ::std::option::Option<crate::config::Builder>,
137 ) -> &mut Self {
138 self.config_override = config_override;
139 self
140 }
141 /// <p>Specify the language code that represents the language spoken in your audio.</p>
142 /// <p>If you're unsure of the language spoken in your audio, consider using <code>IdentifyLanguage</code> to enable automatic language identification.</p>
143 /// <p>For a list of languages supported with Amazon Transcribe streaming, refer to the <a href="https://docs.aws.amazon.com/transcribe/latest/dg/supported-languages.html">Supported languages</a> table.</p>
144 pub fn language_code(mut self, input: crate::types::LanguageCode) -> Self {
145 self.inner = self.inner.language_code(input);
146 self
147 }
148 /// <p>Specify the language code that represents the language spoken in your audio.</p>
149 /// <p>If you're unsure of the language spoken in your audio, consider using <code>IdentifyLanguage</code> to enable automatic language identification.</p>
150 /// <p>For a list of languages supported with Amazon Transcribe streaming, refer to the <a href="https://docs.aws.amazon.com/transcribe/latest/dg/supported-languages.html">Supported languages</a> table.</p>
151 pub fn set_language_code(mut self, input: ::std::option::Option<crate::types::LanguageCode>) -> Self {
152 self.inner = self.inner.set_language_code(input);
153 self
154 }
155 /// <p>Specify the language code that represents the language spoken in your audio.</p>
156 /// <p>If you're unsure of the language spoken in your audio, consider using <code>IdentifyLanguage</code> to enable automatic language identification.</p>
157 /// <p>For a list of languages supported with Amazon Transcribe streaming, refer to the <a href="https://docs.aws.amazon.com/transcribe/latest/dg/supported-languages.html">Supported languages</a> table.</p>
158 pub fn get_language_code(&self) -> &::std::option::Option<crate::types::LanguageCode> {
159 self.inner.get_language_code()
160 }
161 /// <p>The sample rate of the input audio (in hertz). Low-quality audio, such as telephone audio, is typically around 8,000 Hz. High-quality audio typically ranges from 16,000 Hz to 48,000 Hz. Note that the sample rate you specify must match that of your audio.</p>
162 pub fn media_sample_rate_hertz(mut self, input: i32) -> Self {
163 self.inner = self.inner.media_sample_rate_hertz(input);
164 self
165 }
166 /// <p>The sample rate of the input audio (in hertz). Low-quality audio, such as telephone audio, is typically around 8,000 Hz. High-quality audio typically ranges from 16,000 Hz to 48,000 Hz. Note that the sample rate you specify must match that of your audio.</p>
167 pub fn set_media_sample_rate_hertz(mut self, input: ::std::option::Option<i32>) -> Self {
168 self.inner = self.inner.set_media_sample_rate_hertz(input);
169 self
170 }
171 /// <p>The sample rate of the input audio (in hertz). Low-quality audio, such as telephone audio, is typically around 8,000 Hz. High-quality audio typically ranges from 16,000 Hz to 48,000 Hz. Note that the sample rate you specify must match that of your audio.</p>
172 pub fn get_media_sample_rate_hertz(&self) -> &::std::option::Option<i32> {
173 self.inner.get_media_sample_rate_hertz()
174 }
175 /// <p>Specify the encoding of your input audio. Supported formats are:</p>
176 /// <ul>
177 /// <li>
178 /// <p>FLAC</p></li>
179 /// <li>
180 /// <p>OPUS-encoded audio in an Ogg container</p></li>
181 /// <li>
182 /// <p>PCM (only signed 16-bit little-endian audio formats, which does not include WAV)</p></li>
183 /// </ul>
184 /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/how-input.html#how-input-audio">Media formats</a>.</p>
185 pub fn media_encoding(mut self, input: crate::types::MediaEncoding) -> Self {
186 self.inner = self.inner.media_encoding(input);
187 self
188 }
189 /// <p>Specify the encoding of your input audio. Supported formats are:</p>
190 /// <ul>
191 /// <li>
192 /// <p>FLAC</p></li>
193 /// <li>
194 /// <p>OPUS-encoded audio in an Ogg container</p></li>
195 /// <li>
196 /// <p>PCM (only signed 16-bit little-endian audio formats, which does not include WAV)</p></li>
197 /// </ul>
198 /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/how-input.html#how-input-audio">Media formats</a>.</p>
199 pub fn set_media_encoding(mut self, input: ::std::option::Option<crate::types::MediaEncoding>) -> Self {
200 self.inner = self.inner.set_media_encoding(input);
201 self
202 }
203 /// <p>Specify the encoding of your input audio. Supported formats are:</p>
204 /// <ul>
205 /// <li>
206 /// <p>FLAC</p></li>
207 /// <li>
208 /// <p>OPUS-encoded audio in an Ogg container</p></li>
209 /// <li>
210 /// <p>PCM (only signed 16-bit little-endian audio formats, which does not include WAV)</p></li>
211 /// </ul>
212 /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/how-input.html#how-input-audio">Media formats</a>.</p>
213 pub fn get_media_encoding(&self) -> &::std::option::Option<crate::types::MediaEncoding> {
214 self.inner.get_media_encoding()
215 }
216 /// <p>Specify the name of the custom vocabulary that you want to use when processing your transcription. Note that vocabulary names are case sensitive.</p>
217 /// <p>If the language of the specified custom vocabulary doesn't match the language identified in your media, the custom vocabulary is not applied to your transcription.</p><important>
218 /// <p>This parameter is <b>not</b> intended for use with the <code>IdentifyLanguage</code> parameter. If you're including <code>IdentifyLanguage</code> in your request and want to use one or more custom vocabularies with your transcription, use the <code>VocabularyNames</code> parameter instead.</p>
219 /// </important>
220 /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/custom-vocabulary.html">Custom vocabularies</a>.</p>
221 pub fn vocabulary_name(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
222 self.inner = self.inner.vocabulary_name(input.into());
223 self
224 }
225 /// <p>Specify the name of the custom vocabulary that you want to use when processing your transcription. Note that vocabulary names are case sensitive.</p>
226 /// <p>If the language of the specified custom vocabulary doesn't match the language identified in your media, the custom vocabulary is not applied to your transcription.</p><important>
227 /// <p>This parameter is <b>not</b> intended for use with the <code>IdentifyLanguage</code> parameter. If you're including <code>IdentifyLanguage</code> in your request and want to use one or more custom vocabularies with your transcription, use the <code>VocabularyNames</code> parameter instead.</p>
228 /// </important>
229 /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/custom-vocabulary.html">Custom vocabularies</a>.</p>
230 pub fn set_vocabulary_name(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
231 self.inner = self.inner.set_vocabulary_name(input);
232 self
233 }
234 /// <p>Specify the name of the custom vocabulary that you want to use when processing your transcription. Note that vocabulary names are case sensitive.</p>
235 /// <p>If the language of the specified custom vocabulary doesn't match the language identified in your media, the custom vocabulary is not applied to your transcription.</p><important>
236 /// <p>This parameter is <b>not</b> intended for use with the <code>IdentifyLanguage</code> parameter. If you're including <code>IdentifyLanguage</code> in your request and want to use one or more custom vocabularies with your transcription, use the <code>VocabularyNames</code> parameter instead.</p>
237 /// </important>
238 /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/custom-vocabulary.html">Custom vocabularies</a>.</p>
239 pub fn get_vocabulary_name(&self) -> &::std::option::Option<::std::string::String> {
240 self.inner.get_vocabulary_name()
241 }
242 /// <p>Specify a name for your transcription session. If you don't include this parameter in your request, Amazon Transcribe generates an ID and returns it in the response.</p>
243 pub fn session_id(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
244 self.inner = self.inner.session_id(input.into());
245 self
246 }
247 /// <p>Specify a name for your transcription session. If you don't include this parameter in your request, Amazon Transcribe generates an ID and returns it in the response.</p>
248 pub fn set_session_id(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
249 self.inner = self.inner.set_session_id(input);
250 self
251 }
252 /// <p>Specify a name for your transcription session. If you don't include this parameter in your request, Amazon Transcribe generates an ID and returns it in the response.</p>
253 pub fn get_session_id(&self) -> &::std::option::Option<::std::string::String> {
254 self.inner.get_session_id()
255 }
256 /// <p>An encoded stream of audio blobs. Audio streams are encoded as either HTTP/2 or WebSocket data frames.</p>
257 /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/streaming.html">Transcribing streaming audio</a>.</p>
258 pub fn audio_stream(mut self, input: ::aws_smithy_http::event_stream::EventStreamSender<crate::types::AudioStream, crate::types::error::AudioStreamError>) -> Self {
259 self.inner = self.inner.audio_stream(input);
260 self
261 }
262 /// <p>An encoded stream of audio blobs. Audio streams are encoded as either HTTP/2 or WebSocket data frames.</p>
263 /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/streaming.html">Transcribing streaming audio</a>.</p>
264 pub fn set_audio_stream(mut self, input: ::std::option::Option<::aws_smithy_http::event_stream::EventStreamSender<crate::types::AudioStream, crate::types::error::AudioStreamError>>) -> Self {
265 self.inner = self.inner.set_audio_stream(input);
266 self
267 }
268 /// <p>An encoded stream of audio blobs. Audio streams are encoded as either HTTP/2 or WebSocket data frames.</p>
269 /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/streaming.html">Transcribing streaming audio</a>.</p>
270 pub fn get_audio_stream(&self) -> &::std::option::Option<::aws_smithy_http::event_stream::EventStreamSender<crate::types::AudioStream, crate::types::error::AudioStreamError>> {
271 self.inner.get_audio_stream()
272 }
273 /// <p>Specify the name of the custom vocabulary filter that you want to use when processing your transcription. Note that vocabulary filter names are case sensitive.</p>
274 /// <p>If the language of the specified custom vocabulary filter doesn't match the language identified in your media, the vocabulary filter is not applied to your transcription.</p><important>
275 /// <p>This parameter is <b>not</b> intended for use with the <code>IdentifyLanguage</code> parameter. If you're including <code>IdentifyLanguage</code> in your request and want to use one or more vocabulary filters with your transcription, use the <code>VocabularyFilterNames</code> parameter instead.</p>
276 /// </important>
277 /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/vocabulary-filtering.html">Using vocabulary filtering with unwanted words</a>.</p>
278 pub fn vocabulary_filter_name(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
279 self.inner = self.inner.vocabulary_filter_name(input.into());
280 self
281 }
282 /// <p>Specify the name of the custom vocabulary filter that you want to use when processing your transcription. Note that vocabulary filter names are case sensitive.</p>
283 /// <p>If the language of the specified custom vocabulary filter doesn't match the language identified in your media, the vocabulary filter is not applied to your transcription.</p><important>
284 /// <p>This parameter is <b>not</b> intended for use with the <code>IdentifyLanguage</code> parameter. If you're including <code>IdentifyLanguage</code> in your request and want to use one or more vocabulary filters with your transcription, use the <code>VocabularyFilterNames</code> parameter instead.</p>
285 /// </important>
286 /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/vocabulary-filtering.html">Using vocabulary filtering with unwanted words</a>.</p>
287 pub fn set_vocabulary_filter_name(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
288 self.inner = self.inner.set_vocabulary_filter_name(input);
289 self
290 }
291 /// <p>Specify the name of the custom vocabulary filter that you want to use when processing your transcription. Note that vocabulary filter names are case sensitive.</p>
292 /// <p>If the language of the specified custom vocabulary filter doesn't match the language identified in your media, the vocabulary filter is not applied to your transcription.</p><important>
293 /// <p>This parameter is <b>not</b> intended for use with the <code>IdentifyLanguage</code> parameter. If you're including <code>IdentifyLanguage</code> in your request and want to use one or more vocabulary filters with your transcription, use the <code>VocabularyFilterNames</code> parameter instead.</p>
294 /// </important>
295 /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/vocabulary-filtering.html">Using vocabulary filtering with unwanted words</a>.</p>
296 pub fn get_vocabulary_filter_name(&self) -> &::std::option::Option<::std::string::String> {
297 self.inner.get_vocabulary_filter_name()
298 }
299 /// <p>Specify how you want your vocabulary filter applied to your transcript.</p>
300 /// <p>To replace words with <code>***</code>, choose <code>mask</code>.</p>
301 /// <p>To delete words, choose <code>remove</code>.</p>
302 /// <p>To flag words without changing them, choose <code>tag</code>.</p>
303 pub fn vocabulary_filter_method(mut self, input: crate::types::VocabularyFilterMethod) -> Self {
304 self.inner = self.inner.vocabulary_filter_method(input);
305 self
306 }
307 /// <p>Specify how you want your vocabulary filter applied to your transcript.</p>
308 /// <p>To replace words with <code>***</code>, choose <code>mask</code>.</p>
309 /// <p>To delete words, choose <code>remove</code>.</p>
310 /// <p>To flag words without changing them, choose <code>tag</code>.</p>
311 pub fn set_vocabulary_filter_method(mut self, input: ::std::option::Option<crate::types::VocabularyFilterMethod>) -> Self {
312 self.inner = self.inner.set_vocabulary_filter_method(input);
313 self
314 }
315 /// <p>Specify how you want your vocabulary filter applied to your transcript.</p>
316 /// <p>To replace words with <code>***</code>, choose <code>mask</code>.</p>
317 /// <p>To delete words, choose <code>remove</code>.</p>
318 /// <p>To flag words without changing them, choose <code>tag</code>.</p>
319 pub fn get_vocabulary_filter_method(&self) -> &::std::option::Option<crate::types::VocabularyFilterMethod> {
320 self.inner.get_vocabulary_filter_method()
321 }
322 /// <p>Enables speaker partitioning (diarization) in your transcription output. Speaker partitioning labels the speech from individual speakers in your media file.</p>
323 /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/diarization.html">Partitioning speakers (diarization)</a>.</p>
324 pub fn show_speaker_label(mut self, input: bool) -> Self {
325 self.inner = self.inner.show_speaker_label(input);
326 self
327 }
328 /// <p>Enables speaker partitioning (diarization) in your transcription output. Speaker partitioning labels the speech from individual speakers in your media file.</p>
329 /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/diarization.html">Partitioning speakers (diarization)</a>.</p>
330 pub fn set_show_speaker_label(mut self, input: ::std::option::Option<bool>) -> Self {
331 self.inner = self.inner.set_show_speaker_label(input);
332 self
333 }
334 /// <p>Enables speaker partitioning (diarization) in your transcription output. Speaker partitioning labels the speech from individual speakers in your media file.</p>
335 /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/diarization.html">Partitioning speakers (diarization)</a>.</p>
336 pub fn get_show_speaker_label(&self) -> &::std::option::Option<bool> {
337 self.inner.get_show_speaker_label()
338 }
339 /// <p>Enables channel identification in multi-channel audio.</p>
340 /// <p>Channel identification transcribes the audio on each channel independently, then appends the output for each channel into one transcript.</p>
341 /// <p>If you have multi-channel audio and do not enable channel identification, your audio is transcribed in a continuous manner and your transcript is not separated by channel.</p>
342 /// <p>If you include <code>EnableChannelIdentification</code> in your request, you must also include <code>NumberOfChannels</code>.</p>
343 /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/channel-id.html">Transcribing multi-channel audio</a>.</p>
344 pub fn enable_channel_identification(mut self, input: bool) -> Self {
345 self.inner = self.inner.enable_channel_identification(input);
346 self
347 }
348 /// <p>Enables channel identification in multi-channel audio.</p>
349 /// <p>Channel identification transcribes the audio on each channel independently, then appends the output for each channel into one transcript.</p>
350 /// <p>If you have multi-channel audio and do not enable channel identification, your audio is transcribed in a continuous manner and your transcript is not separated by channel.</p>
351 /// <p>If you include <code>EnableChannelIdentification</code> in your request, you must also include <code>NumberOfChannels</code>.</p>
352 /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/channel-id.html">Transcribing multi-channel audio</a>.</p>
353 pub fn set_enable_channel_identification(mut self, input: ::std::option::Option<bool>) -> Self {
354 self.inner = self.inner.set_enable_channel_identification(input);
355 self
356 }
357 /// <p>Enables channel identification in multi-channel audio.</p>
358 /// <p>Channel identification transcribes the audio on each channel independently, then appends the output for each channel into one transcript.</p>
359 /// <p>If you have multi-channel audio and do not enable channel identification, your audio is transcribed in a continuous manner and your transcript is not separated by channel.</p>
360 /// <p>If you include <code>EnableChannelIdentification</code> in your request, you must also include <code>NumberOfChannels</code>.</p>
361 /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/channel-id.html">Transcribing multi-channel audio</a>.</p>
362 pub fn get_enable_channel_identification(&self) -> &::std::option::Option<bool> {
363 self.inner.get_enable_channel_identification()
364 }
365 /// <p>Specify the number of channels in your audio stream. This value must be <code>2</code>, as only two channels are supported. If your audio doesn't contain multiple channels, do not include this parameter in your request.</p>
366 /// <p>If you include <code>NumberOfChannels</code> in your request, you must also include <code>EnableChannelIdentification</code>.</p>
367 pub fn number_of_channels(mut self, input: i32) -> Self {
368 self.inner = self.inner.number_of_channels(input);
369 self
370 }
371 /// <p>Specify the number of channels in your audio stream. This value must be <code>2</code>, as only two channels are supported. If your audio doesn't contain multiple channels, do not include this parameter in your request.</p>
372 /// <p>If you include <code>NumberOfChannels</code> in your request, you must also include <code>EnableChannelIdentification</code>.</p>
373 pub fn set_number_of_channels(mut self, input: ::std::option::Option<i32>) -> Self {
374 self.inner = self.inner.set_number_of_channels(input);
375 self
376 }
377 /// <p>Specify the number of channels in your audio stream. This value must be <code>2</code>, as only two channels are supported. If your audio doesn't contain multiple channels, do not include this parameter in your request.</p>
378 /// <p>If you include <code>NumberOfChannels</code> in your request, you must also include <code>EnableChannelIdentification</code>.</p>
379 pub fn get_number_of_channels(&self) -> &::std::option::Option<i32> {
380 self.inner.get_number_of_channels()
381 }
382 /// <p>Enables partial result stabilization for your transcription. Partial result stabilization can reduce latency in your output, but may impact accuracy. For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/streaming.html#streaming-partial-result-stabilization">Partial-result stabilization</a>.</p>
383 pub fn enable_partial_results_stabilization(mut self, input: bool) -> Self {
384 self.inner = self.inner.enable_partial_results_stabilization(input);
385 self
386 }
387 /// <p>Enables partial result stabilization for your transcription. Partial result stabilization can reduce latency in your output, but may impact accuracy. For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/streaming.html#streaming-partial-result-stabilization">Partial-result stabilization</a>.</p>
388 pub fn set_enable_partial_results_stabilization(mut self, input: ::std::option::Option<bool>) -> Self {
389 self.inner = self.inner.set_enable_partial_results_stabilization(input);
390 self
391 }
392 /// <p>Enables partial result stabilization for your transcription. Partial result stabilization can reduce latency in your output, but may impact accuracy. For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/streaming.html#streaming-partial-result-stabilization">Partial-result stabilization</a>.</p>
393 pub fn get_enable_partial_results_stabilization(&self) -> &::std::option::Option<bool> {
394 self.inner.get_enable_partial_results_stabilization()
395 }
396 /// <p>Specify the level of stability to use when you enable partial results stabilization (<code>EnablePartialResultsStabilization</code>).</p>
397 /// <p>Low stability provides the highest accuracy. High stability transcribes faster, but with slightly lower accuracy.</p>
398 /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/streaming.html#streaming-partial-result-stabilization">Partial-result stabilization</a>.</p>
399 pub fn partial_results_stability(mut self, input: crate::types::PartialResultsStability) -> Self {
400 self.inner = self.inner.partial_results_stability(input);
401 self
402 }
403 /// <p>Specify the level of stability to use when you enable partial results stabilization (<code>EnablePartialResultsStabilization</code>).</p>
404 /// <p>Low stability provides the highest accuracy. High stability transcribes faster, but with slightly lower accuracy.</p>
405 /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/streaming.html#streaming-partial-result-stabilization">Partial-result stabilization</a>.</p>
406 pub fn set_partial_results_stability(mut self, input: ::std::option::Option<crate::types::PartialResultsStability>) -> Self {
407 self.inner = self.inner.set_partial_results_stability(input);
408 self
409 }
410 /// <p>Specify the level of stability to use when you enable partial results stabilization (<code>EnablePartialResultsStabilization</code>).</p>
411 /// <p>Low stability provides the highest accuracy. High stability transcribes faster, but with slightly lower accuracy.</p>
412 /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/streaming.html#streaming-partial-result-stabilization">Partial-result stabilization</a>.</p>
413 pub fn get_partial_results_stability(&self) -> &::std::option::Option<crate::types::PartialResultsStability> {
414 self.inner.get_partial_results_stability()
415 }
416 /// <p>Labels all personally identifiable information (PII) identified in your transcript.</p>
417 /// <p>Content identification is performed at the segment level; PII specified in <code>PiiEntityTypes</code> is flagged upon complete transcription of an audio segment. If you don't include <code>PiiEntityTypes</code> in your request, all PII is identified.</p>
418 /// <p>You can’t set <code>ContentIdentificationType</code> and <code>ContentRedactionType</code> in the same request. If you set both, your request returns a <code>BadRequestException</code>.</p>
419 /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/pii-redaction.html">Redacting or identifying personally identifiable information</a>.</p>
420 pub fn content_identification_type(mut self, input: crate::types::ContentIdentificationType) -> Self {
421 self.inner = self.inner.content_identification_type(input);
422 self
423 }
424 /// <p>Labels all personally identifiable information (PII) identified in your transcript.</p>
425 /// <p>Content identification is performed at the segment level; PII specified in <code>PiiEntityTypes</code> is flagged upon complete transcription of an audio segment. If you don't include <code>PiiEntityTypes</code> in your request, all PII is identified.</p>
426 /// <p>You can’t set <code>ContentIdentificationType</code> and <code>ContentRedactionType</code> in the same request. If you set both, your request returns a <code>BadRequestException</code>.</p>
427 /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/pii-redaction.html">Redacting or identifying personally identifiable information</a>.</p>
428 pub fn set_content_identification_type(mut self, input: ::std::option::Option<crate::types::ContentIdentificationType>) -> Self {
429 self.inner = self.inner.set_content_identification_type(input);
430 self
431 }
    /// <p>Returns the content-identification setting currently staged on this builder, if one has been set.</p>
    /// <p>Content identification is performed at the segment level; PII specified in <code>PiiEntityTypes</code> is flagged upon complete transcription of an audio segment. If you don't include <code>PiiEntityTypes</code> in your request, all PII is identified.</p>
    /// <p>You can’t set <code>ContentIdentificationType</code> and <code>ContentRedactionType</code> in the same request. If you set both, your request returns a <code>BadRequestException</code>.</p>
    /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/pii-redaction.html">Redacting or identifying personally identifiable information</a>.</p>
    pub fn get_content_identification_type(&self) -> &::std::option::Option<crate::types::ContentIdentificationType> {
        // Reads the value held by the wrapped input builder; does not send a request.
        self.inner.get_content_identification_type()
    }
439 /// <p>Redacts all personally identifiable information (PII) identified in your transcript.</p>
440 /// <p>Content redaction is performed at the segment level; PII specified in <code>PiiEntityTypes</code> is redacted upon complete transcription of an audio segment. If you don't include <code>PiiEntityTypes</code> in your request, all PII is redacted.</p>
441 /// <p>You can’t set <code>ContentRedactionType</code> and <code>ContentIdentificationType</code> in the same request. If you set both, your request returns a <code>BadRequestException</code>.</p>
442 /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/pii-redaction.html">Redacting or identifying personally identifiable information</a>.</p>
443 pub fn content_redaction_type(mut self, input: crate::types::ContentRedactionType) -> Self {
444 self.inner = self.inner.content_redaction_type(input);
445 self
446 }
447 /// <p>Redacts all personally identifiable information (PII) identified in your transcript.</p>
448 /// <p>Content redaction is performed at the segment level; PII specified in <code>PiiEntityTypes</code> is redacted upon complete transcription of an audio segment. If you don't include <code>PiiEntityTypes</code> in your request, all PII is redacted.</p>
449 /// <p>You can’t set <code>ContentRedactionType</code> and <code>ContentIdentificationType</code> in the same request. If you set both, your request returns a <code>BadRequestException</code>.</p>
450 /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/pii-redaction.html">Redacting or identifying personally identifiable information</a>.</p>
451 pub fn set_content_redaction_type(mut self, input: ::std::option::Option<crate::types::ContentRedactionType>) -> Self {
452 self.inner = self.inner.set_content_redaction_type(input);
453 self
454 }
    /// <p>Returns the content-redaction setting currently staged on this builder, if one has been set.</p>
    /// <p>Content redaction is performed at the segment level; PII specified in <code>PiiEntityTypes</code> is redacted upon complete transcription of an audio segment. If you don't include <code>PiiEntityTypes</code> in your request, all PII is redacted.</p>
    /// <p>You can’t set <code>ContentRedactionType</code> and <code>ContentIdentificationType</code> in the same request. If you set both, your request returns a <code>BadRequestException</code>.</p>
    /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/pii-redaction.html">Redacting or identifying personally identifiable information</a>.</p>
    pub fn get_content_redaction_type(&self) -> &::std::option::Option<crate::types::ContentRedactionType> {
        // Reads the value held by the wrapped input builder; does not send a request.
        self.inner.get_content_redaction_type()
    }
462 /// <p>Specify which types of personally identifiable information (PII) you want to redact in your transcript. You can include as many types as you'd like, or you can select <code>ALL</code>.</p>
463 /// <p>Values must be comma-separated and can include: <code>ADDRESS</code>, <code>BANK_ACCOUNT_NUMBER</code>, <code>BANK_ROUTING</code>, <code>CREDIT_DEBIT_CVV</code>, <code>CREDIT_DEBIT_EXPIRY</code>, <code>CREDIT_DEBIT_NUMBER</code>, <code>EMAIL</code>, <code>NAME</code>, <code>PHONE</code>, <code>PIN</code>, <code>SSN</code>, or <code>ALL</code>.</p>
464 /// <p>Note that if you include <code>PiiEntityTypes</code> in your request, you must also include <code>ContentIdentificationType</code> or <code>ContentRedactionType</code>.</p>
465 /// <p>If you include <code>ContentRedactionType</code> or <code>ContentIdentificationType</code> in your request, but do not include <code>PiiEntityTypes</code>, all PII is redacted or identified.</p>
466 pub fn pii_entity_types(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
467 self.inner = self.inner.pii_entity_types(input.into());
468 self
469 }
470 /// <p>Specify which types of personally identifiable information (PII) you want to redact in your transcript. You can include as many types as you'd like, or you can select <code>ALL</code>.</p>
471 /// <p>Values must be comma-separated and can include: <code>ADDRESS</code>, <code>BANK_ACCOUNT_NUMBER</code>, <code>BANK_ROUTING</code>, <code>CREDIT_DEBIT_CVV</code>, <code>CREDIT_DEBIT_EXPIRY</code>, <code>CREDIT_DEBIT_NUMBER</code>, <code>EMAIL</code>, <code>NAME</code>, <code>PHONE</code>, <code>PIN</code>, <code>SSN</code>, or <code>ALL</code>.</p>
472 /// <p>Note that if you include <code>PiiEntityTypes</code> in your request, you must also include <code>ContentIdentificationType</code> or <code>ContentRedactionType</code>.</p>
473 /// <p>If you include <code>ContentRedactionType</code> or <code>ContentIdentificationType</code> in your request, but do not include <code>PiiEntityTypes</code>, all PII is redacted or identified.</p>
474 pub fn set_pii_entity_types(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
475 self.inner = self.inner.set_pii_entity_types(input);
476 self
477 }
    /// <p>Returns the PII entity types currently staged on this builder, if any have been set.</p>
    /// <p>Values must be comma-separated and can include: <code>ADDRESS</code>, <code>BANK_ACCOUNT_NUMBER</code>, <code>BANK_ROUTING</code>, <code>CREDIT_DEBIT_CVV</code>, <code>CREDIT_DEBIT_EXPIRY</code>, <code>CREDIT_DEBIT_NUMBER</code>, <code>EMAIL</code>, <code>NAME</code>, <code>PHONE</code>, <code>PIN</code>, <code>SSN</code>, or <code>ALL</code>.</p>
    /// <p>Note that if you include <code>PiiEntityTypes</code> in your request, you must also include <code>ContentIdentificationType</code> or <code>ContentRedactionType</code>.</p>
    /// <p>If you include <code>ContentRedactionType</code> or <code>ContentIdentificationType</code> in your request, but do not include <code>PiiEntityTypes</code>, all PII is redacted or identified.</p>
    pub fn get_pii_entity_types(&self) -> &::std::option::Option<::std::string::String> {
        // Reads the value held by the wrapped input builder; does not send a request.
        self.inner.get_pii_entity_types()
    }
485 /// <p>Specify the name of the custom language model that you want to use when processing your transcription. Note that language model names are case sensitive.</p>
486 /// <p>The language of the specified language model must match the language code you specify in your transcription request. If the languages don't match, the custom language model isn't applied. There are no errors or warnings associated with a language mismatch.</p>
487 /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/custom-language-models.html">Custom language models</a>.</p>
488 pub fn language_model_name(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
489 self.inner = self.inner.language_model_name(input.into());
490 self
491 }
492 /// <p>Specify the name of the custom language model that you want to use when processing your transcription. Note that language model names are case sensitive.</p>
493 /// <p>The language of the specified language model must match the language code you specify in your transcription request. If the languages don't match, the custom language model isn't applied. There are no errors or warnings associated with a language mismatch.</p>
494 /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/custom-language-models.html">Custom language models</a>.</p>
495 pub fn set_language_model_name(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
496 self.inner = self.inner.set_language_model_name(input);
497 self
498 }
    /// <p>Returns the custom language model name currently staged on this builder, if one has been set. Note that language model names are case sensitive.</p>
    /// <p>The language of the specified language model must match the language code you specify in your transcription request. If the languages don't match, the custom language model isn't applied. There are no errors or warnings associated with a language mismatch.</p>
    /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/custom-language-models.html">Custom language models</a>.</p>
    pub fn get_language_model_name(&self) -> &::std::option::Option<::std::string::String> {
        // Reads the value held by the wrapped input builder; does not send a request.
        self.inner.get_language_model_name()
    }
505 /// <p>Enables automatic language identification for your transcription.</p>
506 /// <p>If you include <code>IdentifyLanguage</code>, you must include a list of language codes, using <code>LanguageOptions</code>, that you think may be present in your audio stream.</p>
507 /// <p>You can also include a preferred language using <code>PreferredLanguage</code>. Adding a preferred language can help Amazon Transcribe identify the language faster than if you omit this parameter.</p>
508 /// <p>If you have multi-channel audio that contains different languages on each channel, and you've enabled channel identification, automatic language identification identifies the dominant language on each audio channel.</p>
509 /// <p>Note that you must include either <code>LanguageCode</code> or <code>IdentifyLanguage</code> or <code>IdentifyMultipleLanguages</code> in your request. If you include more than one of these parameters, your transcription job fails.</p>
510 /// <p>Streaming language identification can't be combined with custom language models or redaction.</p>
511 pub fn identify_language(mut self, input: bool) -> Self {
512 self.inner = self.inner.identify_language(input);
513 self
514 }
515 /// <p>Enables automatic language identification for your transcription.</p>
516 /// <p>If you include <code>IdentifyLanguage</code>, you must include a list of language codes, using <code>LanguageOptions</code>, that you think may be present in your audio stream.</p>
517 /// <p>You can also include a preferred language using <code>PreferredLanguage</code>. Adding a preferred language can help Amazon Transcribe identify the language faster than if you omit this parameter.</p>
518 /// <p>If you have multi-channel audio that contains different languages on each channel, and you've enabled channel identification, automatic language identification identifies the dominant language on each audio channel.</p>
519 /// <p>Note that you must include either <code>LanguageCode</code> or <code>IdentifyLanguage</code> or <code>IdentifyMultipleLanguages</code> in your request. If you include more than one of these parameters, your transcription job fails.</p>
520 /// <p>Streaming language identification can't be combined with custom language models or redaction.</p>
521 pub fn set_identify_language(mut self, input: ::std::option::Option<bool>) -> Self {
522 self.inner = self.inner.set_identify_language(input);
523 self
524 }
    /// <p>Returns the automatic language identification flag currently staged on this builder, if one has been set.</p>
    /// <p>If you include <code>IdentifyLanguage</code>, you must include a list of language codes, using <code>LanguageOptions</code>, that you think may be present in your audio stream.</p>
    /// <p>You can also include a preferred language using <code>PreferredLanguage</code>. Adding a preferred language can help Amazon Transcribe identify the language faster than if you omit this parameter.</p>
    /// <p>If you have multi-channel audio that contains different languages on each channel, and you've enabled channel identification, automatic language identification identifies the dominant language on each audio channel.</p>
    /// <p>Note that you must include either <code>LanguageCode</code> or <code>IdentifyLanguage</code> or <code>IdentifyMultipleLanguages</code> in your request. If you include more than one of these parameters, your transcription job fails.</p>
    /// <p>Streaming language identification can't be combined with custom language models or redaction.</p>
    pub fn get_identify_language(&self) -> &::std::option::Option<bool> {
        // Reads the value held by the wrapped input builder; does not send a request.
        self.inner.get_identify_language()
    }
534 /// <p>Specify two or more language codes that represent the languages you think may be present in your media; including more than five is not recommended.</p>
535 /// <p>Including language options can improve the accuracy of language identification.</p>
536 /// <p>If you include <code>LanguageOptions</code> in your request, you must also include <code>IdentifyLanguage</code> or <code>IdentifyMultipleLanguages</code>.</p>
537 /// <p>For a list of languages supported with Amazon Transcribe streaming, refer to the <a href="https://docs.aws.amazon.com/transcribe/latest/dg/supported-languages.html">Supported languages</a> table.</p><important>
538 /// <p>You can only include one language dialect per language per stream. For example, you cannot include <code>en-US</code> and <code>en-AU</code> in the same request.</p>
539 /// </important>
540 pub fn language_options(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
541 self.inner = self.inner.language_options(input.into());
542 self
543 }
544 /// <p>Specify two or more language codes that represent the languages you think may be present in your media; including more than five is not recommended.</p>
545 /// <p>Including language options can improve the accuracy of language identification.</p>
546 /// <p>If you include <code>LanguageOptions</code> in your request, you must also include <code>IdentifyLanguage</code> or <code>IdentifyMultipleLanguages</code>.</p>
547 /// <p>For a list of languages supported with Amazon Transcribe streaming, refer to the <a href="https://docs.aws.amazon.com/transcribe/latest/dg/supported-languages.html">Supported languages</a> table.</p><important>
548 /// <p>You can only include one language dialect per language per stream. For example, you cannot include <code>en-US</code> and <code>en-AU</code> in the same request.</p>
549 /// </important>
550 pub fn set_language_options(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
551 self.inner = self.inner.set_language_options(input);
552 self
553 }
    /// <p>Returns the candidate language codes currently staged on this builder, if any have been set.</p>
    /// <p>Including language options can improve the accuracy of language identification.</p>
    /// <p>If you include <code>LanguageOptions</code> in your request, you must also include <code>IdentifyLanguage</code> or <code>IdentifyMultipleLanguages</code>.</p>
    /// <p>For a list of languages supported with Amazon Transcribe streaming, refer to the <a href="https://docs.aws.amazon.com/transcribe/latest/dg/supported-languages.html">Supported languages</a> table.</p><important>
    /// <p>You can only include one language dialect per language per stream. For example, you cannot include <code>en-US</code> and <code>en-AU</code> in the same request.</p>
    /// </important>
    pub fn get_language_options(&self) -> &::std::option::Option<::std::string::String> {
        // Reads the value held by the wrapped input builder; does not send a request.
        self.inner.get_language_options()
    }
563 /// <p>Specify a preferred language from the subset of languages codes you specified in <code>LanguageOptions</code>.</p>
564 /// <p>You can only use this parameter if you've included <code>IdentifyLanguage</code> and <code>LanguageOptions</code> in your request.</p>
565 pub fn preferred_language(mut self, input: crate::types::LanguageCode) -> Self {
566 self.inner = self.inner.preferred_language(input);
567 self
568 }
569 /// <p>Specify a preferred language from the subset of languages codes you specified in <code>LanguageOptions</code>.</p>
570 /// <p>You can only use this parameter if you've included <code>IdentifyLanguage</code> and <code>LanguageOptions</code> in your request.</p>
571 pub fn set_preferred_language(mut self, input: ::std::option::Option<crate::types::LanguageCode>) -> Self {
572 self.inner = self.inner.set_preferred_language(input);
573 self
574 }
    /// <p>Returns the preferred language currently staged on this builder, if one has been set; it must come from the codes specified in <code>LanguageOptions</code>.</p>
    /// <p>You can only use this parameter if you've included <code>IdentifyLanguage</code> and <code>LanguageOptions</code> in your request.</p>
    pub fn get_preferred_language(&self) -> &::std::option::Option<crate::types::LanguageCode> {
        // Reads the value held by the wrapped input builder; does not send a request.
        self.inner.get_preferred_language()
    }
580 /// <p>Enables automatic multi-language identification in your transcription job request. Use this parameter if your stream contains more than one language. If your stream contains only one language, use IdentifyLanguage instead.</p>
581 /// <p>If you include <code>IdentifyMultipleLanguages</code>, you must include a list of language codes, using <code>LanguageOptions</code>, that you think may be present in your stream.</p>
582 /// <p>If you want to apply a custom vocabulary or a custom vocabulary filter to your automatic multiple language identification request, include <code>VocabularyNames</code> or <code>VocabularyFilterNames</code>.</p>
583 /// <p>Note that you must include one of <code>LanguageCode</code>, <code>IdentifyLanguage</code>, or <code>IdentifyMultipleLanguages</code> in your request. If you include more than one of these parameters, your transcription job fails.</p>
584 pub fn identify_multiple_languages(mut self, input: bool) -> Self {
585 self.inner = self.inner.identify_multiple_languages(input);
586 self
587 }
588 /// <p>Enables automatic multi-language identification in your transcription job request. Use this parameter if your stream contains more than one language. If your stream contains only one language, use IdentifyLanguage instead.</p>
589 /// <p>If you include <code>IdentifyMultipleLanguages</code>, you must include a list of language codes, using <code>LanguageOptions</code>, that you think may be present in your stream.</p>
590 /// <p>If you want to apply a custom vocabulary or a custom vocabulary filter to your automatic multiple language identification request, include <code>VocabularyNames</code> or <code>VocabularyFilterNames</code>.</p>
591 /// <p>Note that you must include one of <code>LanguageCode</code>, <code>IdentifyLanguage</code>, or <code>IdentifyMultipleLanguages</code> in your request. If you include more than one of these parameters, your transcription job fails.</p>
592 pub fn set_identify_multiple_languages(mut self, input: ::std::option::Option<bool>) -> Self {
593 self.inner = self.inner.set_identify_multiple_languages(input);
594 self
595 }
    /// <p>Returns the multi-language identification flag currently staged on this builder, if one has been set.</p>
    /// <p>If you include <code>IdentifyMultipleLanguages</code>, you must include a list of language codes, using <code>LanguageOptions</code>, that you think may be present in your stream.</p>
    /// <p>If you want to apply a custom vocabulary or a custom vocabulary filter to your automatic multiple language identification request, include <code>VocabularyNames</code> or <code>VocabularyFilterNames</code>.</p>
    /// <p>Note that you must include one of <code>LanguageCode</code>, <code>IdentifyLanguage</code>, or <code>IdentifyMultipleLanguages</code> in your request. If you include more than one of these parameters, your transcription job fails.</p>
    pub fn get_identify_multiple_languages(&self) -> &::std::option::Option<bool> {
        // Reads the value held by the wrapped input builder; does not send a request.
        self.inner.get_identify_multiple_languages()
    }
603 /// <p>Specify the names of the custom vocabularies that you want to use when processing your transcription. Note that vocabulary names are case sensitive.</p>
604 /// <p>If none of the languages of the specified custom vocabularies match the language identified in your media, your job fails.</p><important>
605 /// <p>This parameter is only intended for use <b>with</b> the <code>IdentifyLanguage</code> parameter. If you're <b>not</b> including <code>IdentifyLanguage</code> in your request and want to use a custom vocabulary with your transcription, use the <code>VocabularyName</code> parameter instead.</p>
606 /// </important>
607 /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/custom-vocabulary.html">Custom vocabularies</a>.</p>
608 pub fn vocabulary_names(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
609 self.inner = self.inner.vocabulary_names(input.into());
610 self
611 }
612 /// <p>Specify the names of the custom vocabularies that you want to use when processing your transcription. Note that vocabulary names are case sensitive.</p>
613 /// <p>If none of the languages of the specified custom vocabularies match the language identified in your media, your job fails.</p><important>
614 /// <p>This parameter is only intended for use <b>with</b> the <code>IdentifyLanguage</code> parameter. If you're <b>not</b> including <code>IdentifyLanguage</code> in your request and want to use a custom vocabulary with your transcription, use the <code>VocabularyName</code> parameter instead.</p>
615 /// </important>
616 /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/custom-vocabulary.html">Custom vocabularies</a>.</p>
617 pub fn set_vocabulary_names(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
618 self.inner = self.inner.set_vocabulary_names(input);
619 self
620 }
    /// <p>Returns the custom vocabulary names currently staged on this builder, if any have been set. Note that vocabulary names are case sensitive.</p>
    /// <p>If none of the languages of the specified custom vocabularies match the language identified in your media, your job fails.</p><important>
    /// <p>This parameter is only intended for use <b>with</b> the <code>IdentifyLanguage</code> parameter. If you're <b>not</b> including <code>IdentifyLanguage</code> in your request and want to use a custom vocabulary with your transcription, use the <code>VocabularyName</code> parameter instead.</p>
    /// </important>
    /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/custom-vocabulary.html">Custom vocabularies</a>.</p>
    pub fn get_vocabulary_names(&self) -> &::std::option::Option<::std::string::String> {
        // Reads the value held by the wrapped input builder; does not send a request.
        self.inner.get_vocabulary_names()
    }
629 /// <p>Specify the names of the custom vocabulary filters that you want to use when processing your transcription. Note that vocabulary filter names are case sensitive.</p>
630 /// <p>If none of the languages of the specified custom vocabulary filters match the language identified in your media, your job fails.</p><important>
631 /// <p>This parameter is only intended for use <b>with</b> the <code>IdentifyLanguage</code> parameter. If you're <b>not</b> including <code>IdentifyLanguage</code> in your request and want to use a custom vocabulary filter with your transcription, use the <code>VocabularyFilterName</code> parameter instead.</p>
632 /// </important>
633 /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/vocabulary-filtering.html">Using vocabulary filtering with unwanted words</a>.</p>
634 pub fn vocabulary_filter_names(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
635 self.inner = self.inner.vocabulary_filter_names(input.into());
636 self
637 }
638 /// <p>Specify the names of the custom vocabulary filters that you want to use when processing your transcription. Note that vocabulary filter names are case sensitive.</p>
639 /// <p>If none of the languages of the specified custom vocabulary filters match the language identified in your media, your job fails.</p><important>
640 /// <p>This parameter is only intended for use <b>with</b> the <code>IdentifyLanguage</code> parameter. If you're <b>not</b> including <code>IdentifyLanguage</code> in your request and want to use a custom vocabulary filter with your transcription, use the <code>VocabularyFilterName</code> parameter instead.</p>
641 /// </important>
642 /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/vocabulary-filtering.html">Using vocabulary filtering with unwanted words</a>.</p>
643 pub fn set_vocabulary_filter_names(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
644 self.inner = self.inner.set_vocabulary_filter_names(input);
645 self
646 }
    /// <p>Returns the custom vocabulary filter names currently staged on this builder, if any have been set. Note that vocabulary filter names are case sensitive.</p>
    /// <p>If none of the languages of the specified custom vocabulary filters match the language identified in your media, your job fails.</p><important>
    /// <p>This parameter is only intended for use <b>with</b> the <code>IdentifyLanguage</code> parameter. If you're <b>not</b> including <code>IdentifyLanguage</code> in your request and want to use a custom vocabulary filter with your transcription, use the <code>VocabularyFilterName</code> parameter instead.</p>
    /// </important>
    /// <p>For more information, see <a href="https://docs.aws.amazon.com/transcribe/latest/dg/vocabulary-filtering.html">Using vocabulary filtering with unwanted words</a>.</p>
    pub fn get_vocabulary_filter_names(&self) -> &::std::option::Option<::std::string::String> {
        // Reads the value held by the wrapped input builder; does not send a request.
        self.inner.get_vocabulary_filter_names()
    }
655}
656