Class InputAudioConfig.Builder

  • All Implemented Interfaces:
    InputAudioConfigOrBuilder, com.google.protobuf.Message.Builder, com.google.protobuf.MessageLite.Builder, com.google.protobuf.MessageLiteOrBuilder, com.google.protobuf.MessageOrBuilder, Cloneable
  • Enclosing class:
    InputAudioConfig

    public static final class InputAudioConfig.Builder
    extends com.google.protobuf.GeneratedMessageV3.Builder<InputAudioConfig.Builder>
    implements InputAudioConfigOrBuilder
     Instructs the speech recognizer how to process the audio content.
     
    Protobuf type google.cloud.dialogflow.v2.InputAudioConfig
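
     A minimal usage sketch (the encoding, sample rate, and language below are illustrative
     assumptions, not required values):

        import com.google.cloud.dialogflow.v2.AudioEncoding;
        import com.google.cloud.dialogflow.v2.InputAudioConfig;

        public class InputAudioConfigExample {
          public static void main(String[] args) {
            InputAudioConfig config =
                InputAudioConfig.newBuilder()
                    .setAudioEncoding(AudioEncoding.AUDIO_ENCODING_LINEAR_16) // 16-bit linear PCM
                    .setSampleRateHertz(16000)                                // must match the audio
                    .setLanguageCode("en-US")
                    .build();
            System.out.println(config);
          }
        }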
    • Method Detail

      • getDescriptor

        public static final com.google.protobuf.Descriptors.Descriptor getDescriptor()
      • internalGetFieldAccessorTable

        protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable()
        Specified by:
        internalGetFieldAccessorTable in class com.google.protobuf.GeneratedMessageV3.Builder<InputAudioConfig.Builder>
      • clear

        public InputAudioConfig.Builder clear()
        Specified by:
        clear in interface com.google.protobuf.Message.Builder
        Specified by:
        clear in interface com.google.protobuf.MessageLite.Builder
        Overrides:
        clear in class com.google.protobuf.GeneratedMessageV3.Builder<InputAudioConfig.Builder>
      • getDescriptorForType

        public com.google.protobuf.Descriptors.Descriptor getDescriptorForType()
        Specified by:
        getDescriptorForType in interface com.google.protobuf.Message.Builder
        Specified by:
        getDescriptorForType in interface com.google.protobuf.MessageOrBuilder
        Overrides:
        getDescriptorForType in class com.google.protobuf.GeneratedMessageV3.Builder<InputAudioConfig.Builder>
      • getDefaultInstanceForType

        public InputAudioConfig getDefaultInstanceForType()
        Specified by:
        getDefaultInstanceForType in interface com.google.protobuf.MessageLiteOrBuilder
        Specified by:
        getDefaultInstanceForType in interface com.google.protobuf.MessageOrBuilder
      • build

        public InputAudioConfig build()
        Specified by:
        build in interface com.google.protobuf.Message.Builder
        Specified by:
        build in interface com.google.protobuf.MessageLite.Builder
      • buildPartial

        public InputAudioConfig buildPartial()
        Specified by:
        buildPartial in interface com.google.protobuf.Message.Builder
        Specified by:
        buildPartial in interface com.google.protobuf.MessageLite.Builder
      • clone

        public InputAudioConfig.Builder clone()
        Specified by:
        clone in interface com.google.protobuf.Message.Builder
        Specified by:
        clone in interface com.google.protobuf.MessageLite.Builder
        Overrides:
        clone in class com.google.protobuf.GeneratedMessageV3.Builder<InputAudioConfig.Builder>
      • setField

        public InputAudioConfig.Builder setField​(com.google.protobuf.Descriptors.FieldDescriptor field,
                                                 Object value)
        Specified by:
        setField in interface com.google.protobuf.Message.Builder
        Overrides:
        setField in class com.google.protobuf.GeneratedMessageV3.Builder<InputAudioConfig.Builder>
      • clearField

        public InputAudioConfig.Builder clearField​(com.google.protobuf.Descriptors.FieldDescriptor field)
        Specified by:
        clearField in interface com.google.protobuf.Message.Builder
        Overrides:
        clearField in class com.google.protobuf.GeneratedMessageV3.Builder<InputAudioConfig.Builder>
      • clearOneof

        public InputAudioConfig.Builder clearOneof​(com.google.protobuf.Descriptors.OneofDescriptor oneof)
        Specified by:
        clearOneof in interface com.google.protobuf.Message.Builder
        Overrides:
        clearOneof in class com.google.protobuf.GeneratedMessageV3.Builder<InputAudioConfig.Builder>
      • setRepeatedField

        public InputAudioConfig.Builder setRepeatedField​(com.google.protobuf.Descriptors.FieldDescriptor field,
                                                         int index,
                                                         Object value)
        Specified by:
        setRepeatedField in interface com.google.protobuf.Message.Builder
        Overrides:
        setRepeatedField in class com.google.protobuf.GeneratedMessageV3.Builder<InputAudioConfig.Builder>
      • addRepeatedField

        public InputAudioConfig.Builder addRepeatedField​(com.google.protobuf.Descriptors.FieldDescriptor field,
                                                         Object value)
        Specified by:
        addRepeatedField in interface com.google.protobuf.Message.Builder
        Overrides:
        addRepeatedField in class com.google.protobuf.GeneratedMessageV3.Builder<InputAudioConfig.Builder>
      • mergeFrom

        public InputAudioConfig.Builder mergeFrom​(com.google.protobuf.Message other)
        Specified by:
        mergeFrom in interface com.google.protobuf.Message.Builder
        Overrides:
        mergeFrom in class com.google.protobuf.AbstractMessage.Builder<InputAudioConfig.Builder>
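         A common pattern (a sketch, assuming `baseConfig` and `requestOverrides` are existing
         InputAudioConfig instances) overlays per-request settings on shared defaults; under
         standard proto3 merge semantics, non-default scalar fields in the argument win and
         repeated fields are appended:

           InputAudioConfig merged =
               InputAudioConfig.newBuilder(baseConfig)   // start from shared defaults
                   .mergeFrom(requestOverrides)          // overlay per-request settings
                   .build();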
      • isInitialized

        public final boolean isInitialized()
        Specified by:
        isInitialized in interface com.google.protobuf.MessageLiteOrBuilder
        Overrides:
        isInitialized in class com.google.protobuf.GeneratedMessageV3.Builder<InputAudioConfig.Builder>
      • mergeFrom

        public InputAudioConfig.Builder mergeFrom​(com.google.protobuf.CodedInputStream input,
                                                  com.google.protobuf.ExtensionRegistryLite extensionRegistry)
                                           throws IOException
        Specified by:
        mergeFrom in interface com.google.protobuf.Message.Builder
        Specified by:
        mergeFrom in interface com.google.protobuf.MessageLite.Builder
        Overrides:
        mergeFrom in class com.google.protobuf.AbstractMessage.Builder<InputAudioConfig.Builder>
        Throws:
        IOException
      • getAudioEncodingValue

        public int getAudioEncodingValue()
         Required. Audio encoding of the audio content to process.
         
        .google.cloud.dialogflow.v2.AudioEncoding audio_encoding = 1 [(.google.api.field_behavior) = REQUIRED];
        Specified by:
        getAudioEncodingValue in interface InputAudioConfigOrBuilder
        Returns:
        The enum numeric value on the wire for audioEncoding.
      • setAudioEncodingValue

        public InputAudioConfig.Builder setAudioEncodingValue​(int value)
         Required. Audio encoding of the audio content to process.
         
        .google.cloud.dialogflow.v2.AudioEncoding audio_encoding = 1 [(.google.api.field_behavior) = REQUIRED];
        Parameters:
        value - The enum numeric value on the wire for audioEncoding to set.
        Returns:
        This builder for chaining.
      • getAudioEncoding

        public AudioEncoding getAudioEncoding()
         Required. Audio encoding of the audio content to process.
         
        .google.cloud.dialogflow.v2.AudioEncoding audio_encoding = 1 [(.google.api.field_behavior) = REQUIRED];
        Specified by:
        getAudioEncoding in interface InputAudioConfigOrBuilder
        Returns:
        The audioEncoding.
      • setAudioEncoding

        public InputAudioConfig.Builder setAudioEncoding​(AudioEncoding value)
         Required. Audio encoding of the audio content to process.
         
        .google.cloud.dialogflow.v2.AudioEncoding audio_encoding = 1 [(.google.api.field_behavior) = REQUIRED];
        Parameters:
        value - The audioEncoding to set.
        Returns:
        This builder for chaining.
      • clearAudioEncoding

        public InputAudioConfig.Builder clearAudioEncoding()
         Required. Audio encoding of the audio content to process.
         
        .google.cloud.dialogflow.v2.AudioEncoding audio_encoding = 1 [(.google.api.field_behavior) = REQUIRED];
        Returns:
        This builder for chaining.
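         The enum setter and the numeric setter write the same field. A short sketch, assuming an
         InputAudioConfig.Builder named `builder`; the generated `_VALUE` constant carries the
         enum's wire number:

           builder.setAudioEncoding(AudioEncoding.AUDIO_ENCODING_LINEAR_16);
           // Equivalent, using the numeric wire value:
           builder.setAudioEncodingValue(AudioEncoding.AUDIO_ENCODING_LINEAR_16_VALUE);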
      • getSampleRateHertz

        public int getSampleRateHertz()
         Required. Sample rate (in Hertz) of the audio content sent in the query.
         Refer to [Cloud Speech API
         documentation](https://cloud.google.com/speech-to-text/docs/basics) for
         more details.
         
        int32 sample_rate_hertz = 2 [(.google.api.field_behavior) = REQUIRED];
        Specified by:
        getSampleRateHertz in interface InputAudioConfigOrBuilder
        Returns:
        The sampleRateHertz.
      • setSampleRateHertz

        public InputAudioConfig.Builder setSampleRateHertz​(int value)
         Required. Sample rate (in Hertz) of the audio content sent in the query.
         Refer to [Cloud Speech API
         documentation](https://cloud.google.com/speech-to-text/docs/basics) for
         more details.
         
        int32 sample_rate_hertz = 2 [(.google.api.field_behavior) = REQUIRED];
        Parameters:
        value - The sampleRateHertz to set.
        Returns:
        This builder for chaining.
      • clearSampleRateHertz

        public InputAudioConfig.Builder clearSampleRateHertz()
         Required. Sample rate (in Hertz) of the audio content sent in the query.
         Refer to [Cloud Speech API
         documentation](https://cloud.google.com/speech-to-text/docs/basics) for
         more details.
         
        int32 sample_rate_hertz = 2 [(.google.api.field_behavior) = REQUIRED];
        Returns:
        This builder for chaining.
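         A one-line sketch, assuming an InputAudioConfig.Builder named `builder`; 16000 Hz is an
         illustrative value and must match the rate of the audio actually sent:

           builder.setSampleRateHertz(16000); // e.g. 16 kHz capture; use the real rate of your audio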
      • getLanguageCode

        public String getLanguageCode()
         Required. The language of the supplied audio. Dialogflow does not do
         translations. See [Language
         Support](https://cloud.google.com/dialogflow/docs/reference/language)
         for a list of the currently supported language codes. Note that queries in
         the same session do not necessarily need to specify the same language.
         
        string language_code = 3 [(.google.api.field_behavior) = REQUIRED];
        Specified by:
        getLanguageCode in interface InputAudioConfigOrBuilder
        Returns:
        The languageCode.
      • getLanguageCodeBytes

        public com.google.protobuf.ByteString getLanguageCodeBytes()
         Required. The language of the supplied audio. Dialogflow does not do
         translations. See [Language
         Support](https://cloud.google.com/dialogflow/docs/reference/language)
         for a list of the currently supported language codes. Note that queries in
         the same session do not necessarily need to specify the same language.
         
        string language_code = 3 [(.google.api.field_behavior) = REQUIRED];
        Specified by:
        getLanguageCodeBytes in interface InputAudioConfigOrBuilder
        Returns:
        The bytes for languageCode.
      • setLanguageCode

        public InputAudioConfig.Builder setLanguageCode​(String value)
         Required. The language of the supplied audio. Dialogflow does not do
         translations. See [Language
         Support](https://cloud.google.com/dialogflow/docs/reference/language)
         for a list of the currently supported language codes. Note that queries in
         the same session do not necessarily need to specify the same language.
         
        string language_code = 3 [(.google.api.field_behavior) = REQUIRED];
        Parameters:
        value - The languageCode to set.
        Returns:
        This builder for chaining.
      • clearLanguageCode

        public InputAudioConfig.Builder clearLanguageCode()
         Required. The language of the supplied audio. Dialogflow does not do
         translations. See [Language
         Support](https://cloud.google.com/dialogflow/docs/reference/language)
         for a list of the currently supported language codes. Note that queries in
         the same session do not necessarily need to specify the same language.
         
        string language_code = 3 [(.google.api.field_behavior) = REQUIRED];
        Returns:
        This builder for chaining.
      • setLanguageCodeBytes

        public InputAudioConfig.Builder setLanguageCodeBytes​(com.google.protobuf.ByteString value)
         Required. The language of the supplied audio. Dialogflow does not do
         translations. See [Language
         Support](https://cloud.google.com/dialogflow/docs/reference/language)
         for a list of the currently supported language codes. Note that queries in
         the same session do not necessarily need to specify the same language.
         
        string language_code = 3 [(.google.api.field_behavior) = REQUIRED];
        Parameters:
        value - The bytes for languageCode to set.
        Returns:
        This builder for chaining.
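         A sketch of both setters, assuming an InputAudioConfig.Builder named `builder`; "en-US"
         is an illustrative code from the language support list:

           builder.setLanguageCode("en-US");
           // ByteString variant, e.g. when the code is already held as UTF-8 bytes:
           builder.setLanguageCodeBytes(com.google.protobuf.ByteString.copyFromUtf8("en-US"));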
      • getEnableWordInfo

        public boolean getEnableWordInfo()
         If `true`, Dialogflow returns
         [SpeechWordInfo][google.cloud.dialogflow.v2.SpeechWordInfo] in
         [StreamingRecognitionResult][google.cloud.dialogflow.v2.StreamingRecognitionResult]
         with information about the recognized speech words, e.g. start and end time
         offsets. If false or unspecified, Speech doesn't return any word-level
         information.
         
        bool enable_word_info = 13;
        Specified by:
        getEnableWordInfo in interface InputAudioConfigOrBuilder
        Returns:
        The enableWordInfo.
      • setEnableWordInfo

        public InputAudioConfig.Builder setEnableWordInfo​(boolean value)
         If `true`, Dialogflow returns
         [SpeechWordInfo][google.cloud.dialogflow.v2.SpeechWordInfo] in
         [StreamingRecognitionResult][google.cloud.dialogflow.v2.StreamingRecognitionResult]
         with information about the recognized speech words, e.g. start and end time
         offsets. If false or unspecified, Speech doesn't return any word-level
         information.
         
        bool enable_word_info = 13;
        Parameters:
        value - The enableWordInfo to set.
        Returns:
        This builder for chaining.
      • clearEnableWordInfo

        public InputAudioConfig.Builder clearEnableWordInfo()
         If `true`, Dialogflow returns
         [SpeechWordInfo][google.cloud.dialogflow.v2.SpeechWordInfo] in
         [StreamingRecognitionResult][google.cloud.dialogflow.v2.StreamingRecognitionResult]
         with information about the recognized speech words, e.g. start and end time
         offsets. If false or unspecified, Speech doesn't return any word-level
         information.
         
        bool enable_word_info = 13;
        Returns:
        This builder for chaining.
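         A one-line sketch, assuming an InputAudioConfig.Builder named `builder`:

           // Opt in to per-word start/end time offsets in StreamingRecognitionResult.
           builder.setEnableWordInfo(true);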
      • getPhraseHintsList

        @Deprecated
        public com.google.protobuf.ProtocolStringList getPhraseHintsList()
        Deprecated.
        google.cloud.dialogflow.v2.InputAudioConfig.phrase_hints is deprecated. See google/cloud/dialogflow/v2/audio_config.proto;l=129
         A list of strings containing words and phrases that the speech
         recognizer should recognize with higher likelihood.
        
         See [the Cloud Speech
         documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
         for more details.
        
         This field is deprecated. Please use `speech_contexts` instead. If you
         specify both `phrase_hints` and `speech_contexts`, Dialogflow will
         treat the `phrase_hints` as a single additional `SpeechContext`.
         
        repeated string phrase_hints = 4 [deprecated = true];
        Specified by:
        getPhraseHintsList in interface InputAudioConfigOrBuilder
        Returns:
        A list containing the phraseHints.
      • getPhraseHintsCount

        @Deprecated
        public int getPhraseHintsCount()
        Deprecated.
        google.cloud.dialogflow.v2.InputAudioConfig.phrase_hints is deprecated. See google/cloud/dialogflow/v2/audio_config.proto;l=129
         A list of strings containing words and phrases that the speech
         recognizer should recognize with higher likelihood.
        
         See [the Cloud Speech
         documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
         for more details.
        
         This field is deprecated. Please use `speech_contexts` instead. If you
         specify both `phrase_hints` and `speech_contexts`, Dialogflow will
         treat the `phrase_hints` as a single additional `SpeechContext`.
         
        repeated string phrase_hints = 4 [deprecated = true];
        Specified by:
        getPhraseHintsCount in interface InputAudioConfigOrBuilder
        Returns:
        The count of phraseHints.
      • getPhraseHints

        @Deprecated
        public String getPhraseHints​(int index)
        Deprecated.
        google.cloud.dialogflow.v2.InputAudioConfig.phrase_hints is deprecated. See google/cloud/dialogflow/v2/audio_config.proto;l=129
         A list of strings containing words and phrases that the speech
         recognizer should recognize with higher likelihood.
        
         See [the Cloud Speech
         documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
         for more details.
        
         This field is deprecated. Please use `speech_contexts` instead. If you
         specify both `phrase_hints` and `speech_contexts`, Dialogflow will
         treat the `phrase_hints` as a single additional `SpeechContext`.
         
        repeated string phrase_hints = 4 [deprecated = true];
        Specified by:
        getPhraseHints in interface InputAudioConfigOrBuilder
        Parameters:
        index - The index of the element to return.
        Returns:
        The phraseHints at the given index.
      • getPhraseHintsBytes

        @Deprecated
        public com.google.protobuf.ByteString getPhraseHintsBytes​(int index)
        Deprecated.
        google.cloud.dialogflow.v2.InputAudioConfig.phrase_hints is deprecated. See google/cloud/dialogflow/v2/audio_config.proto;l=129
         A list of strings containing words and phrases that the speech
         recognizer should recognize with higher likelihood.
        
         See [the Cloud Speech
         documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
         for more details.
        
         This field is deprecated. Please use `speech_contexts` instead. If you
         specify both `phrase_hints` and `speech_contexts`, Dialogflow will
         treat the `phrase_hints` as a single additional `SpeechContext`.
         
        repeated string phrase_hints = 4 [deprecated = true];
        Specified by:
        getPhraseHintsBytes in interface InputAudioConfigOrBuilder
        Parameters:
        index - The index of the value to return.
        Returns:
        The bytes of the phraseHints at the given index.
      • setPhraseHints

        @Deprecated
        public InputAudioConfig.Builder setPhraseHints​(int index,
                                                       String value)
        Deprecated.
        google.cloud.dialogflow.v2.InputAudioConfig.phrase_hints is deprecated. See google/cloud/dialogflow/v2/audio_config.proto;l=129
         A list of strings containing words and phrases that the speech
         recognizer should recognize with higher likelihood.
        
         See [the Cloud Speech
         documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
         for more details.
        
         This field is deprecated. Please use `speech_contexts` instead. If you
         specify both `phrase_hints` and `speech_contexts`, Dialogflow will
         treat the `phrase_hints` as a single additional `SpeechContext`.
         
        repeated string phrase_hints = 4 [deprecated = true];
        Parameters:
        index - The index to set the value at.
        value - The phraseHints to set.
        Returns:
        This builder for chaining.
      • addPhraseHints

        @Deprecated
        public InputAudioConfig.Builder addPhraseHints​(String value)
        Deprecated.
        google.cloud.dialogflow.v2.InputAudioConfig.phrase_hints is deprecated. See google/cloud/dialogflow/v2/audio_config.proto;l=129
         A list of strings containing words and phrases that the speech
         recognizer should recognize with higher likelihood.
        
         See [the Cloud Speech
         documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
         for more details.
        
         This field is deprecated. Please use `speech_contexts` instead. If you
         specify both `phrase_hints` and `speech_contexts`, Dialogflow will
         treat the `phrase_hints` as a single additional `SpeechContext`.
         
        repeated string phrase_hints = 4 [deprecated = true];
        Parameters:
        value - The phraseHints to add.
        Returns:
        This builder for chaining.
      • addAllPhraseHints

        @Deprecated
        public InputAudioConfig.Builder addAllPhraseHints​(Iterable<String> values)
        Deprecated.
        google.cloud.dialogflow.v2.InputAudioConfig.phrase_hints is deprecated. See google/cloud/dialogflow/v2/audio_config.proto;l=129
         A list of strings containing words and phrases that the speech
         recognizer should recognize with higher likelihood.
        
         See [the Cloud Speech
         documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
         for more details.
        
         This field is deprecated. Please use `speech_contexts` instead. If you
         specify both `phrase_hints` and `speech_contexts`, Dialogflow will
         treat the `phrase_hints` as a single additional `SpeechContext`.
         
        repeated string phrase_hints = 4 [deprecated = true];
        Parameters:
        values - The phraseHints to add.
        Returns:
        This builder for chaining.
      • clearPhraseHints

        @Deprecated
        public InputAudioConfig.Builder clearPhraseHints()
        Deprecated.
        google.cloud.dialogflow.v2.InputAudioConfig.phrase_hints is deprecated. See google/cloud/dialogflow/v2/audio_config.proto;l=129
         A list of strings containing words and phrases that the speech
         recognizer should recognize with higher likelihood.
        
         See [the Cloud Speech
         documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
         for more details.
        
         This field is deprecated. Please use `speech_contexts` instead. If you
         specify both `phrase_hints` and `speech_contexts`, Dialogflow will
         treat the `phrase_hints` as a single additional `SpeechContext`.
         
        repeated string phrase_hints = 4 [deprecated = true];
        Returns:
        This builder for chaining.
      • addPhraseHintsBytes

        @Deprecated
        public InputAudioConfig.Builder addPhraseHintsBytes​(com.google.protobuf.ByteString value)
        Deprecated.
        google.cloud.dialogflow.v2.InputAudioConfig.phrase_hints is deprecated. See google/cloud/dialogflow/v2/audio_config.proto;l=129
         A list of strings containing words and phrases that the speech
         recognizer should recognize with higher likelihood.
        
         See [the Cloud Speech
         documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
         for more details.
        
         This field is deprecated. Please use `speech_contexts` instead. If you
         specify both `phrase_hints` and `speech_contexts`, Dialogflow will
         treat the `phrase_hints` as a single additional `SpeechContext`.
         
        repeated string phrase_hints = 4 [deprecated = true];
        Parameters:
        value - The bytes of the phraseHints to add.
        Returns:
        This builder for chaining.
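         Because `phrase_hints` is deprecated in favor of `speech_contexts`, a migration sketch
         might move existing hints into a single SpeechContext (the phrases and the `builder`
         variable are illustrative assumptions):

           builder.clearPhraseHints()
                  .addSpeechContexts(
                      SpeechContext.newBuilder()
                          .addPhrases("account balance")
                          .addPhrases("wire transfer")
                          .build());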
      • getSpeechContextsList

        public List<SpeechContext> getSpeechContextsList()
         Context information to assist speech recognition.
        
         See [the Cloud Speech
         documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
         for more details.
         
        repeated .google.cloud.dialogflow.v2.SpeechContext speech_contexts = 11;
        Specified by:
        getSpeechContextsList in interface InputAudioConfigOrBuilder
      • getSpeechContextsCount

        public int getSpeechContextsCount()
         Context information to assist speech recognition.
        
         See [the Cloud Speech
         documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
         for more details.
         
        repeated .google.cloud.dialogflow.v2.SpeechContext speech_contexts = 11;
        Specified by:
        getSpeechContextsCount in interface InputAudioConfigOrBuilder
      • getSpeechContexts

        public SpeechContext getSpeechContexts​(int index)
         Context information to assist speech recognition.
        
         See [the Cloud Speech
         documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
         for more details.
         
        repeated .google.cloud.dialogflow.v2.SpeechContext speech_contexts = 11;
        Specified by:
        getSpeechContexts in interface InputAudioConfigOrBuilder
      • setSpeechContexts

        public InputAudioConfig.Builder setSpeechContexts​(int index,
                                                          SpeechContext value)
         Context information to assist speech recognition.
        
         See [the Cloud Speech
         documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
         for more details.
         
        repeated .google.cloud.dialogflow.v2.SpeechContext speech_contexts = 11;
      • setSpeechContexts

        public InputAudioConfig.Builder setSpeechContexts​(int index,
                                                          SpeechContext.Builder builderForValue)
         Context information to assist speech recognition.
        
         See [the Cloud Speech
         documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
         for more details.
         
        repeated .google.cloud.dialogflow.v2.SpeechContext speech_contexts = 11;
      • addSpeechContexts

        public InputAudioConfig.Builder addSpeechContexts​(SpeechContext value)
         Context information to assist speech recognition.
        
         See [the Cloud Speech
         documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
         for more details.
         
        repeated .google.cloud.dialogflow.v2.SpeechContext speech_contexts = 11;
      • addSpeechContexts

        public InputAudioConfig.Builder addSpeechContexts​(int index,
                                                          SpeechContext value)
         Context information to assist speech recognition.
        
         See [the Cloud Speech
         documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
         for more details.
         
        repeated .google.cloud.dialogflow.v2.SpeechContext speech_contexts = 11;
      • addSpeechContexts

        public InputAudioConfig.Builder addSpeechContexts​(SpeechContext.Builder builderForValue)
         Context information to assist speech recognition.
        
         See [the Cloud Speech
         documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
         for more details.
         
        repeated .google.cloud.dialogflow.v2.SpeechContext speech_contexts = 11;
      • addSpeechContexts

        public InputAudioConfig.Builder addSpeechContexts​(int index,
                                                          SpeechContext.Builder builderForValue)
         Context information to assist speech recognition.
        
         See [the Cloud Speech
         documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
         for more details.
         
        repeated .google.cloud.dialogflow.v2.SpeechContext speech_contexts = 11;
      • addAllSpeechContexts

        public InputAudioConfig.Builder addAllSpeechContexts​(Iterable<? extends SpeechContext> values)
         Context information to assist speech recognition.
        
         See [the Cloud Speech
         documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
         for more details.
         
        repeated .google.cloud.dialogflow.v2.SpeechContext speech_contexts = 11;
      • clearSpeechContexts

        public InputAudioConfig.Builder clearSpeechContexts()
         Context information to assist speech recognition.
        
         See [the Cloud Speech
         documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
         for more details.
         
        repeated .google.cloud.dialogflow.v2.SpeechContext speech_contexts = 11;
      • removeSpeechContexts

        public InputAudioConfig.Builder removeSpeechContexts​(int index)
         Context information to assist speech recognition.
        
         See [the Cloud Speech
         documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
         for more details.
         
        repeated .google.cloud.dialogflow.v2.SpeechContext speech_contexts = 11;
      • getSpeechContextsBuilder

        public SpeechContext.Builder getSpeechContextsBuilder​(int index)
         Context information to assist speech recognition.
        
         See [the Cloud Speech
         documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
         for more details.
         
        repeated .google.cloud.dialogflow.v2.SpeechContext speech_contexts = 11;
      • getSpeechContextsOrBuilder

        public SpeechContextOrBuilder getSpeechContextsOrBuilder​(int index)
         Context information to assist speech recognition.
        
         See [the Cloud Speech
         documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
         for more details.
         
        repeated .google.cloud.dialogflow.v2.SpeechContext speech_contexts = 11;
        Specified by:
        getSpeechContextsOrBuilder in interface InputAudioConfigOrBuilder
      • getSpeechContextsOrBuilderList

        public List<? extends SpeechContextOrBuilder> getSpeechContextsOrBuilderList()
         Context information to assist speech recognition.
        
         See [the Cloud Speech
         documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
         for more details.
         
        repeated .google.cloud.dialogflow.v2.SpeechContext speech_contexts = 11;
        Specified by:
        getSpeechContextsOrBuilderList in interface InputAudioConfigOrBuilder
      • addSpeechContextsBuilder

        public SpeechContext.Builder addSpeechContextsBuilder()
         Context information to assist speech recognition.
        
         See [the Cloud Speech
         documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
         for more details.
         
        repeated .google.cloud.dialogflow.v2.SpeechContext speech_contexts = 11;
      • addSpeechContextsBuilder

        public SpeechContext.Builder addSpeechContextsBuilder​(int index)
         Context information to assist speech recognition.
        
         See [the Cloud Speech
         documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
         for more details.
         
        repeated .google.cloud.dialogflow.v2.SpeechContext speech_contexts = 11;
      • getSpeechContextsBuilderList

        public List<SpeechContext.Builder> getSpeechContextsBuilderList()
         Context information to assist speech recognition.
        
         See [the Cloud Speech
         documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
         for more details.
         
        repeated .google.cloud.dialogflow.v2.SpeechContext speech_contexts = 11;
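         A sketch of the repeated-field accessors working together, assuming an
         InputAudioConfig.Builder named `builder`; the phrases and boost value are illustrative:

           SpeechContext banking =
               SpeechContext.newBuilder()
                   .addPhrases("routing number")
                   .setBoost(10.0f)                       // optional per-context boost
                   .build();
           builder.addSpeechContexts(banking);
           // Edit the element in place through its nested builder:
           builder.getSpeechContextsBuilder(0).addPhrases("account number");
           int count = builder.getSpeechContextsCount(); // now 1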
      • getModel

        public String getModel()
         Which Speech model to select for the given request. Select the
         model best suited to your domain to get best results. If a model is not
         explicitly specified, then we auto-select a model based on the parameters
         in the InputAudioConfig.
         If an enhanced speech model is enabled for the agent and an enhanced
         version of the specified model for the language does not exist, then the
         speech is recognized using the standard version of the specified model.
         Refer to
         [Cloud Speech API
         documentation](https://cloud.google.com/speech-to-text/docs/basics#select-model)
         for more details.
         If you specify a model, the following models typically have the best
         performance:
        
         - phone_call (best for Agent Assist and telephony)
         - latest_short (best for Dialogflow non-telephony)
         - command_and_search (best for very short utterances and commands)
         
        string model = 7;
        Specified by:
        getModel in interface InputAudioConfigOrBuilder
        Returns:
        The model.
      • getModelBytes

        public com.google.protobuf.ByteString getModelBytes()
         Which Speech model to select for the given request. Select the
         model best suited to your domain to get best results. If a model is not
         explicitly specified, then we auto-select a model based on the parameters
         in the InputAudioConfig.
         If an enhanced speech model is enabled for the agent and an enhanced
         version of the specified model for the language does not exist, then the
         speech is recognized using the standard version of the specified model.
         Refer to
         [Cloud Speech API
         documentation](https://cloud.google.com/speech-to-text/docs/basics#select-model)
         for more details.
         If you specify a model, the following models typically have the best
         performance:
        
         - phone_call (best for Agent Assist and telephony)
         - latest_short (best for Dialogflow non-telephony)
         - command_and_search (best for very short utterances and commands)
         
        string model = 7;
        Specified by:
        getModelBytes in interface InputAudioConfigOrBuilder
        Returns:
        The bytes for model.
      • setModel

        public InputAudioConfig.Builder setModel​(String value)
         Which Speech model to select for the given request. Select the
         model best suited to your domain to get best results. If a model is not
         explicitly specified, then we auto-select a model based on the parameters
         in the InputAudioConfig.
         If an enhanced speech model is enabled for the agent and an enhanced
         version of the specified model for the language does not exist, then the
         speech is recognized using the standard version of the specified model.
         Refer to
         [Cloud Speech API
         documentation](https://cloud.google.com/speech-to-text/docs/basics#select-model)
         for more details.
         If you specify a model, the following models typically have the best
         performance:
        
         - phone_call (best for Agent Assist and telephony)
         - latest_short (best for Dialogflow non-telephony)
         - command_and_search (best for very short utterances and commands)
         
        string model = 7;
        Parameters:
        value - The model to set.
        Returns:
        This builder for chaining.
      • clearModel

        public InputAudioConfig.Builder clearModel()
         Which Speech model to select for the given request. Select the
         model best suited to your domain to get best results. If a model is not
         explicitly specified, then we auto-select a model based on the parameters
         in the InputAudioConfig.
         If an enhanced speech model is enabled for the agent and an enhanced
         version of the specified model for the language does not exist, then the
         speech is recognized using the standard version of the specified model.
         Refer to
         [Cloud Speech API
         documentation](https://cloud.google.com/speech-to-text/docs/basics#select-model)
         for more details.
         If you specify a model, the following models typically have the best
         performance:
        
         - phone_call (best for Agent Assist and telephony)
         - latest_short (best for Dialogflow non-telephony)
         - command_and_search (best for very short utterances and commands)
         
        string model = 7;
        Returns:
        This builder for chaining.
      • setModelBytes

        public InputAudioConfig.Builder setModelBytes​(com.google.protobuf.ByteString value)
         Which Speech model to select for the given request. Select the
         model best suited to your domain to get best results. If a model is not
         explicitly specified, then we auto-select a model based on the parameters
         in the InputAudioConfig.
         If an enhanced speech model is enabled for the agent and an enhanced
         version of the specified model for the language does not exist, then the
         speech is recognized using the standard version of the specified model.
         Refer to
         [Cloud Speech API
         documentation](https://cloud.google.com/speech-to-text/docs/basics#select-model)
         for more details.
         If you specify a model, the following models typically have the best
         performance:
        
         - phone_call (best for Agent Assist and telephony)
         - latest_short (best for Dialogflow non-telephony)
         - command_and_search (best for very short utterances and commands)
         
        string model = 7;
        Parameters:
        value - The bytes for model to set.
        Returns:
        This builder for chaining.
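         A one-line sketch, assuming an InputAudioConfig.Builder named `builder`; "phone_call" is
         one of the model names called out above for telephony audio:

           builder.setModel("phone_call");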
      • getModelVariantValue

        public int getModelVariantValue()
         Which variant of the [Speech
         model][google.cloud.dialogflow.v2.InputAudioConfig.model] to use.
         
        .google.cloud.dialogflow.v2.SpeechModelVariant model_variant = 10;
        Specified by:
        getModelVariantValue in interface InputAudioConfigOrBuilder
        Returns:
        The enum numeric value on the wire for modelVariant.
      • setModelVariantValue

        public InputAudioConfig.Builder setModelVariantValue​(int value)
         Which variant of the [Speech
         model][google.cloud.dialogflow.v2.InputAudioConfig.model] to use.
         
        .google.cloud.dialogflow.v2.SpeechModelVariant model_variant = 10;
        Parameters:
        value - The enum numeric value on the wire for modelVariant to set.
        Returns:
        This builder for chaining.
      • getModelVariant

        public SpeechModelVariant getModelVariant()
         Which variant of the [Speech
         model][google.cloud.dialogflow.v2.InputAudioConfig.model] to use.
         
        .google.cloud.dialogflow.v2.SpeechModelVariant model_variant = 10;
        Specified by:
        getModelVariant in interface InputAudioConfigOrBuilder
        Returns:
        The modelVariant.
      • setModelVariant

        public InputAudioConfig.Builder setModelVariant​(SpeechModelVariant value)
         Which variant of the [Speech
         model][google.cloud.dialogflow.v2.InputAudioConfig.model] to use.
         
        .google.cloud.dialogflow.v2.SpeechModelVariant model_variant = 10;
        Parameters:
        value - The modelVariant to set.
        Returns:
        This builder for chaining.
      • clearModelVariant

        public InputAudioConfig.Builder clearModelVariant()
         Which variant of the [Speech
         model][google.cloud.dialogflow.v2.InputAudioConfig.model] to use.
         
        .google.cloud.dialogflow.v2.SpeechModelVariant model_variant = 10;
        Returns:
        This builder for chaining.
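         A one-line sketch, assuming an InputAudioConfig.Builder named `builder`; USE_ENHANCED
         requests the enhanced variant of the selected model when one exists:

           builder.setModelVariant(SpeechModelVariant.USE_ENHANCED);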
      • getSingleUtterance

        public boolean getSingleUtterance()
         If `false` (default), recognition does not cease until the
         client closes the stream.
         If `true`, the recognizer will detect a single spoken utterance in input
         audio. Recognition ceases when it detects the audio's voice has
         stopped or paused. In this case, once a detected intent is received, the
         client should close the stream and start a new request with a new stream as
         needed.
         Note: This setting is relevant only for streaming methods.
         Note: When specified, InputAudioConfig.single_utterance takes precedence
         over StreamingDetectIntentRequest.single_utterance.
         
        bool single_utterance = 8;
        Specified by:
        getSingleUtterance in interface InputAudioConfigOrBuilder
        Returns:
        The singleUtterance.
      • setSingleUtterance

        public InputAudioConfig.Builder setSingleUtterance​(boolean value)
         If `false` (default), recognition does not cease until the
         client closes the stream.
         If `true`, the recognizer will detect a single spoken utterance in input
         audio. Recognition ceases when it detects the audio's voice has
         stopped or paused. In this case, once a detected intent is received, the
         client should close the stream and start a new request with a new stream as
         needed.
         Note: This setting is relevant only for streaming methods.
         Note: When specified, InputAudioConfig.single_utterance takes precedence
         over StreamingDetectIntentRequest.single_utterance.
         
        bool single_utterance = 8;
        Parameters:
        value - The singleUtterance to set.
        Returns:
        This builder for chaining.
      • clearSingleUtterance

        public InputAudioConfig.Builder clearSingleUtterance()
         If `false` (default), recognition does not cease until the
         client closes the stream.
         If `true`, the recognizer will detect a single spoken utterance in input
         audio. Recognition ceases when it detects the audio's voice has
         stopped or paused. In this case, once a detected intent is received, the
         client should close the stream and start a new request with a new stream as
         needed.
         Note: This setting is relevant only for streaming methods.
         Note: When specified, InputAudioConfig.single_utterance takes precedence
         over StreamingDetectIntentRequest.single_utterance.
         
        bool single_utterance = 8;
        Returns:
        This builder for chaining.
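         A one-line sketch, assuming an InputAudioConfig.Builder named `builder`; relevant only
         for streaming requests:

           // Stop recognition automatically after the first detected utterance,
           // then open a new stream for the next turn.
           builder.setSingleUtterance(true);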
      • getDisableNoSpeechRecognizedEvent

        public boolean getDisableNoSpeechRecognizedEvent()
         Only used in
         [Participants.AnalyzeContent][google.cloud.dialogflow.v2.Participants.AnalyzeContent]
         and
         [Participants.StreamingAnalyzeContent][google.cloud.dialogflow.v2.Participants.StreamingAnalyzeContent].
         If `false` and recognition doesn't return any result, a
         `NO_SPEECH_RECOGNIZED` event is triggered and sent to the Dialogflow agent.
         
        bool disable_no_speech_recognized_event = 14;
        Specified by:
        getDisableNoSpeechRecognizedEvent in interface InputAudioConfigOrBuilder
        Returns:
        The disableNoSpeechRecognizedEvent.
      • setDisableNoSpeechRecognizedEvent

        public InputAudioConfig.Builder setDisableNoSpeechRecognizedEvent​(boolean value)
         Only used in
         [Participants.AnalyzeContent][google.cloud.dialogflow.v2.Participants.AnalyzeContent]
         and
         [Participants.StreamingAnalyzeContent][google.cloud.dialogflow.v2.Participants.StreamingAnalyzeContent].
         If `false` and recognition doesn't return any result, a
         `NO_SPEECH_RECOGNIZED` event is triggered and sent to the Dialogflow agent.
         
        bool disable_no_speech_recognized_event = 14;
        Parameters:
        value - The disableNoSpeechRecognizedEvent to set.
        Returns:
        This builder for chaining.
      • clearDisableNoSpeechRecognizedEvent

        public InputAudioConfig.Builder clearDisableNoSpeechRecognizedEvent()
         Only used in
         [Participants.AnalyzeContent][google.cloud.dialogflow.v2.Participants.AnalyzeContent]
         and
         [Participants.StreamingAnalyzeContent][google.cloud.dialogflow.v2.Participants.StreamingAnalyzeContent].
         If `false` and recognition doesn't return any result, a
         `NO_SPEECH_RECOGNIZED` event is triggered and sent to the Dialogflow agent.
         
        bool disable_no_speech_recognized_event = 14;
        Returns:
        This builder for chaining.
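         A one-line sketch, assuming an InputAudioConfig.Builder named `builder`:

           // Suppress NO_SPEECH_RECOGNIZED events for AnalyzeContent / StreamingAnalyzeContent.
           builder.setDisableNoSpeechRecognizedEvent(true);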
      • getEnableAutomaticPunctuation

        public boolean getEnableAutomaticPunctuation()
         Enable the automatic punctuation option at the speech backend.
         
        bool enable_automatic_punctuation = 17;
        Specified by:
        getEnableAutomaticPunctuation in interface InputAudioConfigOrBuilder
        Returns:
        The enableAutomaticPunctuation.
      • setEnableAutomaticPunctuation

        public InputAudioConfig.Builder setEnableAutomaticPunctuation​(boolean value)
         Enable the automatic punctuation option at the speech backend.
         
        bool enable_automatic_punctuation = 17;
        Parameters:
        value - The enableAutomaticPunctuation to set.
        Returns:
        This builder for chaining.
      • clearEnableAutomaticPunctuation

        public InputAudioConfig.Builder clearEnableAutomaticPunctuation()
         Enable the automatic punctuation option at the speech backend.
         
        bool enable_automatic_punctuation = 17;
        Returns:
        This builder for chaining.
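         A one-line sketch, assuming an InputAudioConfig.Builder named `builder`:

           // Ask the speech backend to punctuate transcripts.
           builder.setEnableAutomaticPunctuation(true);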
      • setUnknownFields

        public final InputAudioConfig.Builder setUnknownFields​(com.google.protobuf.UnknownFieldSet unknownFields)
        Specified by:
        setUnknownFields in interface com.google.protobuf.Message.Builder
        Overrides:
        setUnknownFields in class com.google.protobuf.GeneratedMessageV3.Builder<InputAudioConfig.Builder>
      • mergeUnknownFields

        public final InputAudioConfig.Builder mergeUnknownFields​(com.google.protobuf.UnknownFieldSet unknownFields)
        Specified by:
        mergeUnknownFields in interface com.google.protobuf.Message.Builder
        Overrides:
        mergeUnknownFields in class com.google.protobuf.GeneratedMessageV3.Builder<InputAudioConfig.Builder>