Class InputDataConfig.Builder

  • All Implemented Interfaces:
    InputDataConfigOrBuilder, com.google.protobuf.Message.Builder, com.google.protobuf.MessageLite.Builder, com.google.protobuf.MessageLiteOrBuilder, com.google.protobuf.MessageOrBuilder, Cloneable
    Enclosing class:
    InputDataConfig

    public static final class InputDataConfig.Builder
    extends com.google.protobuf.GeneratedMessageV3.Builder<InputDataConfig.Builder>
    implements InputDataConfigOrBuilder
     Specifies Vertex AI owned input data to be used for training, and
     possibly evaluating, the Model.
     
    Protobuf type google.cloud.aiplatform.v1.InputDataConfig
    • Method Detail

      • getDescriptor

        public static final com.google.protobuf.Descriptors.Descriptor getDescriptor()
      • internalGetFieldAccessorTable

        protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable()
        Specified by:
        internalGetFieldAccessorTable in class com.google.protobuf.GeneratedMessageV3.Builder<InputDataConfig.Builder>
      • clear

        public InputDataConfig.Builder clear()
        Specified by:
        clear in interface com.google.protobuf.Message.Builder
        Specified by:
        clear in interface com.google.protobuf.MessageLite.Builder
        Overrides:
        clear in class com.google.protobuf.GeneratedMessageV3.Builder<InputDataConfig.Builder>
      • getDescriptorForType

        public com.google.protobuf.Descriptors.Descriptor getDescriptorForType()
        Specified by:
        getDescriptorForType in interface com.google.protobuf.Message.Builder
        Specified by:
        getDescriptorForType in interface com.google.protobuf.MessageOrBuilder
        Overrides:
        getDescriptorForType in class com.google.protobuf.GeneratedMessageV3.Builder<InputDataConfig.Builder>
      • getDefaultInstanceForType

        public InputDataConfig getDefaultInstanceForType()
        Specified by:
        getDefaultInstanceForType in interface com.google.protobuf.MessageLiteOrBuilder
        Specified by:
        getDefaultInstanceForType in interface com.google.protobuf.MessageOrBuilder
      • build

        public InputDataConfig build()
        Specified by:
        build in interface com.google.protobuf.Message.Builder
        Specified by:
        build in interface com.google.protobuf.MessageLite.Builder
      • buildPartial

        public InputDataConfig buildPartial()
        Specified by:
        buildPartial in interface com.google.protobuf.Message.Builder
        Specified by:
        buildPartial in interface com.google.protobuf.MessageLite.Builder
      • clone

        public InputDataConfig.Builder clone()
        Specified by:
        clone in interface com.google.protobuf.Message.Builder
        Specified by:
        clone in interface com.google.protobuf.MessageLite.Builder
        Overrides:
        clone in class com.google.protobuf.GeneratedMessageV3.Builder<InputDataConfig.Builder>
      • setField

        public InputDataConfig.Builder setField(com.google.protobuf.Descriptors.FieldDescriptor field,
                                                Object value)
        Specified by:
        setField in interface com.google.protobuf.Message.Builder
        Overrides:
        setField in class com.google.protobuf.GeneratedMessageV3.Builder<InputDataConfig.Builder>
      • clearField

        public InputDataConfig.Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field)
        Specified by:
        clearField in interface com.google.protobuf.Message.Builder
        Overrides:
        clearField in class com.google.protobuf.GeneratedMessageV3.Builder<InputDataConfig.Builder>
      • clearOneof

        public InputDataConfig.Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof)
        Specified by:
        clearOneof in interface com.google.protobuf.Message.Builder
        Overrides:
        clearOneof in class com.google.protobuf.GeneratedMessageV3.Builder<InputDataConfig.Builder>
      • setRepeatedField

        public InputDataConfig.Builder setRepeatedField(com.google.protobuf.Descriptors.FieldDescriptor field,
                                                        int index,
                                                        Object value)
        Specified by:
        setRepeatedField in interface com.google.protobuf.Message.Builder
        Overrides:
        setRepeatedField in class com.google.protobuf.GeneratedMessageV3.Builder<InputDataConfig.Builder>
      • addRepeatedField

        public InputDataConfig.Builder addRepeatedField(com.google.protobuf.Descriptors.FieldDescriptor field,
                                                        Object value)
        Specified by:
        addRepeatedField in interface com.google.protobuf.Message.Builder
        Overrides:
        addRepeatedField in class com.google.protobuf.GeneratedMessageV3.Builder<InputDataConfig.Builder>
      • mergeFrom

        public InputDataConfig.Builder mergeFrom(com.google.protobuf.Message other)
        Specified by:
        mergeFrom in interface com.google.protobuf.Message.Builder
        Overrides:
        mergeFrom in class com.google.protobuf.AbstractMessage.Builder<InputDataConfig.Builder>
      • isInitialized

        public final boolean isInitialized()
        Specified by:
        isInitialized in interface com.google.protobuf.MessageLiteOrBuilder
        Overrides:
        isInitialized in class com.google.protobuf.GeneratedMessageV3.Builder<InputDataConfig.Builder>
      • mergeFrom

        public InputDataConfig.Builder mergeFrom(com.google.protobuf.CodedInputStream input,
                                                 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
                                          throws IOException
        Specified by:
        mergeFrom in interface com.google.protobuf.Message.Builder
        Specified by:
        mergeFrom in interface com.google.protobuf.MessageLite.Builder
        Overrides:
        mergeFrom in class com.google.protobuf.AbstractMessage.Builder<InputDataConfig.Builder>
        Throws:
        IOException
      • hasFractionSplit

        public boolean hasFractionSplit()
         Split based on fractions defining the size of each set.
         
        .google.cloud.aiplatform.v1.FractionSplit fraction_split = 2;
        Specified by:
        hasFractionSplit in interface InputDataConfigOrBuilder
        Returns:
        Whether the fractionSplit field is set.
      • getFractionSplit

        public FractionSplit getFractionSplit()
         Split based on fractions defining the size of each set.
         
        .google.cloud.aiplatform.v1.FractionSplit fraction_split = 2;
        Specified by:
        getFractionSplit in interface InputDataConfigOrBuilder
        Returns:
        The fractionSplit.
      • setFractionSplit

        public InputDataConfig.Builder setFractionSplit(FractionSplit value)
         Split based on fractions defining the size of each set.
         
        .google.cloud.aiplatform.v1.FractionSplit fraction_split = 2;
      • setFractionSplit

        public InputDataConfig.Builder setFractionSplit(FractionSplit.Builder builderForValue)
         Split based on fractions defining the size of each set.
         
        .google.cloud.aiplatform.v1.FractionSplit fraction_split = 2;
      • mergeFractionSplit

        public InputDataConfig.Builder mergeFractionSplit(FractionSplit value)
         Split based on fractions defining the size of each set.
         
        .google.cloud.aiplatform.v1.FractionSplit fraction_split = 2;
      • clearFractionSplit

        public InputDataConfig.Builder clearFractionSplit()
         Split based on fractions defining the size of each set.
         
        .google.cloud.aiplatform.v1.FractionSplit fraction_split = 2;
      • getFractionSplitBuilder

        public FractionSplit.Builder getFractionSplitBuilder()
         Split based on fractions defining the size of each set.
         
        .google.cloud.aiplatform.v1.FractionSplit fraction_split = 2;
      • hasFilterSplit

        public boolean hasFilterSplit()
         Split based on the provided filters for each set.
         
        .google.cloud.aiplatform.v1.FilterSplit filter_split = 3;
        Specified by:
        hasFilterSplit in interface InputDataConfigOrBuilder
        Returns:
        Whether the filterSplit field is set.
      • getFilterSplit

        public FilterSplit getFilterSplit()
         Split based on the provided filters for each set.
         
        .google.cloud.aiplatform.v1.FilterSplit filter_split = 3;
        Specified by:
        getFilterSplit in interface InputDataConfigOrBuilder
        Returns:
        The filterSplit.
      • setFilterSplit

        public InputDataConfig.Builder setFilterSplit(FilterSplit value)
         Split based on the provided filters for each set.
         
        .google.cloud.aiplatform.v1.FilterSplit filter_split = 3;
      • setFilterSplit

        public InputDataConfig.Builder setFilterSplit(FilterSplit.Builder builderForValue)
         Split based on the provided filters for each set.
         
        .google.cloud.aiplatform.v1.FilterSplit filter_split = 3;
      • mergeFilterSplit

        public InputDataConfig.Builder mergeFilterSplit(FilterSplit value)
         Split based on the provided filters for each set.
         
        .google.cloud.aiplatform.v1.FilterSplit filter_split = 3;
      • clearFilterSplit

        public InputDataConfig.Builder clearFilterSplit()
         Split based on the provided filters for each set.
         
        .google.cloud.aiplatform.v1.FilterSplit filter_split = 3;
      • getFilterSplitBuilder

        public FilterSplit.Builder getFilterSplitBuilder()
         Split based on the provided filters for each set.
         
        .google.cloud.aiplatform.v1.FilterSplit filter_split = 3;
      • hasPredefinedSplit

        public boolean hasPredefinedSplit()
         Supported only for tabular Datasets.
        
         Split based on a predefined key.
         
        .google.cloud.aiplatform.v1.PredefinedSplit predefined_split = 4;
        Specified by:
        hasPredefinedSplit in interface InputDataConfigOrBuilder
        Returns:
        Whether the predefinedSplit field is set.
      • getPredefinedSplit

        public PredefinedSplit getPredefinedSplit()
         Supported only for tabular Datasets.
        
         Split based on a predefined key.
         
        .google.cloud.aiplatform.v1.PredefinedSplit predefined_split = 4;
        Specified by:
        getPredefinedSplit in interface InputDataConfigOrBuilder
        Returns:
        The predefinedSplit.
      • setPredefinedSplit

        public InputDataConfig.Builder setPredefinedSplit(PredefinedSplit value)
         Supported only for tabular Datasets.
        
         Split based on a predefined key.
         
        .google.cloud.aiplatform.v1.PredefinedSplit predefined_split = 4;
      • setPredefinedSplit

        public InputDataConfig.Builder setPredefinedSplit(PredefinedSplit.Builder builderForValue)
         Supported only for tabular Datasets.
        
         Split based on a predefined key.
         
        .google.cloud.aiplatform.v1.PredefinedSplit predefined_split = 4;
      • mergePredefinedSplit

        public InputDataConfig.Builder mergePredefinedSplit(PredefinedSplit value)
         Supported only for tabular Datasets.
        
         Split based on a predefined key.
         
        .google.cloud.aiplatform.v1.PredefinedSplit predefined_split = 4;
      • clearPredefinedSplit

        public InputDataConfig.Builder clearPredefinedSplit()
         Supported only for tabular Datasets.
        
         Split based on a predefined key.
         
        .google.cloud.aiplatform.v1.PredefinedSplit predefined_split = 4;
      • getPredefinedSplitBuilder

        public PredefinedSplit.Builder getPredefinedSplitBuilder()
         Supported only for tabular Datasets.
        
         Split based on a predefined key.
         
        .google.cloud.aiplatform.v1.PredefinedSplit predefined_split = 4;
      • hasTimestampSplit

        public boolean hasTimestampSplit()
         Supported only for tabular Datasets.
        
         Split based on the timestamp of the input data pieces.
         
        .google.cloud.aiplatform.v1.TimestampSplit timestamp_split = 5;
        Specified by:
        hasTimestampSplit in interface InputDataConfigOrBuilder
        Returns:
        Whether the timestampSplit field is set.
      • getTimestampSplit

        public TimestampSplit getTimestampSplit()
         Supported only for tabular Datasets.
        
         Split based on the timestamp of the input data pieces.
         
        .google.cloud.aiplatform.v1.TimestampSplit timestamp_split = 5;
        Specified by:
        getTimestampSplit in interface InputDataConfigOrBuilder
        Returns:
        The timestampSplit.
      • setTimestampSplit

        public InputDataConfig.Builder setTimestampSplit(TimestampSplit value)
         Supported only for tabular Datasets.
        
         Split based on the timestamp of the input data pieces.
         
        .google.cloud.aiplatform.v1.TimestampSplit timestamp_split = 5;
      • setTimestampSplit

        public InputDataConfig.Builder setTimestampSplit(TimestampSplit.Builder builderForValue)
         Supported only for tabular Datasets.
        
         Split based on the timestamp of the input data pieces.
         
        .google.cloud.aiplatform.v1.TimestampSplit timestamp_split = 5;
      • mergeTimestampSplit

        public InputDataConfig.Builder mergeTimestampSplit(TimestampSplit value)
         Supported only for tabular Datasets.
        
         Split based on the timestamp of the input data pieces.
         
        .google.cloud.aiplatform.v1.TimestampSplit timestamp_split = 5;
      • clearTimestampSplit

        public InputDataConfig.Builder clearTimestampSplit()
         Supported only for tabular Datasets.
        
         Split based on the timestamp of the input data pieces.
         
        .google.cloud.aiplatform.v1.TimestampSplit timestamp_split = 5;
      • getTimestampSplitBuilder

        public TimestampSplit.Builder getTimestampSplitBuilder()
         Supported only for tabular Datasets.
        
         Split based on the timestamp of the input data pieces.
         
        .google.cloud.aiplatform.v1.TimestampSplit timestamp_split = 5;
      • hasStratifiedSplit

        public boolean hasStratifiedSplit()
         Supported only for tabular Datasets.
        
         Split based on the distribution of the specified column.
         
        .google.cloud.aiplatform.v1.StratifiedSplit stratified_split = 12;
        Specified by:
        hasStratifiedSplit in interface InputDataConfigOrBuilder
        Returns:
        Whether the stratifiedSplit field is set.
      • getStratifiedSplit

        public StratifiedSplit getStratifiedSplit()
         Supported only for tabular Datasets.
        
         Split based on the distribution of the specified column.
         
        .google.cloud.aiplatform.v1.StratifiedSplit stratified_split = 12;
        Specified by:
        getStratifiedSplit in interface InputDataConfigOrBuilder
        Returns:
        The stratifiedSplit.
      • setStratifiedSplit

        public InputDataConfig.Builder setStratifiedSplit(StratifiedSplit value)
         Supported only for tabular Datasets.
        
         Split based on the distribution of the specified column.
         
        .google.cloud.aiplatform.v1.StratifiedSplit stratified_split = 12;
      • setStratifiedSplit

        public InputDataConfig.Builder setStratifiedSplit(StratifiedSplit.Builder builderForValue)
         Supported only for tabular Datasets.
        
         Split based on the distribution of the specified column.
         
        .google.cloud.aiplatform.v1.StratifiedSplit stratified_split = 12;
      • mergeStratifiedSplit

        public InputDataConfig.Builder mergeStratifiedSplit(StratifiedSplit value)
         Supported only for tabular Datasets.
        
         Split based on the distribution of the specified column.
         
        .google.cloud.aiplatform.v1.StratifiedSplit stratified_split = 12;
      • clearStratifiedSplit

        public InputDataConfig.Builder clearStratifiedSplit()
         Supported only for tabular Datasets.
        
         Split based on the distribution of the specified column.
         
        .google.cloud.aiplatform.v1.StratifiedSplit stratified_split = 12;
      • getStratifiedSplitBuilder

        public StratifiedSplit.Builder getStratifiedSplitBuilder()
         Supported only for tabular Datasets.
        
         Split based on the distribution of the specified column.
         
        .google.cloud.aiplatform.v1.StratifiedSplit stratified_split = 12;
      • hasGcsDestination

        public boolean hasGcsDestination()
         The Cloud Storage location where the training data is to be
         written to. In the given directory a new directory is created with
         name:
         `dataset-<dataset-id>-<annotation-type>-<timestamp-of-training-call>`
         where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format.
         All training input data is written into that directory.
        
         The Vertex AI environment variables representing Cloud Storage
         data URIs are represented in the Cloud Storage wildcard
         format to support sharded data. e.g.: "gs://.../training-*.jsonl"
        
         * AIP_DATA_FORMAT = "jsonl" for non-tabular data, "csv" for tabular data
         * AIP_TRAINING_DATA_URI =
         "gcs_destination/dataset-<dataset-id>-<annotation-type>-<time>/training-*.${AIP_DATA_FORMAT}"
        
         * AIP_VALIDATION_DATA_URI =
         "gcs_destination/dataset-<dataset-id>-<annotation-type>-<time>/validation-*.${AIP_DATA_FORMAT}"
        
         * AIP_TEST_DATA_URI =
         "gcs_destination/dataset-<dataset-id>-<annotation-type>-<time>/test-*.${AIP_DATA_FORMAT}"
         
        .google.cloud.aiplatform.v1.GcsDestination gcs_destination = 8;
        Specified by:
        hasGcsDestination in interface InputDataConfigOrBuilder
        Returns:
        Whether the gcsDestination field is set.
      • getGcsDestination

        public GcsDestination getGcsDestination()
         The Cloud Storage location where the training data is to be
         written to. In the given directory a new directory is created with
         name:
         `dataset-<dataset-id>-<annotation-type>-<timestamp-of-training-call>`
         where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format.
         All training input data is written into that directory.
        
         The Vertex AI environment variables representing Cloud Storage
         data URIs are represented in the Cloud Storage wildcard
         format to support sharded data. e.g.: "gs://.../training-*.jsonl"
        
         * AIP_DATA_FORMAT = "jsonl" for non-tabular data, "csv" for tabular data
         * AIP_TRAINING_DATA_URI =
         "gcs_destination/dataset-<dataset-id>-<annotation-type>-<time>/training-*.${AIP_DATA_FORMAT}"
        
         * AIP_VALIDATION_DATA_URI =
         "gcs_destination/dataset-<dataset-id>-<annotation-type>-<time>/validation-*.${AIP_DATA_FORMAT}"
        
         * AIP_TEST_DATA_URI =
         "gcs_destination/dataset-<dataset-id>-<annotation-type>-<time>/test-*.${AIP_DATA_FORMAT}"
         
        .google.cloud.aiplatform.v1.GcsDestination gcs_destination = 8;
        Specified by:
        getGcsDestination in interface InputDataConfigOrBuilder
        Returns:
        The gcsDestination.
      • setGcsDestination

        public InputDataConfig.Builder setGcsDestination(GcsDestination value)
         The Cloud Storage location where the training data is to be
         written to. In the given directory a new directory is created with
         name:
         `dataset-<dataset-id>-<annotation-type>-<timestamp-of-training-call>`
         where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format.
         All training input data is written into that directory.
        
         The Vertex AI environment variables representing Cloud Storage
         data URIs are represented in the Cloud Storage wildcard
         format to support sharded data. e.g.: "gs://.../training-*.jsonl"
        
         * AIP_DATA_FORMAT = "jsonl" for non-tabular data, "csv" for tabular data
         * AIP_TRAINING_DATA_URI =
         "gcs_destination/dataset-<dataset-id>-<annotation-type>-<time>/training-*.${AIP_DATA_FORMAT}"
        
         * AIP_VALIDATION_DATA_URI =
         "gcs_destination/dataset-<dataset-id>-<annotation-type>-<time>/validation-*.${AIP_DATA_FORMAT}"
        
         * AIP_TEST_DATA_URI =
         "gcs_destination/dataset-<dataset-id>-<annotation-type>-<time>/test-*.${AIP_DATA_FORMAT}"
         
        .google.cloud.aiplatform.v1.GcsDestination gcs_destination = 8;
      • setGcsDestination

        public InputDataConfig.Builder setGcsDestination(GcsDestination.Builder builderForValue)
         The Cloud Storage location where the training data is to be
         written to. In the given directory a new directory is created with
         name:
         `dataset-<dataset-id>-<annotation-type>-<timestamp-of-training-call>`
         where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format.
         All training input data is written into that directory.
        
         The Vertex AI environment variables representing Cloud Storage
         data URIs are represented in the Cloud Storage wildcard
         format to support sharded data. e.g.: "gs://.../training-*.jsonl"
        
         * AIP_DATA_FORMAT = "jsonl" for non-tabular data, "csv" for tabular data
         * AIP_TRAINING_DATA_URI =
         "gcs_destination/dataset-<dataset-id>-<annotation-type>-<time>/training-*.${AIP_DATA_FORMAT}"
        
         * AIP_VALIDATION_DATA_URI =
         "gcs_destination/dataset-<dataset-id>-<annotation-type>-<time>/validation-*.${AIP_DATA_FORMAT}"
        
         * AIP_TEST_DATA_URI =
         "gcs_destination/dataset-<dataset-id>-<annotation-type>-<time>/test-*.${AIP_DATA_FORMAT}"
         
        .google.cloud.aiplatform.v1.GcsDestination gcs_destination = 8;
      • mergeGcsDestination

        public InputDataConfig.Builder mergeGcsDestination(GcsDestination value)
         The Cloud Storage location where the training data is to be
         written to. In the given directory a new directory is created with
         name:
         `dataset-<dataset-id>-<annotation-type>-<timestamp-of-training-call>`
         where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format.
         All training input data is written into that directory.
        
         The Vertex AI environment variables representing Cloud Storage
         data URIs are represented in the Cloud Storage wildcard
         format to support sharded data. e.g.: "gs://.../training-*.jsonl"
        
         * AIP_DATA_FORMAT = "jsonl" for non-tabular data, "csv" for tabular data
         * AIP_TRAINING_DATA_URI =
         "gcs_destination/dataset-<dataset-id>-<annotation-type>-<time>/training-*.${AIP_DATA_FORMAT}"
        
         * AIP_VALIDATION_DATA_URI =
         "gcs_destination/dataset-<dataset-id>-<annotation-type>-<time>/validation-*.${AIP_DATA_FORMAT}"
        
         * AIP_TEST_DATA_URI =
         "gcs_destination/dataset-<dataset-id>-<annotation-type>-<time>/test-*.${AIP_DATA_FORMAT}"
         
        .google.cloud.aiplatform.v1.GcsDestination gcs_destination = 8;
      • clearGcsDestination

        public InputDataConfig.Builder clearGcsDestination()
         The Cloud Storage location where the training data is to be
         written to. In the given directory a new directory is created with
         name:
         `dataset-<dataset-id>-<annotation-type>-<timestamp-of-training-call>`
         where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format.
         All training input data is written into that directory.
        
         The Vertex AI environment variables representing Cloud Storage
         data URIs are represented in the Cloud Storage wildcard
         format to support sharded data. e.g.: "gs://.../training-*.jsonl"
        
         * AIP_DATA_FORMAT = "jsonl" for non-tabular data, "csv" for tabular data
         * AIP_TRAINING_DATA_URI =
         "gcs_destination/dataset-<dataset-id>-<annotation-type>-<time>/training-*.${AIP_DATA_FORMAT}"
        
         * AIP_VALIDATION_DATA_URI =
         "gcs_destination/dataset-<dataset-id>-<annotation-type>-<time>/validation-*.${AIP_DATA_FORMAT}"
        
         * AIP_TEST_DATA_URI =
         "gcs_destination/dataset-<dataset-id>-<annotation-type>-<time>/test-*.${AIP_DATA_FORMAT}"
         
        .google.cloud.aiplatform.v1.GcsDestination gcs_destination = 8;
      • getGcsDestinationBuilder

        public GcsDestination.Builder getGcsDestinationBuilder()
         The Cloud Storage location where the training data is to be
         written to. In the given directory a new directory is created with
         name:
         `dataset-<dataset-id>-<annotation-type>-<timestamp-of-training-call>`
         where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format.
         All training input data is written into that directory.
        
         The Vertex AI environment variables representing Cloud Storage
         data URIs are represented in the Cloud Storage wildcard
         format to support sharded data. e.g.: "gs://.../training-*.jsonl"
        
         * AIP_DATA_FORMAT = "jsonl" for non-tabular data, "csv" for tabular data
         * AIP_TRAINING_DATA_URI =
         "gcs_destination/dataset-<dataset-id>-<annotation-type>-<time>/training-*.${AIP_DATA_FORMAT}"
        
         * AIP_VALIDATION_DATA_URI =
         "gcs_destination/dataset-<dataset-id>-<annotation-type>-<time>/validation-*.${AIP_DATA_FORMAT}"
        
         * AIP_TEST_DATA_URI =
         "gcs_destination/dataset-<dataset-id>-<annotation-type>-<time>/test-*.${AIP_DATA_FORMAT}"
         
        .google.cloud.aiplatform.v1.GcsDestination gcs_destination = 8;
      • getGcsDestinationOrBuilder

        public GcsDestinationOrBuilder getGcsDestinationOrBuilder()
         The Cloud Storage location where the training data is to be
         written to. In the given directory a new directory is created with
         name:
         `dataset-<dataset-id>-<annotation-type>-<timestamp-of-training-call>`
         where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format.
         All training input data is written into that directory.
        
         The Vertex AI environment variables representing Cloud Storage
         data URIs are represented in the Cloud Storage wildcard
         format to support sharded data. e.g.: "gs://.../training-*.jsonl"
        
         * AIP_DATA_FORMAT = "jsonl" for non-tabular data, "csv" for tabular data
         * AIP_TRAINING_DATA_URI =
         "gcs_destination/dataset-<dataset-id>-<annotation-type>-<time>/training-*.${AIP_DATA_FORMAT}"
        
         * AIP_VALIDATION_DATA_URI =
         "gcs_destination/dataset-<dataset-id>-<annotation-type>-<time>/validation-*.${AIP_DATA_FORMAT}"
        
         * AIP_TEST_DATA_URI =
         "gcs_destination/dataset-<dataset-id>-<annotation-type>-<time>/test-*.${AIP_DATA_FORMAT}"
         
        .google.cloud.aiplatform.v1.GcsDestination gcs_destination = 8;
        Specified by:
        getGcsDestinationOrBuilder in interface InputDataConfigOrBuilder
      • hasBigqueryDestination

        public boolean hasBigqueryDestination()
         Only applicable to custom training with tabular Dataset with BigQuery
         source.
        
         The BigQuery project location where the training data is to be written
         to. In the given project a new dataset is created with name
         `dataset_<dataset-id>_<annotation-type>_<timestamp-of-training-call>`
         where timestamp is in YYYY_MM_DDThh_mm_ss_sssZ format. All training
         input data is written into that dataset. In the dataset three
         tables are created, `training`, `validation` and `test`.
        
         * AIP_DATA_FORMAT = "bigquery".
         * AIP_TRAINING_DATA_URI  =
         "bigquery_destination.dataset_<dataset-id>_<annotation-type>_<time>.training"
        
         * AIP_VALIDATION_DATA_URI =
         "bigquery_destination.dataset_<dataset-id>_<annotation-type>_<time>.validation"
        
         * AIP_TEST_DATA_URI =
         "bigquery_destination.dataset_<dataset-id>_<annotation-type>_<time>.test"
         
        .google.cloud.aiplatform.v1.BigQueryDestination bigquery_destination = 10;
        Specified by:
        hasBigqueryDestination in interface InputDataConfigOrBuilder
        Returns:
        Whether the bigqueryDestination field is set.
      • getBigqueryDestination

        public BigQueryDestination getBigqueryDestination()
         Only applicable to custom training with tabular Dataset with BigQuery
         source.
        
         The BigQuery project location where the training data is to be written
         to. In the given project a new dataset is created with name
         `dataset_<dataset-id>_<annotation-type>_<timestamp-of-training-call>`
         where timestamp is in YYYY_MM_DDThh_mm_ss_sssZ format. All training
         input data is written into that dataset. In the dataset three
         tables are created, `training`, `validation` and `test`.
        
         * AIP_DATA_FORMAT = "bigquery".
         * AIP_TRAINING_DATA_URI  =
         "bigquery_destination.dataset_<dataset-id>_<annotation-type>_<time>.training"
        
         * AIP_VALIDATION_DATA_URI =
         "bigquery_destination.dataset_<dataset-id>_<annotation-type>_<time>.validation"
        
         * AIP_TEST_DATA_URI =
         "bigquery_destination.dataset_<dataset-id>_<annotation-type>_<time>.test"
         
        .google.cloud.aiplatform.v1.BigQueryDestination bigquery_destination = 10;
        Specified by:
        getBigqueryDestination in interface InputDataConfigOrBuilder
        Returns:
        The bigqueryDestination.
      • setBigqueryDestination

        public InputDataConfig.Builder setBigqueryDestination(BigQueryDestination value)
         Only applicable to custom training with tabular Dataset with BigQuery
         source.
        
         The BigQuery project location where the training data is to be written
         to. In the given project a new dataset is created with name
         `dataset_<dataset-id>_<annotation-type>_<timestamp-of-training-call>`
         where timestamp is in YYYY_MM_DDThh_mm_ss_sssZ format. All training
         input data is written into that dataset. In the dataset three
         tables are created, `training`, `validation` and `test`.
        
         * AIP_DATA_FORMAT = "bigquery".
         * AIP_TRAINING_DATA_URI  =
         "bigquery_destination.dataset_<dataset-id>_<annotation-type>_<time>.training"
        
         * AIP_VALIDATION_DATA_URI =
         "bigquery_destination.dataset_<dataset-id>_<annotation-type>_<time>.validation"
        
         * AIP_TEST_DATA_URI =
         "bigquery_destination.dataset_<dataset-id>_<annotation-type>_<time>.test"
         
        .google.cloud.aiplatform.v1.BigQueryDestination bigquery_destination = 10;
      • setBigqueryDestination

        public InputDataConfig.Builder setBigqueryDestination(BigQueryDestination.Builder builderForValue)
         Only applicable to custom training with tabular Dataset with BigQuery
         source.
        
         The BigQuery project location where the training data is to be written
         to. In the given project a new dataset is created with name
         `dataset_<dataset-id>_<annotation-type>_<timestamp-of-training-call>`
         where timestamp is in YYYY_MM_DDThh_mm_ss_sssZ format. All training
         input data is written into that dataset. In the dataset three
         tables are created, `training`, `validation` and `test`.
        
         * AIP_DATA_FORMAT = "bigquery".
         * AIP_TRAINING_DATA_URI =
         "bigquery_destination.dataset_<dataset-id>_<annotation-type>_<time>.training"
        
         * AIP_VALIDATION_DATA_URI =
         "bigquery_destination.dataset_<dataset-id>_<annotation-type>_<time>.validation"
        
         * AIP_TEST_DATA_URI =
         "bigquery_destination.dataset_<dataset-id>_<annotation-type>_<time>.test"
         
        .google.cloud.aiplatform.v1.BigQueryDestination bigquery_destination = 10;
      • mergeBigqueryDestination

        public InputDataConfig.Builder mergeBigqueryDestination​(BigQueryDestination value)
         Only applicable to custom training with tabular Dataset with BigQuery
         source.
        
         The BigQuery project location where the training data is to be written
         to. In the given project a new dataset is created with name
         `dataset_<dataset-id>_<annotation-type>_<timestamp-of-training-call>`
         where timestamp is in YYYY_MM_DDThh_mm_ss_sssZ format. All training
         input data is written into that dataset. In the dataset three
         tables are created, `training`, `validation` and `test`.
        
         * AIP_DATA_FORMAT = "bigquery".
         * AIP_TRAINING_DATA_URI =
         "bigquery_destination.dataset_<dataset-id>_<annotation-type>_<time>.training"
        
         * AIP_VALIDATION_DATA_URI =
         "bigquery_destination.dataset_<dataset-id>_<annotation-type>_<time>.validation"
        
         * AIP_TEST_DATA_URI =
         "bigquery_destination.dataset_<dataset-id>_<annotation-type>_<time>.test"
         
        .google.cloud.aiplatform.v1.BigQueryDestination bigquery_destination = 10;
      • clearBigqueryDestination

        public InputDataConfig.Builder clearBigqueryDestination()
         Only applicable to custom training with tabular Dataset with BigQuery
         source.
        
         The BigQuery project location where the training data is to be written
         to. In the given project a new dataset is created with name
         `dataset_<dataset-id>_<annotation-type>_<timestamp-of-training-call>`
         where timestamp is in YYYY_MM_DDThh_mm_ss_sssZ format. All training
         input data is written into that dataset. In the dataset three
         tables are created, `training`, `validation` and `test`.
        
         * AIP_DATA_FORMAT = "bigquery".
         * AIP_TRAINING_DATA_URI =
         "bigquery_destination.dataset_<dataset-id>_<annotation-type>_<time>.training"
        
         * AIP_VALIDATION_DATA_URI =
         "bigquery_destination.dataset_<dataset-id>_<annotation-type>_<time>.validation"
        
         * AIP_TEST_DATA_URI =
         "bigquery_destination.dataset_<dataset-id>_<annotation-type>_<time>.test"
         
        .google.cloud.aiplatform.v1.BigQueryDestination bigquery_destination = 10;
      • getBigqueryDestinationBuilder

        public BigQueryDestination.Builder getBigqueryDestinationBuilder()
         Only applicable to custom training with tabular Dataset with BigQuery
         source.
        
         The BigQuery project location where the training data is to be written
         to. In the given project a new dataset is created with name
         `dataset_<dataset-id>_<annotation-type>_<timestamp-of-training-call>`
         where timestamp is in YYYY_MM_DDThh_mm_ss_sssZ format. All training
         input data is written into that dataset. In the dataset three
         tables are created, `training`, `validation` and `test`.
        
         * AIP_DATA_FORMAT = "bigquery".
         * AIP_TRAINING_DATA_URI =
         "bigquery_destination.dataset_<dataset-id>_<annotation-type>_<time>.training"
        
         * AIP_VALIDATION_DATA_URI =
         "bigquery_destination.dataset_<dataset-id>_<annotation-type>_<time>.validation"
        
         * AIP_TEST_DATA_URI =
         "bigquery_destination.dataset_<dataset-id>_<annotation-type>_<time>.test"
         
        .google.cloud.aiplatform.v1.BigQueryDestination bigquery_destination = 10;
      • getBigqueryDestinationOrBuilder

        public BigQueryDestinationOrBuilder getBigqueryDestinationOrBuilder()
         Only applicable to custom training with tabular Dataset with BigQuery
         source.
        
         The BigQuery project location where the training data is to be written
         to. In the given project a new dataset is created with name
         `dataset_<dataset-id>_<annotation-type>_<timestamp-of-training-call>`
         where timestamp is in YYYY_MM_DDThh_mm_ss_sssZ format. All training
         input data is written into that dataset. In the dataset three
         tables are created, `training`, `validation` and `test`.
        
         * AIP_DATA_FORMAT = "bigquery".
         * AIP_TRAINING_DATA_URI =
         "bigquery_destination.dataset_<dataset-id>_<annotation-type>_<time>.training"
        
         * AIP_VALIDATION_DATA_URI =
         "bigquery_destination.dataset_<dataset-id>_<annotation-type>_<time>.validation"
        
         * AIP_TEST_DATA_URI =
         "bigquery_destination.dataset_<dataset-id>_<annotation-type>_<time>.test"
         
        .google.cloud.aiplatform.v1.BigQueryDestination bigquery_destination = 10;
        Specified by:
        getBigqueryDestinationOrBuilder in interface InputDataConfigOrBuilder
      • getDatasetId

        public String getDatasetId()
         Required. The ID of the Dataset in the same Project and Location whose data
         will be used to train the Model. The Dataset must use schema compatible
         with Model being trained, and what is compatible should be described in the
         used TrainingPipeline's [training_task_definition]
         [google.cloud.aiplatform.v1.TrainingPipeline.training_task_definition].
         For tabular Datasets, all their data is exported to training, to pick
         and choose from.
         
        string dataset_id = 1 [(.google.api.field_behavior) = REQUIRED];
        Specified by:
        getDatasetId in interface InputDataConfigOrBuilder
        Returns:
        The datasetId.
      • getDatasetIdBytes

        public com.google.protobuf.ByteString getDatasetIdBytes()
         Required. The ID of the Dataset in the same Project and Location whose data
         will be used to train the Model. The Dataset must use schema compatible
         with Model being trained, and what is compatible should be described in the
         used TrainingPipeline's [training_task_definition]
         [google.cloud.aiplatform.v1.TrainingPipeline.training_task_definition].
         For tabular Datasets, all their data is exported to training, to pick
         and choose from.
         
        string dataset_id = 1 [(.google.api.field_behavior) = REQUIRED];
        Specified by:
        getDatasetIdBytes in interface InputDataConfigOrBuilder
        Returns:
        The bytes for datasetId.
      • setDatasetId

        public InputDataConfig.Builder setDatasetId​(String value)
         Required. The ID of the Dataset in the same Project and Location whose data
         will be used to train the Model. The Dataset must use schema compatible
         with Model being trained, and what is compatible should be described in the
         used TrainingPipeline's [training_task_definition]
         [google.cloud.aiplatform.v1.TrainingPipeline.training_task_definition].
         For tabular Datasets, all their data is exported to training, to pick
         and choose from.
         
        string dataset_id = 1 [(.google.api.field_behavior) = REQUIRED];
        Parameters:
        value - The datasetId to set.
        Returns:
        This builder for chaining.
      • clearDatasetId

        public InputDataConfig.Builder clearDatasetId()
         Required. The ID of the Dataset in the same Project and Location whose data
         will be used to train the Model. The Dataset must use schema compatible
         with Model being trained, and what is compatible should be described in the
         used TrainingPipeline's [training_task_definition]
         [google.cloud.aiplatform.v1.TrainingPipeline.training_task_definition].
         For tabular Datasets, all their data is exported to training, to pick
         and choose from.
         
        string dataset_id = 1 [(.google.api.field_behavior) = REQUIRED];
        Returns:
        This builder for chaining.
      • setDatasetIdBytes

        public InputDataConfig.Builder setDatasetIdBytes​(com.google.protobuf.ByteString value)
         Required. The ID of the Dataset in the same Project and Location whose data
         will be used to train the Model. The Dataset must use schema compatible
         with Model being trained, and what is compatible should be described in the
         used TrainingPipeline's [training_task_definition]
         [google.cloud.aiplatform.v1.TrainingPipeline.training_task_definition].
         For tabular Datasets, all their data is exported to training, to pick
         and choose from.
         
        string dataset_id = 1 [(.google.api.field_behavior) = REQUIRED];
        Parameters:
        value - The bytes for datasetId to set.
        Returns:
        This builder for chaining.
      • getAnnotationsFilter

        public String getAnnotationsFilter()
         Applicable only to Datasets that have DataItems and Annotations.
        
         A filter on Annotations of the Dataset. Only Annotations that both
         match this filter and belong to DataItems not ignored by the split method
         are used in respectively training, validation or test role, depending on
         the role of the DataItem they are on (for auto-assigned DataItems that role
         is decided by Vertex AI). A filter with the same syntax as the one used in
         [ListAnnotations][google.cloud.aiplatform.v1.DatasetService.ListAnnotations]
         may be used, but note here it filters across all Annotations of the
         Dataset, and not just within a single DataItem.
         
        string annotations_filter = 6;
        Specified by:
        getAnnotationsFilter in interface InputDataConfigOrBuilder
        Returns:
        The annotationsFilter.
      • getAnnotationsFilterBytes

        public com.google.protobuf.ByteString getAnnotationsFilterBytes()
         Applicable only to Datasets that have DataItems and Annotations.
        
         A filter on Annotations of the Dataset. Only Annotations that both
         match this filter and belong to DataItems not ignored by the split method
         are used in respectively training, validation or test role, depending on
         the role of the DataItem they are on (for auto-assigned DataItems that role
         is decided by Vertex AI). A filter with the same syntax as the one used in
         [ListAnnotations][google.cloud.aiplatform.v1.DatasetService.ListAnnotations]
         may be used, but note here it filters across all Annotations of the
         Dataset, and not just within a single DataItem.
         
        string annotations_filter = 6;
        Specified by:
        getAnnotationsFilterBytes in interface InputDataConfigOrBuilder
        Returns:
        The bytes for annotationsFilter.
      • setAnnotationsFilter

        public InputDataConfig.Builder setAnnotationsFilter​(String value)
         Applicable only to Datasets that have DataItems and Annotations.
        
         A filter on Annotations of the Dataset. Only Annotations that both
         match this filter and belong to DataItems not ignored by the split method
         are used in respectively training, validation or test role, depending on
         the role of the DataItem they are on (for auto-assigned DataItems that role
         is decided by Vertex AI). A filter with the same syntax as the one used in
         [ListAnnotations][google.cloud.aiplatform.v1.DatasetService.ListAnnotations]
         may be used, but note here it filters across all Annotations of the
         Dataset, and not just within a single DataItem.
         
        string annotations_filter = 6;
        Parameters:
        value - The annotationsFilter to set.
        Returns:
        This builder for chaining.
      • clearAnnotationsFilter

        public InputDataConfig.Builder clearAnnotationsFilter()
         Applicable only to Datasets that have DataItems and Annotations.
        
         A filter on Annotations of the Dataset. Only Annotations that both
         match this filter and belong to DataItems not ignored by the split method
         are used in respectively training, validation or test role, depending on
         the role of the DataItem they are on (for auto-assigned DataItems that role
         is decided by Vertex AI). A filter with the same syntax as the one used in
         [ListAnnotations][google.cloud.aiplatform.v1.DatasetService.ListAnnotations]
         may be used, but note here it filters across all Annotations of the
         Dataset, and not just within a single DataItem.
         
        string annotations_filter = 6;
        Returns:
        This builder for chaining.
      • setAnnotationsFilterBytes

        public InputDataConfig.Builder setAnnotationsFilterBytes​(com.google.protobuf.ByteString value)
         Applicable only to Datasets that have DataItems and Annotations.
        
         A filter on Annotations of the Dataset. Only Annotations that both
         match this filter and belong to DataItems not ignored by the split method
         are used in respectively training, validation or test role, depending on
         the role of the DataItem they are on (for auto-assigned DataItems that role
         is decided by Vertex AI). A filter with the same syntax as the one used in
         [ListAnnotations][google.cloud.aiplatform.v1.DatasetService.ListAnnotations]
         may be used, but note here it filters across all Annotations of the
         Dataset, and not just within a single DataItem.
         
        string annotations_filter = 6;
        Parameters:
        value - The bytes for annotationsFilter to set.
        Returns:
        This builder for chaining.
      • getAnnotationSchemaUri

        public String getAnnotationSchemaUri()
         Applicable only to custom training with Datasets that have DataItems and
         Annotations.
        
         Cloud Storage URI that points to a YAML file describing the annotation
         schema. The schema is defined as an OpenAPI 3.0.2 [Schema
         Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject).
         The schema files that can be used here are found in
         gs://google-cloud-aiplatform/schema/dataset/annotation/ , note that the
         chosen schema must be consistent with
         [metadata][google.cloud.aiplatform.v1.Dataset.metadata_schema_uri] of the
         Dataset specified by
         [dataset_id][google.cloud.aiplatform.v1.InputDataConfig.dataset_id].
        
         Only Annotations that both match this schema and belong to DataItems not
         ignored by the split method are used in respectively training, validation
         or test role, depending on the role of the DataItem they are on.
        
         When used in conjunction with
         [annotations_filter][google.cloud.aiplatform.v1.InputDataConfig.annotations_filter],
         the Annotations used for training are filtered by both
         [annotations_filter][google.cloud.aiplatform.v1.InputDataConfig.annotations_filter]
         and
         [annotation_schema_uri][google.cloud.aiplatform.v1.InputDataConfig.annotation_schema_uri].
         
        string annotation_schema_uri = 9;
        Specified by:
        getAnnotationSchemaUri in interface InputDataConfigOrBuilder
        Returns:
        The annotationSchemaUri.
      • getAnnotationSchemaUriBytes

        public com.google.protobuf.ByteString getAnnotationSchemaUriBytes()
         Applicable only to custom training with Datasets that have DataItems and
         Annotations.
        
         Cloud Storage URI that points to a YAML file describing the annotation
         schema. The schema is defined as an OpenAPI 3.0.2 [Schema
         Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject).
         The schema files that can be used here are found in
         gs://google-cloud-aiplatform/schema/dataset/annotation/ , note that the
         chosen schema must be consistent with
         [metadata][google.cloud.aiplatform.v1.Dataset.metadata_schema_uri] of the
         Dataset specified by
         [dataset_id][google.cloud.aiplatform.v1.InputDataConfig.dataset_id].
        
         Only Annotations that both match this schema and belong to DataItems not
         ignored by the split method are used in respectively training, validation
         or test role, depending on the role of the DataItem they are on.
        
         When used in conjunction with
         [annotations_filter][google.cloud.aiplatform.v1.InputDataConfig.annotations_filter],
         the Annotations used for training are filtered by both
         [annotations_filter][google.cloud.aiplatform.v1.InputDataConfig.annotations_filter]
         and
         [annotation_schema_uri][google.cloud.aiplatform.v1.InputDataConfig.annotation_schema_uri].
         
        string annotation_schema_uri = 9;
        Specified by:
        getAnnotationSchemaUriBytes in interface InputDataConfigOrBuilder
        Returns:
        The bytes for annotationSchemaUri.
      • setAnnotationSchemaUri

        public InputDataConfig.Builder setAnnotationSchemaUri​(String value)
         Applicable only to custom training with Datasets that have DataItems and
         Annotations.
        
         Cloud Storage URI that points to a YAML file describing the annotation
         schema. The schema is defined as an OpenAPI 3.0.2 [Schema
         Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject).
         The schema files that can be used here are found in
         gs://google-cloud-aiplatform/schema/dataset/annotation/ , note that the
         chosen schema must be consistent with
         [metadata][google.cloud.aiplatform.v1.Dataset.metadata_schema_uri] of the
         Dataset specified by
         [dataset_id][google.cloud.aiplatform.v1.InputDataConfig.dataset_id].
        
         Only Annotations that both match this schema and belong to DataItems not
         ignored by the split method are used in respectively training, validation
         or test role, depending on the role of the DataItem they are on.
        
         When used in conjunction with
         [annotations_filter][google.cloud.aiplatform.v1.InputDataConfig.annotations_filter],
         the Annotations used for training are filtered by both
         [annotations_filter][google.cloud.aiplatform.v1.InputDataConfig.annotations_filter]
         and
         [annotation_schema_uri][google.cloud.aiplatform.v1.InputDataConfig.annotation_schema_uri].
         
        string annotation_schema_uri = 9;
        Parameters:
        value - The annotationSchemaUri to set.
        Returns:
        This builder for chaining.
      • clearAnnotationSchemaUri

        public InputDataConfig.Builder clearAnnotationSchemaUri()
         Applicable only to custom training with Datasets that have DataItems and
         Annotations.
        
         Cloud Storage URI that points to a YAML file describing the annotation
         schema. The schema is defined as an OpenAPI 3.0.2 [Schema
         Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject).
         The schema files that can be used here are found in
         gs://google-cloud-aiplatform/schema/dataset/annotation/ , note that the
         chosen schema must be consistent with
         [metadata][google.cloud.aiplatform.v1.Dataset.metadata_schema_uri] of the
         Dataset specified by
         [dataset_id][google.cloud.aiplatform.v1.InputDataConfig.dataset_id].
        
         Only Annotations that both match this schema and belong to DataItems not
         ignored by the split method are used in respectively training, validation
         or test role, depending on the role of the DataItem they are on.
        
         When used in conjunction with
         [annotations_filter][google.cloud.aiplatform.v1.InputDataConfig.annotations_filter],
         the Annotations used for training are filtered by both
         [annotations_filter][google.cloud.aiplatform.v1.InputDataConfig.annotations_filter]
         and
         [annotation_schema_uri][google.cloud.aiplatform.v1.InputDataConfig.annotation_schema_uri].
         
        string annotation_schema_uri = 9;
        Returns:
        This builder for chaining.
      • setAnnotationSchemaUriBytes

        public InputDataConfig.Builder setAnnotationSchemaUriBytes​(com.google.protobuf.ByteString value)
         Applicable only to custom training with Datasets that have DataItems and
         Annotations.
        
         Cloud Storage URI that points to a YAML file describing the annotation
         schema. The schema is defined as an OpenAPI 3.0.2 [Schema
         Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject).
         The schema files that can be used here are found in
         gs://google-cloud-aiplatform/schema/dataset/annotation/ , note that the
         chosen schema must be consistent with
         [metadata][google.cloud.aiplatform.v1.Dataset.metadata_schema_uri] of the
         Dataset specified by
         [dataset_id][google.cloud.aiplatform.v1.InputDataConfig.dataset_id].
        
         Only Annotations that both match this schema and belong to DataItems not
         ignored by the split method are used in respectively training, validation
         or test role, depending on the role of the DataItem they are on.
        
         When used in conjunction with
         [annotations_filter][google.cloud.aiplatform.v1.InputDataConfig.annotations_filter],
         the Annotations used for training are filtered by both
         [annotations_filter][google.cloud.aiplatform.v1.InputDataConfig.annotations_filter]
         and
         [annotation_schema_uri][google.cloud.aiplatform.v1.InputDataConfig.annotation_schema_uri].
         
        string annotation_schema_uri = 9;
        Parameters:
        value - The bytes for annotationSchemaUri to set.
        Returns:
        This builder for chaining.
      • getSavedQueryId

        public String getSavedQueryId()
         Only applicable to Datasets that have SavedQueries.
        
         The ID of a SavedQuery (annotation set) under the Dataset specified by
         [dataset_id][google.cloud.aiplatform.v1.InputDataConfig.dataset_id] used
         for filtering Annotations for training.
        
         Only Annotations that are associated with this SavedQuery are used for
         training. When used in conjunction with
         [annotations_filter][google.cloud.aiplatform.v1.InputDataConfig.annotations_filter],
         the Annotations used for training are filtered by both
         [saved_query_id][google.cloud.aiplatform.v1.InputDataConfig.saved_query_id]
         and
         [annotations_filter][google.cloud.aiplatform.v1.InputDataConfig.annotations_filter].
        
         Only one of
         [saved_query_id][google.cloud.aiplatform.v1.InputDataConfig.saved_query_id]
         and
         [annotation_schema_uri][google.cloud.aiplatform.v1.InputDataConfig.annotation_schema_uri]
         should be specified as both of them represent the same thing: problem type.
         
        string saved_query_id = 7;
        Specified by:
        getSavedQueryId in interface InputDataConfigOrBuilder
        Returns:
        The savedQueryId.
      • getSavedQueryIdBytes

        public com.google.protobuf.ByteString getSavedQueryIdBytes()
         Only applicable to Datasets that have SavedQueries.
        
         The ID of a SavedQuery (annotation set) under the Dataset specified by
         [dataset_id][google.cloud.aiplatform.v1.InputDataConfig.dataset_id] used
         for filtering Annotations for training.
        
         Only Annotations that are associated with this SavedQuery are used for
         training. When used in conjunction with
         [annotations_filter][google.cloud.aiplatform.v1.InputDataConfig.annotations_filter],
         the Annotations used for training are filtered by both
         [saved_query_id][google.cloud.aiplatform.v1.InputDataConfig.saved_query_id]
         and
         [annotations_filter][google.cloud.aiplatform.v1.InputDataConfig.annotations_filter].
        
         Only one of
         [saved_query_id][google.cloud.aiplatform.v1.InputDataConfig.saved_query_id]
         and
         [annotation_schema_uri][google.cloud.aiplatform.v1.InputDataConfig.annotation_schema_uri]
         should be specified as both of them represent the same thing: problem type.
         
        string saved_query_id = 7;
        Specified by:
        getSavedQueryIdBytes in interface InputDataConfigOrBuilder
        Returns:
        The bytes for savedQueryId.
      • setSavedQueryId

        public InputDataConfig.Builder setSavedQueryId​(String value)
         Only applicable to Datasets that have SavedQueries.
        
         The ID of a SavedQuery (annotation set) under the Dataset specified by
         [dataset_id][google.cloud.aiplatform.v1.InputDataConfig.dataset_id] used
         for filtering Annotations for training.
        
         Only Annotations that are associated with this SavedQuery are used for
         training. When used in conjunction with
         [annotations_filter][google.cloud.aiplatform.v1.InputDataConfig.annotations_filter],
         the Annotations used for training are filtered by both
         [saved_query_id][google.cloud.aiplatform.v1.InputDataConfig.saved_query_id]
         and
         [annotations_filter][google.cloud.aiplatform.v1.InputDataConfig.annotations_filter].
        
         Only one of
         [saved_query_id][google.cloud.aiplatform.v1.InputDataConfig.saved_query_id]
         and
         [annotation_schema_uri][google.cloud.aiplatform.v1.InputDataConfig.annotation_schema_uri]
         should be specified as both of them represent the same thing: problem type.
         
        string saved_query_id = 7;
        Parameters:
        value - The savedQueryId to set.
        Returns:
        This builder for chaining.
      • clearSavedQueryId

        public InputDataConfig.Builder clearSavedQueryId()
         Only applicable to Datasets that have SavedQueries.
        
         The ID of a SavedQuery (annotation set) under the Dataset specified by
         [dataset_id][google.cloud.aiplatform.v1.InputDataConfig.dataset_id] used
         for filtering Annotations for training.
        
         Only Annotations that are associated with this SavedQuery are used for
         training. When used in conjunction with
         [annotations_filter][google.cloud.aiplatform.v1.InputDataConfig.annotations_filter],
         the Annotations used for training are filtered by both
         [saved_query_id][google.cloud.aiplatform.v1.InputDataConfig.saved_query_id]
         and
         [annotations_filter][google.cloud.aiplatform.v1.InputDataConfig.annotations_filter].
        
         Only one of
         [saved_query_id][google.cloud.aiplatform.v1.InputDataConfig.saved_query_id]
         and
         [annotation_schema_uri][google.cloud.aiplatform.v1.InputDataConfig.annotation_schema_uri]
         should be specified as both of them represent the same thing: problem type.
         
        string saved_query_id = 7;
        Returns:
        This builder for chaining.
      • setSavedQueryIdBytes

        public InputDataConfig.Builder setSavedQueryIdBytes​(com.google.protobuf.ByteString value)
         Only applicable to Datasets that have SavedQueries.
        
         The ID of a SavedQuery (annotation set) under the Dataset specified by
         [dataset_id][google.cloud.aiplatform.v1.InputDataConfig.dataset_id] used
         for filtering Annotations for training.
        
         Only Annotations that are associated with this SavedQuery are used for
         training. When used in conjunction with
         [annotations_filter][google.cloud.aiplatform.v1.InputDataConfig.annotations_filter],
         the Annotations used for training are filtered by both
         [saved_query_id][google.cloud.aiplatform.v1.InputDataConfig.saved_query_id]
         and
         [annotations_filter][google.cloud.aiplatform.v1.InputDataConfig.annotations_filter].
        
         Only one of
         [saved_query_id][google.cloud.aiplatform.v1.InputDataConfig.saved_query_id]
         and
         [annotation_schema_uri][google.cloud.aiplatform.v1.InputDataConfig.annotation_schema_uri]
         should be specified as both of them represent the same thing: problem type.
         
        string saved_query_id = 7;
        Parameters:
        value - The bytes for savedQueryId to set.
        Returns:
        This builder for chaining.
      • getPersistMlUseAssignment

        public boolean getPersistMlUseAssignment()
         Whether to persist the ML use assignment to data item system labels.
         
        bool persist_ml_use_assignment = 11;
        Specified by:
        getPersistMlUseAssignment in interface InputDataConfigOrBuilder
        Returns:
        The persistMlUseAssignment.
      • setPersistMlUseAssignment

        public InputDataConfig.Builder setPersistMlUseAssignment​(boolean value)
         Whether to persist the ML use assignment to data item system labels.
         
        bool persist_ml_use_assignment = 11;
        Parameters:
        value - The persistMlUseAssignment to set.
        Returns:
        This builder for chaining.
      • clearPersistMlUseAssignment

        public InputDataConfig.Builder clearPersistMlUseAssignment()
         Whether to persist the ML use assignment to data item system labels.
         
        bool persist_ml_use_assignment = 11;
        Returns:
        This builder for chaining.
      • setUnknownFields

        public final InputDataConfig.Builder setUnknownFields​(com.google.protobuf.UnknownFieldSet unknownFields)
        Specified by:
        setUnknownFields in interface com.google.protobuf.Message.Builder
        Overrides:
        setUnknownFields in class com.google.protobuf.GeneratedMessageV3.Builder<InputDataConfig.Builder>
      • mergeUnknownFields

        public final InputDataConfig.Builder mergeUnknownFields​(com.google.protobuf.UnknownFieldSet unknownFields)
        Specified by:
        mergeUnknownFields in interface com.google.protobuf.Message.Builder
        Overrides:
        mergeUnknownFields in class com.google.protobuf.GeneratedMessageV3.Builder<InputDataConfig.Builder>